From 1c8af1e0502815c81b320ea170c20535b4715c4c Mon Sep 17 00:00:00 2001 From: Stephen Mkandawire Date: Thu, 12 Oct 2023 15:01:43 +0000 Subject: [PATCH] Update csi logging Updates csi logging and adds k8s events for edgecache --- BLOB_CSI_VERSION | 2 +- charts/index.yaml | 11 +- charts/v4.4.0/blob-csi-driver-v4.4.0.tgz | Bin 0 -> 5871 bytes charts/v4.4.0/blob-csi-driver/Chart.yaml | 5 + .../blob-csi-driver/templates/NOTES.txt | 5 + .../blob-csi-driver/templates/_helpers.tpl | 49 +++ .../templates/csi-blob-controller.yaml | 224 +++++++++++++ .../templates/csi-blob-driver.yaml | 14 + .../templates/csi-blob-node.yaml | 296 ++++++++++++++++++ .../templates/rbac-csi-blob-controller.yaml | 121 +++++++ .../templates/rbac-csi-blob-node.yaml | 44 +++ .../serviceaccount-csi-blob-controller.yaml | 17 + .../serviceaccount-csi-blob-node.yaml | 17 + charts/v4.4.0/blob-csi-driver/values.yaml | 173 ++++++++++ pkg/blob/blob.go | 1 + pkg/blob/controllerserver.go | 13 +- pkg/blob/nodeserver.go | 30 +- pkg/blobplugin/Dockerfile | 6 +- pkg/csi-common/utils.go | 134 ++++++++ pkg/edgecache/cachevolume/pvc_annotator.go | 6 +- 20 files changed, 1155 insertions(+), 13 deletions(-) create mode 100644 charts/v4.4.0/blob-csi-driver-v4.4.0.tgz create mode 100644 charts/v4.4.0/blob-csi-driver/Chart.yaml create mode 100644 charts/v4.4.0/blob-csi-driver/templates/NOTES.txt create mode 100644 charts/v4.4.0/blob-csi-driver/templates/_helpers.tpl create mode 100644 charts/v4.4.0/blob-csi-driver/templates/csi-blob-controller.yaml create mode 100644 charts/v4.4.0/blob-csi-driver/templates/csi-blob-driver.yaml create mode 100644 charts/v4.4.0/blob-csi-driver/templates/csi-blob-node.yaml create mode 100644 charts/v4.4.0/blob-csi-driver/templates/rbac-csi-blob-controller.yaml create mode 100644 charts/v4.4.0/blob-csi-driver/templates/rbac-csi-blob-node.yaml create mode 100644 charts/v4.4.0/blob-csi-driver/templates/serviceaccount-csi-blob-controller.yaml create mode 100644 
charts/v4.4.0/blob-csi-driver/templates/serviceaccount-csi-blob-node.yaml create mode 100644 charts/v4.4.0/blob-csi-driver/values.yaml diff --git a/BLOB_CSI_VERSION b/BLOB_CSI_VERSION index 4250ea042..c690692a7 100644 --- a/BLOB_CSI_VERSION +++ b/BLOB_CSI_VERSION @@ -1 +1 @@ -BLOB_CSI_VERSION=v4.3.0 +BLOB_CSI_VERSION=v4.4.0 diff --git a/charts/index.yaml b/charts/index.yaml index eec6e6a4a..e4fbd59f0 100644 --- a/charts/index.yaml +++ b/charts/index.yaml @@ -280,6 +280,15 @@ entries: urls: - https://raw.githubusercontent.com/avoltz/blob-csi-driver/staging/charts/v4.3.0/blob-csi-driver-v4.3.0.tgz version: v4.3.0 + - apiVersion: v1 + appVersion: v4.4.0 + created: "2023-10-12T19:27:00.820283000Z" + description: Azure Blob Storage CSI driver + digest: 86a1f17a630f37d818c4cb1dd69e6c1711c64c411de6f808bb7a006a40097b71 + name: blob-csi-driver + urls: + - https://raw.githubusercontent.com/avoltz/blob-csi-driver/staging/charts/v4.4.0/blob-csi-driver-v4.4.0.tgz + version: v4.4.0 - apiVersion: v1 appVersion: latest created: "2023-09-14T07:10:07.884046607Z" @@ -289,4 +298,4 @@ entries: urls: - https://raw.githubusercontent.com/avoltz/blob-csi-driver/staging/charts/latest/blob-csi-driver-v0.0.0.tgz version: v0.0.0 -generated: "2023-09-01T12:33:00.254303884Z" +generated: "2023-10-12T19:27:00.820283000Z" diff --git a/charts/v4.4.0/blob-csi-driver-v4.4.0.tgz b/charts/v4.4.0/blob-csi-driver-v4.4.0.tgz new file mode 100644 index 0000000000000000000000000000000000000000..b317b71ba88e8b20ad08d6c92865b51889999d11 GIT binary patch literal 5871 zcmVDc zVQyr3R8em|NM&qo0PH<&bK*GC`wUrWf z1G?CfSCW~q+5GpXB-!|#w-9D-A3Pr>*sboC+}&z*x8#w_MwTsz*VYBU;+vy&73f1}aJ{@*w{JO1wImLSp`1 z_H1I-0 z;^7IjDhQ3RfC2cdg*=JLAOx-CdOk|GTu z@nm5<)Nem<-9B@PJ#V4zxW{Clg8-@?+Jf{sn-OInfEN&8Lew9E5pW6;gd(BnS$^Co zCOJE>HpNHWAI zAjXl$I3NKw2PxF;5uiYTgT73GYBGdQkQPy)P;eNx&{RrawCV|_YlPJuW{bMS;z87H zM#p3laIk#lST>^)6AHYp@L3v}<#RH_5~3lU*DLydKXBcl;iiRt{jKr_S;)+3-aLGH z^A9jLAU*6sr6PdOhz*yv%>pXb_}Uz>gcg!K0Q3g^qPn8~ev3j_gblDjUx@3fKAR>E 
z8Z(aOEU2L&NHrW|>0QH+A_J!6IJw_#Gt_!@S-n3q0lDKX))XaQH77NogEZ~e&Hx+(YRKe+RDRFs4=BXV)aYQ z1a=(#pX(wy1>`eFRD9-K#!xNv^qfcI;t{yZFx+SW(->J6x(75{3I;w`+3Jl@pJ<-&~6e&eNGrBa(?Y%q45J1%A3+JQ!#}^z-MFzh=X7O zw*iSM<8oc;w;8p;m$3rq(WxsX@I%s;&jCE`%hfA>lAoS_BVsNjc$V!3LISsJP9(9h ztEdmb4ivPOH+Yn2qBvFK$$X!28Bq05GQvg^3gr-iM=m(Z2*ivdhk+Vbi5&2OI*3v9aoCNTSCEoePHfR#Z%uD zuq&rAShJ$12bb7bw2=N4&|CTuD|u)cgB0DoHvIHmOd)>cvWb#;w}nnME+fgpkSm(+ z6<;2aBKoD6_w?L!+GuUAA+gv)DyPwWA&u-r z{#rwa$77<@IW%#~`VT>wR>s3ylDWV`>#MO?=mF*}RMj&DXD-yz7Ru8SR1GOIU$s`) zs+~t7q=+8Ccnr2|p=yr}?I}2c3)QvohsxnAVSIHe)(gl~^wgR+OX~JlGf4Vrm%4<) zXXXYTe1Q3-ZG#ZM5IOBSK&2LkNXb3?_=(~fCa$thEp)V=bve%22cQbUmCHusKtP|f z#{`v8>bAy6sF2uoqXvr(#K4R2MfIq5Qah~{HER~MYsL+mwMOmOG!!5|;>TMb=sV&< zFYppAz08B^Kh4t5vHl?^N~1(h*i8F2j;#^OC^fgi1% z27sMkIFWEmZBUB#Zg@BMVWAf9K^kowN~~5SMhYs%yKJT0=$b7^O&f`D!EQWNT%guIlAw zTdA3HZ_F}5uT!{%g&-3O4)QR?6XkA&co8vv7NNSmP^%vaR4v7@(6>TgSre3MttZsl zO@nmX=nIik4YnE8kVLnw#4N<6yS4N9Pd5H1!Si+Orrx`~>kMo1QNEbQs`&r$Su+#= zJ3Tu+J;eX^aos7cSQgY*8ZZf$B@Xy5n}c3W>s4zg{IyEujv;#r_QMKfW|=Fo48&XH zyMP6##YC$T>c1^rWLY%wVlJMZP;CGjuD0-P(DSom$sn^V5t`+TDmRe1R_OV;iY)63 zmiF|Jen?&A`hPzK*9R_Y(tlwpV1@oSkIzmr`hRwM$p7uun((~4sxFF{~ zTzG0&>?yR+KkCoVm5K)IJo=bAdOD+F7*NlEI%TZko}Mh^z?e`#Rqd82xGJ*ZM>udL zs+laaR?XUHlW|gzw=>|JIDr0C-ZHh(*|GkUc*b8rRVzjY)X{%y-YVBRz-V|e+$mPW z)T5fKbzFHgk2yAhzFMW2aSaj^^Esh%jH-VV*55=m-2wA=xO_l=>iCJWQq}*dqUwEh zo6a!LW6F~-hE0?)83CApq``)LKeK2xUgFaeigY{mZfdBokzO%T+ceA?({h$g@U)Cy zH^HZRNT}@wPFZhCIJ24OGpc>sM65Lr=VLA{Hr}}Bp)+QU=IOpi5$}liarEXd7di7{ zgp2&Q!j^0DC;B~LGBv8t&lTm8*Tl50#jvOgp?ubo?1Y+>3{soNNV0!p&s_qsAe?%$ zM(zE;br(sRG2?M`SD@ne?8kYO#LJF3%qCkD;ambnH{zI1U}-zF$kC|bE$L0VV_)0m zeoI&R{9h0--?A)V`TXB(oSkO;|7PR-?EEnQ@8fzP)M=r%cI-W%vXU$a!oDx+v!hCs z|B7P{O4lM|5{m~7rw0QS#v#jG_zDVV3llui8MIYs5z0>+?qa(56H!9d5*nQ-aN&c_ zJ;wQrV)W5-Ln4}mT49V&+S8i^n`<*##A<$qi&;K14I}*v$tMP0$GTFSY{1L91NT#| zNo=*VF_&5ta7nf?F{lW%7DY7bI#^qkJXnOy4gSzNmyld!X(J1snaJYjOS+Jl0Me)v zDdf0KBKzDbm^w4f{zOTtnGP8NR5x`PsNf|QQDSRxZ3aXW{KC>(lif(h>k7xEV(r#~C`5HMY6(aTz)^y38Cm{cl+yO5By 
zuf{|R>40RJL?-P-0}oi7@JHDV!8Ndi6t|xYmiK3t4b&@lg{LTWgN*>5?N4N zka$_Sr%5k3dfn-~{(^M@wps!7Ia7<(_&!2v;o3rjfJ)*)+blJg_Ae2@j|r!u`Wy5( zc8X)3tVEJ8S+W+s<|!6^4tgnAucq~{md=#d;*zbTbqf``g{q0cHSMEvKJ@&Yd$tUB zYZbJ_6ZHICH3Q%ZSd*XK(M(Sm2w^@ENik5vUbSWy&1&wZc671ha1i3+`yVgc?LlWa z{CfeQ3)lf}L0C_;8e)x@WdRZ;sVtBeN6m&Pcnpt{W6RV|S&=Vrk)A{sz&hWHePaFY zi;L67GKf*Yg}gXwq=10VvcqVkHyozaGh{MBEmRK+25W-Z4_QIzCkh2uz2ms3F#URG zIs^7m7tCe(GwD#1Lyf9W z&B{bBOve{bB0wFV5h^df|M7a*z3=qe{o8Kuu4q~d%K{h5(x~Zwtzxo?BEn+ICT65w zd6IE+H(G!e&B8kTXqg=N1gI=%ovB|aMigVPgpR2fAj*_HlyIaAt4!UjWkQ`+wp)pT zDJ;9%40G%%tciY!7on@SEbx3ePthwuUswv3!L00GPC^$huBqsHiFrhe`Po4Hk$L8G zCYjCL7P`CcZ^K4fb_*$oAZt=CEU7Zr7!x<(a5v>3rp$FrXC5iNAwNWp{?+~V<3`mqn3FsHb2xf}Sup9Y=#>p`d8>D_fN zZ|>i9Z#qRz;XBT}Y+v>IXiO#_vHt<)0~qH4qSq3ZI&yF0oE^a`FuB|nKe zy=+JNx9$7h<;M+p9}{q$5~lStGs{LEx;A}I{0FyUtK^FzjDmzcL%zY!vd6 zoGXfb`+$r&0v?Uf7z{2rT1W%^N7SL+NVwjcCd zP&yQb!A|dUcW~SL*y-JA?eAaS{amsv1ZgMssMIx!LebHLYyT09t{$M(PV^$(xVNQa zwsEGTg@|I4w%fNNguBG&SDXpq)@|T2JIisv&MkH41!i)&CMB5FanhQp%tX~OIE>@{ zvYJ3{`48Ebm?hvpkh+a6$pazlI<#WT%p2K!V76FFDVbflf=ceeTrXT0?hM&^sIU-k zVaP6m+&)D3LR#xBv(^m)7KRY_5g7cznqum>U>O5u5gG`vfOEz#bPf0qUpH(Qjs3SC zvxr$OQ8&YALBPI{KfS=x+cBQ<`dVn>r%ag6q`mLe6NgDn64`9y!VGqTgV(A5SflUEY2nQ$L2RD#ttYtP>B^V2LVbxL)hD9?A+>kNYbK8ZF%z?#* zuD%)PE+(f(IPCf02CzYv?~^4!r0_+A+AL-{2M`5K&65%$L zgSno5m72ir)KVAFR@3zBPczJ1HBC7IArGJXwZHxYL z)nZ0Dwy%SW%Q+*`3fX1uuGyike4KW`37v%JMG!jaPAFU4?mU7W=#v;zE1D#OcfdW7 z%@A0|9CaRj4&liG1v!N9^b2YBpy#i6cYBY?E-i!!H;5NLQxs-|%K*DpytO-)zpT(D zt78k)ISsQ~rOl0Rw=z-pXGb-O8|$zd#Ooy`jNu6yR&;!Y<*W9G8jteeV=k1IIh8NE zZ032GI=MJyJw4QQ^@eI@Zz zuu2&vR07AgB(q#8qQTVd4eu^*ZtkycZm-^b8Wv_b(_^n?G3)^={a?yZ?Ckulw76r>Ap#_q|Uaw<4K4 zI;WJTqW2Kbt}abp4mln?Oc#XzHQ}d@qxQ*$XmfF zSi66y(1{7V zpu5Gm62fN^k73u*^K;#Ig9)LcUYe#TV&YYD*m2|38(EGb3A(KfEKtQm4gVf$_{}`G ztLpvqf-u=~u>jK5ic67zGXWdhQ?RspC**FSgeKi+=o-7QC>g9#aya$PSD zITWB%(8Rg;{>KlWt~&R<+jd76$QDuFcGJNcCH|V{*C_5UN4%u0|J&OpvCc*V6O~(I zi$PkDCLp77Cn2#E7X`W7qEj(T7}ZCqYR6Pa?D>|Fc!%&jt7WII7GpL_@;Ng)HbcdNJ~Pdv4N*yi_JGZfvUJ 
zlq0xXi9BrB`NlWwWa1`Ew(2bQ4s4uiUz!VATC$!5U6K!4RrX#K&#;!ZdQo^S?M1KH z*|5YMZO`IX9F$n27Z22Z{L#5>*eZ=2E@$jT+fA0()U}5rl}cTF{3I{Xb`C+3$ZekIoy1`v1LL84Xu2y=D%a(+78ln}1As?tr+MyRAv>D?L zp-Gh}qM`T3>}1d?mSt7)+ImH(L+Io+ls0pwsWl2oZNKF&82txq8EUQR$}m;hUx|Ih zO-}w@XG@Z@Het{#Z}MJZzap_%GVWzB?^R80c(%lUgPC2!MC{ngzKBBIeBLTcXvv_U=->ngVY2TK^xo2wTbdIx6|}^g3~ob=UtdIRs$U_y3xwr`hvAPL2=f zf9&O2vHsudM4)S=zojYQw~D^^#(eUSIe<(IA9;b0`jfu%pf)9}s@;uSpV*YKLlLlNKX{P>k7!=z|`E7M?dqx@bbzykgL znp5B5S~~xS(t?e&yd)E_V*WpBoMq?#w_y6{CB-^y?|Zqvn*EQx&x7SZSpLIxxDMChI$V3c{x1Lk|NsBGVP62G F001+>kh=f? literal 0 HcmV?d00001 diff --git a/charts/v4.4.0/blob-csi-driver/Chart.yaml b/charts/v4.4.0/blob-csi-driver/Chart.yaml new file mode 100644 index 000000000..4b3b02c8a --- /dev/null +++ b/charts/v4.4.0/blob-csi-driver/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: latest +description: Azure Blob Storage CSI driver +name: blob-csi-driver +version: v4.4.0 diff --git a/charts/v4.4.0/blob-csi-driver/templates/NOTES.txt b/charts/v4.4.0/blob-csi-driver/templates/NOTES.txt new file mode 100644 index 000000000..9ad135dd4 --- /dev/null +++ b/charts/v4.4.0/blob-csi-driver/templates/NOTES.txt @@ -0,0 +1,5 @@ +The Azure Blob Storage CSI driver is getting deployed to your cluster. + +To check Azure Blob Storage CSI driver pods status, please run: + + kubectl --namespace={{ .Release.Namespace }} get pods --selector="release={{ .Release.Name }}" --watch diff --git a/charts/v4.4.0/blob-csi-driver/templates/_helpers.tpl b/charts/v4.4.0/blob-csi-driver/templates/_helpers.tpl new file mode 100644 index 000000000..d99392f32 --- /dev/null +++ b/charts/v4.4.0/blob-csi-driver/templates/_helpers.tpl @@ -0,0 +1,49 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* Expand the name of the chart.*/}} +{{- define "blob.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. 
+*/}} +{{- define "blob.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Common selectors. +*/}} +{{- define "blob.selectorLabels" -}} +app.kubernetes.io/name: {{ template "blob.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} + +{{/* +Common labels. +*/}} +{{- define "blob.labels" -}} +{{- include "blob.selectorLabels" . }} +app.kubernetes.io/component: csi-driver +app.kubernetes.io/part-of: {{ template "blob.name" . }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +helm.sh/chart: {{ template "blob.chart" . }} +{{- if .Values.customLabels }} +{{ toYaml .Values.customLabels }} +{{- end }} +{{- end -}} + + +{{/* pull secrets for containers */}} +{{- define "blob.pullSecrets" -}} +{{- if .Values.imagePullSecrets }} +imagePullSecrets: +{{- range .Values.imagePullSecrets }} + - name: {{ . }} +{{- end }} +{{- end }} +{{- end -}} \ No newline at end of file diff --git a/charts/v4.4.0/blob-csi-driver/templates/csi-blob-controller.yaml b/charts/v4.4.0/blob-csi-driver/templates/csi-blob-controller.yaml new file mode 100644 index 000000000..4d2353357 --- /dev/null +++ b/charts/v4.4.0/blob-csi-driver/templates/csi-blob-controller.yaml @@ -0,0 +1,224 @@ +kind: Deployment +apiVersion: apps/v1 +metadata: + name: {{ .Values.controller.name }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ .Values.controller.name }} + {{- include "blob.labels" . | nindent 4 }} +spec: + replicas: {{ .Values.controller.replicas }} + selector: + matchLabels: + app: {{ .Values.controller.name }} + {{- include "blob.selectorLabels" . | nindent 6 }} + template: + metadata: + labels: + app: {{ .Values.controller.name }} + {{- include "blob.labels" . 
| nindent 8 }} + {{- if .Values.workloadIdentity.clientID }} + azure.workload.identity/use: "true" + {{- end }} + {{- if .Values.podLabels }} +{{- toYaml .Values.podLabels | nindent 8 }} + {{- end }} +{{- if .Values.podAnnotations }} + annotations: +{{ toYaml .Values.podAnnotations | indent 8 }} +{{- end }} + spec: +{{- with .Values.controller.affinity }} + affinity: +{{ toYaml . | indent 8 }} +{{- end }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} + hostNetwork: {{ .Values.controller.hostNetwork }} + dnsPolicy: ClusterFirstWithHostNet + serviceAccountName: {{ .Values.serviceAccount.controller }} + nodeSelector: + kubernetes.io/os: linux + {{- if .Values.controller.runOnMaster}} + node-role.kubernetes.io/master: "" + {{- end}} + {{- if .Values.controller.runOnControlPlane}} + node-role.kubernetes.io/control-plane: "" + {{- end}} +{{- with .Values.controller.nodeSelector }} +{{ toYaml . | indent 8 }} +{{- end }} + priorityClassName: {{ .Values.priorityClassName | quote }} + securityContext: + seccompProfile: + type: RuntimeDefault +{{- with .Values.controller.tolerations }} + tolerations: +{{ toYaml . 
| indent 8 }} +{{- end }} + containers: + - name: csi-provisioner +{{- if hasPrefix "/" .Values.image.csiProvisioner.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.csiProvisioner.repository }}:{{ .Values.image.csiProvisioner.tag }}" +{{- else }} + image: "{{ .Values.image.csiProvisioner.repository }}:{{ .Values.image.csiProvisioner.tag }}" +{{- end }} + args: + - "-v=2" + - "--csi-address=$(ADDRESS)" + - "--leader-election" + - "--leader-election-namespace={{ .Release.Namespace }}" + - "--timeout=120s" + - "--extra-create-metadata=true" + - "--kube-api-qps=50" + - "--kube-api-burst=100" + env: + - name: ADDRESS + value: /csi/csi.sock + imagePullPolicy: {{ .Values.image.csiProvisioner.pullPolicy }} + volumeMounts: + - mountPath: /csi + name: socket-dir + resources: {{- toYaml .Values.controller.resources.csiProvisioner | nindent 12 }} + - name: liveness-probe +{{- if hasPrefix "/" .Values.image.livenessProbe.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}" +{{- else }} + image: "{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}" +{{- end }} + args: + - --csi-address=/csi/csi.sock + - --probe-timeout=3s + - --health-port={{ .Values.controller.livenessProbe.healthPort }} + imagePullPolicy: {{ .Values.image.livenessProbe.pullPolicy }} + volumeMounts: + - name: socket-dir + mountPath: /csi + resources: {{- toYaml .Values.controller.resources.livenessProbe | nindent 12 }} + - name: blob +{{- if hasPrefix "/" .Values.image.blob.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.blob.repository }}:{{ .Values.image.blob.tag }}" +{{- else }} + image: "{{ .Values.image.blob.repository }}:{{ .Values.image.blob.tag }}" +{{- end }} + args: + - "--v={{ .Values.controller.logLevel }}" + - "--endpoint=$(CSI_ENDPOINT)" + - "--metrics-address=0.0.0.0:{{ .Values.controller.metricsPort }}" + - "--drivername={{ 
.Values.driver.name }}" + - "--custom-user-agent={{ .Values.driver.customUserAgent }}" + - "--user-agent-suffix={{ .Values.driver.userAgentSuffix }}" + - "--cloud-config-secret-name={{ .Values.controller.cloudConfigSecretName }}" + - "--cloud-config-secret-namespace={{ .Values.controller.cloudConfigSecretNamespace }}" + - "--allow-empty-cloud-config={{ .Values.controller.allowEmptyCloudConfig }}" + ports: + - containerPort: {{ .Values.controller.livenessProbe.healthPort }} + name: healthz + protocol: TCP + - containerPort: {{ .Values.controller.metricsPort }} + name: metrics + protocol: TCP + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 30 + timeoutSeconds: 10 + periodSeconds: 30 + env: + - name: AZURE_CREDENTIAL_FILE + valueFrom: + configMapKeyRef: + name: azure-cred-file + key: path + optional: true + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: KUBERNETES_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + {{- if ne .Values.driver.httpsProxy "" }} + - name: HTTPS_PROXY + value: {{ .Values.driver.httpsProxy }} + {{- end }} + {{- if ne .Values.driver.httpProxy "" }} + - name: HTTP_PROXY + value: {{ .Values.driver.httpProxy }} + {{- end }} + - name: AZURE_GO_SDK_LOG_LEVEL + value: {{ .Values.driver.azureGoSDKLogLevel }} + {{- if eq .Values.cloud "AzureStackCloud" }} + - name: AZURE_ENVIRONMENT_FILEPATH + value: /etc/kubernetes/azurestackcloud.json + {{- end }} + imagePullPolicy: {{ .Values.image.blob.pullPolicy }} + volumeMounts: + - mountPath: /csi + name: socket-dir + - mountPath: /etc/kubernetes/ + name: azure-cred + {{- if eq .Values.cloud "AzureStackCloud" }} + - name: ssl + mountPath: /etc/ssl/certs + readOnly: true + {{- end }} + {{- if eq .Values.linux.distro "fedora" }} + - name: ssl + mountPath: /etc/ssl/certs + readOnly: true + - name: ssl-pki + mountPath: /etc/pki/ca-trust/extracted + 
readOnly: true + {{- end }} + resources: {{- toYaml .Values.controller.resources.blob | nindent 12 }} + - name: csi-resizer +{{- if hasPrefix "/" .Values.image.csiResizer.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.csiResizer.repository }}:{{ .Values.image.csiResizer.tag }}" +{{- else }} + image: "{{ .Values.image.csiResizer.repository }}:{{ .Values.image.csiResizer.tag }}" +{{- end }} + args: + - "-csi-address=$(ADDRESS)" + - "-v=2" + - "-leader-election" + - "--leader-election-namespace={{ .Release.Namespace }}" + - '-handle-volume-inuse-error=false' + env: + - name: ADDRESS + value: /csi/csi.sock + imagePullPolicy: {{ .Values.image.csiResizer.pullPolicy }} + volumeMounts: + - name: socket-dir + mountPath: /csi + resources: {{- toYaml .Values.controller.resources.csiResizer | nindent 12 }} + volumes: + - name: socket-dir + emptyDir: {} + - name: azure-cred + hostPath: + path: /etc/kubernetes/ + type: DirectoryOrCreate + {{- if eq .Values.cloud "AzureStackCloud" }} + - name: ssl + hostPath: + path: /etc/ssl/certs + {{- end }} + {{- if eq .Values.linux.distro "fedora" }} + - name: ssl + hostPath: + path: /etc/ssl/certs + - name: ssl-pki + hostPath: + path: /etc/pki/ca-trust/extracted + {{- end }} + {{- if .Values.securityContext }} + securityContext: {{- toYaml .Values.securityContext | nindent 8 }} + {{- end }} diff --git a/charts/v4.4.0/blob-csi-driver/templates/csi-blob-driver.yaml b/charts/v4.4.0/blob-csi-driver/templates/csi-blob-driver.yaml new file mode 100644 index 000000000..9a6aea64a --- /dev/null +++ b/charts/v4.4.0/blob-csi-driver/templates/csi-blob-driver.yaml @@ -0,0 +1,14 @@ +--- +apiVersion: storage.k8s.io/v1 +kind: CSIDriver +metadata: + name: {{ .Values.driver.name }} + labels: + {{- include "blob.labels" . 
| nindent 4 }} +spec: + attachRequired: false + podInfoOnMount: true + fsGroupPolicy: {{ .Values.feature.fsGroupPolicy }} + volumeLifecycleModes: + - Persistent + - Ephemeral diff --git a/charts/v4.4.0/blob-csi-driver/templates/csi-blob-node.yaml b/charts/v4.4.0/blob-csi-driver/templates/csi-blob-node.yaml new file mode 100644 index 000000000..91c02dda0 --- /dev/null +++ b/charts/v4.4.0/blob-csi-driver/templates/csi-blob-node.yaml @@ -0,0 +1,296 @@ +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: {{ .Values.node.name }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ .Values.node.name }} + {{- include "blob.labels" . | nindent 4 }} +spec: + updateStrategy: + rollingUpdate: + maxUnavailable: {{ .Values.node.maxUnavailable }} + type: RollingUpdate + selector: + matchLabels: + app: {{ .Values.node.name }} + {{- include "blob.selectorLabels" . | nindent 6 }} + template: + metadata: + labels: + app: {{ .Values.node.name }} + {{- include "blob.labels" . | nindent 8 }} + {{- if .Values.workloadIdentity.clientID }} + azure.workload.identity/use: "true" + {{- end }} + {{- if .Values.podLabels }} +{{- toYaml .Values.podLabels | nindent 8 }} + {{- end }} +{{- if .Values.podAnnotations }} + annotations: +{{ toYaml .Values.podAnnotations | indent 8 }} +{{- end }} + spec: + {{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} +{{- if .Values.node.enableBlobfuseProxy }} + hostPID: true +{{- end }} + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + serviceAccountName: {{ .Values.serviceAccount.node }} + nodeSelector: + kubernetes.io/os: linux +{{- with .Values.node.nodeSelector }} +{{ toYaml . 
| indent 8 }} +{{- end }} + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: type + operator: NotIn + values: + - virtual-kubelet + {{- if .Values.node.affinity }} +{{- toYaml .Values.node.affinity | nindent 8 }} + {{- end }} + priorityClassName: {{ .Values.priorityClassName | quote }} + securityContext: + seccompProfile: + type: RuntimeDefault +{{- with .Values.node.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} +{{- end }} +{{- if .Values.node.enableBlobfuseProxy }} + initContainers: + - name: install-blobfuse-proxy +{{- if hasPrefix "/" .Values.image.blob.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.blob.repository }}:{{ .Values.image.blob.tag }}" +{{- else }} + image: "{{ .Values.image.blob.repository }}:{{ .Values.image.blob.tag }}" +{{- end }} + imagePullPolicy: IfNotPresent + command: + - "/blobfuse-proxy/init.sh" + securityContext: + privileged: true + env: + - name: DEBIAN_FRONTEND + value: "noninteractive" + - name: INSTALL_BLOBFUSE + value: "{{ .Values.node.blobfuseProxy.installBlobfuse }}" + - name: BLOBFUSE_VERSION + value: "{{ .Values.node.blobfuseProxy.blobfuseVersion }}" + - name: INSTALL_BLOBFUSE2 + value: "{{ .Values.node.blobfuseProxy.installBlobfuse2 }}" + - name: BLOBFUSE2_VERSION + value: "{{ .Values.node.blobfuseProxy.blobfuse2Version }}" + - name: SET_MAX_OPEN_FILE_NUM + value: "{{ .Values.node.blobfuseProxy.setMaxOpenFileNum }}" + - name: MAX_FILE_NUM + value: "{{ .Values.node.blobfuseProxy.maxOpenFileNum }}" + - name: DISABLE_UPDATEDB + value: "{{ .Values.node.blobfuseProxy.disableUpdateDB }}" + volumeMounts: + - name: host-usr + mountPath: /host/usr + - name: host-etc + mountPath: /host/etc +{{- end }} + containers: + - name: liveness-probe + imagePullPolicy: {{ .Values.image.livenessProbe.pullPolicy }} + volumeMounts: + - mountPath: /csi + name: socket-dir +{{- if hasPrefix "/" .Values.image.livenessProbe.repository }} + 
image: "{{ .Values.image.baseRepo }}{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}" +{{- else }} + image: "{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}" +{{- end }} + args: + - --csi-address=/csi/csi.sock + - --probe-timeout=3s + - --health-port={{ .Values.node.livenessProbe.healthPort }} + - --v=2 + resources: {{- toYaml .Values.node.resources.livenessProbe | nindent 12 }} + - name: node-driver-registrar +{{- if hasPrefix "/" .Values.image.nodeDriverRegistrar.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.nodeDriverRegistrar.repository }}:{{ .Values.image.nodeDriverRegistrar.tag }}" +{{- else }} + image: "{{ .Values.image.nodeDriverRegistrar.repository }}:{{ .Values.image.nodeDriverRegistrar.tag }}" +{{- end }} + args: + - --csi-address=$(ADDRESS) + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --v=2 + livenessProbe: + exec: + command: + - /csi-node-driver-registrar + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --mode=kubelet-registration-probe + initialDelaySeconds: 30 + timeoutSeconds: 15 + env: + - name: ADDRESS + value: /csi/csi.sock + - name: DRIVER_REG_SOCK_PATH + value: {{ .Values.linux.kubelet }}/plugins/{{ .Values.driver.name }}/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /csi + - name: registration-dir + mountPath: /registration + resources: {{- toYaml .Values.node.resources.nodeDriverRegistrar | nindent 12 }} + - name: blob +{{- if hasPrefix "/" .Values.image.blob.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.blob.repository }}:{{ .Values.image.blob.tag }}" +{{- else }} + image: "{{ .Values.image.blob.repository }}:{{ .Values.image.blob.tag }}" +{{- end }} + args: + - "--v={{ .Values.node.logLevel }}" + - "--endpoint=$(CSI_ENDPOINT)" + - "--blobfuse-proxy-endpoint=$(BLOBFUSE_PROXY_ENDPOINT)" + - "--edgecache-mount-endpoint=$(EDGECACHE_MOUNT_ENDPOINT)" + - "--enable-blobfuse-proxy={{ 
.Values.node.enableBlobfuseProxy }}" + - "--nodeid=$(KUBE_NODE_NAME)" + - "--drivername={{ .Values.driver.name }}" + - "--cloud-config-secret-name={{ .Values.node.cloudConfigSecretName }}" + - "--cloud-config-secret-namespace={{ .Values.node.cloudConfigSecretNamespace }}" + - "--custom-user-agent={{ .Values.driver.customUserAgent }}" + - "--user-agent-suffix={{ .Values.driver.userAgentSuffix }}" + - "--allow-empty-cloud-config={{ .Values.node.allowEmptyCloudConfig }}" + - "--enable-get-volume-stats={{ .Values.feature.enableGetVolumeStats }}" + - "--append-timestamp-cache-dir={{ .Values.node.appendTimeStampInCacheDir }}" + - "--mount-permissions={{ .Values.node.mountPermissions }}" + - "--allow-inline-volume-key-access-with-idenitity={{ .Values.node.allowInlineVolumeKeyAccessWithIdentity }}" + ports: + - containerPort: {{ .Values.node.livenessProbe.healthPort }} + name: healthz + protocol: TCP + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 30 + timeoutSeconds: 10 + periodSeconds: 30 + env: + - name: AZURE_CREDENTIAL_FILE + valueFrom: + configMapKeyRef: + name: azure-cred-file + key: path + optional: true + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: KUBERNETES_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + - name: EDGECACHE_MOUNT_ENDPOINT + value: unix:///csi/csi_mounts.sock + - name: BLOBFUSE_PROXY_ENDPOINT + value: unix:///csi/blobfuse-proxy.sock + {{- if ne .Values.driver.httpsProxy "" }} + - name: HTTPS_PROXY + value: {{ .Values.driver.httpsProxy }} + {{- end }} + {{- if ne .Values.driver.httpProxy "" }} + - name: HTTP_PROXY + value: {{ .Values.driver.httpProxy }} + {{- end }} + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: AZURE_GO_SDK_LOG_LEVEL + value: {{ .Values.driver.azureGoSDKLogLevel }} + {{- if eq .Values.cloud "AzureStackCloud" }} 
+ - name: AZURE_ENVIRONMENT_FILEPATH + value: /etc/kubernetes/azurestackcloud.json + {{- end }} + imagePullPolicy: {{ .Values.image.blob.pullPolicy }} + securityContext: + privileged: true + volumeMounts: + - mountPath: /csi + name: socket-dir + - mountPath: {{ .Values.linux.kubelet }}/ + mountPropagation: Bidirectional + name: mountpoint-dir + - mountPath: /etc/kubernetes/ + name: azure-cred + - mountPath: /mnt + name: blob-cache + {{- if eq .Values.cloud "AzureStackCloud" }} + - name: ssl + mountPath: /etc/ssl/certs + readOnly: true + {{- end }} + {{- if eq .Values.linux.distro "fedora" }} + - name: ssl + mountPath: /etc/ssl/certs + readOnly: true + - name: ssl-pki + mountPath: /etc/pki/ca-trust/extracted + readOnly: true + {{- end }} + resources: {{- toYaml .Values.node.resources.blob | nindent 12 }} + volumes: +{{- if .Values.node.enableBlobfuseProxy }} + - name: host-usr + hostPath: + path: /usr + - name: host-etc + hostPath: + path: /etc +{{- end }} + - hostPath: + path: {{ .Values.linux.kubelet }}/plugins/{{ .Values.driver.name }} + type: DirectoryOrCreate + name: socket-dir + - hostPath: + path: {{ .Values.linux.kubelet }}/ + type: DirectoryOrCreate + name: mountpoint-dir + - hostPath: + path: {{ .Values.linux.kubelet }}/plugins_registry/ + type: DirectoryOrCreate + name: registration-dir + - hostPath: + path: /etc/kubernetes/ + type: DirectoryOrCreate + name: azure-cred + - hostPath: + path: {{ .Values.node.blobfuseCachePath }} + name: blob-cache + {{- if eq .Values.cloud "AzureStackCloud" }} + - name: ssl + hostPath: + path: /etc/ssl/certs + {{- end }} + {{- if eq .Values.linux.distro "fedora" }} + - name: ssl + hostPath: + path: /etc/ssl/certs + - name: ssl-pki + hostPath: + path: /etc/pki/ca-trust/extracted + {{- end }} + {{- if .Values.securityContext }} + securityContext: {{- toYaml .Values.securityContext | nindent 8 }} + {{- end }} diff --git a/charts/v4.4.0/blob-csi-driver/templates/rbac-csi-blob-controller.yaml 
b/charts/v4.4.0/blob-csi-driver/templates/rbac-csi-blob-controller.yaml new file mode 100644 index 000000000..f27935671 --- /dev/null +++ b/charts/v4.4.0/blob-csi-driver/templates/rbac-csi-blob-controller.yaml @@ -0,0 +1,121 @@ +{{- if .Values.rbac.create -}} +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Values.rbac.name }}-external-provisioner-role + labels: + {{- include "blob.labels" . | nindent 4 }} +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "delete"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["get", "list", "watch", "create", "update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["csinodes"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "list", "watch", "create", "update", "patch"] + +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Values.rbac.name }}-csi-provisioner-binding + labels: + {{- include "blob.labels" . | nindent 4 }} +subjects: + - kind: ServiceAccount + name: {{ .Values.serviceAccount.controller }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: {{ .Values.rbac.name }}-external-provisioner-role + apiGroup: rbac.authorization.k8s.io + +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Values.rbac.name }}-external-resizer-role + labels: + {{- include "blob.labels" . 
| nindent 4 }} +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims/status"] + verbs: ["update", "patch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "list", "watch", "create", "update", "patch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Values.rbac.name }}-csi-resizer-role + labels: + {{- include "blob.labels" . | nindent 4 }} +subjects: + - kind: ServiceAccount + name: {{ .Values.serviceAccount.controller }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: {{ .Values.rbac.name }}-external-resizer-role + apiGroup: rbac.authorization.k8s.io + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-{{ .Values.rbac.name }}-controller-secret-role + labels: + {{- include "blob.labels" . | nindent 4 }} +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "create"] + - apiGroups: [""] + resources: ["pods"] + verbs: ["get", "watch", "list"] + - apiGroups: [""] + resources: ["events"] + verbs: ["get", "list", "watch", "create", "update", "patch"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-{{ .Values.rbac.name }}-controller-secret-binding + labels: + {{- include "blob.labels" . 
| nindent 4 }} +subjects: + - kind: ServiceAccount + name: {{ .Values.serviceAccount.controller }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: csi-{{ .Values.rbac.name }}-controller-secret-role + apiGroup: rbac.authorization.k8s.io +{{ end }} diff --git a/charts/v4.4.0/blob-csi-driver/templates/rbac-csi-blob-node.yaml b/charts/v4.4.0/blob-csi-driver/templates/rbac-csi-blob-node.yaml new file mode 100644 index 000000000..6676656cf --- /dev/null +++ b/charts/v4.4.0/blob-csi-driver/templates/rbac-csi-blob-node.yaml @@ -0,0 +1,44 @@ +{{- if .Values.rbac.create -}} +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-{{ .Values.rbac.name }}-node-secret-role + labels: + {{- include "blob.labels" . | nindent 4 }} +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get"] + + # the node plugin must apply annotations to the PVC for edgecache volumes + # it gets the PVC's through the PV's + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "update"] + - apiGroups: [""] + resources: ["pods"] + verbs: ["get", "watch", "list"] + - apiGroups: [""] + resources: ["events"] + verbs: ["get", "list", "watch", "create"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-{{ .Values.rbac.name }}-node-secret-binding + labels: + {{- include "blob.labels" . 
| nindent 4 }} +subjects: + - kind: ServiceAccount + name: {{ .Values.serviceAccount.node }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: csi-{{ .Values.rbac.name }}-node-secret-role + apiGroup: rbac.authorization.k8s.io +{{ end }} diff --git a/charts/v4.4.0/blob-csi-driver/templates/serviceaccount-csi-blob-controller.yaml b/charts/v4.4.0/blob-csi-driver/templates/serviceaccount-csi-blob-controller.yaml new file mode 100644 index 000000000..7433bccf1 --- /dev/null +++ b/charts/v4.4.0/blob-csi-driver/templates/serviceaccount-csi-blob-controller.yaml @@ -0,0 +1,17 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Values.serviceAccount.controller }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "blob.labels" . | nindent 4 }} +{{- if .Values.workloadIdentity.clientID }} + azure.workload.identity/use: "true" + annotations: + azure.workload.identity/client-id: {{ .Values.workloadIdentity.clientID }} +{{- if .Values.workloadIdentity.tenantID }} + azure.workload.identity/tenant-id: {{ .Values.workloadIdentity.tenantID }} +{{- end }} +{{- end }} +{{- end -}} diff --git a/charts/v4.4.0/blob-csi-driver/templates/serviceaccount-csi-blob-node.yaml b/charts/v4.4.0/blob-csi-driver/templates/serviceaccount-csi-blob-node.yaml new file mode 100644 index 000000000..a25090e30 --- /dev/null +++ b/charts/v4.4.0/blob-csi-driver/templates/serviceaccount-csi-blob-node.yaml @@ -0,0 +1,17 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Values.serviceAccount.node }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "blob.labels" . 
| nindent 4 }} +{{- if .Values.workloadIdentity.clientID }} + azure.workload.identity/use: "true" + annotations: + azure.workload.identity/client-id: {{ .Values.workloadIdentity.clientID }} +{{- if .Values.workloadIdentity.tenantID }} + azure.workload.identity/tenant-id: {{ .Values.workloadIdentity.tenantID }} +{{- end }} +{{- end }} +{{- end -}} diff --git a/charts/v4.4.0/blob-csi-driver/values.yaml b/charts/v4.4.0/blob-csi-driver/values.yaml new file mode 100644 index 000000000..1ff9bbfaf --- /dev/null +++ b/charts/v4.4.0/blob-csi-driver/values.yaml @@ -0,0 +1,173 @@ +image: + baseRepo: mcr.microsoft.com + blob: + repository: /k8s/csi/blob-csi + tag: latest + pullPolicy: IfNotPresent + csiProvisioner: + repository: /oss/kubernetes-csi/csi-provisioner + tag: v3.5.0 + pullPolicy: IfNotPresent + livenessProbe: + repository: /oss/kubernetes-csi/livenessprobe + tag: v2.10.0 + pullPolicy: IfNotPresent + nodeDriverRegistrar: + repository: /oss/kubernetes-csi/csi-node-driver-registrar + tag: v2.8.0 + pullPolicy: IfNotPresent + csiResizer: + repository: /oss/kubernetes-csi/csi-resizer + tag: v1.8.0 + pullPolicy: IfNotPresent + +cloud: AzurePublicCloud + +## Reference to one or more secrets to be used when pulling images +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ +imagePullSecrets: [] +# - name: myRegistryKeySecretName + +serviceAccount: + create: true # When true, service accounts will be created for you. Set to false if you want to use your own. 
+ controller: csi-blob-controller-sa # Name of Service Account to be created or used + node: csi-blob-node-sa # Name of Service Account to be created or used + +rbac: + create: true + name: blob + +## Collection of annotations to add to all the pods +podAnnotations: {} +## Collection of labels to add to all the pods +podLabels: {} +# -- Custom labels to add into metadata +customLabels: {} + # k8s-app: blob-csi-driver + +## Leverage a PriorityClass to ensure your pods survive resource shortages +## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ +priorityClassName: system-cluster-critical +## Security context give the opportunity to run container as nonroot by setting a securityContext +## by example : +## securityContext: { runAsUser: 1001 } +securityContext: {} + +controller: + name: csi-blob-controller + cloudConfigSecretName: azure-cloud-provider + cloudConfigSecretNamespace: kube-system + allowEmptyCloudConfig: true + hostNetwork: true # this setting could be disabled if controller does not depend on MSI setting + metricsPort: 29634 + livenessProbe: + healthPort: 29632 + replicas: 2 + runOnMaster: false + runOnControlPlane: false + logLevel: 5 + resources: + csiProvisioner: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + livenessProbe: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + blob: + limits: + memory: 200Mi + requests: + cpu: 10m + memory: 20Mi + csiResizer: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + affinity: {} + nodeSelector: {} + tolerations: + - key: "node-role.kubernetes.io/master" + operator: "Exists" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/controlplane" + operator: "Exists" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/control-plane" + operator: "Exists" + effect: "NoSchedule" + +node: + name: csi-blob-node + cloudConfigSecretName: azure-cloud-provider + cloudConfigSecretNamespace: kube-system + allowEmptyCloudConfig: true + 
allowInlineVolumeKeyAccessWithIdentity: false + maxUnavailable: 1 + livenessProbe: + healthPort: 29633 + logLevel: 5 + enableBlobfuseProxy: false + blobfuseProxy: + installBlobfuse: true + blobfuseVersion: "1.4.5" + installBlobfuse2: true + blobfuse2Version: "2.0.3" + setMaxOpenFileNum: true + maxOpenFileNum: "9000000" + disableUpdateDB: true + blobfuseCachePath: /mnt + appendTimeStampInCacheDir: false + mountPermissions: 0777 + resources: + livenessProbe: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + nodeDriverRegistrar: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + blob: + limits: + memory: 2100Mi + requests: + cpu: 10m + memory: 20Mi + affinity: {} + nodeSelector: {} + tolerations: + - operator: "Exists" + +feature: + fsGroupPolicy: ReadWriteOnceWithFSType + enableGetVolumeStats: false + +driver: + name: blob.csi.azure.com + customUserAgent: "" + userAgentSuffix: "OSS-helm" + azureGoSDKLogLevel: "" # available values: ""(no logs), DEBUG, INFO, WARNING, ERROR + httpsProxy: "" + httpProxy: "" + +linux: + kubelet: /var/lib/kubelet + distro: debian + +workloadIdentity: + clientID: "" + # [optional] If the AAD application or user-assigned managed identity is not in the same tenant as the cluster + # then set tenantID with the application or user-assigned managed identity tenant ID + tenantID: "" diff --git a/pkg/blob/blob.go b/pkg/blob/blob.go index be78a2159..0d3dd27a5 100644 --- a/pkg/blob/blob.go +++ b/pkg/blob/blob.go @@ -289,6 +289,7 @@ func (d *Driver) Run(endpoint, kubeconfig string, testBool bool) { klog.V(2).Infof("driver userAgent: %s", userAgent) d.cloud, err = getCloudProvider(kubeconfig, d.NodeID, d.cloudConfigSecretName, d.cloudConfigSecretNamespace, userAgent, d.allowEmptyCloudConfig, d.kubeAPIQPS, d.kubeAPIBurst) if err != nil { + csicommon.SendKubeEvent(v1.EventTypeWarning, csicommon.FailedToInitializeDriver, csicommon.CSIEventSourceStr, fmt.Sprintf("failed to get Azure Cloud Provider, error: %v", err)) 
klog.Fatalf("failed to get Azure Cloud Provider, error: %v", err) } klog.V(2).Infof("cloud: %s, location: %s, rg: %s, VnetName: %s, VnetResourceGroup: %s, SubnetName: %s", d.cloud.Cloud, d.cloud.Location, d.cloud.ResourceGroup, d.cloud.VnetName, d.cloud.VnetResourceGroup, d.cloud.SubnetName) diff --git a/pkg/blob/controllerserver.go b/pkg/blob/controllerserver.go index eb7beabd6..78a8cc5e9 100644 --- a/pkg/blob/controllerserver.go +++ b/pkg/blob/controllerserver.go @@ -35,10 +35,12 @@ import ( azstorage "github.com/Azure/azure-sdk-for-go/storage" "github.com/container-storage-interface/spec/lib/go/csi" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/klog/v2" "k8s.io/utils/pointer" + csicommon "sigs.k8s.io/blob-csi-driver/pkg/csi-common" "sigs.k8s.io/blob-csi-driver/pkg/util" azcache "sigs.k8s.io/cloud-provider-azure/pkg/cache" "sigs.k8s.io/cloud-provider-azure/pkg/metrics" @@ -427,6 +429,9 @@ func (d *Driver) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) } } else { klog.V(2).Infof("begin to create container(%s) on account(%s) type(%s) subsID(%s) rg(%s) location(%s) size(%d)", validContainerName, accountName, storageAccountType, subsID, resourceGroup, location, requestGiB) + csicommon.SendKubeEvent(v1.EventTypeNormal, csicommon.CreatingBlobContainer, csicommon.CSIEventSourceStr, + fmt.Sprintf("Controller CreateVolume: Creating blob container %s in %q storage account", validContainerName, accountName)) + if err := d.CreateBlobContainer(ctx, subsID, resourceGroup, accountName, validContainerName, secrets); err != nil { return nil, status.Errorf(codes.Internal, "failed to create container(%s) on account(%s) type(%s) rg(%s) location(%s) size(%d), error: %v", validContainerName, accountName, storageAccountType, resourceGroup, location, requestGiB, err) } @@ -455,7 +460,9 @@ func (d *Driver) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) uuid = volName } volumeID = fmt.Sprintf(volumeIDTemplate, resourceGroup, 
accountName, validContainerName, uuid, secretNamespace, subsID) - klog.V(2).Infof("create container %s on storage account %s successfully", validContainerName, accountName) + klog.V(2).Infof("created container %s on storage account %s successfully", validContainerName, accountName) + csicommon.SendKubeEvent(v1.EventTypeNormal, csicommon.CreatedBlobContainer, csicommon.CSIEventSourceStr, + fmt.Sprintf("Controller CreateVolume: Created blob container %s in %q storage account", validContainerName, accountName)) if useDataPlaneAPI { d.dataPlaneAPIVolCache.Set(volumeID, "") @@ -519,12 +526,16 @@ func (d *Driver) DeleteVolume(ctx context.Context, req *csi.DeleteVolumeRequest) resourceGroupName = d.cloud.ResourceGroup } klog.V(2).Infof("deleting container(%s) rg(%s) account(%s) volumeID(%s)", containerName, resourceGroupName, accountName, volumeID) + csicommon.SendKubeEvent(v1.EventTypeNormal, csicommon.DeletingBlobContainer, csicommon.CSIEventSourceStr, + fmt.Sprintf("Controller DeleteVolume: Deleting container %s from %q storage account", containerName, accountName)) if err := d.DeleteBlobContainer(ctx, subsID, resourceGroupName, accountName, containerName, secrets); err != nil { return nil, status.Errorf(codes.Internal, "failed to delete container(%s) under rg(%s) account(%s) volumeID(%s), error: %v", containerName, resourceGroupName, accountName, volumeID, err) } isOperationSucceeded = true klog.V(2).Infof("container(%s) under rg(%s) account(%s) volumeID(%s) is deleted successfully", containerName, resourceGroupName, accountName, volumeID) + csicommon.SendKubeEvent(v1.EventTypeNormal, csicommon.DeletedBlobContainer, csicommon.CSIEventSourceStr, + fmt.Sprintf("Controller DeleteVolume: Deleted container %s from %q storage account", containerName, accountName)) return &csi.DeleteVolumeResponse{}, nil } diff --git a/pkg/blob/nodeserver.go b/pkg/blob/nodeserver.go index 668ea5681..ddd8ce441 100644 --- a/pkg/blob/nodeserver.go +++ b/pkg/blob/nodeserver.go @@ -48,6 +48,7 @@ 
import ( "golang.org/x/net/context" "google.golang.org/grpc" mount_azure_blob "sigs.k8s.io/blob-csi-driver/pkg/blobfuse-proxy/pb" + csicommon "sigs.k8s.io/blob-csi-driver/pkg/csi-common" ) const ( @@ -146,6 +147,8 @@ func (d *Driver) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolu } klog.V(2).Infof("NodePublishVolume: volume %s mounting %s at %s with mountOptions: %v", volumeID, source, target, mountOptions) + csicommon.SendKubeEvent(v1.EventTypeNormal, csicommon.NodePublishingVolume, csicommon.CSIEventSourceStr, + fmt.Sprintf("NodePublishVolume: Mounting volume %s", volumeID)) if d.enableBlobMockMount { klog.Warningf("NodePublishVolume: mock mount on volumeID(%s), this is only for TESTING!!!", volumeID) if err := blobcsiutil.MakeDir(target, os.FileMode(mountPermissions)); err != nil { @@ -162,7 +165,8 @@ func (d *Driver) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolu return nil, status.Errorf(codes.Internal, "Could not mount %q at %q: %v", source, target, err) } klog.V(2).Infof("NodePublishVolume: volume %s mount %s at %s successfully", volumeID, source, target) - + csicommon.SendKubeEvent(v1.EventTypeNormal, csicommon.NodePublishedVolume, csicommon.CSIEventSourceStr, + fmt.Sprintf("NodePublishVolume: Mounted volume %s", volumeID)) return &csi.NodePublishVolumeResponse{}, nil } @@ -226,11 +230,15 @@ func (d *Driver) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpublish } klog.V(2).Infof("NodeUnpublishVolume: unmounting volume %s on %s", volumeID, targetPath) + csicommon.SendKubeEvent(v1.EventTypeNormal, csicommon.NodeUnPublishingVolume, csicommon.CSIEventSourceStr, + fmt.Sprintf("NodeUnpublishVolume: Unmounting volume %s", volumeID)) err := mount.CleanupMountPoint(targetPath, d.mounter, true /*extensiveMountPointCheck*/) if err != nil { return nil, status.Errorf(codes.Internal, "failed to unmount target %q: %v", targetPath, err) } klog.V(2).Infof("NodeUnpublishVolume: unmount volume %s on %s successfully", volumeID, 
targetPath) + csicommon.SendKubeEvent(v1.EventTypeNormal, csicommon.NodeUnPublishedVolume, csicommon.CSIEventSourceStr, + fmt.Sprintf("NodeUnpublishVolume: Unmounted volume %s", volumeID)) return &csi.NodeUnpublishVolumeResponse{}, nil } @@ -352,8 +360,8 @@ func (d *Driver) NodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeRe return nil, err } - klog.V(2).Infof("edgecache will be used for volume %s", volumeID) - klog.V(3).Infof("edgecache attrib %v", attrib) + klog.V(2).Infof("NodeStageVolume: edgecache will be used for volume %s", volumeID) + klog.V(3).Infof("NodeStageVolume: edgecache attrib %v", attrib) pvName, exists := attrib[pvNameKey] var pv *v1.PersistentVolume var err error @@ -373,12 +381,19 @@ func (d *Driver) NodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeRe if err == cv.ErrVolumeAlreadyBeingProvisioned { klog.V(2).Infof("NodeStageVolume: volume has already been provisioned") } else if err != nil { + csicommon.SendKubeEvent(v1.EventTypeWarning, csicommon.FailedToProvisionVolume, csicommon.CSIEventSourceStr, + fmt.Sprintf("NodeStageVolume: failed to provision volume. 
error: %v", err)) return nil, err } - + csicommon.SendKubeEvent(v1.EventTypeNormal, csicommon.NodeStagingVolume, csicommon.CSIEventSourceStr, + fmt.Sprintf("NodeStageVolume: Mounting volume %s", volumeID)) + klog.V(2).Infof("NodeStageVolume: Mounting volume(%s) on %s", volumeID, targetPath) if err = d.edgeCacheManager.MountVolume(accountName, containerName, storageEndpointSuffix, targetPath); err != nil { return nil, err } + csicommon.SendKubeEvent(v1.EventTypeNormal, csicommon.NodeStagedVolume, csicommon.CSIEventSourceStr, + fmt.Sprintf("NodeStageVolume: Mounted volume %s", volumeID)) + klog.V(2).Infof("NodeStageVolume: Mounted volume(%s) on %s", volumeID, targetPath) return &csi.NodeStageVolumeResponse{}, nil } @@ -531,6 +546,8 @@ func (d *Driver) NodeUnstageVolume(ctx context.Context, req *csi.NodeUnstageVolu } if !isNotEdgeCacheVolume { // This is an edgecache mount path so unmount it and clean it up + csicommon.SendKubeEvent(v1.EventTypeNormal, csicommon.NodeUnStagingVolume, csicommon.CSIEventSourceStr, + fmt.Sprintf("NodeUnstageVolume: Unmounting volume %s", volumeID)) if err := d.edgeCacheManager.UnmountVolume(volumeID, edgeCacheTargetPath); err != nil { return nil, err } @@ -544,8 +561,9 @@ func (d *Driver) NodeUnstageVolume(ctx context.Context, req *csi.NodeUnstageVolu if err != nil { return nil, status.Errorf(codes.Internal, "failed to unmount staging target %q: %v", stagingTargetPath, err) } - klog.V(2).Infof("NodeUnstageVolume: volume %s unmount on %s successfully", volumeID, stagingTargetPath) - + csicommon.SendKubeEvent(v1.EventTypeNormal, csicommon.NodeUnStagedVolume, csicommon.CSIEventSourceStr, + fmt.Sprintf("NodeUnstageVolume: Unmounted volume %s", volumeID)) + klog.V(2).Infof("NodeUnstageVolume: Unmounted volume(%s) TargetPath(%s)", volumeID, stagingTargetPath) isOperationSucceeded = true return &csi.NodeUnstageVolumeResponse{}, nil } diff --git a/pkg/blobplugin/Dockerfile b/pkg/blobplugin/Dockerfile index d579f0b55..9f572f968 100644 --- 
a/pkg/blobplugin/Dockerfile +++ b/pkg/blobplugin/Dockerfile @@ -28,10 +28,10 @@ RUN chmod +x /blobfuse-proxy/init.sh && \ chmod +x /blobfuse-proxy/blobfuse-proxy.service && \ chmod +x /blobfuse-proxy/blobfuse-proxy -# Currently no CBL-Mariner image with fix for "curl" CVE-2023-38545/CVE-2023-38546. -# So, temporarily do update here. Remove "curl" when image is updated. +# Currently no CBL-Mariner image with fix for "curl"/"zlib" CVE-2023-38545/CVE-2023-38546/CVE-2023-45853. +# So, temporarily do update here. Remove "curl"/"zlib" when image is updated. RUN tdnf updateinfo && \ - tdnf install -y util-linux e2fsprogs nfs-utils quota-rpc rpcbind blobfuse2 fuse3 libcap-ng libcap ca-certificates curl && \ + tdnf install -y util-linux e2fsprogs nfs-utils quota-rpc rpcbind blobfuse2 fuse3 libcap-ng libcap ca-certificates curl zlib && \ tdnf clean all LABEL maintainers="andyzhangx" diff --git a/pkg/csi-common/utils.go b/pkg/csi-common/utils.go index b85531cfe..3b7a437a8 100644 --- a/pkg/csi-common/utils.go +++ b/pkg/csi-common/utils.go @@ -18,15 +18,66 @@ package csicommon import ( "fmt" + "os" + "path/filepath" "strings" + "sync" "github.com/container-storage-interface/spec/lib/go/csi" "github.com/kubernetes-csi/csi-lib-utils/protosanitizer" "golang.org/x/net/context" "google.golang.org/grpc" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes" + typedv1core "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/client-go/tools/record" + "k8s.io/client-go/util/homedir" "k8s.io/klog/v2" ) +const ( + NameSpaceEnvVar = "KUBERNETES_NAMESPACE" + PodNameEnvVar = "POD_NAME" + CSIEventSourceStr = "blob-csi-driver" +) + +const ( + // Driver "Normal" event Reason list + NodeStagingVolume = "NodeStagingVolume" + NodeStagedVolume = "NodeStagedVolume" + NodeUnStagingVolume = "NodeUnStagingVolume" + NodeUnStagedVolume = 
"NodeUnStagedVolume" + NodePublishingVolume = "NodePublishingVolume" + NodePublishedVolume = "NodePublishedVolume" + NodeUnPublishingVolume = "NodeUnPublishingVolume" + NodeUnPublishedVolume = "NodeUnPublishedVolume" + CreatingBlobContainer = "CreatingBlobContainer" + CreatedBlobContainer = "CreatedBlobContainer" + DeletingBlobContainer = "DeletingBlobContainer" + DeletedBlobContainer = "DeletedBlobContainer" +) + +const ( + // Driver "Warning" event Reason list + FailedToInitializeDriver = "Failed" + FailedToProvisionVolume = "Failed" + FailedAuthentication = "FailedAuthentication" + InvalidAuthentication = "InvalidAuthentication" +) + +// Event correlation is done on the client side: need to use a global variable for the +// event broadcaster(eventBroadcaster) +// https://pkg.go.dev/k8s.io/client-go/tools/record#NewEventCorrelator +// https://pkg.go.dev/k8s.io/client-go/tools/record#EventCorrelator +var ( + eventBroadcaster record.EventBroadcaster = nil // revive:disable:var-declaration + eventBroadcasterInitLock sync.Mutex +) + func ParseEndpoint(ep string) (string, string, error) { if strings.HasPrefix(strings.ToLower(ep), "unix://") || strings.HasPrefix(strings.ToLower(ep), "tcp://") { s := strings.SplitN(ep, "://", 2) @@ -83,3 +134,86 @@ func logGRPC(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, h } return resp, err } + +// Gets a Kubernetes client set. +func GetKubeClient(inCluster bool) (*kubernetes.Clientset, error) { + config, err := GetKubeConfig(inCluster) + if err != nil { + return nil, err + } + return kubernetes.NewForConfig(config) +} + +// Gets a Kubernetes config. +func GetKubeConfig(inCluster bool) (*rest.Config, error) { + if inCluster { + return getInClusterKubeConfig() + } + + return getLocalKubeConfig() +} + +// Gets the Kubernetes config for local use. 
+func getLocalKubeConfig() (*rest.Config, error) {
+	kubeconfig := filepath.Join(homedir.HomeDir(), ".kube", "config")
+	config, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
+	if err != nil {
+		return nil, err
+	}
+	return config, nil
+}
+
+// Gets the Kubernetes config for in-cluster use.
+func getInClusterKubeConfig() (*rest.Config, error) {
+	var config *rest.Config
+	var err error
+	if config, err = rest.InClusterConfig(); err != nil {
+		return nil, err
+	}
+	return config, nil
+}
+
+func SendKubeEvent(eventType string, reasonCode string, eventSource string, messageStr string) {
+	var err error
+	client, err := GetKubeClient(true)
+	if err != nil {
+		klog.Error(err)
+		return
+	}
+
+	nameSpace := os.Getenv(NameSpaceEnvVar)
+	if nameSpace == "" {
+		klog.Errorf("%s environment variable not set", NameSpaceEnvVar)
+		return
+	}
+
+	podName := os.Getenv(PodNameEnvVar)
+	if podName == "" {
+		klog.Errorf("%s environment variable not set", PodNameEnvVar)
+		return
+	}
+
+	scheme := runtime.NewScheme()
+	if err := v1.AddToScheme(scheme); err != nil {
+		klog.Error(err)
+		return
+	}
+	eventBroadcasterInitLock.Lock()
+	if eventBroadcaster == nil {
+		// The nil check must happen while holding the lock; an unlocked
+		// pre-check would race with concurrent initializers.
+		eventBroadcaster = record.NewBroadcaster() // https://pkg.go.dev/k8s.io/client-go/tools/record#EventBroadcaster
+		eventBroadcaster.StartLogging(klog.Infof)
+		eventBroadcaster.StartRecordingToSink(&typedv1core.EventSinkImpl{Interface: client.CoreV1().Events(nameSpace)})
+	}
+	eventBroadcasterInitLock.Unlock()
+
+	pod, err := client.CoreV1().Pods(nameSpace).Get(context.TODO(), podName, metav1.GetOptions{})
+	if err != nil {
+		klog.Error(err)
+		return
+	}
+
+	eventRecorder := eventBroadcaster.NewRecorder(scheme, v1.EventSource{Component: eventSource})
+	eventRecorder.Event(pod, eventType, reasonCode, messageStr)
+}
diff --git a/pkg/edgecache/cachevolume/pvc_annotator.go b/pkg/edgecache/cachevolume/pvc_annotator.go
index 
a5d8be6a2..0003e0ffd 100644 --- a/pkg/edgecache/cachevolume/pvc_annotator.go +++ b/pkg/edgecache/cachevolume/pvc_annotator.go @@ -24,6 +24,7 @@ import ( clientset "k8s.io/client-go/kubernetes" "k8s.io/klog/v2" "k8s.io/utils/strings/slices" + csicommon "sigs.k8s.io/blob-csi-driver/pkg/csi-common" blobcsiutil "sigs.k8s.io/blob-csi-driver/pkg/util" "sigs.k8s.io/cloud-provider-azure/pkg/provider/config" @@ -98,6 +99,7 @@ func (c *PVCAnnotator) buildAnnotations(pv *v1.PersistentVolume, cfg config.Azur if providedAuth.authType == "WorkloadIdentity" && !cfg.UseFederatedWorkloadIdentityExtension { err := fmt.Errorf("workload identity was requested by the csi driver didn't initialize with the workload identity env vars") klog.Error(err) + csicommon.SendKubeEvent(v1.EventTypeWarning, csicommon.FailedAuthentication, csicommon.CSIEventSourceStr, err.Error()) return nil, err } else if providedAuth.authType == "AccountKey" { @@ -113,6 +115,7 @@ func (c *PVCAnnotator) buildAnnotations(pv *v1.PersistentVolume, cfg config.Azur if !secretNameOk || !secretNamespaceOk { // if keyName doesn't exist in the PV annotations err := fmt.Errorf("failed to discover storage account key secret; name: '%s' ns: '%s'", secretName, secretNamespace) klog.Error(err) + csicommon.SendKubeEvent(v1.EventTypeWarning, csicommon.FailedAuthentication, csicommon.CSIEventSourceStr, err.Error()) return nil, err } } @@ -152,6 +155,7 @@ func (c *PVCAnnotator) SendProvisionVolume(pv *v1.PersistentVolume, cloudConfig if valid := c.requestAuthIsValid(providedAuth.authType); !valid { err := fmt.Errorf("requested storage auth %s is not a member of valid auths %+v", providedAuth.authType, validStorageAuthentications) klog.Error(err) + csicommon.SendKubeEvent(v1.EventTypeWarning, csicommon.InvalidAuthentication, csicommon.CSIEventSourceStr, err.Error()) return err } @@ -170,6 +174,6 @@ func (c *PVCAnnotator) SendProvisionVolume(pv *v1.PersistentVolume, cloudConfig if err != nil { return err } - + 
klog.V(2).Infof("Successfully verified authentication for PV '%s' and added annotations.", pv.Name) return nil }