diff --git a/.autocorrectrc b/.autocorrectrc index 1913051e3c..8ae3d2536f 100644 --- a/.autocorrectrc +++ b/.autocorrectrc @@ -20,5 +20,6 @@ spellcheck: - JucieFS = JuiceFS - JueicFS = JuiceFS - JuiecFS = JuiceFS + - JuiceSF = JuiceFS - Kuberenetes = Kubernetes - mountpoint = mount point diff --git a/.github/scripts/e2e-test.py b/.github/scripts/e2e-test.py index 3b1ffd6799..cebaa3f6f4 100644 --- a/.github/scripts/e2e-test.py +++ b/.github/scripts/e2e-test.py @@ -89,11 +89,11 @@ test_dynamic_expand() test_multi_pvc() test_mountpod_recreated() + test_config() + test_recreate_mountpod_reload_config() test_secret_has_owner_reference() if without_kubelet: test_pod_resource_err() - test_config() - test_recreate_mountpod_reload_config() elif test_mode == "pod-mount-share": if not IS_CE: @@ -144,10 +144,10 @@ test_quota_using_storage_rw() test_dynamic_expand() test_multi_pvc() - if without_kubelet: - test_pod_resource_err() test_config() test_recreate_mountpod_reload_config() + if without_kubelet: + test_pod_resource_err() elif test_mode == "webhook": test_deployment_use_pv_rw() diff --git a/.github/scripts/k8s-deps.sh b/.github/scripts/k8s-deps.sh index 9f20fb3b3d..746972f5a9 100755 --- a/.github/scripts/k8s-deps.sh +++ b/.github/scripts/k8s-deps.sh @@ -14,7 +14,7 @@ function die() { function install_deps() { sudo apt-get update && sudo apt-get install -y snapd curl netcat-openbsd bc dnsutils redis-tools librados2 python3 - sudo apt install -y python3-kubernetes + sudo pip install kubernetes==18.20.0 curl -fsSL -o /tmp/kustomize.tar.gz "$KUSTOMIZE_URL" \ && tar -xf /tmp/kustomize.tar.gz -C /usr/local/bin \ && chmod a+x /usr/local/bin/kustomize \ diff --git a/.github/scripts/test_case.py b/.github/scripts/test_case.py index 54ea4da05b..a7db83f8bd 100644 --- a/.github/scripts/test_case.py +++ b/.github/scripts/test_case.py @@ -24,7 +24,7 @@ from model import PVC, PV, Pod, StorageClass, Deployment, Job, Secret from util import check_mount_point, wait_dir_empty, wait_dir_not_empty, \ get_only_mount_pod_name, get_mount_pods, check_pod_ready, check_mount_pod_refs, gen_random_string, get_vol_uuid, \ - get_voldel_job, check_quota, is_quota_supported, update_config, wait_get_only_mount_pod_name + get_voldel_job, check_quota, is_quota_supported, update_config def test_deployment_using_storage_rw(): @@ -691,15 +691,13 @@ def test_dynamic_delete_pod(): is_ready = False for i in range(0, 60): try: - new_mount_pod = Pod(name=get_only_mount_pod_name(unique_id), deployment_name="", replicas=1, namespace=KUBE_SYSTEM) - is_ready = new_mount_pod.is_ready() + is_ready = mount_pod.is_ready() if is_ready: break time.sleep(5) except Exception as e: LOG.info(e) - time.sleep(5) - continue + raise e if not is_ready: raise Exception("Mount pod {} didn't recovery within 5 min.".format(mount_pod.name)) @@ -771,15 +769,13 @@ def test_static_delete_pod(): is_ready = False for i in range(0, 60): try: - new_mount_pod = Pod(name=get_only_mount_pod_name(volume_id), deployment_name="", replicas=1, namespace=KUBE_SYSTEM) - is_ready = new_mount_pod.is_ready() + is_ready = mount_pod.is_ready() if is_ready: break time.sleep(5) except Exception as e: LOG.info(e) - time.sleep(5) - continue + raise e if not is_ready: raise Exception("Mount pod {} didn't recovery within 5 min.".format(mount_pod.name)) @@ -2602,20 +2598,10 @@ def test_mountpod_recreated(): # wait for mountpod recreated LOG.info("Wait for mountpod recreated..") - is_ready = False for i in range(0, 60): - try: - new_mount_pod = Pod(name=get_only_mount_pod_name(volume_id), 
deployment_name="", replicas=1, namespace=KUBE_SYSTEM) - is_ready = new_mount_pod.is_ready() - if is_ready: - break - time.sleep(5) - except Exception as e: - LOG.info(e) - time.sleep(5) - continue - if not is_ready: - raise Exception("Mount pod {} didn't recovery within 5 min.".format(mount_pod.name)) + if mount_pod.watch_for_success(): + break + time.sleep(5) # check mount point LOG.info("Check mount point..") @@ -2699,7 +2685,6 @@ def test_config(): ["kubectl", "annotate", "pods", "--overwrite", "-n", KUBE_SYSTEM, "-l", "app=juicefs-csi-node", "updatedAt=" + str(int(time.time()))]) - time.sleep(2) # deploy pvc pvc1 = PVC(name="pvc-config-without-labels", access_mode="ReadWriteMany", storage_name=STORAGECLASS_NAME, pv="") LOG.info("Deploy pvc {}".format(pvc1.name)) @@ -2878,8 +2863,6 @@ def test_recreate_mountpod_reload_config(): subprocess.check_call( ["kubectl", "annotate", "pods", "--overwrite", "-n", KUBE_SYSTEM, "-l", "app=juicefs-csi-node", "updatedAt=" + str(int(time.time()))]) - # sleep 2s to wait config update - time.sleep(2) LOG.info("Start to delete mountpod..") mount_pod = Pod(name=get_only_mount_pod_name(volume_id), deployment_name="", replicas=1, namespace=KUBE_SYSTEM) @@ -2887,19 +2870,11 @@ def test_recreate_mountpod_reload_config(): # wait for mountpod recreated LOG.info("Wait for mountpod recreated..") - - result = pod.watch_for_success() - if not result: - if MOUNT_MODE == "webhook": - pods = client.CoreV1Api().list_namespaced_pod( - namespace="default", - label_selector="deployment={}".format(deployment.name) - ) - for po in pods.items: - pod_name = po.metadata.name - if not check_pod_ready(po): - subprocess.check_call(["kubectl", "get", "po", pod_name, "-o", "yaml", "-n", "default"]) - raise Exception("Pods of deployment {} are not ready within 10 min.".format(deployment.name)) + time.sleep(20) + for i in range(0, 60): + if mount_pod.watch_for_success(): + break + time.sleep(5) # check mount point LOG.info("Check mount point..") @@ -2907,7 +2882,7 @@ def test_recreate_mountpod_reload_config(): if not result: raise Exception("mount Point of /jfs/{}/out.txt are not ready within 5 min.".format(volume_id)) - mount_pod = Pod(name=wait_get_only_mount_pod_name(volume_id), deployment_name="", replicas=1, namespace=KUBE_SYSTEM) + mount_pod = Pod(name=get_only_mount_pod_name(volume_id), deployment_name="", replicas=1, namespace=KUBE_SYSTEM) if mount_pod.get_metadata().labels.get("apply") != "updated_config": raise Exception("mountpod config labels not set") if mount_pod.get_metadata().labels.get("volume_id") != volume_id: diff --git a/.github/scripts/util.py b/.github/scripts/util.py index 1c9862438e..0d06d05504 100644 --- a/.github/scripts/util.py +++ b/.github/scripts/util.py @@ -117,7 +117,7 @@ def check_mount_point(check_path): def check_quota(name, expected): output = "" - for i in range(0, 30): + for i in range(0, 10): process = subprocess.run([ "kubectl", "exec", name, "-c", "app", "-n", "default", "-t", "--", "df", "-h"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) @@ -184,15 +184,6 @@ def get_only_mount_pod_name(volume_id): return running_pods[0].metadata.name -def wait_get_only_mount_pod_name(volume_id, timeout=60): - for i in range(0, timeout): - try: - return get_only_mount_pod_name(volume_id) - except Exception as e: - time.sleep(1) - continue - - def get_mount_pods(volume_id): pods = client.CoreV1Api().list_namespaced_pod( namespace=KUBE_SYSTEM, diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml 
index 5d1c9c9483..f6438bf899 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -16,6 +16,10 @@ on: branches: [ master ] paths: - '**.go' + - '!docs/**' + - '!**.md' + - '!.autocorrectrc' + - '!.markdownlint-cli2.jsonc' pull_request: # The branches below must be a subset of the branches above branches: [ master ] diff --git a/.github/workflows/dashboard-ci.yaml b/.github/workflows/dashboard-ci.yaml index 5400a1271c..8027a7b0a8 100644 --- a/.github/workflows/dashboard-ci.yaml +++ b/.github/workflows/dashboard-ci.yaml @@ -11,6 +11,10 @@ on: - 'dashboard-ui-v2/**' - 'cmd/dashboard/**' - 'pkg/dashboard/**' + - '!docs/**' + - '!**.md' + - '!.autocorrectrc' + - '!.markdownlint-cli2.jsonc' pull_request: branches: - master @@ -21,6 +25,10 @@ on: - 'dashboard-ui-v2/**' - 'cmd/dashboard/**' - 'pkg/dashboard/**' + - '!docs/**' + - '!**.md' + - '!.autocorrectrc' + - '!.markdownlint-cli2.jsonc' jobs: build: diff --git a/.github/workflows/go.yaml b/.github/workflows/go.yaml index ed0fb19bb4..4e21ccbb83 100644 --- a/.github/workflows/go.yaml +++ b/.github/workflows/go.yaml @@ -12,6 +12,10 @@ on: - '**.sh' - '**.mod' - '**.sum' + - '!docs/**' + - '!**.md' + - '!.autocorrectrc' + - '!.markdownlint-cli2.jsonc' pull_request: branches: - master @@ -23,6 +27,10 @@ on: - '**.sh' - '**.mod' - '**.sum' + - '!docs/**' + - '!**.md' + - '!.autocorrectrc' + - '!.markdownlint-cli2.jsonc' workflow_dispatch: inputs: debug: @@ -75,7 +83,7 @@ jobs: value=`printf '%s\n' "${testmode[@]}" | jq -R . | jq -cs .` echo "value: $value" echo "matrix=$value" >> $GITHUB_OUTPUT - + wtestmode=("pod" "pod-mount-share" "pod-provisioner") value=`printf '%s\n' "${wtestmode[@]}" | jq -R . | jq -cs .` echo "value without kubelet: $value" diff --git a/.markdownlint-cli2.jsonc b/.markdownlint-cli2.jsonc index f8e1e0d711..8a22dfd1ab 100644 --- a/.markdownlint-cli2.jsonc +++ b/.markdownlint-cli2.jsonc @@ -15,7 +15,7 @@ }, "line-length": false, "no-duplicate-heading": { - "allow_different_nesting": true + "siblings_only": true }, "no-inline-html": { "allowed_elements": [ diff --git a/cmd/app/mount_manager.go b/cmd/app/mount_manager.go index 579b40064b..7a501da610 100644 --- a/cmd/app/mount_manager.go +++ b/cmd/app/mount_manager.go @@ -31,7 +31,6 @@ import ( ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/cache" - "github.com/juicedata/juicefs-csi-driver/pkg/common" "github.com/juicedata/juicefs-csi-driver/pkg/config" mountctrl "github.com/juicedata/juicefs-csi-driver/pkg/controller" "github.com/juicedata/juicefs-csi-driver/pkg/k8sclient" @@ -72,10 +71,10 @@ func NewMountManager( Scheme: scheme, SelectorsByObject: cache.SelectorsByObject{ &corev1.Pod{}: { - Label: labels.SelectorFromSet(labels.Set{common.PodTypeKey: common.PodTypeValue}), + Label: labels.SelectorFromSet(labels.Set{config.PodTypeKey: config.PodTypeValue}), }, &batchv1.Job{}: { - Label: labels.SelectorFromSet(labels.Set{common.PodTypeKey: common.JobTypeValue}), + Label: labels.SelectorFromSet(labels.Set{config.PodTypeKey: config.JobTypeValue}), }, }, }), diff --git a/cmd/app/pod_manager.go b/cmd/app/pod_manager.go index 381d408c24..0ef4bb8738 100644 --- a/cmd/app/pod_manager.go +++ b/cmd/app/pod_manager.go @@ -26,7 +26,7 @@ import ( ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/cache" - "github.com/juicedata/juicefs-csi-driver/pkg/common" + "github.com/juicedata/juicefs-csi-driver/pkg/config" mountctrl "github.com/juicedata/juicefs-csi-driver/pkg/controller" 
"github.com/juicedata/juicefs-csi-driver/pkg/k8sclient" ) @@ -55,7 +55,7 @@ func NewPodManager() (*PodManager, error) { Scheme: scheme, SelectorsByObject: cache.SelectorsByObject{ &corev1.Pod{}: { - Label: labels.SelectorFromSet(labels.Set{common.PodTypeKey: common.PodTypeValue}), + Label: labels.SelectorFromSet(labels.Set{config.PodTypeKey: config.PodTypeValue}), }, }, }), diff --git a/cmd/app/webhook.go b/cmd/app/webhook.go index d033fac025..d61ac2e46b 100644 --- a/cmd/app/webhook.go +++ b/cmd/app/webhook.go @@ -26,7 +26,6 @@ import ( ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/cache" - "github.com/juicedata/juicefs-csi-driver/pkg/common" "github.com/juicedata/juicefs-csi-driver/pkg/config" mountctrl "github.com/juicedata/juicefs-csi-driver/pkg/controller" "github.com/juicedata/juicefs-csi-driver/pkg/k8sclient" @@ -61,7 +60,7 @@ func NewWebhookManager(certDir string, webhookPort int, leaderElection bool, Scheme: scheme, SelectorsByObject: cache.SelectorsByObject{ &corev1.Pod{}: { - Label: labels.SelectorFromSet(labels.Set{common.InjectSidecarDone: common.True}), + Label: labels.SelectorFromSet(labels.Set{config.InjectSidecarDone: config.True}), }, }, }), diff --git a/cmd/main.go b/cmd/main.go index cd0da2cb8d..a424d267c2 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -102,8 +102,6 @@ func main() { klog.InitFlags(goFlag) cmd.PersistentFlags().AddGoFlagSet(goFlag) - cmd.AddCommand(upgradeCmd) - if err := cmd.Execute(); err != nil { os.Exit(1) } diff --git a/cmd/node.go b/cmd/node.go index 94bb3ed5f3..996395ef76 100644 --- a/cmd/node.go +++ b/cmd/node.go @@ -31,8 +31,7 @@ import ( "github.com/juicedata/juicefs-csi-driver/pkg/config" "github.com/juicedata/juicefs-csi-driver/pkg/controller" "github.com/juicedata/juicefs-csi-driver/pkg/driver" - "github.com/juicedata/juicefs-csi-driver/pkg/fuse/grace" - "github.com/juicedata/juicefs-csi-driver/pkg/fuse/passfd" + "github.com/juicedata/juicefs-csi-driver/pkg/fuse" k8s "github.com/juicedata/juicefs-csi-driver/pkg/k8sclient" "github.com/juicedata/juicefs-csi-driver/pkg/util" ) @@ -126,16 +125,11 @@ func parseNodeConfig() { os.Exit(1) } config.CSIPod = *pod - err = passfd.InitGlobalFds(context.TODO(), k8sclient, "/tmp") + err = fuse.InitGlobalFds(context.TODO(), "/tmp") if err != nil { log.Error(err, "Init global fds error") os.Exit(1) } - err = grace.ServeGfShutdown(config.ShutdownSockPath) - if err != nil { - log.Error(err, "Serve graceful shutdown error") - os.Exit(1) - } } func nodeRun(ctx context.Context) { diff --git a/cmd/upgrade.go b/cmd/upgrade.go deleted file mode 100644 index 1ba9566745..0000000000 --- a/cmd/upgrade.go +++ /dev/null @@ -1,50 +0,0 @@ -/* - Copyright 2023 Juicedata Inc - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package main - -import ( - "os" - - "github.com/spf13/cobra" - - "github.com/juicedata/juicefs-csi-driver/pkg/config" - "github.com/juicedata/juicefs-csi-driver/pkg/fuse/grace" -) - -var ( - restart = false -) - -var upgradeCmd = &cobra.Command{ - Use: "upgrade", - Short: "upgrade mount pod smoothly", - Run: func(cmd *cobra.Command, args []string) { - if len(args) < 1 { - log.Info("please specify the name of the mount pod which you want to upgrade", "node", config.NodeName) - os.Exit(1) - } - name := args[0] - if err := grace.TriggerShutdown(config.ShutdownSockPath, name, restart); err != nil { - log.Error(err, "failed to upgrade mount pod") - os.Exit(1) - } - }, -} - -func init() { - upgradeCmd.Flags().BoolVar(&restart, "restart", false, "smoothly upgrade the mount pod with restart") -} diff --git a/dashboard-ui-v2/src/components/config-modal.tsx b/dashboard-ui-v2/src/components/config-modal.tsx index 8f81e026b5..71ba24a1eb 100644 --- a/dashboard-ui-v2/src/components/config-modal.tsx +++ b/dashboard-ui-v2/src/components/config-modal.tsx @@ -18,7 +18,7 @@ import { memo, ReactNode, useEffect, useState } from 'react' import Editor from '@monaco-editor/react' import { Button, Modal, Space } from 'antd' import { FormattedMessage } from 'react-intl' -import YAML, { YAMLParseError } from 'yaml' +import YAML from 'yaml' import { useConfig, useUpdateConfig } from '@/hooks/cm-api' @@ -44,11 +44,12 @@ const ConfigModal: React.FC<{ const [config, setConfig] = useState('') useEffect(() => { if (data?.data) { - try { - setConfig(YAML.stringify(YAML.parse(data?.data?.['config.yaml']))) - } catch (e) { - setConfig((e as YAMLParseError).message) - } + setConfig( + YAML.stringify(data?.data?.['config.yaml']) + .split('\n') + .slice(1) + .join('\n'), + ) } }, [data]) @@ -77,13 +78,12 @@ const ConfigModal: React.FC<{ onClick={() => { mutate() if (data?.data) { - try { - setConfig( - YAML.stringify(YAML.parse(data?.data?.['config.yaml'])), - ) - } catch (e) { - setConfig((e as YAMLParseError).message) - } + setConfig( + YAML.stringify(data?.data?.['config.yaml']) + .split('\n') + .slice(1) + .join('\n'), + ) setUpdated(false) } }} diff --git a/dashboard-ui-v2/src/components/containers.tsx b/dashboard-ui-v2/src/components/containers.tsx index 4494f5692d..3dfee03bf7 100644 --- a/dashboard-ui-v2/src/components/containers.tsx +++ b/dashboard-ui-v2/src/components/containers.tsx @@ -15,7 +15,7 @@ */ import { ProCard } from '@ant-design/pro-components' -import { Button, Space, Table, Tag, Tooltip,} from 'antd' +import { Button, Space, Table, Tag, Tooltip } from 'antd' import { ContainerStatus } from 'kubernetes-types/core/v1' import { FormattedMessage } from 'react-intl' import { useParams } from 'react-router-dom' @@ -28,13 +28,12 @@ import { AccessLogIcon, DebugIcon, LogIcon, - TerminalIcon, UpgradeIcon, + TerminalIcon, WarmupIcon, } from '@/icons' import { DetailParams } from '@/types' import { Pod } from '@/types/k8s' -import { isMountPod, supportBinarySmoothUpgrade, supportDebug } from '@/utils' -import UpgradeModal from '@/components/upgrade-modal.tsx' +import { isMountPod, supportDebug } from '@/utils' const Containers: React.FC<{ pod: Pod @@ -166,24 +165,6 @@ const Containers: React.FC<{ )} - - {supportBinarySmoothUpgrade(c.image) ? 
( - - {({ onClick }) => ( - - - - - - )} - onOk={handleOk} - onCancel={handleCancel} - > - - - ) : null} - - ) -}) - -export default UpgradeModal diff --git a/dashboard-ui-v2/src/hooks/use-api.ts b/dashboard-ui-v2/src/hooks/use-api.ts index fac32f91f6..6692a6e43d 100644 --- a/dashboard-ui-v2/src/hooks/use-api.ts +++ b/dashboard-ui-v2/src/hooks/use-api.ts @@ -57,10 +57,6 @@ export function useSysAppPods(args: SysPagingListArgs) { ) } -export function useMountPodImage(isMountPod: boolean, namespace?: string, name?: string) { - return useSWR(isMountPod ? `/api/v1/pod/${namespace}/${name}/latestimage` : '') -} - export function useAppPod(namespace?: string, name?: string) { return useSWR(`/api/v1/pod/${namespace}/${name}/`) } @@ -81,7 +77,7 @@ export function usePods( namespace?: string, name?: string, source: 'pod' | 'pv' | 'pvc' = 'pod', - type: 'mountpods' | 'apppods' | 'csi-nodes' = 'apppods', + type: 'mountpods' | 'apppods' = 'apppods', ) { return useSWR( source === 'pv' diff --git a/dashboard-ui-v2/src/icons/index.tsx b/dashboard-ui-v2/src/icons/index.tsx index 59fc3c7715..5e6c92fef0 100644 --- a/dashboard-ui-v2/src/icons/index.tsx +++ b/dashboard-ui-v2/src/icons/index.tsx @@ -70,8 +70,7 @@ const LocaleIcon = (props: Partial) => ( ( - + )} {...props} @@ -82,9 +81,7 @@ const TerminalIcon = (props: Partial) => ( ( - - {' '} + {' '} )} {...props} @@ -196,19 +193,6 @@ const WarmupIcon = (props: Partial) => ( /> ) -const UpgradeIcon = (props: Partial) => ( - ( - - - - )} - {...props} - /> -) - export { DSIcon, PODIcon, @@ -224,5 +208,4 @@ export { YamlIcon, DebugIcon, WarmupIcon, - UpgradeIcon, } diff --git a/dashboard-ui-v2/src/pages/app-pod-list.tsx b/dashboard-ui-v2/src/pages/app-pod-list.tsx index 105c2641d9..a80db86692 100644 --- a/dashboard-ui-v2/src/pages/app-pod-list.tsx +++ b/dashboard-ui-v2/src/pages/app-pod-list.tsx @@ -114,7 +114,7 @@ const columns: ProColumns[] = [ color={getPodStatusBadge(podStatus(mountPod) || '')} text={ {mountPod?.metadata?.name} @@ -130,7 +130,7 @@ const columns: ProColumns[] = [ color={getPodStatusBadge(podStatus(mountPod) || '')} text={ {mountPod?.metadata?.name} @@ -173,7 +173,7 @@ const columns: ProColumns[] = [ color={getPodStatusBadge(podStatus(pod.csiNode) || '')} text={ {pod.csiNode.metadata?.name} diff --git a/dashboard-ui-v2/src/pages/pod-detail.tsx b/dashboard-ui-v2/src/pages/pod-detail.tsx index 449e8e57c6..83976e621a 100644 --- a/dashboard-ui-v2/src/pages/pod-detail.tsx +++ b/dashboard-ui-v2/src/pages/pod-detail.tsx @@ -65,13 +65,6 @@ const PodDetail: React.FC<{ namespace={namespace!} name={name!} /> - ) diff --git a/dashboard-ui-v2/src/pages/sys-pod-list.tsx b/dashboard-ui-v2/src/pages/sys-pod-list.tsx index 11ab0d1662..d914477d33 100644 --- a/dashboard-ui-v2/src/pages/sys-pod-list.tsx +++ b/dashboard-ui-v2/src/pages/sys-pod-list.tsx @@ -39,9 +39,7 @@ const columns: ProColumns[] = [ const podFailReason = failedReasonOfMountPod(pod) || '' if (podFailReason === '') { return ( - + {pod.metadata?.name} ) @@ -49,9 +47,7 @@ const columns: ProColumns[] = [ const failReason = return (
- + {pod.metadata?.name} diff --git a/dashboard-ui-v2/src/utils/index.ts b/dashboard-ui-v2/src/utils/index.ts index 0ecd177f3e..db1084a25f 100644 --- a/dashboard-ui-v2/src/utils/index.ts +++ b/dashboard-ui-v2/src/utils/index.ts @@ -559,34 +559,6 @@ export function isEEImage(image: string): boolean { return tag.startsWith('ee') } -export function supportPodSmoothUpgrade(image: string): boolean { - if (image === '') { - return false - } - const version = image.split(':')[1] - if (!version) { - return false - } - if (version.includes('ce')) { - return compareImageVersion(version.replace('ce-v', ''), '1.2.1') >= 0 - } - return compareImageVersion(version.replace('ee-', ''), '5.1.0') >= 0 -} - -export function supportBinarySmoothUpgrade(image: string): boolean { - if (image === '') { - return false - } - const version = image.split(':')[1] - if (!version) { - return false - } - if (version.includes('ce')) { - return compareImageVersion(version.replace('ce-v', ''), '1.2.0') >= 0 - } - return compareImageVersion(version.replace('ee-', ''), '5.0.0') >= 0 -} - // image is the image name with tag, e.g. 'juicedata/mount:ce-v1.1.0' export function supportDebug(image: string): boolean { const version = image.split(':')[1] @@ -596,7 +568,7 @@ export function supportDebug(image: string): boolean { if (version.includes('ce')) { return compareImageVersion(version.replace('ce-v', ''), '1.2.0') >= 0 } - return compareImageVersion(version.replace('ee-', ''), '5.0.23') >= 0 + return compareImageVersion(version.replace('ee-v', ''), '5.0.23') >= 0 } // compareImageVersion compares two image versions and returns: diff --git a/deploy/k8s.yaml b/deploy/k8s.yaml index ff13e0636e..802c9e9e95 100644 --- a/deploy/k8s.yaml +++ b/deploy/k8s.yaml @@ -41,6 +41,7 @@ rules: - apiGroups: - "" resources: + - pods - persistentvolumes - persistentvolumeclaims - persistentvolumeclaims/status @@ -49,15 +50,6 @@ rules: - get - list - watch -- apiGroups: - - "" - resources: - - pods - verbs: - - get - - list - - watch - - update - apiGroups: - storage.k8s.io resources: @@ -202,19 +194,6 @@ rules: verbs: - get - list -- apiGroups: - - "" - resources: - - pods/exec - verbs: - - create -- apiGroups: - - "" - resources: - - events - verbs: - - create - - get --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole diff --git a/deploy/k8s_before_v1_18.yaml b/deploy/k8s_before_v1_18.yaml index 6a7c5bb9fb..4cfced3502 100644 --- a/deploy/k8s_before_v1_18.yaml +++ b/deploy/k8s_before_v1_18.yaml @@ -41,6 +41,7 @@ rules: - apiGroups: - "" resources: + - pods - persistentvolumes - persistentvolumeclaims - persistentvolumeclaims/status @@ -49,15 +50,6 @@ rules: - get - list - watch -- apiGroups: - - "" - resources: - - pods - verbs: - - get - - list - - watch - - update - apiGroups: - storage.k8s.io resources: @@ -202,19 +194,6 @@ rules: verbs: - get - list -- apiGroups: - - "" - resources: - - pods/exec - verbs: - - create -- apiGroups: - - "" - resources: - - events - verbs: - - create - - get --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole diff --git a/deploy/kubernetes/base/resources.yaml b/deploy/kubernetes/base/resources.yaml index 4758869bc5..5e5f1b4691 100644 --- a/deploy/kubernetes/base/resources.yaml +++ b/deploy/kubernetes/base/resources.yaml @@ -185,19 +185,6 @@ rules: verbs: - get - list - - apiGroups: - - "" - resources: - - pods/exec - verbs: - - create - - apiGroups: - - "" - resources: - - events - verbs: - - create - - get --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding @@ -226,6 
+213,7 @@ rules: - apiGroups: - "" resources: + - pods - persistentvolumes - persistentvolumeclaims - persistentvolumeclaims/status @@ -234,15 +222,6 @@ rules: - get - list - watch - - apiGroups: - - "" - resources: - - pods - verbs: - - get - - list - - watch - - update - apiGroups: - storage.k8s.io resources: diff --git a/deploy/webhook-with-certmanager.yaml b/deploy/webhook-with-certmanager.yaml index b6db1d72ac..b7a54ae6fc 100644 --- a/deploy/webhook-with-certmanager.yaml +++ b/deploy/webhook-with-certmanager.yaml @@ -31,6 +31,7 @@ rules: - apiGroups: - "" resources: + - pods - persistentvolumes - persistentvolumeclaims - persistentvolumeclaims/status @@ -39,15 +40,6 @@ rules: - get - list - watch -- apiGroups: - - "" - resources: - - pods - verbs: - - get - - list - - watch - - update - apiGroups: - storage.k8s.io resources: diff --git a/deploy/webhook.yaml b/deploy/webhook.yaml index 1039658aff..18b42ab086 100644 --- a/deploy/webhook.yaml +++ b/deploy/webhook.yaml @@ -31,6 +31,7 @@ rules: - apiGroups: - "" resources: + - pods - persistentvolumes - persistentvolumeclaims - persistentvolumeclaims/status @@ -39,15 +40,6 @@ rules: - get - list - watch -- apiGroups: - - "" - resources: - - pods - verbs: - - get - - list - - watch - - update - apiGroups: - storage.k8s.io resources: diff --git a/docker/Makefile b/docker/Makefile index a9bba2f1b3..82f6e456e9 100644 --- a/docker/Makefile +++ b/docker/Makefile @@ -25,9 +25,6 @@ JUICEFS_CE_LATEST_VERSION=$(shell curl -fsSL https://api.github.com/repos/juiced JUICEFS_EE_LATEST_VERSION=$(shell curl -sSL https://juicefs.com/static/juicefs -o juicefs-ee && chmod +x juicefs-ee && ./juicefs-ee version | cut -d' ' -f3) JFS_CHAN=${JFSCHAN} JFS_PKG_URL=${JFSPKGURL} -ifeq ($(strip $(JFS_PKG_URL)),) - JFS_PKG_URL := https://static.juicefs.com/release/bin_pkgs/latest_stable_full.tar.gz -endif JUICEFS_REPO_URL?=https://github.com/juicedata/juicefs JUICEFS_REPO_REF?=$(JUICEFS_CE_LATEST_VERSION) MOUNT_TAG=${MOUNTTAG} diff --git a/docs/en/administration/going-production.md b/docs/en/administration/going-production.md index a8ec6623ec..a95dfd80fb 100644 --- a/docs/en/administration/going-production.md +++ b/docs/en/administration/going-production.md @@ -384,7 +384,7 @@ Under the premise of fully understanding the risks of `--writeback`, if your sce * Configure cache persistence to ensure that the cache directory will not be lost when the container is destroyed. For specific configuration methods, read [Cache settings](../guide/cache.md#cache-settings); * Choose one of the following methods (you can also adopt both) to ensure that the JuiceFS client has enough time to complete the data upload when the application container exits: * Enable [Delayed mount pod deletion](../guide/resource-optimization.md#delayed-mount-pod-deletion). Even if the application pod exits, the mount pod will wait for the specified time before being destroyed by the CSI Node. Set a reasonable delay to ensure that data is uploaded in a timely manner; - * Since v0.24, the CSI Driver supports [customizing](../guide/configurations.md#customize-mount-pod) all aspects of the Mount Pod, so you can modify `terminationGracePeriodSeconds`. 
By using [`preStop`](https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks), you can ensure that the Mount Pod waits for data uploads to finish before exiting, as demonstrated below:
+ * Since v0.24, the CSI Driver supports [customizing](../guide/configurations.md#customize-mount-pod) all aspects of the mount pod, so you can modify `terminationGracePeriodSeconds` and then use [`preStop`](https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks) to make the mount pod wait for the data upload to complete before exiting, as demonstrated below:

   :::warning
   * After `preStop` is configured, if the write cache is not uploaded successfully, the mount pod will wait for the time set by the `terminationGracePeriodSeconds` parameter and cannot exit for a long time. This will affect the normal execution of certain operations (such as upgrading mount pod). Please fully test and understand the corresponding risks;
diff --git a/docs/en/administration/troubleshooting-cases.md b/docs/en/administration/troubleshooting-cases.md
index 09c85b1983..f904b7d5d1 100644
--- a/docs/en/administration/troubleshooting-cases.md
+++ b/docs/en/administration/troubleshooting-cases.md
@@ -64,7 +64,7 @@ Preempted in order to admit critical pod
 
 Default resource requests for mount pod is 1 CPU, 1GiB memory, mount pod will refuse to start or preempt application when allocatable resources is low, consider [adjusting resources for mount pod](../guide/resource-optimization.md#mount-pod-resources), or upgrade the worker node to work with more resources.
 
-Insufficient cluster IPs may also cause the Mount Pod to remain in a `Pending` state. By default, the Mount Pod starts with `hostNetwork: false`, which may consume a large amount of cluster IP resources. If the cluster IP resources are insufficient, this may prevent the Mount Pod from starting successfully. Please contact your cloud service provider to increase the number of IPs for your Kubernetes cluster, or start with `hostNetwork: true`. For details, see [Customize Mount Pod and sidecar container](../guide/configurations.md#customize-mount-pod).
+Insufficient cluster IPs may also cause the Mount Pod to remain in a `Pending` state. The mount pod starts with `hostNetwork: false` by default, which can consume a large number of cluster IPs; if cluster IPs run short, the mount pod may fail to start. Contact your cloud service provider to expand the number of IPs in the Kubernetes cluster, or start with `hostNetwork: true`. Refer to [Customize mount pod and sidecar container](../guide/configurations.md#customize-mount-pod).
diff --git a/docs/en/administration/upgrade-csi-driver.md b/docs/en/administration/upgrade-csi-driver.md
index a91dd5cdd0..4224198f98 100644
--- a/docs/en/administration/upgrade-csi-driver.md
+++ b/docs/en/administration/upgrade-csi-driver.md
@@ -81,6 +81,55 @@ kubectl apply -f ./k8s.yaml
 
 Dealing with exceptions like this, alongside with comparing and merging YAML files can be wearisome, that's why [install via Helm](../getting_started.md#helm) is much more recommended on a production environment.
 
+### Migrate to Helm installation {#migrate-to-helm}
+
+Helm installation starts from a `values.yaml` file: every change you have made to `k8s.yaml`, as long as it stays within the scope of normal use, has a corresponding configuration field in `values.yaml`. All you need to do is sort out your current configuration and fill it into `values.yaml`. If you have never customized `k8s.yaml` (nor modified the production configuration directly), the migration is simple: skip the sorting step and follow the instructions below to uninstall and reinstall.
+
+#### Sort out the configuration and fill in `values.yaml` {#sort-out-the-configuration-and-fill-in-values-yaml}
+
+Before you start, determine the CSI Driver version currently in use; the method described at the beginning of this document works for this. The following takes an upgrade from v0.18.0 to v0.21.0 as an example to show how to sort out the configuration line by line and fill in `values.yaml`.
+
+1. Open the diff between the two versions on GitHub. The link has to be typed by hand, so pay attention to the version numbers at its end, for example [https://github.com/juicedata/juicefs-csi-driver/compare/v0.18.0..v0.21.0](https://github.com/juicedata/juicefs-csi-driver/compare/v0.18.0..v0.21.0), then find `k8s.yaml` in the file list. The page shows all `k8s.yaml` changes introduced by the version update. Keep this page open: later, when you are unsure whether a given difference is your cluster's customization or a modification brought by the upgrade, refer to it to judge;
+1. Find the `k8s.yaml` used by the current online cluster installation and rename a copy of it to `k8s-online.yaml`, the name this document uses below for the current online installation file. Note that this file must accurately reflect the current online configuration: if your team has modified the online configuration in place (for example, used `kubectl edit` to temporarily add environment variables or change images), confirm these changes and append them to `k8s-online.yaml`;
+1. Download the new version of the CSI Driver [`k8s.yaml`](https://github.com/juicedata/juicefs-csi-driver/blob/94d4f95a5d0f15a7a430ea31257d725306e90ca4/deploy/k8s.yaml) (the link here takes v0.21.0 as an example) and compare it with the online configuration, for example by running `vimdiff k8s.yaml k8s-online.yaml`;
+1. Compare the configuration files line by line and determine whether each difference is brought about by the upgrade or is a customization made by your team. Decide which customizations need to be retained, then fill them into `values.yaml`. If you are not sure how to fill something in, you can usually find clues in the annotated documentation inside `values.yaml`.
+
+We have the following suggestions for writing `values.yaml`:
+
+If the default Mount Pod image is [overridden](../guide/custom-image.md#overwrite-mount-pod-image) in `k8s-online.yaml` (via the `JUICEFS_EE_MOUNT_IMAGE` environment variable or the `juicefs/mount-image` field of the StorageClass) and pins an older Mount Pod image, we encourage you to discard this configuration and let the cluster upgrade along with the CSI Driver, enabling the new version of the Mount Pod image; this is equivalent to upgrading the JuiceFS client together with the CSI Driver.
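+
+For illustration, below is a sketch of the relevant `values.yaml` fragment. It assumes the chart exposes the mount image override under `defaultMountImage`; check the annotated `values.yaml` of your chart version before copying:
+
+```yaml title="values.yaml"
+# Leaving the overrides empty lets each CSI Driver release ship its own
+# default mount image, so the JuiceFS client upgrades along with the CSI Driver.
+defaultMountImage:
+  ce: ""  # e.g. "juicedata/mount:ce-v1.2.0", only if you must pin
+  ee: ""
+```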
+
+Dynamic provisioning requires [creating a StorageClass](../guide/pv.md#create-storage-class), while in Helm values, StorageClass and [volume credentials](../guide/pv.md#volume-credentials) are managed together. To avoid leaving sensitive information in `values.yaml`, we generally recommend managing the file system credentials and the StorageClass manually, and disabling StorageClass in `values.yaml`:
+
+```yaml title="values.yaml"
+storageClasses:
+- enabled: false
+```
+
+#### Uninstall and reinstall {#uninstall-and-reinstall}
+
+If you use the default container mount mode or the sidecar mode, uninstalling the CSI Driver will not affect running services (although new PVs cannot be created or mounted during this period). Only [process mount mode](../introduction.md#by-process) suffers a service interruption from the uninstall; if you are not using that mode, the migration has no impact on running PVs and can be performed with confidence.
+
+If your environment is an offline cluster that cannot pull images directly from the external network, you also need to [copy the images over](./offline.md) in advance.
+
+Prepare the commands that need to be run ahead of time, such as:
+
+```shell
+# Uninstall the CSI Driver
+kubectl delete -f k8s-online.yaml
+
+# Reinstall with Helm. Different clusters can be managed using different values files, e.g. values-dev.yaml, values-prod.yaml.
+# The CSI Driver has no special requirements for the installation namespace, modify it as needed, e.g. jfs-system.
+helm upgrade --install juicefs-csi-driver . -f values.yaml -n kube-system
+```
+
+Run these commands and, right after the reinstallation, observe the startup of the CSI Driver components:
+
+```shell
+kubectl -n kube-system get pods -l app.kubernetes.io/name=juicefs-csi-driver
+```
+
+Wait for all components to start, then create an application pod for a simple verification, referring to [our demonstration](../guide/pv.md#static-provisioning).
+
 ## Upgrade CSI Driver (mount by process mode) {#mount-by-process-upgrade}
 
 [Mount by process](../introduction.md#by-process) means that JuiceFS Client runs inside CSI Node Service Pod, under this mode, upgrading CSI Driver will inevitably interrupt existing mounts, use one of below methods to carry out the upgrade.
diff --git a/docs/en/guide/cache.md b/docs/en/guide/cache.md
index 0c172864d4..3a226c2bca 100644
--- a/docs/en/guide/cache.md
+++ b/docs/en/guide/cache.md
@@ -15,6 +15,7 @@ After cache directory is set, it'll be accessible in the mount pod via `hostPath
 
 * In CSI Driver, `cache-dir` parameter does not support wildcard character, if you need to use multiple disks as storage devices, specify multiple directories joined by the `:` character. See [JuiceFS Community Edition](https://juicefs.com/docs/community/command_reference/#mount) and [JuiceFS Cloud Service](https://juicefs.com/docs/cloud/reference/commands_reference/#mount).
 * For scenario that does intensive small writes, we usually recommend users to temporarily enable client write cache, but due to its inherent risks, this is advised against when using CSI Driver, because pod lifecycle is significantly more unstable, and can cause data loss if pod exists unexpectedly.
+
 :::
 
 Cache related settings is configured in [mount options](./configurations.md#mount-options), you can also refer to the straightforward examples below.
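+
+For instance, a minimal sketch of a PV `mountOptions` list that spreads cache across two local disks (the directory paths below are placeholders):
+
+```yaml
+mountOptions:
+  # wildcards are not supported, so multiple cache directories are joined with ":"
+  - cache-dir=/data/disk1/jfscache:/data/disk2/jfscache
+  # total cache size in MiB, shared across all cache directories
+  - cache-size=204800
+```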
After PV is created and mounted, you can also [check the mount pod command](../administration/troubleshooting.md#check-mount-pod) to make sure the options contain the newly set cache directory.
@@ -223,9 +224,13 @@ spec:
   restartPolicy: Never
 ```
 
-## Cache cleanup {#mount-pod-clean-cache}
+## Clean cache when mount pod exits {#mount-pod-clean-cache}
+
+Local cache can be a precious resource, especially when dealing with large scale data. JuiceFS CSI Driver does not delete cache by default when the mount pod exits. If this behavior doesn't suit you, make adjustments so that the local cache is cleaned when the mount pod exits.
 
-Local cache can be a precious resource, especially when dealing with large scale data. For this reason, JuiceFS CSI Driver does not delete cache by default when the Mount Pod exits. If this behavior does not fit your needs, you can configure it to clear the local cache when the Mount Pod exits.
+:::note
+This feature requires JuiceFS CSI Driver 0.14.1 and above.
+:::
 
 ### Static provisioning
diff --git a/docs/en/guide/configurations.md b/docs/en/guide/configurations.md
index 26f521ae13..2ec70adbf1 100644
--- a/docs/en/guide/configurations.md
+++ b/docs/en/guide/configurations.md
@@ -9,10 +9,10 @@ This chapter introduces JuiceFS PV configurations, as well as CSI Driver configu
 
 Since CSI Driver v0.24, you can define and adjust settings in a ConfigMap called `juicefs-csi-driver-config`. Various settings are supported to customize mount pod & sidecar container, as well as settings for CSI Driver components. CM is updated dynamically: for mount pod customizations you no longer have to re-create PV & PVCs, and for CSI Driver settings there's no need to restart any CSI Driver components on update.
 
-ConfigMap is powerful and flexible. It will replace (or have already replaced) existing configuration methods in older versions of CSI Driver. Sections labeled "deprecated" provide examples of these outdated and less flexible approaches, which are no longer recommended. **If a setting is configurable via ConfigMap, it will take the highest priority within the ConfigMap. It is recommended to always use the ConfigMap method over any practices from legacy versions.**
+ConfigMap is powerful and flexible: it will replace (or has already replaced) the configuration methods that existed in older versions of the CSI Driver. The sections below titled "deprecated" are all examples of these outdated, less flexible methods and should be avoided. **If something can be configured in ConfigMap, always prefer the ConfigMap way over the practices available in legacy versions.**
 
 :::info Update delay
-When ConfigMap is updated, changes do not take effect immediately, because CM mounted in a pod is not updated in real time; instead, it is synced periodically (see [Kubernetes docs](https://kubernetes.io/docs/concepts/configuration/configmap/#mounted-configmaps-are-updated-automatically)).
+When ConfigMap changes, the changes won't take effect immediately, because a CM mounted in a pod isn't updated in real time but synced periodically (see [Kubernetes docs](https://kubernetes.io/docs/concepts/configuration/configmap/#mounted-configmaps-are-updated-automatically)).
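+
+The length of this delay is bounded by the kubelet's sync period plus its ConfigMap cache TTL. A sketch of the relevant kubelet configuration field, shown here only to illustrate where the delay comes from, not something you normally need to change:
+
+```yaml
+apiVersion: kubelet.config.k8s.io/v1beta1
+kind: KubeletConfiguration
+# mounted ConfigMaps are refreshed on this sync cycle, default 1m
+syncFrequency: 1m
+```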
If you wish for a force update, try adding a temporary label to CSI components: @@ -20,21 +20,18 @@ If you wish for a force update, try adding a temporary label to CSI components: kubectl -n kube-system annotate pods -l app.kubernetes.io/name=juicefs-csi-driver useless-annotation=true ``` -After ConfigMap is updated across CSI components, subsequent Mount Pods will apply the new configuration, but **existing Mount Pods will not automatically update**. Depending on what was changed, users must re-create the application pod or the Mount Pod for the changes to take effect. Refer to the sections below for more details. ::: -:::info Sidecar headsup -If a customization item appears to be a valid sidecar setting, it will work for the sidecar; otherwise, it will be ignored. For example: +All supported fields are demonstrated in the [example config](https://github.com/juicedata/juicefs-csi-driver/blob/master/juicefs-csi-driver-config.example.yaml), and also introduced in detail in our docs. -* `resources` applies to both the Mount Pod and the sidecar, so it works for both. -* `custom-labels` adds customized labels to the pod. However, since labels are an exclusive pod attribute, this setting does not apply to the sidecar. -::: +### Customize mount pod and sidecar container {#customize-mount-pod} -All supported fields are demonstrated in the [example configuration](https://github.com/juicedata/juicefs-csi-driver/blob/master/juicefs-csi-driver-config.example.yaml) and are explained in detail in our documentation. +Since mount pods are created by CSI Node, and sidecar containers injected by [webhook](#webhook), users cannot directly control their definition. To customize, refer to the following methods. -
+The `mountPodPatch` field from the [ConfigMap](#configmap) controls all mount pod & sidecar container customization, all supported fields are demonstrated below, but before use please notice: -Examples +* **Changes do not take effect immediately**, Kubernetes periodically syncs ConfigMap mounts, see [update delay](#configmap) +* For sidecar mount mode, if a customization field appears to be a valid sidecar setting, it'll work with sidecar. otherwise it'll be ignored. For example, `custom-labels` adds customized labels to pod, since labels are an exclusive pod attribute, this setting is not applicable to sidecar ```yaml title="values-mycluster.yaml" globalConfig: @@ -112,7 +109,7 @@ globalConfig: # Clean cache when mount pod exits juicefs-clean-cache: "true" - # Define an environment variable for the Mount Pod + # Define an environment variable for mount pod - pvcSelector: matchLabels: ... @@ -122,7 +119,7 @@ globalConfig: - name: DEMO_FAREWELL value: "Such a sweet sorrow" - # Mount some volumes to mount pod + # Mount some volumes to mount pod - pvcSelector: matchLabels: ... @@ -134,250 +131,65 @@ globalConfig: persistentVolumeClaim: claimName: block-pv - # Select by StorageClass + # Select by StorageClass - pvcSelector: matchStorageClassName: juicefs-sc terminationGracePeriodSeconds: 60 - # Select by PVC + # Select by PVC - pvcSelector: matchName: pvc-name terminationGracePeriodSeconds: 60 ``` -
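+
+As a complement to the example above, here is a hypothetical PVC that a `pvcSelector.matchLabels` entry could select; the `team: data-platform` label is made up for illustration:
+
+```yaml
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: shared-data
+  labels:
+    # a mountPodPatch entry with matchLabels {team: data-platform} applies here
+    team: data-platform
+spec:
+  accessModes: ["ReadWriteMany"]
+  storageClassName: juicefs-sc
+  resources:
+    requests:
+      storage: 10Gi
+```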
- -## Customize Mount Pod and Sidecar {#customize-mount-pod} - -Although all supported configuration items and PVC selectors are listed in the example snippet from the above section, the behavior of each item may vary, so they are introduced in the sections below. Please read carefully before use. - -### Custom mount image {#custom-image} - -#### Via ConfigMap - -The minimum required version is CSI Driver v0.24.0. Upon modification, application pods or Mount Pods need to be re-created for changes to take effect. If you decide to re-create Mount Pods, be sure to enable [automatic mount point recovery](./configurations.md#automatic-mount-point-recovery) in advance, to avoid permanent loss of mount point within the application pod. - -```yaml {2-4} -globalConfig: - mountPodPatch: - - ceMountImage: juicedata/mount:ce-v1.2.0 - eeMountImage: juicedata/mount:ee-5.1.0-053aa0b -``` - -If you need to use a custom image or would like to find the latest available JuiceFS mount image, refer to [customize container image](./custom-image.md). - -### Environment variables {#custom-env} - -#### Via ConfigMap - -The minimum required version is CSI Driver v0.24.5. Upon modification, application pods need to be re-created for changes to take effect. - -```yaml {2-6} - mountPodPatch: - - env: - - name: DEMO_GREETING - value: "Hello from the environment" - - name: DEMO_FAREWELL - value: "Such a sweet sorrow" -``` - -#### Via Secret - -```yaml {11} -apiVersion: v1 -kind: Secret -metadata: - name: juicefs-secret - namespace: default - labels: - # Add this label to enable secret validation - juicefs.com/validate-secret: "true" -type: Opaque -stringData: - envs: '{"BASE_URL": "http://10.0.0.1:8080/static"}' -``` - -### Resource definition {#custom-resources} - -#### Via ConfigMap - -The minimum required version is CSI Driver v0.24.0. Upon modification, application pods or Mount Pods need to be re-created for changes to take effect. If you decide to re-create Mount Pods, be sure to enable [automatic mount point recovery](./configurations.md#automatic-mount-point-recovery) in advance, to avoid permanent loss of mount point within the application pod. - -```yaml {2-5} - mountPodPatch: - - resources: - requests: - cpu: 100m - memory: 512Mi -``` - -Read [resource optimization](./resource-optimization.md) to learn how to properly set resource requests and limits. - -### Mount options {#mount-options} - -Each JuiceFS mount point is created by the `juicefs mount` command, and within the CSI Driver system, `mountOptions` manages all mount options. - -`mountOptions` supports both JuiceFS mount options and FUSE options. Note that although FUSE options are specified with `-o` in the JuiceFS command line, you must omit `-o` inside CSI `mountOptions` and just append each option directly in the YAML list. For example, a mount command like this: - -```shell -juicefs mount ... --cache-size=204800 -o writeback_cache,debug -``` - -It would translate to CSI `mountOptions` as follows: - -```yaml -mountOptions: - # JuiceFS mount options - - cache-size=204800 - # Extra FUSE options - - writeback_cache - - debug -``` +### Inherit from CSI Node (deprecated) {#inherit-from-csi-node} :::tip -Mount options are different between the Community Edition and Cloud Service. 
See: - -- [Community Edition](https://juicefs.com/docs/community/command_reference#mount) -- [Cloud Service](https://juicefs.com/docs/cloud/reference/commands_reference/#mount) +Starting from v0.24, CSI Driver can customize mount pods and sidecar containers in the [ConfigMap](#configmap), legacy method introduced in this section is not recommended. ::: -#### Via ConfigMap - -The minimum required version is CSI Driver v0.24.7. Upon modification, application pods need to be re-created for changes to take effect. - -Items inside ConfigMap comes with the highest priority, and mount options defined in CM will recursively overwrite those defined in PV. To avoid confusion, please migrate all mount options to ConfigMap and avoid using PV-level `mountOptions`. - -By using `pvcSelector`, you can control mount options for multiple PVCs. - -```yaml - mountPodPatch: - - pvcSelector: - matchLabels: - # Applies to all PVCs with this label - need-update-options: "true" - mountOptions: - - writeback - - cache-size=204800 -``` - -#### Via PV definition (deprecated) {#static-mount-options} - -After modifying the mount options for existing PVs, a rolling upgrade or re-creation of the application pod is required to apply the changes. This ensures CSI Driver re-creates the Mount Pod for the changes to take effect. - -```yaml {8-9} -apiVersion: v1 -kind: PersistentVolume -metadata: - name: juicefs-pv - labels: - juicefs-name: ten-pb-fs -spec: - mountOptions: - - cache-size=204800 - ... -``` - -#### Via StorageClass definition (deprecated) {#dynamic-mount-options} - -You can customize mount options in `StorageClass` definition. If different applications require different mount options, create multiple `StorageClass`, each with its own mount options. - -Since StorageClass serves as a template for creating PVs, **modifying mount options in StorageClass will not affect existing PVs**. If you need to adjust mount options for dynamic provisioning, you have to delete existing PVCs, or [directly modify mount options in existing PVs](#static-mount-options). - -```yaml {6-7} -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: juicefs-sc -provisioner: csi.juicefs.com -mountOptions: - - cache-size=204800 -parameters: - ... -``` - -### Health check & pod lifecycle {#custom-probe-lifecycle} +Mount pod specs are mostly inherited from CSI Node, for example if you need to enable `hostNetwork` for mount pods, you have to instead add the config to CSI Node: -The minimum required version is CSI Driver v0.24.0. Upon modification, application pods or Mount Pods need to be re-created for changes to take effect. If you decide to re-create Mount Pods, ensure that [automatic mount point recovery](./configurations.md#automatic-mount-point-recovery) is enabled to prevent the permanent loss of the mount point within the application pod. - -Targeted scenarios: - -- Use `readinessProbe` to set up health checks for the Mount Pod, supporting monitoring and alerting. -- Customize `preStop` in sidecars to ensure the mount container exits after the application container. Refer to [sidecar recommendations](../administration/going-production.md#sidecar) for details. 
- -```yaml - - pvcSelector: - matchLabels: - custom-probe: "true" - readinessProbe: - exec: - command: - - stat - - ${MOUNT_POINT}/${SUB_PATH} - failureThreshold: 3 - initialDelaySeconds: 10 - periodSeconds: 5 - successThreshold: 1 +```yaml title="values-mycluster.yaml" +node: + hostNetwork: true ``` -### Mount extra volumes {#custom-volumes} +After the change, newly created mount pods will use hostNetwork. -Targeted scenarios: +As mentioned earlier, "most" specs are inherited from CSI-node, this leaves component specific content like labels, annotations, etc. These fields will not work through inheritance so we provide separate methods for customization, read the next section for more. -- Some object storage providers (like Google Cloud Storage) require extra credential files for authentication. This means you will have to create a separate Secret to store these files and reference it in volume credentials (JuiceFS-secret in below examples), so that CSI Driver will mount these files into the mount pod. The relevant environment variable needs to be added to specify the added files for authentication. -- JuiceFS Enterprise Edition supports [shared block storage device](https://juicefs.com/docs/cloud/guide/block-device), which can be used as cache storage or permanent data storage. +### Customize via annotations (deprecated) {#others} -#### Via ConfigMap +:::tip +Starting from v0.24, CSI Driver can customize mount pods and sidecar containers in the [ConfigMap](#configmap), legacy method introduced in this section is not recommended. +::: -The minimum required version is CSI Driver v0.24.7. Upon modification, application pods need to be re-created for changes to take effect. +Some of the fields that doesn't support CSI Node inheritance, are customized using the following fields in the code block, they can be defined both in storageClass parameters (for dynamic provisioning), and also PVC annotations (static provisioning). ```yaml - # Mount some volumes to the Mount Pod - - pvcSelector: - matchLabels: - need-block-device: "true" - volumeDevices: - - name: block-devices - devicePath: /dev/sda1 - volumes: - - name: block-devices - persistentVolumeClaim: - claimName: block-pv - - pvcSelector: - matchLabels: - need-mount-secret: "true" - volumeMounts: - - name: config-1 - mountPath: /root/.config/gcloud - volumes: - - name: gc-secret - secret: - secretName: gc-secret - defaultMode: 420 -``` +juicefs/mount-cpu-limit: "" +juicefs/mount-memory-limit: "" +juicefs/mount-cpu-request: "" +juicefs/mount-memory-request: "" -#### Via secret +juicefs/mount-labels: "" +juicefs/mount-annotations: "" +juicefs/mount-service-account: "" +juicefs/mount-image: "" +juicefs/mount-delete-delay: "" -JuiceFS Secret only supports configuring extra secret mounts within the `configs` field. Shared block device mounts are not supported here. +# Clean cache at mount pod exit +juicefs/clean-cache: "" +juicefs/mount-cache-pvc: "" +juicefs/mount-cache-emptydir: "" +juicefs/mount-cache-inline-volume: "" -```yaml {8-9} -apiVersion: v1 -kind: Secret -metadata: - name: juicefs-secret -type: Opaque -stringData: - ... - # Set Secret name and mount directory in configs. This mounts the whole Secret into the specified directory - configs: "{gc-secret: /root/.config/gcloud}" +# Mount the hosts file or directory to pod +# Container mount path will be the same as host path, this doesn't support customization +juicefs/host-path: "/data/file.txt" ``` -### Other features - -Many features are closely relevant to other topics. 
For more information: - -* Configure delayed deletion for Mount Pods to reduce startup overhead in short application pod lifecycles. read [delayed deletion](./resource-optimization.md#delayed-mount-pod-deletion). -* Clean cache upon mount pod exit. See [cache cleanup](./resource-optimization.md#clean-cache-when-mount-pod-exits). - ## Format options / auth options {#format-options} Format options / auth options are options used in `juicefs [format|auth]` commands, in which: @@ -411,7 +223,7 @@ stringData: JuiceFS Enterprise Edition: -```yaml {13} +```yaml {11} apiVersion: v1 kind: Secret metadata: @@ -446,6 +258,48 @@ spec: ... ``` +### Dynamic provisioning {#dynamic-mount-options} + +Customize mount options in `StorageClass` definition. If you need to use different mount options for different applications, you'll need to create multiple `StorageClass`, each with different mount options. + +Due to StorageClass being the template used for creating PVs, **modifying mount options in StorageClass will not affect existing PVs**, if you need to adjust mount options for dynamic provisioning, you'll have to delete existing PVCs, or [directly modify mount options in existing PVs](#static-mount-options). + +```yaml {6-7} +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: juicefs-sc +provisioner: csi.juicefs.com +mountOptions: + - cache-size=204800 +parameters: + ... +``` + +### Parameter descriptions + +Mount options are different between Community Edition and Cloud Service, see: + +- [Community Edition](https://juicefs.com/docs/community/command_reference#mount) +- [Cloud Service](https://juicefs.com/docs/cloud/reference/commands_reference/#mount) + +`mountOptions` in PV/StorageClass supports both JuiceFS mount options and FUSE options. Keep in mind that although FUSE options is specified using `-o` when using JuiceFS command line, the `-o` is to be omitted inside CSI `mountOptions`, just append each option directly in the YAML list. For a mount command example like below: + +```shell +juicefs mount ... --cache-size=204800 -o writeback_cache,debug +``` + +Translated to CSI `mountOptions`: + +```yaml +mountOptions: + # JuiceFS mount options + - cache-size=204800 + # Extra FUSE options + - writeback_cache + - debug +``` + ## Share directory among applications {#share-directory} If you have existing data in JuiceFS, and would like to mount into container for application use, or plan to use a shared directory for multiple applications, here's what you can do: @@ -501,7 +355,7 @@ There are two ways to mount subdirectory, one is through the `--subdir` mount op If you'd like to share the same file system across different namespaces, use the same set of volume credentials (Secret) in the PV definition: -```yaml {10-12,24-26} +```yaml {9-11,22-24} apiVersion: v1 kind: PersistentVolume metadata: @@ -595,7 +449,7 @@ helm upgrade juicefs-csi-driver juicefs/juicefs-csi-driver -n kube-system -f ./v ### kubectl -Helm is absolutely recommended since kubectl installation means a lot of complex manual edits. Please migrate to Helm installation as soon as possible. +If you use the kubectl installation method, enabling this feature requires manual editing of the CSI Controller, which is complicated. Therefore, it is recommended to [migrate to Helm installation method](../administration/upgrade-csi-driver.md#migrate-to-helm). 
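+
+With Helm, enabling such a feature is typically a one-line change in `values.yaml` rather than a hand edit. A hypothetical sketch, where the exact key depends on the feature being enabled and on the chart version, so treat the field below as a placeholder and consult the chart's annotated `values.yaml`:
+
+```yaml title="values.yaml"
+controller:
+  provisioner: true  # placeholder toggle, verify against your chart version
+```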
 
@@ -676,7 +530,7 @@ kubectl annotate --overwrite node minikube myjfs.juicefs.com/cacheGroup=region-1
 
 And then modify relevant fields in SC:
 
-```yaml {11-13}
+```yaml {12-14}
 apiVersion: storage.k8s.io/v1
 kind: StorageClass
 metadata:
diff --git a/docs/en/guide/custom-image.md b/docs/en/guide/custom-image.md
index 1610c04a2e..0f58ad3f9e 100644
--- a/docs/en/guide/custom-image.md
+++ b/docs/en/guide/custom-image.md
@@ -35,6 +35,7 @@ With mount pod image overwritten, note that:
 
 * Existing mount pods won't be affected, new images will run only if you rolling upgrade app pods, or delete mount pod.
 * By default, if you [upgrad CSI Driver](../administration/upgrade-csi-driver.md), it'll use the latest stable mount image included with the release. But if you overwrite the mount image using steps provided in this section, then it'll be a fixated config and no longer related to CSI Driver upgrades
+
 :::
 
 ### configmap modify {#overwrite-in-configmap}
diff --git a/docs/en/guide/pv.md b/docs/en/guide/pv.md
index 7bdedb4648..23a9659be3 100644
--- a/docs/en/guide/pv.md
+++ b/docs/en/guide/pv.md
@@ -339,7 +339,10 @@ After pod is up and running, you'll see `out.txt` being created by the container
 
 [StorageClass](https://kubernetes.io/docs/concepts/storage/storage-classes) handles configurations to create different PVs, think of it as a profile for dynamic provisioning: each StorageClass may contain different volume credentials and mount options, so that you can use multiple settings under dynamic provisioning. Thus if you decide to use JuiceFS CSI Driver via [dynamic provisioning](#dynamic-provisioning), you'll need to create a StorageClass in advance.
 
-Due to StorageClass being the template used for creating PVs, **modifying mount options in StorageClass will not affect existing PVs,** if you need to adjust mount options under dynamic provisioning, you'll have to delete existing PVCs, or [directly modify mount options in existing PVs](./configurations.md#static-mount-options).
+Note that StorageClass is only a "template" used to create PVs under dynamic provisioning. Therefore, pay attention to the following when using it:
+
+* **Modifying mount options in StorageClass will not affect existing PVs.** If you need to adjust mount options under dynamic provisioning, you'll have to delete existing PVCs, or [directly modify mount options in existing PVs](./configurations.md#static-mount-options).
+* Starting from v0.24.3, you can use `matchStorageClassName` in [ConfigMap](./configurations.md#configmap) to conveniently select existing PVCs; this is the recommended way to modify StorageClass-related configurations.
 
 ### Create via kubectl {#kubectl-sc}
 
@@ -367,6 +370,7 @@ reclaimPolicy: Retain
 
 * Managing StorageClass via Helm requires putting credentials directly in `values.yaml`, thus is usually advised against in production environments.
 * As is demonstrated with the `backend` field in the below examples, when StorageClass is created by Helm, volume credentials is created along the way, you should manage directly in Helm, rather than [creating volume credentials separately](#volume-credentials).
+
 :::
 
 Configuration are different between Cloud Service and Community Edition, below example is for Community Edition, but you will find full description at [Helm chart](https://github.com/juicedata/charts/blob/main/charts/juicefs-csi-driver/values.yaml#L122).
@@ -403,7 +407,7 @@ Read [Usage](../introduction.md#usage) to learn about dynamic provisioning. 
Dyna Create PVC and example pod: -```yaml {13} +```yaml {14} kubectl apply -f - < +在 [ConfigMap](#configmap) 中,`mountPodPatch` 这个字段专门用于定制 mount pod 或者 sidecar 容器,可供定制的部分均已在示范中列出。使用前需要注意: -示例 +* **修改后并不会立即生效**,Kubernetes 会定期同步 ConfigMap 的挂载。详见 [ConfigMap 的更新时效](#configmap) +* 对于 sidecar 场景,相关的字段只要是合法的 sidecar 容器配置,那么对于 sidecar 容器同样生效:比如 `resources` 是 mount pod 和 sidecar 容器都具备的配置,因此对两种场景都生效;`custom-labels` 的作用是为 pod 添加自定义标签,而「标签」是 pod 独有的属性,container 是没有标签的,因此 `custom-labels` 就只对 mount pod 生效,sidecar 场景则会忽略该配置 ```yaml title="values-mycluster.yaml" globalConfig: @@ -110,7 +107,7 @@ globalConfig: # 退出时清理 cache juicefs-clean-cache: "true" - # 为 mount pod 注入 env + # 为 mount pod 注入环境变量 - pvcSelector: matchLabels: ... @@ -120,7 +117,7 @@ globalConfig: - name: DEMO_FAREWELL value: "Such a sweet sorrow" - # 挂载 volumes 到 mount pod + # 挂载 volumes 到 mount pod - pvcSelector: matchLabels: ... @@ -132,133 +129,117 @@ globalConfig: persistentVolumeClaim: claimName: block-pv - # 选择特定的 StorageClass + # 选择特定的 StorageClass - pvcSelector: matchStorageClassName: juicefs-sc terminationGracePeriodSeconds: 60 - # 选择特定的 PVC + # 选择特定的 PVC - pvcSelector: matchName: pvc-name terminationGracePeriodSeconds: 60 ``` - - -## 定制 Mount Pod 或者 Sidecar 容器 {#customize-mount-pod} - -虽然在[「ConfigMap 配置」](#configmap)的示范代码块里已经罗列了所有支持的定制项目和 PVC 选择器,但每一个配置项的修改生效条件和写法不尽相同,因此在本节一一罗列,使用前请详细阅读。 - -### 容器镜像 {#custom-image} +### 通过继承 CSI Node 配置进行定制(不推荐) {#inherit-from-csi-node} -#### 使用 Configmap +:::tip +从 v0.24 开始,CSI 驱动支持在 [ConfigMap](#configmap) 中定制 mount pod 和 sidecar 容器,本小节所介绍的方式已经不再推荐使用。 +::: -该功能最低需要 CSI 驱动版本 v0.24.0,修改后需重建业务 Pod 或者删除 Mount Pod 生效。若要使用重建 Mount Pod 的方式,务必提前配置好[「挂载点自动恢复」](./configurations.md#automatic-mount-point-recovery),避免重建 Mount Pod 后,应用 Pod 中的挂载点永久丢失。 +Mount Pod 自身的资源定义(Kubernetes manifests,也就是 Pod YAML)大部分继承自 CSI Node,比方说如果希望给 mount pod 启用 hostNetwork,可以先为 CSI Node 启用 hostNetwork: -```yaml {2-4} -globalConfig: - mountPodPatch: - - ceMountImage: juicedata/mount:ce-v1.2.0 - eeMountImage: juicedata/mount:ee-5.1.0-053aa0b +```yaml title="values-mycluster.yaml" +node: + hostNetwork: true ``` -如果需要使用定制版本的容器镜像,或者寻找最新版本的 JuiceFS 客户端镜像,请阅读[定制容器镜像](./custom-image.md)。 - -### 环境变量 {#custom-env} +让配置生效,后续生成的 mount pod 就会继承 hostNetwork 配置了。 -#### 使用 Configmap +之所以说「大部分」配置继承自 CSI Node,是因为 labels、annotations 等字段包含组件特定内容,无法简单继承。因此为这部分配置提供单独的定制手段,继续阅读下一小节了解。 -该功能最低需要 CSI 驱动版本 v0.24.5,修改后需要重建业务 Pod 生效。 +### 通过 Annotation 进行定制(不推荐) {#others} -```yaml {2-6} - mountPodPatch: - - env: - - name: DEMO_GREETING - value: "Hello from the environment" - - name: DEMO_FAREWELL - value: "Such a sweet sorrow" -``` +:::tip +从 v0.24 开始,CSI 驱动支持在 [ConfigMap](#configmap) 中定制 mount pod 和 sidecar 容器,本小节所介绍的方式已经不再推荐使用。 +::: -#### 使用 Secret +部分无法用继承方式定制的配置项,我们提供了额外的手段来单独定制。具体而言,就是下方代码块列出的字段,既可以将他们配置在 storageClass 的 parameters 参数中(动态配置),也可放在 PVC 的 annotations 中(静态配置)。 -```yaml {11} -apiVersion: v1 -kind: Secret -metadata: - name: juicefs-secret - namespace: default - labels: - # 增加该标签以启用认证信息校验 - juicefs.com/validate-secret: "true" -type: Opaque -stringData: - envs: '{"BASE_URL": "http://10.0.0.1:8080/static"}' -``` - -### 资源限制 {#custom-resources} +```yaml +juicefs/mount-cpu-limit: "" +juicefs/mount-memory-limit: "" +juicefs/mount-cpu-request: "" +juicefs/mount-memory-request: "" -#### 使用 Configmap +juicefs/mount-labels: "" +juicefs/mount-annotations: "" +juicefs/mount-service-account: "" +juicefs/mount-image: "" +juicefs/mount-delete-delay: "" -该功能最低需要 CSI 驱动版本 v0.24.0,修改后需要重建业务 Pod 或者重建 Mount Pod 生效。若要使用重建 Mount Pod 
的方式,务必提前配置好[「挂载点自动恢复」](./configurations.md#automatic-mount-point-recovery),避免重建 Mount Pod 后,应用 Pod 中的挂载点永久丢失。 +# 退出时清理缓存 +juicefs/clean-cache: "" +juicefs/mount-cache-pvc: "" +juicefs/mount-cache-emptydir: "" +juicefs/mount-cache-inline-volume: "" -```yaml {2-5} - mountPodPatch: - - resources: - requests: - cpu: 100m - memory: 512Mi +# 在 mount pod 中挂载宿主机文件或目录 +# 容器内路径将会等同宿主机路径,无法定制 +juicefs/host-path: "/data/file.txt" ``` -阅读[资源优化](./resource-optimization.md)以了解如何恰当设置资源定义,来兼顾性能和资源占用。 - -### 挂载参数 {#mount-options} - -每一个 JuiceFS 挂载点都是 `juicefs mount` 命令创建的,在 CSI 驱动体系中,需要通过 `mountOptions` 字段填写需要调整的挂载配置。 - -`mountOptions` 同时支持 JuiceFS 本身的挂载参数和 FUSE 相关选项。但要注意,虽然 FUSE 参数在命令行使用时会用 `-o` 传入,但在 `mountOptions` 中需要省略 `-o`,直接在列表中追加参数即可。以下方挂载命令为例: +## 格式化参数/认证参数 {#format-options} -```shell -juicefs mount ... --cache-size=204800 -o writeback_cache,debug -``` +「格式化参数/认证参数」是 `juicefs [format|auth]` 命令所接受的参数,其中: -翻译成 CSI 中的 `mountOptions`,格式如下: +* 社区版的 [`format`](https://juicefs.com/docs/zh/community/command_reference/#format) 是用于创建新文件系统的命令。社区版需要用户自行用客户端 `format` 命令创建文件系统,然后才能挂载; +* 企业版的 [`auth`](https://juicefs.com/docs/zh/cloud/reference/command_reference/#auth) 命令是负责向控制台发起认证、获取客户端配置文件。他在使用流程中的作用和 `format` 有些相似,这涉及到两个版本在使用上的区别:和社区版需要先格式化创建文件系统不同,企业版需要在 Web 控制台创建文件系统,客户端并不具备创建文件系统的能力,但是挂载时需要向控制台发起认证,这也就是 `auth` 命令的功能。 -```yaml -mountOptions: - # JuiceFS mount options - - cache-size=204800 - # 额外的 FUSE 相关选项 - - writeback_cache - - debug -``` +考虑到这两个命令的相似性,不论你使用社区版还是企业版,对应的命令运行参数都填入 `format-options`,示范如下。 :::tip -JuiceFS 社区版与云服务的挂载参数有所区别,请参考文档: - -- [社区版](https://juicefs.com/docs/zh/community/command_reference#mount) -- [云服务](https://juicefs.com/docs/zh/cloud/reference/commands_reference/#mount) +修改 `format-options` 并不影响已有的挂载客户端,即便重启 Mount Pod 也不会生效,需要滚升/重启应用 Pod,或者重建 PVC,方能生效。 ::: -#### 使用 ConfigMap - -该功能最小需要 CSI 驱动 v0.24.7。修改 ConfigMap 相关配置后,需重建业务 Pod 生效。 +社区版: -ConfigMap 中的配置具备最高优先级,他会递归合并覆盖 PV 中的 `mountOptions`,因此为了避免出现“修改了却不生效”的误用情况,建议将所有配置迁移到 ConfigMap,不再继续使用 PV 级别的 `mountOptions`。 +```yaml {13} +apiVersion: v1 +kind: Secret +metadata: + name: juicefs-secret +type: Opaque +stringData: + name: + metaurl: + storage: s3 + bucket: https://.s3..amazonaws.com + access-key: + secret-key: + format-options: trash-days=1 +``` -灵活使用 `pvcSelector` 可实现批量修改 `mountOptions` 的目的。 +企业版: -```yaml - mountPodPatch: - - pvcSelector: - matchLabels: - # 所有含有此 label 的 PVC 都将应用此配置 - need-update-options: "true" - mountOptions: - - writeback - - cache-size=204800 +```yaml {11} +apiVersion: v1 +kind: Secret +metadata: + name: juicefs-secret +type: Opaque +stringData: + name: ${JUICEFS_NAME} + token: ${JUICEFS_TOKEN} + access-key: ${ACCESS_KEY} + secret-key: ${SECRET_KEY} + format-options: bucket2=xxx,access-key2=xxx,secret-key2=xxx ``` -#### 通过 PV 定义(不推荐) {#static-mount-options} +## 挂载参数 {#mount-options} + +「挂载参数」,也就是 `juicefs mount` 命令所接受的参数,可以用于调整挂载配置。你需要通过 `mountOptions` 字段填写需要调整的挂载配置,在静态配置和动态配置下填写的位置不同,见下方示范。 + +### 静态配置 {#static-mount-options} 注意,如果是修改已有 PV 的挂载配置,修改后需要重建应用 Pod,才会触发重新创建 Mount Pod,令变动生效。 @@ -275,11 +256,11 @@ spec: ... 
``` -#### 通过 StorageClass 定义(不推荐) {#dynamic-mount-options} +### 动态配置 {#dynamic-mount-options} 在 `StorageClass` 定义中调整挂载参数。如果需要为不同应用使用不同挂载参数,则需要创建多个 `StorageClass`,单独添加所需参数。 -注意,StorageClass 仅仅是动态配置下用于创建 PV 的「模板」,也正因此,**在 StorageClass 中修改挂载配置,不影响已经创建的 PV。**如果你需要调整挂载配置,需要删除 PVC 重建,或者直接[在 PV 级别调整挂载配置](#static-mount-options)。 +注意,StorageClass 仅仅是动态配置下用于创建 PV 的「模板」,也正因此,**在 StorageClass 中修改挂载配置,不影响已经创建的 PV**。如果你需要调整挂载配置,需要删除 PVC 重建,或者直接[在 PV 级别调整挂载配置](#static-mount-options)。 ```yaml {6-7} apiVersion: storage.k8s.io/v1 @@ -293,134 +274,28 @@ parameters: ... ``` -### 健康检查 & 容器回调 {#custom-probe-lifecycle} +### 参数详解 -该功能最低需要 CSI 驱动版本 v0.24.0,修改后需重建业务 Pod 或者删除 Mount Pod 生效。若要使用重建 Mount Pod 的方式,务必提前配置好[「挂载点自动恢复」](./configurations.md#automatic-mount-point-recovery),避免重建 Mount Pod 后,应用 Pod 中的挂载点永久丢失。 +JuiceFS 社区版与云服务的挂载参数有所区别,请参考文档: -使用场景: +- [社区版](https://juicefs.com/docs/zh/community/command_reference#mount) +- [云服务](https://juicefs.com/docs/zh/cloud/reference/commands_reference/#mount) -- 配合 `readinessProbe` 配合监控体系,建立告警机制; -- 定制 `preStopHook`,避免 sidecar 场景中,挂载容器早于业务容器退出,造成业务波动。详见 [Sidecar 模式推荐设置](../administration/going-production.md#sidecar)。 +PV/StorageClass 中的 `mountOptions` 同时支持 JuiceFS 本身的挂载参数和 FUSE 相关选项。但要注意,虽然 FUSE 参数在命令行使用时会用 `-o` 传入,但在 `mountOptions` 中需要省略 `-o`,直接在列表中追加参数即可。以下方挂载命令为例: -```yaml - - pvcSelector: - matchLabels: - custom-probe: "true" - readinessProbe: - exec: - command: - - stat - - ${MOUNT_POINT}/${SUB_PATH} - failureThreshold: 3 - initialDelaySeconds: 10 - periodSeconds: 5 - successThreshold: 1 +```shell +juicefs mount ... --cache-size=204800 -o writeback_cache,debug ``` -### 挂载额外的 Volume {#custom-volumes} - -使用场景: - -- 部分对象存储服务(比如 Google 云存储)在访问时需要提供额外的认证文件,这就需要你用创建单独的 Secret 保存这些文件,然后在认证信息中引用。这样一来,CSI 驱动便会将这些文件挂载进 Mount Pod,然后在 Mount Pod 中添加对应的环境变量,令 JuiceFS 挂载时使用该文件进行对象存储的认证。 -- JuiceFS 企业版支持挂载[共享块设备](https://juicefs.com/docs/zh/cloud/guide/block-device),既可以作为缓存存储,也可以配置成数据块的永久存储。 - -#### 使用 ConfigMap - -该功能最低需要 CSI 驱动版本 v0.24.7,修改后需重建业务 Pod 生效。 +翻译成 CSI 中的 `mountOptions`,格式如下: ```yaml - # mount some volumes to the mount pod - - pvcSelector: - matchLabels: - need-block-device: "true" - volumeDevices: - - name: block-devices - devicePath: /dev/sda1 - volumes: - - name: block-devices - persistentVolumeClaim: - claimName: block-pv - - pvcSelector: - matchLabels: - need-mount-secret: "true" - volumeMounts: - - name: config-1 - mountPath: /root/.config/gcloud - volumes: - - name: gc-secret - secret: - secretName: gc-secret - defaultMode: 420 -``` - -#### 使用 secret - -在 JuiceFS Secret 的 `configs` 字段中,只能挂载额外的 Secret,无法配置共享块设备的挂载。 - -```yaml {8-9} -apiVersion: v1 -kind: Secret -metadata: - name: juicefs-secret -type: Opaque -stringData: - ... 
- # 在 configs 中填写 Secret 名称和挂载目录,将该 Secret 整体挂载进指定的目录 - configs: "{gc-secret: /root/.config/gcloud}" -``` - -### 其他功能定制 - -不少其他功能和其他话题高度相关,不在本章详细介绍,请阅读对应章节以详细了解: - -* 为 Mount Pod 配置延迟退出,在应用 Pod 生命周期极短时节约 Mount Pod 启动开销,阅读[延迟退出](./resource-optimization.md#delayed-mount-pod-deletion); -* 在 Mount Pod 退出时清理缓存,请阅读[清理缓存](./resource-optimization.md#clean-cache-when-mount-pod-exits)。 - -## 格式化参数/认证参数 {#format-options} - -「格式化参数/认证参数」是 `juicefs [format|auth]` 命令所接受的参数,其中: - -* 社区版的 [`format`](https://juicefs.com/docs/zh/community/command_reference/#format) 是用于创建新文件系统的命令。社区版需要用户自行用客户端 `format` 命令创建文件系统,然后才能挂载; -* 企业版的 [`auth`](https://juicefs.com/docs/zh/cloud/reference/command_reference/#auth) 命令是负责向控制台发起认证、获取客户端配置文件。他在使用流程中的作用和 `format` 有些相似,这涉及到两个版本在使用上的区别:和社区版需要先格式化创建文件系统不同,企业版需要在 Web 控制台创建文件系统,客户端并不具备创建文件系统的能力,但是挂载时需要向控制台发起认证,这也就是 `auth` 命令的功能。 - -考虑到这两个命令的相似性,不论你使用社区版还是企业版,对应的命令运行参数都填入 `format-options`,示范如下。 - -:::tip -修改 `format-options` 并不影响已有的挂载客户端,即便重启 Mount Pod 也不会生效,需要滚升/重启应用 Pod,或者重建 PVC,方能生效。 -::: - -社区版: - -```yaml {13} -apiVersion: v1 -kind: Secret -metadata: - name: juicefs-secret -type: Opaque -stringData: - name: - metaurl: - storage: s3 - bucket: https://.s3..amazonaws.com - access-key: - secret-key: - format-options: trash-days=1 -``` - -企业版: - -```yaml {13} -apiVersion: v1 -kind: Secret -metadata: - name: juicefs-secret -type: Opaque -stringData: - name: ${JUICEFS_NAME} - token: ${JUICEFS_TOKEN} - access-key: ${ACCESS_KEY} - secret-key: ${SECRET_KEY} - format-options: bucket2=xxx,access-key2=xxx,secret-key2=xxx +mountOptions: + # JuiceFS mount options + - cache-size=204800 + # 额外的 FUSE 相关选项 + - writeback_cache + - debug ``` ## 应用间共享存储 {#share-directory} @@ -478,7 +353,7 @@ stringData: 如果想要在不同命名空间中共享同一个文件系统,只需要让不同 PV 使用相同的文件系统认证信息(Secret)即可: -```yaml {10-12,24-26} +```yaml {9-11,22-24} apiVersion: v1 kind: PersistentVolume metadata: @@ -551,8 +426,8 @@ CSI 驱动提供两种方式进行 PV 初始化: 此特性默认关闭,需要手动启用。启用的方式就是为 CSI Controller 增添 `--provisioner=true` 启动参数,并且删去原本的 sidecar 容器,相当于让 CSI Controller 主进程自行监听资源变更,并执行相应的初始化操作。请根据 CSI Controller 的安装方式,按照下方步骤启用。 -:::info -[进程挂载模式](../introduction.md#by-process)不支持该功能。 +:::tip +[进程挂载模式](../introduction.md#by-process)不支持高级 PV 初始化功能。 ::: ### Helm @@ -572,7 +447,9 @@ helm upgrade juicefs-csi-driver juicefs/juicefs-csi-driver -n kube-system -f ./v ### kubectl -如果是 kubectl 安装方式,启用该功能需要手动编辑 CSI Controller,操作较为复杂,因此建议[迁移到 Helm 安装方式](../administration/upgrade-csi-driver.md#migrate-to-helm)。 +如果使用 kubectl 安装方式,启用该功能需要手动编辑 CSI Controller,操作较为复杂,因此建议[迁移到 Helm 安装方式](../administration/upgrade-csi-driver.md#migrate-to-helm)。 + +手动修改 CSI Controller: ```shell kubectl edit sts -n kube-system juicefs-csi-controller @@ -651,7 +528,7 @@ kubectl annotate --overwrite node minikube myjfs.juicefs.com/cacheGroup=region-1 然后在 `StorageClass` 中修改相关配置: -```yaml {11-13} +```yaml {12-14} apiVersion: storage.k8s.io/v1 kind: StorageClass metadata: diff --git a/docs/zh_cn/guide/custom-image.md b/docs/zh_cn/guide/custom-image.md index 1c8d777cd9..c0468420f8 100644 --- a/docs/zh_cn/guide/custom-image.md +++ b/docs/zh_cn/guide/custom-image.md @@ -35,6 +35,7 @@ JuiceFS CSI 驱动 0.17.1 及以上版本支持自定义 Mount Pod 镜像,有 * 已有的 Mount Pod 不会受影响,需要随着应用 Pod 滚动升级或者删除 Mount Pod 重建,才会采用新的镜像 * [升级 CSI 驱动](../administration/upgrade-csi-driver.md)时,默认会连带升级到 mount 镜像的最新稳定版。但如果你覆盖了 mount 镜像,那么这就是固定的配置了,继续升级 CSI 驱动,也不会引入连带的 mount 镜像升级 + ::: ### configmap 修改 {#overwrite-in-configmap} diff --git a/docs/zh_cn/guide/pv.md b/docs/zh_cn/guide/pv.md index 627a3c67b2..0879435c1d 100644 --- a/docs/zh_cn/guide/pv.md +++ 
b/docs/zh_cn/guide/pv.md @@ -369,6 +369,7 @@ reclaimPolicy: Retain * 通过 Helm 创建 StorageClass,要求用户将认证信息明文填入 `values.yaml`,考虑到安全性,生产环境一般推荐[用 kubectl 创建](#kubectl-sc)。 * 如下方示范中 `backend` 字段所示,用 Helm 创建 StorageClass 时,文件系统认证信息也会一并创建,请在 Helm 里直接管理,无需再[单独创建文件系统认证信息](#volume-credentials)。 + ::: JuiceFS 社区版和云服务的配置项略有不同,下方示范面向社区版,但你可以在 [Helm chart](https://github.com/juicedata/charts/blob/main/charts/juicefs-csi-driver/values.yaml#L122) 中找到全面示范。 @@ -405,7 +406,7 @@ storageClasses: 创建 PVC 和应用 Pod,示范如下: -```yaml {13} +```yaml {14} kubectl apply -f - <=18.0.0" } }, + "node_modules/@sindresorhus/merge-streams": { + "version": "2.3.0", + "resolved": "https://registry.npmmirror.com/@sindresorhus/merge-streams/-/merge-streams-2.3.0.tgz", + "integrity": "sha512-LtoMMhxAlorcGhmFYI+LhPgbPZCkgP6ra1YL604EeF6U98pLlQ3iWIGMdWSC+vWmPBWBNgmDBAhnAobLROJmwg==", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/@svgr/babel-plugin-add-jsx-attribute": { "version": "6.5.1", "resolved": "https://registry.npmmirror.com/@svgr/babel-plugin-add-jsx-attribute/-/babel-plugin-add-jsx-attribute-6.5.1.tgz", @@ -2225,9 +2236,9 @@ } }, "node_modules/@types/estree": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.5.tgz", - "integrity": "sha512-/kYRxGDLWzHOB7q+wtSUQlFrtcdUccpfy+X+9iMBpHK8QLLhx2wIPYuS5DYtR9Wa/YlZAbIovy7qVdB1Aq6Lyw==" + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.6.tgz", + "integrity": "sha512-AYnb1nQyY49te+VRAVgmzfcgjYS91mY5P0TKUDCLEM+gNnA+3T6rWITXRLYCpahpqSQbN5cE+gHpnPyXjHWxcw==" }, "node_modules/@types/is-empty": { "version": "1.2.1", @@ -2585,20 +2596,20 @@ } }, "node_modules/braces": { - "version": "3.0.2", - "resolved": "https://registry.npmmirror.com/braces/-/braces-3.0.2.tgz", - "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", + "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", "dependencies": { - "fill-range": "^7.0.1" + "fill-range": "^7.1.1" }, "engines": { "node": ">=8" } }, "node_modules/browserslist": { - "version": "4.23.3", - "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.23.3.tgz", - "integrity": "sha512-btwCFJVjI4YWDNfau8RhZ+B1Q/VLoUITrm3RlP6y1tYGWIOa+InuYiRGXUBXo8nA1qKmHMyLB/iVQg5TT4eFoA==", + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.24.0.tgz", + "integrity": "sha512-Rmb62sR1Zpjql25eSanFGEhAxcFwfA1K0GuQcLoaJBAcENegrQut3hYdhXFF1obQfiDyqIW/cLM5HSJ/9k884A==", "funding": [ { "type": "opencollective", @@ -2614,8 +2625,8 @@ } ], "dependencies": { - "caniuse-lite": "^1.0.30001646", - "electron-to-chromium": "^1.5.4", + "caniuse-lite": "^1.0.30001663", + "electron-to-chromium": "^1.5.28", "node-releases": "^2.0.18", "update-browserslist-db": "^1.1.0" }, @@ -2648,9 +2659,9 @@ } }, "node_modules/caniuse-lite": { - "version": "1.0.30001653", - "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001653.tgz", - "integrity": "sha512-XGWQVB8wFQ2+9NZwZ10GxTYC5hk0Fa+q8cSkr0tgvMhYhMHP/QC+WTgrePMDBWiWc/pV+1ik82Al20XOK25Gcw==", + "version": "1.0.30001664", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001664.tgz", + "integrity": 
"sha512-AmE7k4dXiNKQipgn7a2xg558IRqPN3jMQY/rOsbxDhrd0tyChwbITBfiwtnqz8bi2M5mIWbxAYBvk7W7QBUS2g==", "funding": [ { "type": "opencollective", @@ -2931,9 +2942,9 @@ "integrity": "sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==" }, "node_modules/electron-to-chromium": { - "version": "1.5.13", - "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.13.tgz", - "integrity": "sha512-lbBcvtIJ4J6sS4tb5TLp1b4LyfCdMkwStzXPyAgVgTRAsep4bvrAGaBOP7ZJtQMNJpSQ9SqG4brWOroNaQtm7Q==" + "version": "1.5.29", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.29.tgz", + "integrity": "sha512-PF8n2AlIhCKXQ+gTpiJi0VhcHDb69kYX4MtCiivctc2QD3XuNZ/XIOlbGzt7WAjjEev0TtaH6Cu3arZExm5DOw==" }, "node_modules/emoji-regex": { "version": "9.2.2", @@ -2982,9 +2993,9 @@ "integrity": "sha512-MVNK56NiMrOwitFB7cqDwq0CQutbw+0BvLshJSse0MUNU+y1FC3bUS/AQg7oUng+/wKrrki7JfmwtVHkVfPLlw==" }, "node_modules/escalade": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.2.tgz", - "integrity": "sha512-ErCHMCae19vR8vQGe50xIsVomy19rg6gFu3+r3jkEO46suLMWBksvVyoGgQV+jOfl84ZSOSlmv6Gxa89PmTGmA==", + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz", + "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==", "engines": { "node": ">=6" } @@ -3086,9 +3097,9 @@ "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==" }, "node_modules/fast-glob": { - "version": "3.2.12", - "resolved": "https://registry.npmmirror.com/fast-glob/-/fast-glob-3.2.12.tgz", - "integrity": "sha512-DVj4CQIYYow0BlaelwK1pHl5n5cRSJfM60UA0zK891sVInoPri2Ekj7+e1CT3/3qxXenpI+nBBmQAcJPJgaj4w==", + "version": "3.3.2", + "resolved": "https://registry.npmmirror.com/fast-glob/-/fast-glob-3.3.2.tgz", + "integrity": "sha512-oX2ruAFQwf/Orj8m737Y5adxDQO0LAB7/S5MnxCdTNDd4p6BsyIVsv9JQsATbTSq8KHRpLwIHbVlUNatxd+1Ow==", "dependencies": { "@nodelib/fs.stat": "^2.0.2", "@nodelib/fs.walk": "^1.2.3", @@ -3137,9 +3148,9 @@ } }, "node_modules/fill-range": { - "version": "7.0.1", - "resolved": "https://registry.npmmirror.com/fill-range/-/fill-range-7.0.1.tgz", - "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", + "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", "dependencies": { "to-regex-range": "^5.0.1" }, @@ -3330,9 +3341,9 @@ } }, "node_modules/ignore": { - "version": "5.2.1", - "resolved": "https://registry.npmmirror.com/ignore/-/ignore-5.2.1.tgz", - "integrity": "sha512-d2qQLzTJ9WxQftPAuEQpSPmKqzxePjzVbpAVv62AQ64NTL+wR4JkrVqR/LqFsFEUsHDAiId52mJteHDFuDkElA==", + "version": "5.3.2", + "resolved": "https://registry.npmmirror.com/ignore/-/ignore-5.3.2.tgz", + "integrity": "sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g==", "engines": { "node": ">= 4" } @@ -3450,7 +3461,7 @@ }, "node_modules/is-number": { "version": "7.0.0", - "resolved": "https://registry.npmmirror.com/is-number/-/is-number-7.0.0.tgz", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", "engines": { "node": ">=0.12.0" @@ -3539,6 
+3550,11 @@ "node": ">=6" } }, + "node_modules/jsonc-parser": { + "version": "3.3.1", + "resolved": "https://registry.npmmirror.com/jsonc-parser/-/jsonc-parser-3.3.1.tgz", + "integrity": "sha512-HUgH65KyejrUFPvHFPbqOY0rsFip3Bo5wb4ngvdi1EpCYWUQDC5V+Y7mZws+DLkr4M//zQJoanu1SP+87Dv1oQ==" + }, "node_modules/jsonfile": { "version": "6.1.0", "resolved": "https://registry.npmmirror.com/jsonfile/-/jsonfile-6.1.0.tgz", @@ -3580,11 +3596,11 @@ "integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==" }, "node_modules/linkify-it": { - "version": "4.0.1", - "resolved": "https://registry.npmmirror.com/linkify-it/-/linkify-it-4.0.1.tgz", - "integrity": "sha512-C7bfi1UZmoj8+PQx22XyeXCuBlokoyWQL5pWSP+EI6nzRylyThouddufc2c1NDIcP9k5agmN9fLpA7VNJfIiqw==", + "version": "5.0.0", + "resolved": "https://registry.npmmirror.com/linkify-it/-/linkify-it-5.0.0.tgz", + "integrity": "sha512-5aHCbzQRADcdP+ATqnDuhhJ/MRIqDkZX5pyjFHRRysS8vZ5AbqGEoFIb6pYHPZ+L/OC2Lc+xT8uHVVR5CAK/wQ==", "dependencies": { - "uc.micro": "^1.0.1" + "uc.micro": "^2.0.0" } }, "node_modules/load-plugin": { @@ -3641,97 +3657,119 @@ } }, "node_modules/markdown-it": { - "version": "13.0.1", - "resolved": "https://registry.npmmirror.com/markdown-it/-/markdown-it-13.0.1.tgz", - "integrity": "sha512-lTlxriVoy2criHP0JKRhO2VDG9c2ypWCsT237eDiLqi09rmbKoUetyGHq2uOIRoRS//kfoJckS0eUzzkDR+k2Q==", + "version": "14.1.0", + "resolved": "https://registry.npmmirror.com/markdown-it/-/markdown-it-14.1.0.tgz", + "integrity": "sha512-a54IwgWPaeBCAAsv13YgmALOF1elABB08FxO9i+r4VFk5Vl4pKokRPeX8u5TCgSsPi6ec1otfLjdOpVcgbpshg==", "dependencies": { "argparse": "^2.0.1", - "entities": "~3.0.1", - "linkify-it": "^4.0.1", - "mdurl": "^1.0.1", - "uc.micro": "^1.0.5" + "entities": "^4.4.0", + "linkify-it": "^5.0.0", + "mdurl": "^2.0.0", + "punycode.js": "^2.3.1", + "uc.micro": "^2.1.0" }, "bin": { - "markdown-it": "bin/markdown-it.js" - } - }, - "node_modules/markdown-it/node_modules/entities": { - "version": "3.0.1", - "resolved": "https://registry.npmmirror.com/entities/-/entities-3.0.1.tgz", - "integrity": "sha512-WiyBqoomrwMdFG1e0kqvASYfnlb0lp8M5o5Fw2OFq1hNZxxcNk8Ik0Xm7LxzBhuidnZB/UtBqVCgUz3kBOP51Q==", - "engines": { - "node": ">=0.12" + "markdown-it": "bin/markdown-it.mjs" } }, "node_modules/markdownlint": { - "version": "0.26.2", - "resolved": "https://registry.npmmirror.com/markdownlint/-/markdownlint-0.26.2.tgz", - "integrity": "sha512-2Am42YX2Ex5SQhRq35HxYWDfz1NLEOZWWN25nqd2h3AHRKsGRE+Qg1gt1++exW792eXTrR4jCNHfShfWk9Nz8w==", + "version": "0.35.0", + "resolved": "https://registry.npmmirror.com/markdownlint/-/markdownlint-0.35.0.tgz", + "integrity": "sha512-wgp8yesWjFBL7bycA3hxwHRdsZGJhjhyP1dSxKVKrza0EPFYtn+mHtkVy6dvP1kGSjovyG5B8yNP6Frj0UFUJg==", "dependencies": { - "markdown-it": "13.0.1" + "markdown-it": "14.1.0", + "markdownlint-micromark": "0.1.10" }, "engines": { - "node": ">=14" + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/DavidAnson" } }, "node_modules/markdownlint-cli2": { - "version": "0.5.1", - "resolved": "https://registry.npmmirror.com/markdownlint-cli2/-/markdownlint-cli2-0.5.1.tgz", - "integrity": "sha512-f3Nb1GF/c8YSrV/FntsCWzpa5mLFJRlO+wzEgv+lkNQjU6MZflUwc2FbyEDPTo6oVhP2VyUOkK0GkFgfuktl1w==", + "version": "0.14.0", + "resolved": "https://registry.npmmirror.com/markdownlint-cli2/-/markdownlint-cli2-0.14.0.tgz", + "integrity": "sha512-2cqdWy56frU2FTpbuGb83mEWWYuUIYv6xS8RVEoUAuKNw/hXPar2UYGpuzUhlFMngE8Omaz4RBH52MzfRbGshw==", "dependencies": { - "globby": "13.1.2", - "markdownlint": 
"0.26.2", - "markdownlint-cli2-formatter-default": "0.0.3", - "micromatch": "4.0.5", - "strip-json-comments": "5.0.0", - "yaml": "2.1.1" + "globby": "14.0.2", + "js-yaml": "4.1.0", + "jsonc-parser": "3.3.1", + "markdownlint": "0.35.0", + "markdownlint-cli2-formatter-default": "0.0.5", + "micromatch": "4.0.8" }, "bin": { - "markdownlint-cli2": "markdownlint-cli2.js", - "markdownlint-cli2-config": "markdownlint-cli2-config.js", - "markdownlint-cli2-fix": "markdownlint-cli2-fix.js" + "markdownlint-cli2": "markdownlint-cli2.js" }, "engines": { - "node": ">=14" + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/DavidAnson" } }, "node_modules/markdownlint-cli2-formatter-default": { - "version": "0.0.3", - "resolved": "https://registry.npmmirror.com/markdownlint-cli2-formatter-default/-/markdownlint-cli2-formatter-default-0.0.3.tgz", - "integrity": "sha512-QEAJitT5eqX1SNboOD+SO/LNBpu4P4je8JlR02ug2cLQAqmIhh8IJnSK7AcaHBHhNADqdGydnPpQOpsNcEEqCw==", + "version": "0.0.5", + "resolved": "https://registry.npmmirror.com/markdownlint-cli2-formatter-default/-/markdownlint-cli2-formatter-default-0.0.5.tgz", + "integrity": "sha512-4XKTwQ5m1+Txo2kuQ3Jgpo/KmnG+X90dWt4acufg6HVGadTUG5hzHF/wssp9b5MBYOMCnZ9RMPaU//uHsszF8Q==", + "funding": { + "url": "https://github.com/sponsors/DavidAnson" + }, "peerDependencies": { "markdownlint-cli2": ">=0.0.4" } }, "node_modules/markdownlint-cli2/node_modules/globby": { - "version": "13.1.2", - "resolved": "https://registry.npmmirror.com/globby/-/globby-13.1.2.tgz", - "integrity": "sha512-LKSDZXToac40u8Q1PQtZihbNdTYSNMuWe+K5l+oa6KgDzSvVrHXlJy40hUP522RjAIoNLJYBJi7ow+rbFpIhHQ==", + "version": "14.0.2", + "resolved": "https://registry.npmmirror.com/globby/-/globby-14.0.2.tgz", + "integrity": "sha512-s3Fq41ZVh7vbbe2PN3nrW7yC7U7MFVc5c98/iTl9c2GawNMKx/J648KQRW6WKkuU8GIbbh2IXfIRQjOZnXcTnw==", "dependencies": { - "dir-glob": "^3.0.1", - "fast-glob": "^3.2.11", - "ignore": "^5.2.0", - "merge2": "^1.4.1", - "slash": "^4.0.0" + "@sindresorhus/merge-streams": "^2.1.0", + "fast-glob": "^3.3.2", + "ignore": "^5.2.4", + "path-type": "^5.0.0", + "slash": "^5.1.0", + "unicorn-magic": "^0.1.0" }, "engines": { - "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/markdownlint-cli2/node_modules/slash": { - "version": "4.0.0", - "resolved": "https://registry.npmmirror.com/slash/-/slash-4.0.0.tgz", - "integrity": "sha512-3dOsAHXXUkQTpOYcoAxLIorMTp4gIQr5IW3iVb7A7lFIp0VHhnynm9izx6TssdrIcVIESAlVjtnO2K8bg+Coew==", + "node_modules/markdownlint-cli2/node_modules/path-type": { + "version": "5.0.0", + "resolved": "https://registry.npmmirror.com/path-type/-/path-type-5.0.0.tgz", + "integrity": "sha512-5HviZNaZcfqP95rwpv+1HDgUamezbqdSYTyzjTvwtJSnIH+3vnbmWsItli8OFEndS984VT55M3jduxZbX351gg==", "engines": { "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/markdownlint-cli2/node_modules/yaml": { - "version": "2.1.1", - "resolved": "https://registry.npmmirror.com/yaml/-/yaml-2.1.1.tgz", - "integrity": "sha512-o96x3OPo8GjWeSLF+wOAbrPfhFOGY0W00GNaxCDv+9hkcDJEnev1yh8S7pgHF0ik6zc8sQLuL8hjHjJULZp8bw==", + "node_modules/markdownlint-cli2/node_modules/slash": { + "version": "5.1.0", + "resolved": "https://registry.npmmirror.com/slash/-/slash-5.1.0.tgz", + "integrity": "sha512-ZA6oR3T/pEyuqwMgAKT0/hAv8oAXckzbkmR0UkUosQ+Mc4RxGoJkRmwHgHufaenlyAgE1Mxgpdcrf75y6XcnDg==", "engines": { - "node": ">= 14" + "node": ">=14.16" + }, + "funding": { + 
"url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/markdownlint-micromark": { + "version": "0.1.10", + "resolved": "https://registry.npmmirror.com/markdownlint-micromark/-/markdownlint-micromark-0.1.10.tgz", + "integrity": "sha512-no5ZfdqAdWGxftCLlySHSgddEjyW4kui4z7amQcGsSKfYC5v/ou+8mIQVyg9KQMeEZLNtz9OPDTj7nnTnoR4FQ==", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/DavidAnson" } }, "node_modules/markdownlint-rule-enhanced-proper-names": { @@ -3802,9 +3840,9 @@ "integrity": "sha512-dn6wd0uw5GsdswPFfsgMp5NSB0/aDe6fK94YJV/AJDYXL6HVLWBsxeq7js7Ad+mU2K9LAlwpk6kN2D5mwCPVow==" }, "node_modules/mdurl": { - "version": "1.0.1", - "resolved": "https://registry.npmmirror.com/mdurl/-/mdurl-1.0.1.tgz", - "integrity": "sha512-/sKlQJCBYVY9Ers9hqzKou4H6V5UWc/M59TH2dvkt+84itfnq7uFOMLpOiOS4ujvHP4etln18fmIxA5R5fll0g==" + "version": "2.0.0", + "resolved": "https://registry.npmmirror.com/mdurl/-/mdurl-2.0.0.tgz", + "integrity": "sha512-Lf+9+2r+Tdp5wXDXC4PcIBjTDtq4UKjCPMQhKIuzpJNW0b96kVqSwW0bT7FhRSfmAiFYgP+SCRvdrDozfh0U5w==" }, "node_modules/merge-stream": { "version": "2.0.0", @@ -4032,11 +4070,11 @@ "integrity": "sha512-DCfg/T8fcrhrRKTPjRrw/5LLvdGV7BHySf/1LOZx7TzWZdYRjogNtyNq885z3nNallwr3QUKARjqvHqX1/7t+w==" }, "node_modules/micromatch": { - "version": "4.0.5", - "resolved": "https://registry.npmmirror.com/micromatch/-/micromatch-4.0.5.tgz", - "integrity": "sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA==", + "version": "4.0.8", + "resolved": "https://registry.npmmirror.com/micromatch/-/micromatch-4.0.8.tgz", + "integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==", "dependencies": { - "braces": "^3.0.2", + "braces": "^3.0.3", "picomatch": "^2.3.1" }, "engines": { @@ -4194,9 +4232,9 @@ } }, "node_modules/picocolors": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.0.1.tgz", - "integrity": "sha512-anP1Z8qwhkbmu7MFP5iTt+wQKXgwzf7zTyGlcdzabySa9vd0Xt392U0rVmz9poOaBj0uHJKyyo9/upk0HrEQew==" + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.0.tgz", + "integrity": "sha512-TQ92mBOW0l3LeMeyLV6mzy/kWr8lkd/hp3mTg7wYK7zJhuBStmGMBG0BdeDZS/dZx1IukaX6Bk11zcln25o1Aw==" }, "node_modules/picomatch": { "version": "2.3.1", @@ -4230,6 +4268,14 @@ "node": ">=6" } }, + "node_modules/punycode.js": { + "version": "2.3.1", + "resolved": "https://registry.npmmirror.com/punycode.js/-/punycode.js-2.3.1.tgz", + "integrity": "sha512-uxFIHU0YlHYhDQtV4R9J6a52SLx28BCjT+4ieh7IGbgwVJWO+km431c4yRlREUAsAmt/uMjQUyQHNEPf0M39CA==", + "engines": { + "node": ">=6" + } + }, "node_modules/queue-microtask": { "version": "1.2.3", "resolved": "https://registry.npmmirror.com/queue-microtask/-/queue-microtask-1.2.3.tgz", @@ -4633,14 +4679,6 @@ "node": ">=0.10.0" } }, - "node_modules/strip-json-comments": { - "version": "5.0.0", - "resolved": "https://registry.npmmirror.com/strip-json-comments/-/strip-json-comments-5.0.0.tgz", - "integrity": "sha512-V1LGY4UUo0jgwC+ELQ2BNWfPa17TIuwBLg+j1AA/9RPzKINl1lhxVEu2r+ZTTO8aetIsUzE5Qj6LMSBkoGYKKw==", - "engines": { - "node": ">=14.16" - } - }, "node_modules/supports-color": { "version": "7.2.0", "resolved": "https://registry.npmmirror.com/supports-color/-/supports-color-7.2.0.tgz", @@ -4694,9 +4732,9 @@ } }, "node_modules/terser": { - "version": "5.31.6", - "resolved": "https://registry.npmjs.org/terser/-/terser-5.31.6.tgz", - "integrity": 
"sha512-PQ4DAriWzKj+qgehQ7LK5bQqCFNMmlhjR2PFFLuqGCpuCAauxemVBWwWOxo3UIwWQx8+Pr61Df++r76wDmkQBg==", + "version": "5.34.0", + "resolved": "https://registry.npmjs.org/terser/-/terser-5.34.0.tgz", + "integrity": "sha512-y5NUX+U9HhVsK/zihZwoq4r9dICLyV2jXGOriDAVOeKhq3LKVjgJbGO90FisozXLlJfvjHqgckGmJFBb9KYoWQ==", "dependencies": { "@jridgewell/source-map": "^0.3.3", "acorn": "^8.8.2", @@ -4763,7 +4801,7 @@ }, "node_modules/to-regex-range": { "version": "5.0.1", - "resolved": "https://registry.npmmirror.com/to-regex-range/-/to-regex-range-5.0.1.tgz", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", "dependencies": { "is-number": "^7.0.0" @@ -4797,9 +4835,9 @@ "integrity": "sha512-/aCDEGatGvZ2BIk+HmLf4ifCJFwvKFNb9/JeZPMulfgFracn9QFcAf5GO8B/mweUjSoblS5In0cWhqpfs/5PQA==" }, "node_modules/uc.micro": { - "version": "1.0.6", - "resolved": "https://registry.npmmirror.com/uc.micro/-/uc.micro-1.0.6.tgz", - "integrity": "sha512-8Y75pvTYkLJW2hWQHXxoqRgV7qb9B+9vFEtidML+7koHUFapnVJAZ6cKs+Qjz5Aw3aZWHMC6u0wJE3At+nSGwA==" + "version": "2.1.0", + "resolved": "https://registry.npmmirror.com/uc.micro/-/uc.micro-2.1.0.tgz", + "integrity": "sha512-ARDJmphmdvUk6Glw7y9DQ2bFkKBHwQHLi2lsaH6PPmz/Ka9sFOBsBluozhDltWmnv9u/cF6Rt87znRTPV+yp/A==" }, "node_modules/unicode-canonical-property-names-ecmascript": { "version": "2.0.0", @@ -4837,6 +4875,17 @@ "node": ">=4" } }, + "node_modules/unicorn-magic": { + "version": "0.1.0", + "resolved": "https://registry.npmmirror.com/unicorn-magic/-/unicorn-magic-0.1.0.tgz", + "integrity": "sha512-lRfVq8fE8gz6QMBuDM6a+LO3IAzTi05H6gCVaUpir2E1Rwpo4ZUog45KpNXKC/Mn3Yb9UDuHumeFTo9iV/D9FQ==", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/unified": { "version": "10.1.2", "resolved": "https://registry.npmmirror.com/unified/-/unified-10.1.2.tgz", @@ -4969,9 +5018,12 @@ } }, "node_modules/unified-engine/node_modules/yaml": { - "version": "2.1.3", - "resolved": "https://registry.npmmirror.com/yaml/-/yaml-2.1.3.tgz", - "integrity": "sha512-AacA8nRULjKMX2DvWvOAdBZMOfQlypSFkjcOcu9FalllIDJ1kvlREzcdIZmidQUqqeMv7jorHjq2HlLv/+c2lg==", + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/yaml/-/yaml-2.5.1.tgz", + "integrity": "sha512-bLQOjaX/ADgQ20isPJRvF0iRUHIxVhYvr53Of7wGcWlO2jvtUlH5m87DsmulFVxRpNLOnI4tB6p/oh8D7kpn9Q==", + "bin": { + "yaml": "bin.mjs" + }, "engines": { "node": ">= 14" } @@ -5180,9 +5232,9 @@ } }, "node_modules/webpack": { - "version": "5.94.0", - "resolved": "https://registry.npmjs.org/webpack/-/webpack-5.94.0.tgz", - "integrity": "sha512-KcsGn50VT+06JH/iunZJedYGUJS5FGjow8wb9c0v5n1Om8O1g4L6LjtfxwlXIATopoQu+vOXXa7gYisWxCoPyg==", + "version": "5.95.0", + "resolved": "https://registry.npmjs.org/webpack/-/webpack-5.95.0.tgz", + "integrity": "sha512-2t3XstrKULz41MNMBF+cJ97TyHdyQ8HCt//pqErqDvNjU9YQBnZxIHa11VXsi7F3mb5/aO2tuDxdeTPdU7xu9Q==", "dependencies": { "@types/estree": "^1.0.5", "@webassemblyjs/ast": "^1.12.1", @@ -6636,6 +6688,11 @@ "resolved": "https://registry.npmmirror.com/@npmcli/name-from-folder/-/name-from-folder-2.0.0.tgz", "integrity": "sha512-pwK+BfEBZJbKdNYpHHRTNBwBoqrN/iIMO0AiGvYsp3Hoaq0WbgGSWQR6SCldZovoDpY3yje5lkFUe6gsDgJ2vg==" }, + "@sindresorhus/merge-streams": { + "version": "2.3.0", + "resolved": "https://registry.npmmirror.com/@sindresorhus/merge-streams/-/merge-streams-2.3.0.tgz", + "integrity": 
"sha512-LtoMMhxAlorcGhmFYI+LhPgbPZCkgP6ra1YL604EeF6U98pLlQ3iWIGMdWSC+vWmPBWBNgmDBAhnAobLROJmwg==" + }, "@svgr/babel-plugin-add-jsx-attribute": { "version": "6.5.1", "resolved": "https://registry.npmmirror.com/@svgr/babel-plugin-add-jsx-attribute/-/babel-plugin-add-jsx-attribute-6.5.1.tgz", @@ -6778,9 +6835,9 @@ } }, "@types/estree": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.5.tgz", - "integrity": "sha512-/kYRxGDLWzHOB7q+wtSUQlFrtcdUccpfy+X+9iMBpHK8QLLhx2wIPYuS5DYtR9Wa/YlZAbIovy7qVdB1Aq6Lyw==" + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.6.tgz", + "integrity": "sha512-AYnb1nQyY49te+VRAVgmzfcgjYS91mY5P0TKUDCLEM+gNnA+3T6rWITXRLYCpahpqSQbN5cE+gHpnPyXjHWxcw==" }, "@types/is-empty": { "version": "1.2.1", @@ -7098,20 +7155,20 @@ } }, "braces": { - "version": "3.0.2", - "resolved": "https://registry.npmmirror.com/braces/-/braces-3.0.2.tgz", - "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", + "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", "requires": { - "fill-range": "^7.0.1" + "fill-range": "^7.1.1" } }, "browserslist": { - "version": "4.23.3", - "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.23.3.tgz", - "integrity": "sha512-btwCFJVjI4YWDNfau8RhZ+B1Q/VLoUITrm3RlP6y1tYGWIOa+InuYiRGXUBXo8nA1qKmHMyLB/iVQg5TT4eFoA==", + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.24.0.tgz", + "integrity": "sha512-Rmb62sR1Zpjql25eSanFGEhAxcFwfA1K0GuQcLoaJBAcENegrQut3hYdhXFF1obQfiDyqIW/cLM5HSJ/9k884A==", "requires": { - "caniuse-lite": "^1.0.30001646", - "electron-to-chromium": "^1.5.4", + "caniuse-lite": "^1.0.30001663", + "electron-to-chromium": "^1.5.28", "node-releases": "^2.0.18", "update-browserslist-db": "^1.1.0" } @@ -7132,9 +7189,9 @@ "integrity": "sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==" }, "caniuse-lite": { - "version": "1.0.30001653", - "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001653.tgz", - "integrity": "sha512-XGWQVB8wFQ2+9NZwZ10GxTYC5hk0Fa+q8cSkr0tgvMhYhMHP/QC+WTgrePMDBWiWc/pV+1ik82Al20XOK25Gcw==" + "version": "1.0.30001664", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001664.tgz", + "integrity": "sha512-AmE7k4dXiNKQipgn7a2xg558IRqPN3jMQY/rOsbxDhrd0tyChwbITBfiwtnqz8bi2M5mIWbxAYBvk7W7QBUS2g==" }, "chalk": { "version": "4.1.2", @@ -7348,9 +7405,9 @@ "integrity": "sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==" }, "electron-to-chromium": { - "version": "1.5.13", - "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.13.tgz", - "integrity": "sha512-lbBcvtIJ4J6sS4tb5TLp1b4LyfCdMkwStzXPyAgVgTRAsep4bvrAGaBOP7ZJtQMNJpSQ9SqG4brWOroNaQtm7Q==" + "version": "1.5.29", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.29.tgz", + "integrity": "sha512-PF8n2AlIhCKXQ+gTpiJi0VhcHDb69kYX4MtCiivctc2QD3XuNZ/XIOlbGzt7WAjjEev0TtaH6Cu3arZExm5DOw==" }, "emoji-regex": { "version": "9.2.2", @@ -7390,9 +7447,9 @@ "integrity": "sha512-MVNK56NiMrOwitFB7cqDwq0CQutbw+0BvLshJSse0MUNU+y1FC3bUS/AQg7oUng+/wKrrki7JfmwtVHkVfPLlw==" }, "escalade": { - "version": "3.1.2", - "resolved": 
"https://registry.npmjs.org/escalade/-/escalade-3.1.2.tgz", - "integrity": "sha512-ErCHMCae19vR8vQGe50xIsVomy19rg6gFu3+r3jkEO46suLMWBksvVyoGgQV+jOfl84ZSOSlmv6Gxa89PmTGmA==" + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz", + "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==" }, "escape-string-regexp": { "version": "1.0.5", @@ -7462,9 +7519,9 @@ "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==" }, "fast-glob": { - "version": "3.2.12", - "resolved": "https://registry.npmmirror.com/fast-glob/-/fast-glob-3.2.12.tgz", - "integrity": "sha512-DVj4CQIYYow0BlaelwK1pHl5n5cRSJfM60UA0zK891sVInoPri2Ekj7+e1CT3/3qxXenpI+nBBmQAcJPJgaj4w==", + "version": "3.3.2", + "resolved": "https://registry.npmmirror.com/fast-glob/-/fast-glob-3.3.2.tgz", + "integrity": "sha512-oX2ruAFQwf/Orj8m737Y5adxDQO0LAB7/S5MnxCdTNDd4p6BsyIVsv9JQsATbTSq8KHRpLwIHbVlUNatxd+1Ow==", "requires": { "@nodelib/fs.stat": "^2.0.2", "@nodelib/fs.walk": "^1.2.3", @@ -7504,9 +7561,9 @@ } }, "fill-range": { - "version": "7.0.1", - "resolved": "https://registry.npmmirror.com/fill-range/-/fill-range-7.0.1.tgz", - "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", + "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", "requires": { "to-regex-range": "^5.0.1" } @@ -7653,9 +7710,9 @@ } }, "ignore": { - "version": "5.2.1", - "resolved": "https://registry.npmmirror.com/ignore/-/ignore-5.2.1.tgz", - "integrity": "sha512-d2qQLzTJ9WxQftPAuEQpSPmKqzxePjzVbpAVv62AQ64NTL+wR4JkrVqR/LqFsFEUsHDAiId52mJteHDFuDkElA==" + "version": "5.3.2", + "resolved": "https://registry.npmmirror.com/ignore/-/ignore-5.3.2.tgz", + "integrity": "sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g==" }, "import-fresh": { "version": "3.3.0", @@ -7746,7 +7803,7 @@ }, "is-number": { "version": "7.0.0", - "resolved": "https://registry.npmmirror.com/is-number/-/is-number-7.0.0.tgz", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==" }, "is-plain-obj": { @@ -7807,6 +7864,11 @@ "resolved": "https://registry.npmmirror.com/json5/-/json5-2.2.2.tgz", "integrity": "sha512-46Tk9JiOL2z7ytNQWFLpj99RZkVgeHf87yGQKsIkaPz1qSH9UczKH1rO7K3wgRselo0tYMUNfecYpm/p1vC7tQ==" }, + "jsonc-parser": { + "version": "3.3.1", + "resolved": "https://registry.npmmirror.com/jsonc-parser/-/jsonc-parser-3.3.1.tgz", + "integrity": "sha512-HUgH65KyejrUFPvHFPbqOY0rsFip3Bo5wb4ngvdi1EpCYWUQDC5V+Y7mZws+DLkr4M//zQJoanu1SP+87Dv1oQ==" + }, "jsonfile": { "version": "6.1.0", "resolved": "https://registry.npmmirror.com/jsonfile/-/jsonfile-6.1.0.tgz", @@ -7837,11 +7899,11 @@ "integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==" }, "linkify-it": { - "version": "4.0.1", - "resolved": "https://registry.npmmirror.com/linkify-it/-/linkify-it-4.0.1.tgz", - "integrity": "sha512-C7bfi1UZmoj8+PQx22XyeXCuBlokoyWQL5pWSP+EI6nzRylyThouddufc2c1NDIcP9k5agmN9fLpA7VNJfIiqw==", + "version": "5.0.0", + "resolved": "https://registry.npmmirror.com/linkify-it/-/linkify-it-5.0.0.tgz", + "integrity": 
"sha512-5aHCbzQRADcdP+ATqnDuhhJ/MRIqDkZX5pyjFHRRysS8vZ5AbqGEoFIb6pYHPZ+L/OC2Lc+xT8uHVVR5CAK/wQ==", "requires": { - "uc.micro": "^1.0.1" + "uc.micro": "^2.0.0" } }, "load-plugin": { @@ -7889,75 +7951,76 @@ "integrity": "sha512-ysxwsnTKdAx96aTRdhDOCQfDgbHnt8SK0KY8SEjO0wHinhWOFTESbjVCMPbU1uGXg/ch4lifqx0wfjOawU2+WA==" }, "markdown-it": { - "version": "13.0.1", - "resolved": "https://registry.npmmirror.com/markdown-it/-/markdown-it-13.0.1.tgz", - "integrity": "sha512-lTlxriVoy2criHP0JKRhO2VDG9c2ypWCsT237eDiLqi09rmbKoUetyGHq2uOIRoRS//kfoJckS0eUzzkDR+k2Q==", + "version": "14.1.0", + "resolved": "https://registry.npmmirror.com/markdown-it/-/markdown-it-14.1.0.tgz", + "integrity": "sha512-a54IwgWPaeBCAAsv13YgmALOF1elABB08FxO9i+r4VFk5Vl4pKokRPeX8u5TCgSsPi6ec1otfLjdOpVcgbpshg==", "requires": { "argparse": "^2.0.1", - "entities": "~3.0.1", - "linkify-it": "^4.0.1", - "mdurl": "^1.0.1", - "uc.micro": "^1.0.5" - }, - "dependencies": { - "entities": { - "version": "3.0.1", - "resolved": "https://registry.npmmirror.com/entities/-/entities-3.0.1.tgz", - "integrity": "sha512-WiyBqoomrwMdFG1e0kqvASYfnlb0lp8M5o5Fw2OFq1hNZxxcNk8Ik0Xm7LxzBhuidnZB/UtBqVCgUz3kBOP51Q==" - } + "entities": "^4.4.0", + "linkify-it": "^5.0.0", + "mdurl": "^2.0.0", + "punycode.js": "^2.3.1", + "uc.micro": "^2.1.0" } }, "markdownlint": { - "version": "0.26.2", - "resolved": "https://registry.npmmirror.com/markdownlint/-/markdownlint-0.26.2.tgz", - "integrity": "sha512-2Am42YX2Ex5SQhRq35HxYWDfz1NLEOZWWN25nqd2h3AHRKsGRE+Qg1gt1++exW792eXTrR4jCNHfShfWk9Nz8w==", + "version": "0.35.0", + "resolved": "https://registry.npmmirror.com/markdownlint/-/markdownlint-0.35.0.tgz", + "integrity": "sha512-wgp8yesWjFBL7bycA3hxwHRdsZGJhjhyP1dSxKVKrza0EPFYtn+mHtkVy6dvP1kGSjovyG5B8yNP6Frj0UFUJg==", "requires": { - "markdown-it": "13.0.1" + "markdown-it": "14.1.0", + "markdownlint-micromark": "0.1.10" } }, "markdownlint-cli2": { - "version": "0.5.1", - "resolved": "https://registry.npmmirror.com/markdownlint-cli2/-/markdownlint-cli2-0.5.1.tgz", - "integrity": "sha512-f3Nb1GF/c8YSrV/FntsCWzpa5mLFJRlO+wzEgv+lkNQjU6MZflUwc2FbyEDPTo6oVhP2VyUOkK0GkFgfuktl1w==", + "version": "0.14.0", + "resolved": "https://registry.npmmirror.com/markdownlint-cli2/-/markdownlint-cli2-0.14.0.tgz", + "integrity": "sha512-2cqdWy56frU2FTpbuGb83mEWWYuUIYv6xS8RVEoUAuKNw/hXPar2UYGpuzUhlFMngE8Omaz4RBH52MzfRbGshw==", "requires": { - "globby": "13.1.2", - "markdownlint": "0.26.2", - "markdownlint-cli2-formatter-default": "0.0.3", - "micromatch": "4.0.5", - "strip-json-comments": "5.0.0", - "yaml": "2.1.1" + "globby": "14.0.2", + "js-yaml": "4.1.0", + "jsonc-parser": "3.3.1", + "markdownlint": "0.35.0", + "markdownlint-cli2-formatter-default": "0.0.5", + "micromatch": "4.0.8" }, "dependencies": { "globby": { - "version": "13.1.2", - "resolved": "https://registry.npmmirror.com/globby/-/globby-13.1.2.tgz", - "integrity": "sha512-LKSDZXToac40u8Q1PQtZihbNdTYSNMuWe+K5l+oa6KgDzSvVrHXlJy40hUP522RjAIoNLJYBJi7ow+rbFpIhHQ==", + "version": "14.0.2", + "resolved": "https://registry.npmmirror.com/globby/-/globby-14.0.2.tgz", + "integrity": "sha512-s3Fq41ZVh7vbbe2PN3nrW7yC7U7MFVc5c98/iTl9c2GawNMKx/J648KQRW6WKkuU8GIbbh2IXfIRQjOZnXcTnw==", "requires": { - "dir-glob": "^3.0.1", - "fast-glob": "^3.2.11", - "ignore": "^5.2.0", - "merge2": "^1.4.1", - "slash": "^4.0.0" + "@sindresorhus/merge-streams": "^2.1.0", + "fast-glob": "^3.3.2", + "ignore": "^5.2.4", + "path-type": "^5.0.0", + "slash": "^5.1.0", + "unicorn-magic": "^0.1.0" } }, - "slash": { - "version": "4.0.0", - "resolved": 
"https://registry.npmmirror.com/slash/-/slash-4.0.0.tgz", - "integrity": "sha512-3dOsAHXXUkQTpOYcoAxLIorMTp4gIQr5IW3iVb7A7lFIp0VHhnynm9izx6TssdrIcVIESAlVjtnO2K8bg+Coew==" + "path-type": { + "version": "5.0.0", + "resolved": "https://registry.npmmirror.com/path-type/-/path-type-5.0.0.tgz", + "integrity": "sha512-5HviZNaZcfqP95rwpv+1HDgUamezbqdSYTyzjTvwtJSnIH+3vnbmWsItli8OFEndS984VT55M3jduxZbX351gg==" }, - "yaml": { - "version": "2.1.1", - "resolved": "https://registry.npmmirror.com/yaml/-/yaml-2.1.1.tgz", - "integrity": "sha512-o96x3OPo8GjWeSLF+wOAbrPfhFOGY0W00GNaxCDv+9hkcDJEnev1yh8S7pgHF0ik6zc8sQLuL8hjHjJULZp8bw==" + "slash": { + "version": "5.1.0", + "resolved": "https://registry.npmmirror.com/slash/-/slash-5.1.0.tgz", + "integrity": "sha512-ZA6oR3T/pEyuqwMgAKT0/hAv8oAXckzbkmR0UkUosQ+Mc4RxGoJkRmwHgHufaenlyAgE1Mxgpdcrf75y6XcnDg==" } } }, "markdownlint-cli2-formatter-default": { - "version": "0.0.3", - "resolved": "https://registry.npmmirror.com/markdownlint-cli2-formatter-default/-/markdownlint-cli2-formatter-default-0.0.3.tgz", - "integrity": "sha512-QEAJitT5eqX1SNboOD+SO/LNBpu4P4je8JlR02ug2cLQAqmIhh8IJnSK7AcaHBHhNADqdGydnPpQOpsNcEEqCw==", + "version": "0.0.5", + "resolved": "https://registry.npmmirror.com/markdownlint-cli2-formatter-default/-/markdownlint-cli2-formatter-default-0.0.5.tgz", + "integrity": "sha512-4XKTwQ5m1+Txo2kuQ3Jgpo/KmnG+X90dWt4acufg6HVGadTUG5hzHF/wssp9b5MBYOMCnZ9RMPaU//uHsszF8Q==", "requires": {} }, + "markdownlint-micromark": { + "version": "0.1.10", + "resolved": "https://registry.npmmirror.com/markdownlint-micromark/-/markdownlint-micromark-0.1.10.tgz", + "integrity": "sha512-no5ZfdqAdWGxftCLlySHSgddEjyW4kui4z7amQcGsSKfYC5v/ou+8mIQVyg9KQMeEZLNtz9OPDTj7nnTnoR4FQ==" + }, "markdownlint-rule-enhanced-proper-names": { "version": "0.0.1", "resolved": "https://registry.npmmirror.com/markdownlint-rule-enhanced-proper-names/-/markdownlint-rule-enhanced-proper-names-0.0.1.tgz", @@ -8023,9 +8086,9 @@ "integrity": "sha512-dn6wd0uw5GsdswPFfsgMp5NSB0/aDe6fK94YJV/AJDYXL6HVLWBsxeq7js7Ad+mU2K9LAlwpk6kN2D5mwCPVow==" }, "mdurl": { - "version": "1.0.1", - "resolved": "https://registry.npmmirror.com/mdurl/-/mdurl-1.0.1.tgz", - "integrity": "sha512-/sKlQJCBYVY9Ers9hqzKou4H6V5UWc/M59TH2dvkt+84itfnq7uFOMLpOiOS4ujvHP4etln18fmIxA5R5fll0g==" + "version": "2.0.0", + "resolved": "https://registry.npmmirror.com/mdurl/-/mdurl-2.0.0.tgz", + "integrity": "sha512-Lf+9+2r+Tdp5wXDXC4PcIBjTDtq4UKjCPMQhKIuzpJNW0b96kVqSwW0bT7FhRSfmAiFYgP+SCRvdrDozfh0U5w==" }, "merge-stream": { "version": "2.0.0", @@ -8250,11 +8313,11 @@ "integrity": "sha512-DCfg/T8fcrhrRKTPjRrw/5LLvdGV7BHySf/1LOZx7TzWZdYRjogNtyNq885z3nNallwr3QUKARjqvHqX1/7t+w==" }, "micromatch": { - "version": "4.0.5", - "resolved": "https://registry.npmmirror.com/micromatch/-/micromatch-4.0.5.tgz", - "integrity": "sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA==", + "version": "4.0.8", + "resolved": "https://registry.npmmirror.com/micromatch/-/micromatch-4.0.8.tgz", + "integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==", "requires": { - "braces": "^3.0.2", + "braces": "^3.0.3", "picomatch": "^2.3.1" } }, @@ -8373,9 +8436,9 @@ "integrity": "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==" }, "picocolors": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.0.1.tgz", - "integrity": 
"sha512-anP1Z8qwhkbmu7MFP5iTt+wQKXgwzf7zTyGlcdzabySa9vd0Xt392U0rVmz9poOaBj0uHJKyyo9/upk0HrEQew==" + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.0.tgz", + "integrity": "sha512-TQ92mBOW0l3LeMeyLV6mzy/kWr8lkd/hp3mTg7wYK7zJhuBStmGMBG0BdeDZS/dZx1IukaX6Bk11zcln25o1Aw==" }, "picomatch": { "version": "2.3.1", @@ -8400,6 +8463,11 @@ "resolved": "https://registry.npmmirror.com/punycode/-/punycode-2.1.1.tgz", "integrity": "sha512-XRsRjdf+j5ml+y/6GKHPZbrF/8p2Yga0JPtdqTIY2Xe5ohJPD9saDJJLPvp9+NSBprVvevdXZybnj2cv8OEd0A==" }, + "punycode.js": { + "version": "2.3.1", + "resolved": "https://registry.npmmirror.com/punycode.js/-/punycode.js-2.3.1.tgz", + "integrity": "sha512-uxFIHU0YlHYhDQtV4R9J6a52SLx28BCjT+4ieh7IGbgwVJWO+km431c4yRlREUAsAmt/uMjQUyQHNEPf0M39CA==" + }, "queue-microtask": { "version": "1.2.3", "resolved": "https://registry.npmmirror.com/queue-microtask/-/queue-microtask-1.2.3.tgz", @@ -8729,11 +8797,6 @@ "resolved": "https://registry.npmmirror.com/strip-bom-string/-/strip-bom-string-1.0.0.tgz", "integrity": "sha512-uCC2VHvQRYu+lMh4My/sFNmF2klFymLX1wHJeXnbEJERpV/ZsVuonzerjfrGpIGF7LBVa1O7i9kjiWvJiFck8g==" }, - "strip-json-comments": { - "version": "5.0.0", - "resolved": "https://registry.npmmirror.com/strip-json-comments/-/strip-json-comments-5.0.0.tgz", - "integrity": "sha512-V1LGY4UUo0jgwC+ELQ2BNWfPa17TIuwBLg+j1AA/9RPzKINl1lhxVEu2r+ZTTO8aetIsUzE5Qj6LMSBkoGYKKw==" - }, "supports-color": { "version": "7.2.0", "resolved": "https://registry.npmmirror.com/supports-color/-/supports-color-7.2.0.tgz", @@ -8772,9 +8835,9 @@ "integrity": "sha512-GNzQvQTOIP6RyTfE2Qxb8ZVlNmw0n88vp1szwWRimP02mnTsx3Wtn5qRdqY9w2XduFNUgvOwhNnQsjwCp+kqaQ==" }, "terser": { - "version": "5.31.6", - "resolved": "https://registry.npmjs.org/terser/-/terser-5.31.6.tgz", - "integrity": "sha512-PQ4DAriWzKj+qgehQ7LK5bQqCFNMmlhjR2PFFLuqGCpuCAauxemVBWwWOxo3UIwWQx8+Pr61Df++r76wDmkQBg==", + "version": "5.34.0", + "resolved": "https://registry.npmjs.org/terser/-/terser-5.34.0.tgz", + "integrity": "sha512-y5NUX+U9HhVsK/zihZwoq4r9dICLyV2jXGOriDAVOeKhq3LKVjgJbGO90FisozXLlJfvjHqgckGmJFBb9KYoWQ==", "requires": { "@jridgewell/source-map": "^0.3.3", "acorn": "^8.8.2", @@ -8813,7 +8876,7 @@ }, "to-regex-range": { "version": "5.0.1", - "resolved": "https://registry.npmmirror.com/to-regex-range/-/to-regex-range-5.0.1.tgz", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", "requires": { "is-number": "^7.0.0" @@ -8844,9 +8907,9 @@ "integrity": "sha512-/aCDEGatGvZ2BIk+HmLf4ifCJFwvKFNb9/JeZPMulfgFracn9QFcAf5GO8B/mweUjSoblS5In0cWhqpfs/5PQA==" }, "uc.micro": { - "version": "1.0.6", - "resolved": "https://registry.npmmirror.com/uc.micro/-/uc.micro-1.0.6.tgz", - "integrity": "sha512-8Y75pvTYkLJW2hWQHXxoqRgV7qb9B+9vFEtidML+7koHUFapnVJAZ6cKs+Qjz5Aw3aZWHMC6u0wJE3At+nSGwA==" + "version": "2.1.0", + "resolved": "https://registry.npmmirror.com/uc.micro/-/uc.micro-2.1.0.tgz", + "integrity": "sha512-ARDJmphmdvUk6Glw7y9DQ2bFkKBHwQHLi2lsaH6PPmz/Ka9sFOBsBluozhDltWmnv9u/cF6Rt87znRTPV+yp/A==" }, "unicode-canonical-property-names-ecmascript": { "version": "2.0.0", @@ -8872,6 +8935,11 @@ "resolved": "https://registry.npmmirror.com/unicode-property-aliases-ecmascript/-/unicode-property-aliases-ecmascript-2.1.0.tgz", "integrity": "sha512-6t3foTQI9qne+OZoVQB/8x8rk2k1eVy1gRXhV3oFQ5T6R1dqQ1xtin3XqSlx3+ATBkliTaR/hHyJBm+LVPNM8w==" }, + "unicorn-magic": { + "version": "0.1.0", + "resolved": 
"https://registry.npmmirror.com/unicorn-magic/-/unicorn-magic-0.1.0.tgz", + "integrity": "sha512-lRfVq8fE8gz6QMBuDM6a+LO3IAzTi05H6gCVaUpir2E1Rwpo4ZUog45KpNXKC/Mn3Yb9UDuHumeFTo9iV/D9FQ==" + }, "unified": { "version": "10.1.2", "resolved": "https://registry.npmmirror.com/unified/-/unified-10.1.2.tgz", @@ -8988,9 +9056,9 @@ } }, "yaml": { - "version": "2.1.3", - "resolved": "https://registry.npmmirror.com/yaml/-/yaml-2.1.3.tgz", - "integrity": "sha512-AacA8nRULjKMX2DvWvOAdBZMOfQlypSFkjcOcu9FalllIDJ1kvlREzcdIZmidQUqqeMv7jorHjq2HlLv/+c2lg==" + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/yaml/-/yaml-2.5.1.tgz", + "integrity": "sha512-bLQOjaX/ADgQ20isPJRvF0iRUHIxVhYvr53Of7wGcWlO2jvtUlH5m87DsmulFVxRpNLOnI4tB6p/oh8D7kpn9Q==" } } }, @@ -9153,9 +9221,9 @@ } }, "webpack": { - "version": "5.94.0", - "resolved": "https://registry.npmjs.org/webpack/-/webpack-5.94.0.tgz", - "integrity": "sha512-KcsGn50VT+06JH/iunZJedYGUJS5FGjow8wb9c0v5n1Om8O1g4L6LjtfxwlXIATopoQu+vOXXa7gYisWxCoPyg==", + "version": "5.95.0", + "resolved": "https://registry.npmjs.org/webpack/-/webpack-5.95.0.tgz", + "integrity": "sha512-2t3XstrKULz41MNMBF+cJ97TyHdyQ8HCt//pqErqDvNjU9YQBnZxIHa11VXsi7F3mb5/aO2tuDxdeTPdU7xu9Q==", "requires": { "@types/estree": "^1.0.5", "@webassemblyjs/ast": "^1.12.1", diff --git a/package.json b/package.json index 3e37fb950e..fc7006087b 100644 --- a/package.json +++ b/package.json @@ -12,7 +12,7 @@ "markdown-lint-fix": "./node_modules/.bin/markdownlint-cli2-fix './docs/**/*.md'" }, "dependencies": { - "markdownlint-cli2": "^0.5.1", + "markdownlint-cli2": "^0.14.0", "markdownlint-rule-enhanced-proper-names": "^0.0.1", "markdownlint-rule-no-trailing-slash-in-links": "^0.0.1", "remark-cli": "^11.0.0", diff --git a/pkg/common/common.go b/pkg/common/common.go deleted file mode 100644 index 1021f7bd63..0000000000 --- a/pkg/common/common.go +++ /dev/null @@ -1,83 +0,0 @@ -/* - Copyright 2024 Juicedata Inc - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package common - -const ( - // DriverName to be registered - CSINodeLabelKey = "app" - CSINodeLabelValue = "juicefs-csi-node" - PodTypeKey = "app.kubernetes.io/name" - PodTypeValue = "juicefs-mount" - PodUniqueIdLabelKey = "volume-id" - PodJuiceHashLabelKey = "juicefs-hash" - Finalizer = "juicefs.com/finalizer" - JuiceFSUUID = "juicefs-uuid" - UniqueId = "juicefs-uniqueid" - CleanCache = "juicefs-clean-cache" - MountContainerName = "jfs-mount" - JobTypeValue = "juicefs-job" - JfsInsideContainer = "JFS_INSIDE_CONTAINER" - - // CSI Secret - ProvisionerSecretName = "csi.storage.k8s.io/provisioner-secret-name" - ProvisionerSecretNamespace = "csi.storage.k8s.io/provisioner-secret-namespace" - PublishSecretName = "csi.storage.k8s.io/node-publish-secret-name" - PublishSecretNamespace = "csi.storage.k8s.io/node-publish-secret-namespace" - ControllerExpandSecretName = "csi.storage.k8s.io/controller-expand-secret-name" - ControllerExpandSecretNamespace = "csi.storage.k8s.io/controller-expand-secret-namespace" - - // webhook - WebhookName = "juicefs-admission-webhook" - True = "true" - False = "false" - inject = ".juicefs.com/inject" - injectSidecar = ".sidecar" + inject - InjectSidecarDone = "done" + injectSidecar - InjectSidecarDisable = "disable" + injectSidecar - - // config in pv - MountPodCpuLimitKey = "juicefs/mount-cpu-limit" - MountPodMemLimitKey = "juicefs/mount-memory-limit" - MountPodCpuRequestKey = "juicefs/mount-cpu-request" - MountPodMemRequestKey = "juicefs/mount-memory-request" - MountPodLabelKey = "juicefs/mount-labels" - MountPodAnnotationKey = "juicefs/mount-annotations" - MountPodServiceAccount = "juicefs/mount-service-account" - MountPodImageKey = "juicefs/mount-image" - DeleteDelay = "juicefs/mount-delete-delay" - CleanCacheKey = "juicefs/clean-cache" - CachePVC = "juicefs/mount-cache-pvc" - CacheEmptyDir = "juicefs/mount-cache-emptydir" - CacheInlineVolume = "juicefs/mount-cache-inline-volume" - MountPodHostPath = "juicefs/host-path" - - // DeleteDelayTimeKey mount pod annotation - DeleteDelayTimeKey = "juicefs-delete-delay" - DeleteDelayAtKey = "juicefs-delete-at" - - // default value - DefaultMountPodCpuLimit = "2000m" - DefaultMountPodMemLimit = "5Gi" - DefaultMountPodCpuRequest = "1000m" - DefaultMountPodMemRequest = "1Gi" - - // secret labels - JuicefsSecretLabelKey = "juicefs/secret" - - PodInfoName = "csi.storage.k8s.io/pod.name" - PodInfoNamespace = "csi.storage.k8s.io/pod.namespace" -) diff --git a/pkg/config/config.go b/pkg/config/config.go index a2f69e9578..3c7fcf87a0 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -17,7 +17,6 @@ limitations under the License. 
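The hunks below fold the constants of the deleted pkg/common package back into pkg/config, so every caller in this patch swaps its import accordingly. A minimal caller-side sketch, assuming the exported names stay exactly as declared in this hunk:

```go
package main

import (
	"fmt"

	"github.com/juicedata/juicefs-csi-driver/pkg/config"
)

func main() {
	// Select CSI mount pods by the labels consolidated into pkg/config.
	selector := map[string]string{
		config.PodTypeKey: config.PodTypeValue, // "app.kubernetes.io/name": "juicefs-mount"
	}
	fmt.Println(selector)
}
```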
package config import ( - "context" "encoding/json" "fmt" "hash/fnv" @@ -34,8 +33,6 @@ import ( "k8s.io/klog/v2" corev1 "k8s.io/api/core/v1" - - k8s "github.com/juicedata/juicefs-csi-driver/pkg/k8sclient" ) var ( @@ -78,13 +75,77 @@ var ( JfsMountPath = "/sbin/mount.juicefs" DefaultClientConfPath = "/root/.juicefs" ROConfPath = "/etc/juicefs" - ShutdownSockPath = "/tmp/juicefs-csi-shutdown.sock" - JfsFuseFdPathName = "jfs-fuse-fd" DefaultCEMountImage = "juicedata/mount:ce-nightly" // mount pod ce image, override by ENV DefaultEEMountImage = "juicedata/mount:ee-nightly" // mount pod ee image, override by ENV ) +const ( + // DriverName to be registered + CSINodeLabelKey = "app" + CSINodeLabelValue = "juicefs-csi-node" + PodTypeKey = "app.kubernetes.io/name" + PodTypeValue = "juicefs-mount" + PodUniqueIdLabelKey = "volume-id" + PodJuiceHashLabelKey = "juicefs-hash" + Finalizer = "juicefs.com/finalizer" + JuiceFSUUID = "juicefs-uuid" + UniqueId = "juicefs-uniqueid" + CleanCache = "juicefs-clean-cache" + MountContainerName = "jfs-mount" + JobTypeValue = "juicefs-job" + JfsInsideContainer = "JFS_INSIDE_CONTAINER" + + // CSI Secret + ProvisionerSecretName = "csi.storage.k8s.io/provisioner-secret-name" + ProvisionerSecretNamespace = "csi.storage.k8s.io/provisioner-secret-namespace" + PublishSecretName = "csi.storage.k8s.io/node-publish-secret-name" + PublishSecretNamespace = "csi.storage.k8s.io/node-publish-secret-namespace" + ControllerExpandSecretName = "csi.storage.k8s.io/controller-expand-secret-name" + ControllerExpandSecretNamespace = "csi.storage.k8s.io/controller-expand-secret-namespace" + + // webhook + WebhookName = "juicefs-admission-webhook" + True = "true" + False = "false" + inject = ".juicefs.com/inject" + injectSidecar = ".sidecar" + inject + InjectSidecarDone = "done" + injectSidecar + InjectSidecarDisable = "disable" + injectSidecar + + // config in pv + MountPodCpuLimitKey = "juicefs/mount-cpu-limit" + MountPodMemLimitKey = "juicefs/mount-memory-limit" + MountPodCpuRequestKey = "juicefs/mount-cpu-request" + MountPodMemRequestKey = "juicefs/mount-memory-request" + mountPodLabelKey = "juicefs/mount-labels" + mountPodAnnotationKey = "juicefs/mount-annotations" + mountPodServiceAccount = "juicefs/mount-service-account" + mountPodImageKey = "juicefs/mount-image" + deleteDelay = "juicefs/mount-delete-delay" + cleanCache = "juicefs/clean-cache" + cachePVC = "juicefs/mount-cache-pvc" + cacheEmptyDir = "juicefs/mount-cache-emptydir" + cacheInlineVolume = "juicefs/mount-cache-inline-volume" + mountPodHostPath = "juicefs/host-path" + + // DeleteDelayTimeKey mount pod annotation + DeleteDelayTimeKey = "juicefs-delete-delay" + DeleteDelayAtKey = "juicefs-delete-at" + + // default value + DefaultMountPodCpuLimit = "2000m" + DefaultMountPodMemLimit = "5Gi" + DefaultMountPodCpuRequest = "1000m" + DefaultMountPodMemRequest = "1Gi" + + // secret labels + JuicefsSecretLabelKey = "juicefs/secret" + + PodInfoName = "csi.storage.k8s.io/pod.name" + PodInfoNamespace = "csi.storage.k8s.io/pod.namespace" +) + var interVolumesPrefix = []string{ "rsa-key", "init-config", @@ -358,37 +419,6 @@ func LoadConfig(configPath string) error { return err } -func LoadFromConfigMap(ctx context.Context, client *k8s.K8sClient) error { - cmName := os.Getenv("JUICEFS_CONFIG_NAME") - if cmName == "" { - cmName = "juicefs-csi-driver-config" - } - sysNamespace := os.Getenv("SYS_NAMESPACE") - if sysNamespace == "" { - sysNamespace = "kube-system" - } - cm, err := client.GetConfigMap(ctx, cmName, sysNamespace) - if err != nil 
{ - return err - } - - cfg := newCfg() - - // compatible with old version - if os.Getenv("ENABLE_NODE_SELECTOR") == "1" { - cfg.EnableNodeSelector = true - } - - err = cfg.Unmarshal([]byte(cm.Data["config.yaml"])) - if err != nil { - return err - } - - GlobalConfig = cfg - log.V(1).Info("config loaded", "global config", *GlobalConfig) - return err -} - // ConfigReloader reloads config file when it is updated func StartConfigReloader(configPath string) error { // load first diff --git a/pkg/config/setting.go b/pkg/config/setting.go index edb7c97eb0..104235eda0 100644 --- a/pkg/config/setting.go +++ b/pkg/config/setting.go @@ -20,6 +20,7 @@ import ( "context" "encoding/json" "fmt" + "path/filepath" "strconv" "strings" "time" @@ -30,7 +31,6 @@ import ( "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/util/yaml" - "github.com/juicedata/juicefs-csi-driver/pkg/common" "github.com/juicedata/juicefs-csi-driver/pkg/k8sclient" "github.com/juicedata/juicefs-csi-driver/pkg/util" "github.com/juicedata/juicefs-csi-driver/pkg/util/security" @@ -217,10 +217,10 @@ func ParseSetting(secrets, volCtx map[string]string, options []string, usePod bo jfsSetting.SubPath = volCtx["subPath"] } - if volCtx[common.CleanCacheKey] == "true" { + if volCtx[cleanCache] == "true" { jfsSetting.CleanCache = true } - delay := volCtx[common.DeleteDelay] + delay := volCtx[deleteDelay] if delay != "" { if _, err := time.ParseDuration(delay); err != nil { return nil, fmt.Errorf("can't parse delay time %s", delay) @@ -229,8 +229,8 @@ func ParseSetting(secrets, volCtx map[string]string, options []string, usePod bo } var hostPaths []string - if volCtx[common.MountPodHostPath] != "" { - for _, v := range strings.Split(volCtx[common.MountPodHostPath], ",") { + if volCtx[mountPodHostPath] != "" { + for _, v := range strings.Split(volCtx[mountPodHostPath], ",") { p := strings.TrimSpace(v) if p != "" { hostPaths = append(hostPaths, strings.TrimSpace(v)) @@ -256,8 +256,8 @@ func genCacheDirs(jfsSetting *JfsSetting, volCtx map[string]string) error { cacheDirsInContainer := []string{} var err error // parse pvc of cache - if volCtx != nil && volCtx[common.CachePVC] != "" { - cachePVCs := strings.Split(strings.TrimSpace(volCtx[common.CachePVC]), ",") + if volCtx != nil && volCtx[cachePVC] != "" { + cachePVCs := strings.Split(strings.TrimSpace(volCtx[cachePVC]), ",") for i, pvc := range cachePVCs { if pvc == "" { continue @@ -272,10 +272,10 @@ func genCacheDirs(jfsSetting *JfsSetting, volCtx map[string]string) error { } // parse emptydir of cache if volCtx != nil { - if _, ok := volCtx[common.CacheEmptyDir]; ok { + if _, ok := volCtx[cacheEmptyDir]; ok { volPath := "/var/jfsCache-emptyDir" cacheDirsInContainer = append(cacheDirsInContainer, volPath) - cacheEmptyDirs := strings.Split(strings.TrimSpace(volCtx[common.CacheEmptyDir]), ":") + cacheEmptyDirs := strings.Split(strings.TrimSpace(volCtx[cacheEmptyDir]), ":") var ( medium string sizeLimit string @@ -301,9 +301,9 @@ func genCacheDirs(jfsSetting *JfsSetting, volCtx map[string]string) error { } // parse inline volume of cache if volCtx != nil { - if _, ok := volCtx[common.CacheInlineVolume]; ok { + if _, ok := volCtx[cacheInlineVolume]; ok { inlineVolumes := []*corev1.CSIVolumeSource{} - err = json.Unmarshal([]byte(volCtx[common.CacheInlineVolume]), &inlineVolumes) + err = json.Unmarshal([]byte(volCtx[cacheInlineVolume]), &inlineVolumes) if err != nil { return fmt.Errorf("parse cache inline volume error: %v", err) } @@ -425,22 +425,22 @@ func GenPodAttrWithCfg(setting 
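The genCacheDirs changes above keep the established volume-context conventions: juicefs/mount-cache-pvc holds a comma-separated PVC list, while juicefs/mount-cache-emptydir holds a colon-separated medium:sizeLimit pair. A sketch of the emptyDir split, assuming the one- and two-field forms are the only valid inputs:

```go
package main

import (
	"fmt"
	"strings"
)

// parseCacheEmptyDir mirrors the "medium:sizeLimit" split that genCacheDirs
// applies to the juicefs/mount-cache-emptydir value; a sketch, not the
// driver's actual helper.
func parseCacheEmptyDir(spec string) (medium, sizeLimit string, err error) {
	fields := strings.Split(strings.TrimSpace(spec), ":")
	switch len(fields) {
	case 1:
		return fields[0], "", nil
	case 2:
		return fields[0], fields[1], nil
	default:
		return "", "", fmt.Errorf("invalid cache emptydir spec %q", spec)
	}
}

func main() {
	m, s, _ := parseCacheEmptyDir("Memory:1Gi")
	fmt.Println(m, s) // Memory 1Gi
}
```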
*JfsSetting, volCtx map[string]string) error { } if volCtx != nil { - if v, ok := volCtx[common.MountPodImageKey]; ok && v != "" { + if v, ok := volCtx[mountPodImageKey]; ok && v != "" { attr.Image = v } - if v, ok := volCtx[common.MountPodServiceAccount]; ok && v != "" { + if v, ok := volCtx[mountPodServiceAccount]; ok && v != "" { attr.ServiceAccountName = v } - cpuLimit := volCtx[common.MountPodCpuLimitKey] - memoryLimit := volCtx[common.MountPodMemLimitKey] - cpuRequest := volCtx[common.MountPodCpuRequestKey] - memoryRequest := volCtx[common.MountPodMemRequestKey] + cpuLimit := volCtx[MountPodCpuLimitKey] + memoryLimit := volCtx[MountPodMemLimitKey] + cpuRequest := volCtx[MountPodCpuRequestKey] + memoryRequest := volCtx[MountPodMemRequestKey] attr.Resources, err = ParsePodResources(cpuLimit, memoryLimit, cpuRequest, memoryRequest) if err != nil { log.Error(err, "Parse resource error") return err } - if v, ok := volCtx[common.MountPodLabelKey]; ok && v != "" { + if v, ok := volCtx[mountPodLabelKey]; ok && v != "" { ctxLabel := make(map[string]string) if err := parseYamlOrJson(v, &ctxLabel); err != nil { return err @@ -449,7 +449,7 @@ func GenPodAttrWithCfg(setting *JfsSetting, volCtx map[string]string) error { attr.Labels[k] = v } } - if v, ok := volCtx[common.MountPodAnnotationKey]; ok && v != "" { + if v, ok := volCtx[mountPodAnnotationKey]; ok && v != "" { ctxAnno := make(map[string]string) if err := parseYamlOrJson(v, &ctxAnno); err != nil { return err @@ -501,19 +501,7 @@ func GenPodAttrWithMountPod(ctx context.Context, client *k8sclient.K8sClient, mo for k, v := range mountPod.Annotations { attr.Annotations[k] = v } - pvName := mountPod.Annotations[common.UniqueId] - - // in `STORAGE_CLASS_SHARE_MOUNT` mode, the uniqueId is the storageClass name - // parse mountpod ref annotation to get the real pv name - // maybe has multiple pv, we need to get the first one - if StorageClassShareMount { - for _, target := range mountPod.Annotations { - if v := getPVNameFromTarget(target); v != "" { - pvName = v - break - } - } - } + pvName := mountPod.Annotations[UniqueId] pv, err := client.GetPersistentVolume(ctx, pvName) if err != nil { log.Error(err, "Get pv error", "pv", pvName) @@ -524,28 +512,23 @@ func GenPodAttrWithMountPod(ctx context.Context, client *k8sclient.K8sClient, mo log.Error(err, "Get pvc error", "namespace", pv.Spec.ClaimRef.Namespace, "name", pv.Spec.ClaimRef.Name) return nil, err } - cpuLimit := pvc.Annotations[common.MountPodCpuLimitKey] - memoryLimit := pvc.Annotations[common.MountPodMemLimitKey] - cpuRequest := pvc.Annotations[common.MountPodCpuRequestKey] - memoryRequest := pvc.Annotations[common.MountPodMemRequestKey] + cpuLimit := pvc.Annotations[MountPodCpuLimitKey] + memoryLimit := pvc.Annotations[MountPodMemLimitKey] + cpuRequest := pvc.Annotations[MountPodCpuRequestKey] + memoryRequest := pvc.Annotations[MountPodMemRequestKey] resources, err := ParsePodResources(cpuLimit, memoryLimit, cpuRequest, memoryRequest) if err != nil { return nil, fmt.Errorf("parse pvc resources error: %v", err) } attr.Resources = resources - mntPath, _, err := util.GetMountPathOfPod(*mountPod) - if err != nil { - return nil, err - } - setting := &JfsSetting{ IsCe: IsCEMountPod(mountPod), PV: pv, PVC: pvc, - Name: mountPod.Annotations[common.JuiceFSUUID], - VolumeId: mountPod.Annotations[common.UniqueId], + Name: mountPod.Annotations[JuiceFSUUID], + VolumeId: mountPod.Annotations[UniqueId], + MountPath: filepath.Join(PodMountBase, pvName) + mountPod.Name[len(mountPod.Name)-7:], Options: 
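The volume-context keys consumed above come from the PV's csi.volumeAttributes; label and annotation values accept either YAML or JSON because both go through parseYamlOrJson. An illustrative map, with made-up values:

```go
package main

import "fmt"

func main() {
	// Illustrative csi.volumeAttributes consumed by GenPodAttrWithCfg; the
	// key strings match the constants in pkg/config, the values are examples.
	volCtx := map[string]string{
		"juicefs/mount-image":        "juicedata/mount:ce-nightly",
		"juicefs/mount-cpu-limit":    "1",
		"juicefs/mount-memory-limit": "2Gi",
		"juicefs/mount-labels":       `{"team": "storage"}`, // YAML ("team: storage") works too
	}
	fmt.Println(volCtx)
}
```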
pv.Spec.MountOptions, - MountPath: mntPath, } if v, ok := pv.Spec.CSI.VolumeAttributes["subPath"]; ok && v != "" { setting.SubPath = v @@ -568,15 +551,15 @@ func ParseAppInfo(volCtx map[string]string) (*AppInfo, error) { return nil, err } if _, err := kc.GetNodeRunningPods(); err != nil { - if volCtx == nil || volCtx[common.PodInfoName] == "" { + if volCtx == nil || volCtx[PodInfoName] == "" { return nil, fmt.Errorf("can not connect to kubelet, please turn `podInfoOnMount` on in csiDriver, and fallback to apiServer") } } } if volCtx != nil { return &AppInfo{ - Name: volCtx[common.PodInfoName], - Namespace: volCtx[common.PodInfoNamespace], + Name: volCtx[PodInfoName], + Namespace: volCtx[PodInfoNamespace], }, nil } return nil, nil @@ -658,10 +641,10 @@ func ParsePodResources(cpuLimit, memoryLimit, cpuRequest, memoryRequest string) podLimit := map[corev1.ResourceName]resource.Quantity{} podRequest := map[corev1.ResourceName]resource.Quantity{} // set default value - podLimit[corev1.ResourceCPU] = resource.MustParse(common.DefaultMountPodCpuLimit) - podLimit[corev1.ResourceMemory] = resource.MustParse(common.DefaultMountPodMemLimit) - podRequest[corev1.ResourceCPU] = resource.MustParse(common.DefaultMountPodCpuRequest) - podRequest[corev1.ResourceMemory] = resource.MustParse(common.DefaultMountPodMemRequest) + podLimit[corev1.ResourceCPU] = resource.MustParse(DefaultMountPodCpuLimit) + podLimit[corev1.ResourceMemory] = resource.MustParse(DefaultMountPodMemLimit) + podRequest[corev1.ResourceCPU] = resource.MustParse(DefaultMountPodCpuRequest) + podRequest[corev1.ResourceMemory] = resource.MustParse(DefaultMountPodMemRequest) var err error if cpuLimit != "" { if podLimit[corev1.ResourceCPU], err = resource.ParseQuantity(cpuLimit); err != nil { @@ -708,12 +691,12 @@ func ParsePodResources(cpuLimit, memoryLimit, cpuRequest, memoryRequest string) func getDefaultResource() corev1.ResourceRequirements { return corev1.ResourceRequirements{ Limits: corev1.ResourceList{ - corev1.ResourceCPU: resource.MustParse(common.DefaultMountPodCpuLimit), - corev1.ResourceMemory: resource.MustParse(common.DefaultMountPodMemLimit), + corev1.ResourceCPU: resource.MustParse(DefaultMountPodCpuLimit), + corev1.ResourceMemory: resource.MustParse(DefaultMountPodMemLimit), }, Requests: corev1.ResourceList{ - corev1.ResourceCPU: resource.MustParse(common.DefaultMountPodCpuRequest), - corev1.ResourceMemory: resource.MustParse(common.DefaultMountPodMemRequest), + corev1.ResourceCPU: resource.MustParse(DefaultMountPodCpuRequest), + corev1.ResourceMemory: resource.MustParse(DefaultMountPodMemRequest), }, } } @@ -774,17 +757,3 @@ func IsCEMountPod(pod *corev1.Pod) bool { } return false } - -func getPVNameFromTarget(target string) string { - pair := strings.Split(target, "volumes/kubernetes.io~csi") - if len(pair) != 2 { - return "" - } - - pvName := strings.TrimPrefix(pair[1], "/") - index := strings.Index(pvName, "/") - if index <= 0 { - return "" - } - return pvName[:index] -} diff --git a/pkg/config/setting_test.go b/pkg/config/setting_test.go index fc780177b6..fb410f3e28 100644 --- a/pkg/config/setting_test.go +++ b/pkg/config/setting_test.go @@ -24,8 +24,6 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" - - "github.com/juicedata/juicefs-csi-driver/pkg/common" ) var ( @@ -51,12 +49,12 @@ func TestParseSecret(t *testing.T) { } defaultResource := corev1.ResourceRequirements{ Limits: corev1.ResourceList{ - corev1.ResourceCPU: resource.MustParse(common.DefaultMountPodCpuLimit), - 
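ParsePodResources above seeds every field with the package defaults and only then overlays what the PVC or volume context sets, so a partial spec never loses the remaining defaults. A compact sketch of the same pattern, with the default strings taken from DefaultMountPodCpuLimit and DefaultMountPodMemLimit:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// limitsWithDefaults shows the defaults-then-override pattern used by
// ParsePodResources; a sketch, not the driver's code.
func limitsWithDefaults(cpuLimit, memLimit string) (corev1.ResourceList, error) {
	limits := corev1.ResourceList{
		corev1.ResourceCPU:    resource.MustParse("2000m"), // DefaultMountPodCpuLimit
		corev1.ResourceMemory: resource.MustParse("5Gi"),   // DefaultMountPodMemLimit
	}
	for name, val := range map[corev1.ResourceName]string{
		corev1.ResourceCPU:    cpuLimit,
		corev1.ResourceMemory: memLimit,
	} {
		if val == "" {
			continue // keep the default
		}
		q, err := resource.ParseQuantity(val)
		if err != nil {
			return nil, err
		}
		limits[name] = q
	}
	return limits, nil
}

func main() {
	l, _ := limitsWithDefaults("1", "")
	fmt.Printf("cpu=%v mem=%v\n", l.Cpu(), l.Memory())
}
```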
corev1.ResourceMemory: resource.MustParse(common.DefaultMountPodMemLimit), + corev1.ResourceCPU: resource.MustParse(DefaultMountPodCpuLimit), + corev1.ResourceMemory: resource.MustParse(DefaultMountPodMemLimit), }, Requests: corev1.ResourceList{ - corev1.ResourceCPU: resource.MustParse(common.DefaultMountPodCpuRequest), - corev1.ResourceMemory: resource.MustParse(common.DefaultMountPodMemRequest), + corev1.ResourceCPU: resource.MustParse(DefaultMountPodCpuRequest), + corev1.ResourceMemory: resource.MustParse(DefaultMountPodMemRequest), }, } @@ -167,7 +165,7 @@ func TestParseSecret(t *testing.T) { name: "test-cpu-limit", args: args{ secrets: map[string]string{"name": "test", "storage": "s3"}, - volCtx: map[string]string{common.MountPodCpuLimitKey: "1"}, + volCtx: map[string]string{MountPodCpuLimitKey: "1"}, usePod: true, }, want: &JfsSetting{ @@ -184,7 +182,7 @@ func TestParseSecret(t *testing.T) { Resources: corev1.ResourceRequirements{ Limits: map[corev1.ResourceName]resource.Quantity{ corev1.ResourceCPU: resource.MustParse("1"), - corev1.ResourceMemory: resource.MustParse(common.DefaultMountPodMemLimit), + corev1.ResourceMemory: resource.MustParse(DefaultMountPodMemLimit), }, Requests: defaultResource.Requests, }, @@ -200,7 +198,7 @@ func TestParseSecret(t *testing.T) { name: "test-mem-limit", args: args{ secrets: map[string]string{"name": "test", "storage": "s3"}, - volCtx: map[string]string{common.MountPodMemLimitKey: "1G"}, + volCtx: map[string]string{MountPodMemLimitKey: "1G"}, usePod: true, }, want: &JfsSetting{ @@ -217,7 +215,7 @@ func TestParseSecret(t *testing.T) { Resources: corev1.ResourceRequirements{ Limits: map[corev1.ResourceName]resource.Quantity{ corev1.ResourceMemory: resource.MustParse("1G"), - corev1.ResourceCPU: resource.MustParse(common.DefaultMountPodCpuLimit), + corev1.ResourceCPU: resource.MustParse(DefaultMountPodCpuLimit), }, Requests: defaultResource.Requests, }, @@ -233,7 +231,7 @@ func TestParseSecret(t *testing.T) { name: "test-mem-request", args: args{ secrets: map[string]string{"name": "test", "storage": "s3"}, - volCtx: map[string]string{common.MountPodMemRequestKey: "1G"}, + volCtx: map[string]string{MountPodMemRequestKey: "1G"}, usePod: true, }, want: &JfsSetting{ @@ -250,7 +248,7 @@ func TestParseSecret(t *testing.T) { Resources: corev1.ResourceRequirements{ Requests: map[corev1.ResourceName]resource.Quantity{ corev1.ResourceMemory: resource.MustParse("1G"), - corev1.ResourceCPU: resource.MustParse(common.DefaultMountPodCpuRequest), + corev1.ResourceCPU: resource.MustParse(DefaultMountPodCpuRequest), }, Limits: defaultResource.Limits, }, @@ -266,7 +264,7 @@ func TestParseSecret(t *testing.T) { name: "test-cpu-request", args: args{ secrets: map[string]string{"name": "test"}, - volCtx: map[string]string{common.MountPodCpuRequestKey: "1"}, + volCtx: map[string]string{MountPodCpuRequestKey: "1"}, }, want: &JfsSetting{ Name: "test", @@ -280,7 +278,7 @@ func TestParseSecret(t *testing.T) { Resources: corev1.ResourceRequirements{ Requests: map[corev1.ResourceName]resource.Quantity{ corev1.ResourceCPU: resource.MustParse("1"), - corev1.ResourceMemory: resource.MustParse(common.DefaultMountPodMemRequest), + corev1.ResourceMemory: resource.MustParse(DefaultMountPodMemRequest), }, Limits: defaultResource.Limits, }, @@ -296,7 +294,7 @@ func TestParseSecret(t *testing.T) { name: "test-labels", args: args{ secrets: map[string]string{"name": "test"}, - volCtx: map[string]string{common.MountPodLabelKey: "a: b"}, + volCtx: map[string]string{mountPodLabelKey: "a: b"}, 
}, want: &JfsSetting{ Name: "test", @@ -321,7 +319,7 @@ func TestParseSecret(t *testing.T) { name: "test-labels-error", args: args{ secrets: map[string]string{"name": "test"}, - volCtx: map[string]string{common.MountPodLabelKey: "-"}, + volCtx: map[string]string{mountPodLabelKey: "-"}, }, wantErr: true, }, @@ -329,7 +327,7 @@ func TestParseSecret(t *testing.T) { name: "test-labels-json", args: args{ secrets: map[string]string{"name": "test"}, - volCtx: map[string]string{common.MountPodLabelKey: "{\"a\": \"b\"}"}, + volCtx: map[string]string{mountPodLabelKey: "{\"a\": \"b\"}"}, }, want: &JfsSetting{ Name: "test", @@ -354,7 +352,7 @@ func TestParseSecret(t *testing.T) { name: "test-annotation", args: args{ secrets: map[string]string{"name": "test"}, - volCtx: map[string]string{common.MountPodAnnotationKey: "a: b"}, + volCtx: map[string]string{mountPodAnnotationKey: "a: b"}, }, want: &JfsSetting{ Name: "test", @@ -379,7 +377,7 @@ func TestParseSecret(t *testing.T) { name: "test-annotation-error", args: args{ secrets: map[string]string{"name": "test"}, - volCtx: map[string]string{common.MountPodAnnotationKey: "-"}, + volCtx: map[string]string{mountPodAnnotationKey: "-"}, }, want: nil, wantErr: true, @@ -388,7 +386,7 @@ func TestParseSecret(t *testing.T) { name: "test-serviceaccount", args: args{ secrets: map[string]string{"name": "test", "storage": "s3"}, - volCtx: map[string]string{common.MountPodServiceAccount: "test"}, + volCtx: map[string]string{mountPodServiceAccount: "test"}, usePod: true, }, want: &JfsSetting{ @@ -547,7 +545,7 @@ func TestParseSecret(t *testing.T) { name: "specify mount image", args: args{ secrets: map[string]string{"configs": "a: b", "name": "test"}, - volCtx: map[string]string{common.MountPodImageKey: "abc"}, + volCtx: map[string]string{mountPodImageKey: "abc"}, }, want: &JfsSetting{ Name: "test", @@ -571,7 +569,7 @@ func TestParseSecret(t *testing.T) { name: "specify host path", args: args{ secrets: map[string]string{"name": "test"}, - volCtx: map[string]string{common.MountPodHostPath: "/abc"}, + volCtx: map[string]string{mountPodHostPath: "/abc"}, }, want: &JfsSetting{ Name: "test", @@ -1035,44 +1033,3 @@ func Test_ParseFormatOptions(t *testing.T) { }) } } -func Test_getPVNameFromTarget(t *testing.T) { - tests := []struct { - name string - target string - want string - }{ - { - name: "valid target", - target: "/var/lib/kubelet/pods/abc/volumes/kubernetes.io~csi/pv-name/mount", - want: "pv-name", - }, - { - name: "invalid target - no csi", - target: "/var/lib/kubelet/pods/abc/volumes/kubernetes.io~other/pv-name/mount", - want: "", - }, - { - name: "invalid target - no pv name", - target: "/var/lib/kubelet/pods/abc/volumes/kubernetes.io~csi//mount", - want: "", - }, - { - name: "invalid target", - target: "xxxxxx", - want: "", - }, - { - name: "empty string", - target: "", - want: "", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := getPVNameFromTarget(tt.target); got != tt.want { - t.Errorf("getPVNameFromTarget() = %v, want %v", got, tt.want) - } - }) - } -} diff --git a/pkg/controller/app_controller.go b/pkg/controller/app_controller.go index 3f9b4e2f36..0804c32a84 100644 --- a/pkg/controller/app_controller.go +++ b/pkg/controller/app_controller.go @@ -19,7 +19,6 @@ package controller import ( "context" "strings" - "time" corev1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" @@ -32,7 +31,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" - 
"github.com/juicedata/juicefs-csi-driver/pkg/common" + "github.com/juicedata/juicefs-csi-driver/pkg/config" "github.com/juicedata/juicefs-csi-driver/pkg/k8sclient" "github.com/juicedata/juicefs-csi-driver/pkg/util" ) @@ -50,7 +49,7 @@ func NewAppController(client *k8sclient.K8sClient) *AppController { } func (a *AppController) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { - appCtrlLog.V(1).Info("Receive pod", "name", request.Name, "namespace", request.Namespace) + appCtrlLog.V(1).Info("Receive pod %s %s", "name", request.Name, "namespace", request.Namespace) pod, err := a.K8sClient.GetPod(ctx, request.Name, request.Namespace) if err != nil && !k8serrors.IsNotFound(err) { appCtrlLog.Error(err, "get pod error", "name", request.Name) @@ -65,42 +64,18 @@ func (a *AppController) Reconcile(ctx context.Context, request reconcile.Request appCtrlLog.V(1).Info("pod should not in queue", "name", request.Name) return reconcile.Result{}, nil } - - // get a last terminated container finsh time - // if the time is more than 5 minutes, kill the fuse process - var appContainerExitedTime time.Time - for _, containerStatus := range pod.Status.ContainerStatuses { - if !strings.Contains(containerStatus.Name, common.MountContainerName) { - if containerStatus.State.Terminated != nil { - if containerStatus.State.Terminated.FinishedAt.After(appContainerExitedTime) { - appContainerExitedTime = containerStatus.State.Terminated.FinishedAt.Time - } - } - } - } - - if !appContainerExitedTime.IsZero() && time.Since(appContainerExitedTime) > 5*time.Minute { - appCtrlLog.V(1).Info("app container exited more than 5 minutes, kill the mount process, app pod will enter an error phase") - err = a.killFuseProcesss(ctx, pod) - if err != nil { - appCtrlLog.Error(err, "kill fuse process error", "name", request.Name) - return reconcile.Result{}, err - } - return reconcile.Result{}, nil - } - // umount fuse sidecars - err = a.umountFuseSidecars(ctx, pod) + err = a.umountFuseSidecars(pod) if err != nil { return reconcile.Result{}, err } - return reconcile.Result{RequeueAfter: 5 * time.Minute}, nil + return reconcile.Result{}, nil } -func (a *AppController) umountFuseSidecars(ctx context.Context, pod *corev1.Pod) (err error) { +func (a *AppController) umountFuseSidecars(pod *corev1.Pod) (err error) { for _, cn := range pod.Spec.Containers { - if strings.Contains(cn.Name, common.MountContainerName) { - if e := a.umountFuseSidecar(ctx, pod, cn); e != nil { + if strings.Contains(cn.Name, config.MountContainerName) { + if e := a.umountFuseSidecar(pod, cn); e != nil { return e } } @@ -108,7 +83,7 @@ func (a *AppController) umountFuseSidecars(ctx context.Context, pod *corev1.Pod) return } -func (a *AppController) umountFuseSidecar(ctx context.Context, pod *corev1.Pod, fuseContainer corev1.Container) (err error) { +func (a *AppController) umountFuseSidecar(pod *corev1.Pod, fuseContainer corev1.Container) (err error) { if fuseContainer.Name == "" { return } @@ -116,13 +91,13 @@ func (a *AppController) umountFuseSidecar(ctx context.Context, pod *corev1.Pod, // get prestop if fuseContainer.Lifecycle == nil || fuseContainer.Lifecycle.PreStop == nil || fuseContainer.Lifecycle.PreStop.Exec == nil { - log.Info("no prestop in container of pod", "cnName", common.MountContainerName) + log.Info("no prestop in container of pod", "cnName", config.MountContainerName) return nil } cmd := fuseContainer.Lifecycle.PreStop.Exec.Command - log.Info("exec cmd in container of pod", "command", cmd, "cnName", 
common.MountContainerName) - stdout, stderr, err := a.K8sClient.ExecuteInContainer(ctx, pod.Name, pod.Namespace, fuseContainer.Name, cmd) + log.Info("exec cmd in container of pod", "command", cmd, "cnName", config.MountContainerName) + stdout, stderr, err := a.K8sClient.ExecuteInContainer(pod.Name, pod.Namespace, fuseContainer.Name, cmd) if err != nil { if strings.Contains(stderr, "not mounted") || strings.Contains(stderr, "mountpoint not found") || @@ -138,33 +113,6 @@ func (a *AppController) umountFuseSidecar(ctx context.Context, pod *corev1.Pod, } return } - -func (a *AppController) killFuseProcesss(ctx context.Context, pod *corev1.Pod) error { - for _, cn := range pod.Spec.Containers { - if strings.Contains(cn.Name, common.MountContainerName) { - if e := a.killFuseProcess(ctx, pod, cn); e != nil { - return e - } - } - } - return nil -} - -func (a *AppController) killFuseProcess(ctx context.Context, pod *corev1.Pod, fuseContainer corev1.Container) error { - if fuseContainer.Name == "" { - return nil - } - log := klog.NewKlogr().WithName("app-ctrl").WithValues("pod", pod.Name, "namespace", pod.Namespace) - cmd := []string{"sh", "-c", "pkill mount.juicefs"} - log.Info("exec cmd in container of pod", "command", cmd, "cnName", common.MountContainerName) - stdout, stderr, err := a.K8sClient.ExecuteInContainer(ctx, pod.Name, pod.Namespace, fuseContainer.Name, cmd) - if err != nil { - return err - } - log.Info("exec cmd result", "stdout", stdout, "stderr", stderr) - return err -} - func (a *AppController) SetupWithManager(mgr ctrl.Manager) error { c, err := controller.New("app", mgr, controller.Options{Reconciler: a}) if err != nil { @@ -229,7 +177,7 @@ func ShouldInQueue(pod *corev1.Pod) bool { log := klog.NewKlogr().WithName("app-ctrl").WithValues("pod", pod.Name, "namespace", pod.Namespace) // ignore if it's not fluid label pod - if util.CheckExpectValue(pod.Labels, common.InjectSidecarDisable, common.True) { + if util.CheckExpectValue(pod.Labels, config.InjectSidecarDisable, config.True) { log.V(1).Info("Sidecar inject disabled in pod in labels, skip.", "labels", pod.Labels) return false } @@ -243,7 +191,7 @@ func ShouldInQueue(pod *corev1.Pod) bool { // ignore if no fuse container exist := false for _, cn := range pod.Spec.Containers { - if strings.Contains(cn.Name, common.MountContainerName) { + if strings.Contains(cn.Name, config.MountContainerName) { exist = true break } @@ -261,7 +209,7 @@ func ShouldInQueue(pod *corev1.Pod) bool { // reconcile if all app containers exit 0 and fuse container not exit for _, containerStatus := range pod.Status.ContainerStatuses { - if !strings.Contains(containerStatus.Name, common.MountContainerName) { + if !strings.Contains(containerStatus.Name, config.MountContainerName) { log.V(1).Info("container status", "container", containerStatus.Name, "status", containerStatus) if containerStatus.State.Terminated == nil { log.V(1).Info("container not exited", "container", containerStatus.Name) @@ -269,7 +217,7 @@ func ShouldInQueue(pod *corev1.Pod) bool { return false } } - if strings.Contains(containerStatus.Name, common.MountContainerName) { + if strings.Contains(containerStatus.Name, config.MountContainerName) { if containerStatus.State.Running == nil { log.V(1).Info("juicefs fuse client in pod not running") return false diff --git a/pkg/controller/app_controller_test.go b/pkg/controller/app_controller_test.go index 2044a5e796..8331ed3c72 100644 --- a/pkg/controller/app_controller_test.go +++ b/pkg/controller/app_controller_test.go @@ -17,7 +17,6 @@ package 
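ShouldInQueue above only admits a pod when sidecar injection is enabled, a jfs-mount container exists, every app container has terminated, and the fuse container is still running. A condensed sketch of the final status scan:

```go
package sketch

import (
	"strings"

	corev1 "k8s.io/api/core/v1"
)

// readyToUnmount condenses the container-status scan in ShouldInQueue:
// reconcile only when all app containers have exited and the jfs-mount
// sidecar is still running. "jfs-mount" is MountContainerName above.
func readyToUnmount(statuses []corev1.ContainerStatus) bool {
	for _, cs := range statuses {
		if strings.Contains(cs.Name, "jfs-mount") {
			if cs.State.Running == nil {
				return false // fuse client already gone, nothing to unmount
			}
			continue
		}
		if cs.State.Terminated == nil {
			return false // an app container is still running
		}
	}
	return true
}
```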
controller import ( - ctx "context" "fmt" "reflect" "testing" @@ -30,7 +29,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/record" - "github.com/juicedata/juicefs-csi-driver/pkg/common" + "github.com/juicedata/juicefs-csi-driver/pkg/config" "github.com/juicedata/juicefs-csi-driver/pkg/k8sclient" ) @@ -61,7 +60,7 @@ func Test_shouldRequeue(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "test", Labels: map[string]string{ - common.InjectSidecarDone: common.True, + config.InjectSidecarDone: config.True, }, }, Spec: corev1.PodSpec{RestartPolicy: corev1.RestartPolicyAlways}, @@ -76,7 +75,7 @@ func Test_shouldRequeue(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "test", Labels: map[string]string{ - common.InjectSidecarDone: common.True, + config.InjectSidecarDone: config.True, }, }, Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "app"}}}, @@ -90,9 +89,9 @@ func Test_shouldRequeue(t *testing.T) { pod: &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "test", - Labels: map[string]string{common.InjectSidecarDone: common.True}, + Labels: map[string]string{config.InjectSidecarDone: config.True}, }, - Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "app"}, {Name: common.MountContainerName + "-0"}}}, + Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "app"}, {Name: config.MountContainerName + "-0"}}}, Status: corev1.PodStatus{ContainerStatuses: []corev1.ContainerStatus{ { Name: "app", @@ -103,7 +102,7 @@ func Test_shouldRequeue(t *testing.T) { }, }, { - Name: common.MountContainerName + "-0", + Name: config.MountContainerName + "-0", State: corev1.ContainerState{ Running: &corev1.ContainerStateRunning{ StartedAt: metav1.Time{Time: time.Now()}, @@ -121,9 +120,9 @@ func Test_shouldRequeue(t *testing.T) { pod: &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "test", - Labels: map[string]string{common.InjectSidecarDone: common.True}, + Labels: map[string]string{config.InjectSidecarDone: config.True}, }, - Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "app"}, {Name: common.MountContainerName + "-0"}}}, + Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "app"}, {Name: config.MountContainerName + "-0"}}}, Status: corev1.PodStatus{ContainerStatuses: []corev1.ContainerStatus{ { Name: "app", @@ -135,7 +134,7 @@ func Test_shouldRequeue(t *testing.T) { }, }, { - Name: common.MountContainerName + "-0", + Name: config.MountContainerName + "-0", State: corev1.ContainerState{ Terminated: &corev1.ContainerStateTerminated{ StartedAt: metav1.Time{Time: time.Now()}, @@ -154,9 +153,9 @@ func Test_shouldRequeue(t *testing.T) { pod: &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "test", - Labels: map[string]string{common.InjectSidecarDone: common.True}, + Labels: map[string]string{config.InjectSidecarDone: config.True}, }, - Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "app"}, {Name: common.MountContainerName + "-0"}}}, + Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "app"}, {Name: config.MountContainerName + "-0"}}}, Status: corev1.PodStatus{ Phase: corev1.PodRunning, ContainerStatuses: []corev1.ContainerStatus{ @@ -170,7 +169,7 @@ func Test_shouldRequeue(t *testing.T) { }, }, { - Name: common.MountContainerName + "-0", + Name: config.MountContainerName + "-0", State: corev1.ContainerState{ Running: &corev1.ContainerStateRunning{ StartedAt: metav1.Time{Time: time.Now()}, @@ -188,9 +187,9 @@ func Test_shouldRequeue(t *testing.T) { pod: &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "test", 
- Labels: map[string]string{common.InjectSidecarDone: common.True}, + Labels: map[string]string{config.InjectSidecarDone: config.True}, }, - Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "app"}, {Name: "app2"}, {Name: common.MountContainerName + "-0"}}}, + Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "app"}, {Name: "app2"}, {Name: config.MountContainerName + "-0"}}}, Status: corev1.PodStatus{ Phase: corev1.PodRunning, ContainerStatuses: []corev1.ContainerStatus{ @@ -209,7 +208,7 @@ func Test_shouldRequeue(t *testing.T) { }}, }, { - Name: common.MountContainerName + "-0", + Name: config.MountContainerName + "-0", State: corev1.ContainerState{ Running: &corev1.ContainerStateRunning{ StartedAt: metav1.Time{Time: time.Now()}, @@ -226,9 +225,9 @@ func Test_shouldRequeue(t *testing.T) { pod: &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "test", - Labels: map[string]string{common.InjectSidecarDone: common.True}, + Labels: map[string]string{config.InjectSidecarDone: config.True}, }, - Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "app"}, {Name: "app2"}, {Name: common.MountContainerName + "-0"}}}, + Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "app"}, {Name: "app2"}, {Name: config.MountContainerName + "-0"}}}, Status: corev1.PodStatus{ContainerStatuses: []corev1.ContainerStatus{ { Name: "app", @@ -248,7 +247,7 @@ func Test_shouldRequeue(t *testing.T) { }, }, { - Name: common.MountContainerName + "-0", + Name: config.MountContainerName + "-0", State: corev1.ContainerState{ Running: &corev1.ContainerStateRunning{ StartedAt: metav1.Time{Time: time.Now()}, @@ -265,9 +264,9 @@ func Test_shouldRequeue(t *testing.T) { pod: &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "test", - Labels: map[string]string{common.InjectSidecarDone: common.True}, + Labels: map[string]string{config.InjectSidecarDone: config.True}, }, - Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "app"}, {Name: common.MountContainerName + "-0"}}}, + Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "app"}, {Name: config.MountContainerName + "-0"}}}, Status: corev1.PodStatus{ Phase: corev1.PodPending, ContainerStatuses: []corev1.ContainerStatus{}}, @@ -289,7 +288,7 @@ func TestAppController_umountFuseSidecars(t *testing.T) { Convey("Test umountFuseSidecars", t, func() { Convey("exec pod cmd normal", func() { client := &k8sclient.K8sClient{} - patch1 := ApplyMethod(reflect.TypeOf(client), "ExecuteInContainer", func(_ *k8sclient.K8sClient, c ctx.Context, podName, namespace, containerName string, cmd []string) (stdout string, stderr string, err error) { + patch1 := ApplyMethod(reflect.TypeOf(client), "ExecuteInContainer", func(_ *k8sclient.K8sClient, podName, namespace, containerName string, cmd []string) (stdout string, stderr string, err error) { return "", "", nil }) defer patch1.Reset() @@ -326,7 +325,7 @@ func TestAppController_umountFuseSidecars(t *testing.T) { ObjectMeta: metav1.ObjectMeta{Name: "test"}, Spec: corev1.PodSpec{ Containers: []corev1.Container{{ - Name: common.MountContainerName + "-0", + Name: config.MountContainerName + "-0", Lifecycle: &corev1.Lifecycle{ PreStop: &corev1.Handler{ Exec: &corev1.ExecAction{Command: []string{"umount"}}, @@ -346,7 +345,7 @@ func TestAppController_umountFuseSidecars(t *testing.T) { Spec: corev1.PodSpec{ Containers: []corev1.Container{ { - Name: common.MountContainerName + "-0", + Name: config.MountContainerName + "-0", Lifecycle: &corev1.Lifecycle{ PreStop: &corev1.Handler{ Exec: &corev1.ExecAction{Command: 
[]string{"umount"}}, @@ -354,7 +353,7 @@ func TestAppController_umountFuseSidecars(t *testing.T) { }, }, { - Name: common.MountContainerName + "-1", + Name: config.MountContainerName + "-1", Lifecycle: &corev1.Lifecycle{ PreStop: &corev1.Handler{ Exec: &corev1.ExecAction{Command: []string{"umount"}}, @@ -373,7 +372,7 @@ func TestAppController_umountFuseSidecars(t *testing.T) { a := &AppController{ K8sClient: client, } - if err := a.umountFuseSidecars(ctx.TODO(), tt.args.pod); (err != nil) != tt.wantErr { + if err := a.umountFuseSidecars(tt.args.pod); (err != nil) != tt.wantErr { t.Errorf("umountFuseSidecars() error = %v, wantErr %v", err, tt.wantErr) } }) @@ -381,7 +380,7 @@ func TestAppController_umountFuseSidecars(t *testing.T) { }) Convey("exec pod cmd error", func() { client := &k8sclient.K8sClient{} - patch1 := ApplyMethod(reflect.TypeOf(client), "ExecuteInContainer", func(_ *k8sclient.K8sClient, c ctx.Context, podName, namespace, containerName string, cmd []string) (stdout string, stderr string, err error) { + patch1 := ApplyMethod(reflect.TypeOf(client), "ExecuteInContainer", func(_ *k8sclient.K8sClient, podName, namespace, containerName string, cmd []string) (stdout string, stderr string, err error) { return "", "", fmt.Errorf("exec error") }) defer patch1.Reset() @@ -418,7 +417,7 @@ func TestAppController_umountFuseSidecars(t *testing.T) { ObjectMeta: metav1.ObjectMeta{Name: "test"}, Spec: corev1.PodSpec{ Containers: []corev1.Container{{ - Name: common.MountContainerName + "-0", + Name: config.MountContainerName + "-0", Lifecycle: &corev1.Lifecycle{ PreStop: &corev1.Handler{ Exec: &corev1.ExecAction{Command: []string{"umount"}}, @@ -438,7 +437,7 @@ func TestAppController_umountFuseSidecars(t *testing.T) { Spec: corev1.PodSpec{ Containers: []corev1.Container{ { - Name: common.MountContainerName + "-0", + Name: config.MountContainerName + "-0", Lifecycle: &corev1.Lifecycle{ PreStop: &corev1.Handler{ Exec: &corev1.ExecAction{Command: []string{"umount"}}, @@ -446,7 +445,7 @@ func TestAppController_umountFuseSidecars(t *testing.T) { }, }, { - Name: common.MountContainerName + "-1", + Name: config.MountContainerName + "-1", Lifecycle: &corev1.Lifecycle{ PreStop: &corev1.Handler{ Exec: &corev1.ExecAction{Command: []string{"umount"}}, @@ -465,7 +464,7 @@ func TestAppController_umountFuseSidecars(t *testing.T) { a := &AppController{ K8sClient: client, } - if err := a.umountFuseSidecars(ctx.TODO(), tt.args.pod); (err != nil) != tt.wantErr { + if err := a.umountFuseSidecars(tt.args.pod); (err != nil) != tt.wantErr { t.Errorf("umountFuseSidecars() error = %v, wantErr %v", err, tt.wantErr) } }) diff --git a/pkg/controller/job_controller.go b/pkg/controller/job_controller.go index 35865205df..a2705cbd16 100644 --- a/pkg/controller/job_controller.go +++ b/pkg/controller/job_controller.go @@ -32,7 +32,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" - "github.com/juicedata/juicefs-csi-driver/pkg/common" "github.com/juicedata/juicefs-csi-driver/pkg/config" "github.com/juicedata/juicefs-csi-driver/pkg/k8sclient" "github.com/juicedata/juicefs-csi-driver/pkg/util/resource" @@ -86,14 +85,14 @@ func (m *JobController) Reconcile(ctx context.Context, request reconcile.Request needRecycled := false // check csi node exist or not labelSelector := metav1.LabelSelector{ - MatchLabels: map[string]string{common.CSINodeLabelKey: common.CSINodeLabelValue}, + MatchLabels: map[string]string{config.CSINodeLabelKey: config.CSINodeLabelValue}, } fieldSelector := 
fields.Set{ "spec.nodeName": nodeName, } csiPods, err := m.ListPod(ctx, config.Namespace, &labelSelector, &fieldSelector) if err != nil { - jobCtrlLog.Error(err, "list pod by label and field error", "label", common.CSINodeLabelValue, "node", nodeName) + jobCtrlLog.Error(err, "list pod by label and field error", "label", config.CSINodeLabelValue, "node", nodeName) return reconcile.Result{}, err } if len(csiPods) == 0 { @@ -103,7 +102,7 @@ func (m *JobController) Reconcile(ctx context.Context, request reconcile.Request // if csi node not exist, or job should be recycled itself, delete it if needRecycled || resource.IsJobShouldBeRecycled(job) { - jobCtrlLog.Info("recycle job", "name", job.Name) + jobCtrlLog.Info("recycle job", "name", job.Name) err = m.DeleteJob(ctx, job.Name, job.Namespace) if err != nil { jobCtrlLog.Error(err, "delete job error", "name", job.Name) diff --git a/pkg/controller/mount_controller.go b/pkg/controller/mount_controller.go index e5f5f9ab52..37b5178850 100644 --- a/pkg/controller/mount_controller.go +++ b/pkg/controller/mount_controller.go @@ -32,7 +32,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" - "github.com/juicedata/juicefs-csi-driver/pkg/common" "github.com/juicedata/juicefs-csi-driver/pkg/config" "github.com/juicedata/juicefs-csi-driver/pkg/k8sclient" "github.com/juicedata/juicefs-csi-driver/pkg/util" @@ -68,7 +67,7 @@ func (m MountController) Reconcile(ctx context.Context, request reconcile.Reques mountCtrlLog.V(1).Info("pod is not deleted", "name", mountPod.Name) return reconcile.Result{}, nil } - if !util.ContainsString(mountPod.GetFinalizers(), common.Finalizer) { + if !util.ContainsString(mountPod.GetFinalizers(), config.Finalizer) { // do nothing return reconcile.Result{}, nil } @@ -76,14 +75,14 @@ func (m MountController) Reconcile(ctx context.Context, request reconcile.Reques // check csi node exist or not nodeName := mountPod.Spec.NodeName labelSelector := metav1.LabelSelector{ - MatchLabels: map[string]string{common.CSINodeLabelKey: common.CSINodeLabelValue}, + MatchLabels: map[string]string{config.CSINodeLabelKey: config.CSINodeLabelValue}, } fieldSelector := fields.Set{ "spec.nodeName": nodeName, } csiPods, err := m.ListPod(ctx, config.Namespace, &labelSelector, &fieldSelector) if err != nil { - mountCtrlLog.Error(err, "list pod by label and field error", "labels", common.CSINodeLabelValue, "node", nodeName) + mountCtrlLog.Error(err, "list pod by label and field error", "labels", config.CSINodeLabelValue, "node", nodeName) return reconcile.Result{}, err } if len(csiPods) > 0 { @@ -93,7 +92,7 @@ func (m MountController) Reconcile(ctx context.Context, request reconcile.Reques mountCtrlLog.Info("csi node did not exist.
remove finalizer of pod", "node", nodeName, "name", mountPod.Name) // remove finalizer - err = resource.RemoveFinalizer(ctx, m.K8sClient, mountPod, common.Finalizer) + err = resource.RemoveFinalizer(ctx, m.K8sClient, mountPod, config.Finalizer) if err != nil { mountCtrlLog.Error(err, "remove finalizer of pod error", "name", mountPod.Name) } @@ -116,7 +115,7 @@ func (m *MountController) SetupWithManager(mgr ctrl.Manager) error { mountCtrlLog.V(1).Info("pod is not deleted", "name", pod.Name) return false } - if !util.ContainsString(pod.GetFinalizers(), common.Finalizer) { + if !util.ContainsString(pod.GetFinalizers(), config.Finalizer) { return false } return true @@ -144,7 +143,7 @@ func (m *MountController) SetupWithManager(mgr ctrl.Manager) error { mountCtrlLog.V(1).Info("pod is not deleted", "name", podNew.Name) return false } - if !util.ContainsString(podNew.GetFinalizers(), common.Finalizer) { + if !util.ContainsString(podNew.GetFinalizers(), config.Finalizer) { return false } return true @@ -157,7 +156,7 @@ func (m *MountController) SetupWithManager(mgr ctrl.Manager) error { mountCtrlLog.V(1).Info("pod is not deleted", "name", pod.Name) return false } - if !util.ContainsString(pod.GetFinalizers(), common.Finalizer) { + if !util.ContainsString(pod.GetFinalizers(), config.Finalizer) { // do nothing return false } diff --git a/pkg/controller/mountinfo.go b/pkg/controller/mountinfo.go index 01d24c3428..c7179f3fd5 100644 --- a/pkg/controller/mountinfo.go +++ b/pkg/controller/mountinfo.go @@ -65,7 +65,7 @@ func (mit *mountInfoTable) setPodsStatus(podList *corev1.PodList) { if pod.DeletionTimestamp != nil { deleted = true } - miLog.V(2).Info("set pod deleted status", "name", pod.Name, "deleted status", deleted) + miLog.V(1).Info("set pod deleted status", "name", pod.Name, "deleted status", deleted) mit.deletedPods[string(pod.UID)] = deleted } } @@ -223,10 +223,6 @@ func (ti *targetItem) check(ctx context.Context, mounted bool) { ti.err = err } - if err.Error() == "function timeout" { - ti.status = targetStatusCorrupt - return - } corrupted := k8sMount.IsCorruptedMnt(err) if corrupted { ti.status = targetStatusCorrupt diff --git a/pkg/controller/pod_controller.go b/pkg/controller/pod_controller.go index 19d3ca6f41..64e2332b8f 100644 --- a/pkg/controller/pod_controller.go +++ b/pkg/controller/pod_controller.go @@ -35,7 +35,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" - "github.com/juicedata/juicefs-csi-driver/pkg/common" "github.com/juicedata/juicefs-csi-driver/pkg/config" "github.com/juicedata/juicefs-csi-driver/pkg/k8sclient" "github.com/juicedata/juicefs-csi-driver/pkg/util" @@ -80,7 +79,7 @@ func (m *PodController) Reconcile(ctx context.Context, request reconcile.Request labelSelector := metav1.LabelSelector{ MatchExpressions: []metav1.LabelSelectorRequirement{{ - Key: common.UniqueId, + Key: config.UniqueId, Operator: metav1.LabelSelectorOpExists, }}, } @@ -105,9 +104,9 @@ func (m *PodController) Reconcile(ctx context.Context, request reconcile.Request podCtrlLog.Error(err, "Driver check pod error", "podName", mountPod.Name) return reconcile.Result{}, err } - if mountPod.Annotations[common.DeleteDelayAtKey] != "" { + if mountPod.Annotations[config.DeleteDelayAtKey] != "" { // if mount pod set delay deleted, requeue after delay time - delayAtStr := mountPod.Annotations[common.DeleteDelayAtKey] + delayAtStr := mountPod.Annotations[config.DeleteDelayAtKey] delayAt, err := util.GetTime(delayAtStr) if err != nil { return 
reconcile.Result{}, err diff --git a/pkg/controller/pod_driver.go b/pkg/controller/pod_driver.go index 722eaa6f5d..6cca8f9a8f 100644 --- a/pkg/controller/pod_driver.go +++ b/pkg/controller/pod_driver.go @@ -20,7 +20,6 @@ import ( "context" "fmt" "os" - "path" "runtime" "strings" "time" @@ -28,15 +27,13 @@ import ( corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/klog/v2" "k8s.io/utils/mount" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "github.com/juicedata/juicefs-csi-driver/pkg/common" "github.com/juicedata/juicefs-csi-driver/pkg/config" - "github.com/juicedata/juicefs-csi-driver/pkg/fuse/passfd" + "github.com/juicedata/juicefs-csi-driver/pkg/fuse" podmount "github.com/juicedata/juicefs-csi-driver/pkg/juicefs/mount" "github.com/juicedata/juicefs-csi-driver/pkg/juicefs/mount/builder" "github.com/juicedata/juicefs-csi-driver/pkg/k8sclient" @@ -74,7 +71,6 @@ func newPodDriver(client *k8sclient.K8sClient, mounter mount.SafeFormatAndMount) driver.handlers[podError] = driver.podErrorHandler driver.handlers[podPending] = driver.podPendingHandler driver.handlers[podDeleted] = driver.podDeletedHandler - driver.handlers[podComplete] = driver.podCompleteHandler return driver } @@ -86,11 +82,10 @@ type Result struct { } const ( - podReady podStatus = "podReady" - podError podStatus = "podError" - podDeleted podStatus = "podDeleted" - podPending podStatus = "podPending" - podComplete podStatus = "podComplete" + podReady podStatus = "podReady" + podError podStatus = "podError" + podDeleted podStatus = "podDeleted" + podPending podStatus = "podPending" ) func (p *PodDriver) SetMountInfo(mit mountInfoTable) { @@ -134,9 +129,6 @@ func getPodStatus(pod *corev1.Pod) podStatus { if pod.DeletionTimestamp != nil { return podDeleted } - if resource.IsPodComplete(pod) { - return podComplete - } if resource.IsPodError(pod) { return podError } @@ -152,7 +144,7 @@ func getPodStatus(pod *corev1.Pod) podStatus { func (p *PodDriver) checkAnnotations(ctx context.Context, pod *corev1.Pod) error { log := util.GenLog(ctx, podDriverLog, "") // check refs in mount pod, the corresponding pod exists or not - hashVal := pod.Labels[common.PodJuiceHashLabelKey] + hashVal := pod.Labels[config.PodJuiceHashLabelKey] if hashVal == "" { return fmt.Errorf("pod %s/%s has no hash label", pod.Namespace, pod.Name) } @@ -177,8 +169,8 @@ func (p *PodDriver) checkAnnotations(ctx context.Context, pod *corev1.Pod) error } } - if existTargets != 0 && pod.Annotations[common.DeleteDelayAtKey] != "" { - delAnnotations = append(delAnnotations, common.DeleteDelayAtKey) + if existTargets != 0 && pod.Annotations[config.DeleteDelayAtKey] != "" { + delAnnotations = append(delAnnotations, config.DeleteDelayAtKey) } if len(delAnnotations) != 0 { // check mount pod reference key, if it is not the latest, return conflict @@ -233,67 +225,13 @@ func (p *PodDriver) checkAnnotations(ctx context.Context, pod *corev1.Pod) error return nil } -func (p *PodDriver) podCompleteHandler(ctx context.Context, pod *corev1.Pod) (Result, error) { - if pod == nil { - return Result{}, nil - } - log := util.GenLog(ctx, podDriverLog, "podCompleteHandler") - hashVal := pod.Labels[common.PodJuiceHashLabelKey] - if hashVal == "" { - return Result{}, fmt.Errorf("pod %s/%s has no hash label", pod.Namespace, pod.Name) - } - lock := config.GetPodLock(hashVal) - lock.Lock() - defer lock.Unlock() - - needCreate, 
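With podComplete dropped above, getPodStatus classifies a mount pod in strict precedence: deletion first, then error, then the ready/pending split, so a terminating pod is never retried as an error. A sketch of that ordering; the final ready check is assumed, since this hunk elides it:

```go
package sketch

import corev1 "k8s.io/api/core/v1"

type podStatus string

const (
	podReady   podStatus = "podReady"
	podError   podStatus = "podError"
	podDeleted podStatus = "podDeleted"
	podPending podStatus = "podPending"
)

// classify sketches getPodStatus after this diff: deletion outranks error,
// which outranks readiness. isPodError and isPodReady stand in for the
// resource-package helpers, the latter being assumed from context.
func classify(pod *corev1.Pod, isPodError, isPodReady func(*corev1.Pod) bool) podStatus {
	switch {
	case pod.DeletionTimestamp != nil:
		return podDeleted
	case isPodError(pod):
		return podError
	case isPodReady(pod):
		return podReady
	default:
		return podPending
	}
}
```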
err := p.needCreateMountPod(ctx, pod.Labels[common.PodUniqueIdLabelKey], hashVal) - if err != nil { - return Result{}, err - } - if needCreate { - newPodName := podmount.GenPodNameByUniqueId(pod.Labels[common.PodUniqueIdLabelKey], true) - log.Info("need to create a new one", "newPodName", newPodName) - newPod, err := p.newMountPod(ctx, pod, newPodName) - if err != nil { - return Result{}, err - } - // get sid - sid := passfd.GlobalFds.GetSid(hashVal) - if sid != 0 { - env := []corev1.EnvVar{} - oldEnv := newPod.Spec.Containers[0].Env - for _, v := range oldEnv { - if v.Name != "_JFS_META_SID" { - env = append(env, v) - } - } - env = append(env, corev1.EnvVar{ - Name: "_JFS_META_SID", - Value: fmt.Sprintf("%d", sid), - }) - newPod.Spec.Containers[0].Env = env - } - - _, err = p.Client.CreatePod(ctx, newPod) - if err != nil { - log.Error(err, "Create pod") - return Result{}, err - } - } - - // delete the old one - log.Info("delete the old complete mount pod") - err = p.Client.DeletePod(ctx, pod) - return Result{}, err -} - // podErrorHandler handles mount pod error status func (p *PodDriver) podErrorHandler(ctx context.Context, pod *corev1.Pod) (Result, error) { if pod == nil { return Result{}, nil } log := util.GenLog(ctx, podDriverLog, "podErrorHandler") - hashVal := pod.Labels[common.PodJuiceHashLabelKey] + hashVal := pod.Labels[config.PodJuiceHashLabelKey] if hashVal == "" { return Result{}, fmt.Errorf("pod %s/%s has no hash label", pod.Namespace, pod.Name) } @@ -306,7 +244,7 @@ func (p *PodDriver) podErrorHandler(ctx context.Context, pod *corev1.Pod) (Resul log.Info("Pod failed because of resource.") if resource.IsPodHasResource(*pod) { // if pod is failed because of resource, delete resource and deploy pod again. - _ = resource.RemoveFinalizer(ctx, p.Client, pod, common.Finalizer) + _ = resource.RemoveFinalizer(ctx, p.Client, pod, config.Finalizer) log.Info("Delete it and deploy again with no resource.") if err := p.Client.DeletePod(ctx, pod); err != nil { log.Error(err, "delete pod err") @@ -346,7 +284,7 @@ func (p *PodDriver) podErrorHandler(ctx context.Context, pod *corev1.Pod) (Resul }, Spec: pod.Spec, } - controllerutil.AddFinalizer(newPod, common.Finalizer) + controllerutil.AddFinalizer(newPod, config.Finalizer) resource.DeleteResourceOfPod(newPod) err := mkrMp(ctx, *newPod) if err != nil { @@ -374,14 +312,14 @@ func (p *PodDriver) podDeletedHandler(ctx context.Context, pod *corev1.Pod) (Res log.Info("Pod is to be deleted.") // pod with no finalizer - if !util.ContainsString(pod.GetFinalizers(), common.Finalizer) { + if !util.ContainsString(pod.GetFinalizers(), config.Finalizer) { log.V(1).Info("Pod has no finalizer, skip deleting") // do nothing return Result{}, nil } // remove finalizer of pod - if err := resource.RemoveFinalizer(ctx, p.Client, pod, common.Finalizer); err != nil { + if err := resource.RemoveFinalizer(ctx, p.Client, pod, config.Finalizer); err != nil { log.Error(err, "remove pod finalizer error") return Result{}, err } @@ -395,7 +333,7 @@ func (p *PodDriver) podDeletedHandler(ctx context.Context, pod *corev1.Pod) (Res } // get mount point - sourcePath, _, err := util.GetMountPathOfPod(*pod) + sourcePath, _, err := resource.GetMountPathOfPod(*pod) if err != nil { log.Error(err, "get mount point error") return Result{}, err @@ -406,9 +344,10 @@ func (p *PodDriver) podDeletedHandler(ctx context.Context, pod *corev1.Pod) (Res if pod.Annotations == nil { return Result{}, nil } + annotation := pod.Annotations existTargets := make(map[string]string) - hashVal := 
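The resource branch of podErrorHandler rebuilds a failed mount pod without its resource requests so it can schedule: remove the finalizer, delete the pod, then recreate it from the same spec with the finalizer re-added and resources stripped. A condensed sketch of the rebuild step, using only calls visible in this diff:

```go
package sketch

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"

	"github.com/juicedata/juicefs-csi-driver/pkg/config"
	"github.com/juicedata/juicefs-csi-driver/pkg/util/resource"
)

// rebuildWithoutResources condenses the recovery path in podErrorHandler:
// clone the failed pod's spec, re-add the finalizer, and strip resource
// requests/limits so the replacement can be scheduled.
func rebuildWithoutResources(pod *corev1.Pod) *corev1.Pod {
	newPod := &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:        pod.Name,
			Namespace:   pod.Namespace,
			Labels:      pod.Labels,
			Annotations: pod.Annotations,
		},
		Spec: pod.Spec,
	}
	controllerutil.AddFinalizer(newPod, config.Finalizer)
	resource.DeleteResourceOfPod(newPod)
	return newPod
}
```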
pod.Labels[common.PodJuiceHashLabelKey] + hashVal := pod.Labels[config.PodJuiceHashLabelKey] if hashVal == "" { return Result{}, fmt.Errorf("pod %s/%s has no hash label", pod.Namespace, pod.Name) } @@ -422,10 +361,7 @@ func (p *PodDriver) podDeletedHandler(ctx context.Context, pod *corev1.Pod) (Res if len(existTargets) == 0 { // do not need to create new one, umount - _ = util.DoWithTimeout(ctx, defaultCheckoutTimeout, func() error { - util.UmountPath(ctx, sourcePath) - return nil - }) + util.UmountPath(ctx, sourcePath) // clean mount point err = util.DoWithTimeout(ctx, defaultCheckoutTimeout, func() error { log.Info("Clean mount point", "mountPath", sourcePath) @@ -437,7 +373,7 @@ func (p *PodDriver) podDeletedHandler(ctx context.Context, pod *corev1.Pod) (Res // cleanup cache should always complete, don't set timeout go p.CleanUpCache(context.TODO(), pod) // stop fuse fd and clean up socket - go passfd.GlobalFds.StopFd(context.TODO(), hashVal) + go fuse.GlobalFds.StopFd(context.TODO(), hashVal) return Result{}, nil } @@ -446,44 +382,88 @@ func (p *PodDriver) podDeletedHandler(ctx context.Context, pod *corev1.Pod) (Res defer lock.Unlock() // create - log.Info("pod targetPath not empty, need to create pod") + log.Info("pod targetPath not empty, need to create pod") // check pod delete - _, err = p.Client.GetPod(ctx, pod.Name, pod.Namespace) - if err == nil || apierrors.IsNotFound(err) { - needCreate, err := p.needCreateMountPod(ctx, pod.Labels[common.PodUniqueIdLabelKey], hashVal) - if err != nil { - return Result{}, err + for { + po, err := p.Client.GetPod(ctx, pod.Name, pod.Namespace) + if err == nil && po.DeletionTimestamp != nil { + log.V(1).Info("pod is being deleted, waiting") + time.Sleep(time.Millisecond * 500) + continue } - if needCreate { - // create pod - newPodName := podmount.GenPodNameByUniqueId(pod.Labels[common.PodUniqueIdLabelKey], true) - log.Info("need to create a new one", "newPodName", newPodName) - // delete tmp file - log.Info("delete tmp state file because it is not smoothly upgrade") - _ = util.DoWithTimeout(ctx, defaultCheckoutTimeout, func() error { - return os.Remove(path.Join("/tmp", hashVal, "state1.json")) - }) - newPod, err := p.newMountPod(ctx, pod, newPodName) - if err == nil { + if err != nil { + if apierrors.IsTimeout(err) { + break + } + if apierrors.IsNotFound(err) { + // create pod + oldSupportFusePass := util.SupportFusePass(pod.Spec.Containers[0].Image) + var newPod = &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: pod.Name, + Namespace: pod.Namespace, + Labels: pod.Labels, + Annotations: annotation, + }, + Spec: pod.Spec, + } + controllerutil.AddFinalizer(newPod, config.Finalizer) + log.Info("Need to create pod") + if err := p.applyConfigPatch(ctx, newPod); err != nil { + log.Error(err, "apply config patch error, will ignore") + } + if !util.SupportFusePass(newPod.Spec.Containers[0].Image) { + if oldSupportFusePass { + // old image supports fuse pass and the new image does not, stop fd in csi + fuse.GlobalFds.StopFd(ctx, hashVal) + } + // umount mount point before recreate mount pod + err := util.DoWithTimeout(ctx, defaultCheckoutTimeout, func() error { + exist, _ := mount.PathExists(sourcePath) + if !exist { + return fmt.Errorf("%s does not exist", sourcePath) + } + return nil + }) + if err == nil { + log.Info("start to umount", "mountPath", sourcePath) + _ = util.DoWithTimeout(ctx, defaultCheckoutTimeout, func() error { + util.UmountPath(ctx, sourcePath) + return nil + }) + } + } + err = mkrMp(ctx, *newPod) + if err != nil { + log.Error(err,
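One subtle branch in the recreate loop above: if the replacement image no longer supports fuse fd passing while the old one did, the CSI node must stop the passed fd and unmount the stale mount point before creating the new pod. A condensed sketch of that guard, under the signatures visible in this diff:

```go
package sketch

import (
	"context"

	corev1 "k8s.io/api/core/v1"

	"github.com/juicedata/juicefs-csi-driver/pkg/fuse"
	"github.com/juicedata/juicefs-csi-driver/pkg/util"
)

// prepareRecreate condenses the fuse-pass downgrade guard in the recreate
// loop: when the new image drops fuse fd passing, close the fd held by the
// csi node and unmount the stale mount point first.
func prepareRecreate(ctx context.Context, oldPod, newPod *corev1.Pod, hashVal, sourcePath string) {
	oldSupports := util.SupportFusePass(oldPod.Spec.Containers[0].Image)
	if !util.SupportFusePass(newPod.Spec.Containers[0].Image) {
		if oldSupports {
			fuse.GlobalFds.StopFd(ctx, hashVal)
		}
		util.UmountPath(ctx, sourcePath)
	}
}
```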
"mkdir mount point of pod") + } _, err = p.Client.CreatePod(ctx, newPod) if err != nil { log.Error(err, "Create pod") } + return Result{RequeueImmediately: true}, err } - return Result{RequeueImmediately: true}, err + log.Error(err, "Get pod error") + return Result{}, nil } - } - if err != nil { - if apierrors.IsTimeout(err) { - err = fmt.Errorf("old pod %s %s deleting timeout", pod.Name, config.Namespace) - log.Error(err, "delete pod error") - return Result{}, err + + // pod is created elsewhere + if po.Annotations == nil { + po.Annotations = make(map[string]string) + } + for k, v := range existTargets { + // add exist target in annotation + po.Annotations[k] = v + } + if err := resource.ReplacePodAnnotation(ctx, p.Client, pod, po.Annotations); err != nil { + log.Error(err, "Update pod error") } - log.Error(err, "Get pod error") return Result{}, err } - return Result{}, nil + err = fmt.Errorf("old pod %s %s deleting timeout", pod.Name, config.Namespace) + log.Error(err, "delete pod error") + return Result{}, err } // podPendingHandler handles mount pod that is pending @@ -492,7 +472,7 @@ func (p *PodDriver) podPendingHandler(ctx context.Context, pod *corev1.Pod) (Res return Result{}, nil } log := util.GenLog(ctx, podDriverLog, "podPendingHandler") - hashVal := pod.Labels[common.PodJuiceHashLabelKey] + hashVal := pod.Labels[config.PodJuiceHashLabelKey] if hashVal == "" { return Result{}, fmt.Errorf("pod %s/%s has no hash label", pod.Namespace, pod.Name) } @@ -505,7 +485,7 @@ func (p *PodDriver) podPendingHandler(ctx context.Context, pod *corev1.Pod) (Res log.Info("Pod failed because of resource.") if resource.IsPodHasResource(*pod) { // if pod is failed because of resource, delete resource and deploy pod again. - _ = resource.RemoveFinalizer(ctx, p.Client, pod, common.Finalizer) + _ = resource.RemoveFinalizer(ctx, p.Client, pod, config.Finalizer) log.Info("Delete it and deploy again with no resource.") if err := p.Client.DeletePod(ctx, pod); err != nil { log.Error(err, "delete pod error") @@ -545,7 +525,7 @@ func (p *PodDriver) podPendingHandler(ctx context.Context, pod *corev1.Pod) (Res }, Spec: pod.Spec, } - controllerutil.AddFinalizer(newPod, common.Finalizer) + controllerutil.AddFinalizer(newPod, config.Finalizer) resource.DeleteResourceOfPod(newPod) err := mkrMp(ctx, *newPod) if err != nil { @@ -573,7 +553,7 @@ func (p *PodDriver) podReadyHandler(ctx context.Context, pod *corev1.Pod) (Resul return Result{}, nil } // get mount point - mntPath, _, err := util.GetMountPathOfPod(*pod) + mntPath, _, err := resource.GetMountPathOfPod(*pod) if err != nil { log.Error(err, "get mount point error") return Result{}, err @@ -584,7 +564,7 @@ func (p *PodDriver) podReadyHandler(ctx context.Context, pod *corev1.Pod) (Resul defer lock.Unlock() supFusePass := util.SupportFusePass(pod.Spec.Containers[0].Image) - podHashVal := pod.Labels[common.PodJuiceHashLabelKey] + podHashVal := pod.Labels[config.PodJuiceHashLabelKey] err = resource.WaitUtilMountReady(ctx, pod.Name, mntPath, defaultCheckoutTimeout) if err != nil { @@ -592,7 +572,7 @@ func (p *PodDriver) podReadyHandler(ctx context.Context, pod *corev1.Pod) (Resul log.Error(err, "pod is not ready within 60s") // mount pod hang probably, close fd and delete it log.Info("close fd and delete pod") - passfd.GlobalFds.CloseFd(podHashVal) + fuse.GlobalFds.CloseFd(podHashVal) // umount it _ = util.DoWithTimeout(ctx, defaultCheckoutTimeout, func() error { util.UmountPath(ctx, mntPath) @@ -686,10 +666,7 @@ func (p *PodDriver) recoverTarget(ctx context.Context, 
podName, sourcePath strin } log.Info("recover volPath", "target", ti.target, "mountPath", sourcePath) mountOption := []string{"bind"} - err = util.DoWithTimeout(ctx, defaultCheckoutTimeout, func() error { - return p.Mount(sourcePath, ti.target, "none", mountOption) - }) - if err != nil { + if err := p.Mount(sourcePath, ti.target, "none", mountOption); err != nil { ms := fmt.Sprintf("exec cmd: mount -o bind %s %s err:%v", sourcePath, ti.target, err) log.Error(err, "bind mount error") return fmt.Errorf(ms) @@ -741,21 +718,18 @@ func (p *PodDriver) umountTargetUntilRemain(ctx context.Context, basemi *mountIt return nil } - _ = util.DoWithTimeout(ctx, defaultCheckoutTimeout, func() error { - util.UmountPath(subCtx, target) - return nil - }) + util.UmountPath(subCtx, target) } } // CleanUpCache clean up cache func (p *PodDriver) CleanUpCache(ctx context.Context, pod *corev1.Pod) { log := util.GenLog(ctx, podDriverLog, "CleanUpCache") - if pod.Annotations[common.CleanCache] != "true" { + if pod.Annotations[config.CleanCache] != "true" { return } - uuid := pod.Annotations[common.JuiceFSUUID] - uniqueId := pod.Annotations[common.UniqueId] + uuid := pod.Annotations[config.JuiceFSUUID] + uniqueId := pod.Annotations[config.UniqueId] if uuid == "" && uniqueId == "" { // no necessary info, return log.Info("Can't get uuid and uniqueId from pod annotation. skip cache clean.") @@ -839,7 +813,7 @@ func (p *PodDriver) checkMountPodStuck(pod *corev1.Pod) { return } log := klog.NewKlogr().WithName("abortFuse").WithValues("podName", pod.Name) - mountPoint, _, _ := util.GetMountPathOfPod(*pod) + mountPoint, _, _ := resource.GetMountPathOfPod(*pod) defer func() { if runtime.GOOS == "linux" { util.DevMinorTableDelete(mountPoint) @@ -937,7 +911,7 @@ func mkrMp(ctx context.Context, pod corev1.Pod) error { // get mount point var mntPath string var err error - mntPath, _, err = util.GetMountPathOfPod(pod) + mntPath, _, err = resource.GetMountPathOfPod(pod) if err != nil { log.Error(err, "get mount point error") return err @@ -954,76 +928,3 @@ func mkrMp(ctx context.Context, pod corev1.Pod) error { } return nil } - -func (p *PodDriver) needCreateMountPod(ctx context.Context, uniqueId, hashVal string) (bool, error) { - log := util.GenLog(ctx, podDriverLog, "needCreate") - labelSelector := &metav1.LabelSelector{MatchLabels: map[string]string{ - common.PodTypeKey: common.PodTypeValue, - common.PodUniqueIdLabelKey: uniqueId, - common.PodJuiceHashLabelKey: hashVal, - }} - fieldSelector := &fields.Set{"spec.nodeName": config.NodeName} - pods, err := p.Client.ListPod(ctx, config.Namespace, labelSelector, fieldSelector) - if err != nil { - log.Error(err, "List pod error") - return false, err - } - needCreate := true - for _, po := range pods { - if po.DeletionTimestamp == nil && !resource.IsPodComplete(&po) { - needCreate = false - } - } - return needCreate, nil -} - -func (p *PodDriver) newMountPod(ctx context.Context, pod *corev1.Pod, newPodName string) (*corev1.Pod, error) { - log := util.GenLog(ctx, podDriverLog, "newMountPod") - hashVal := pod.Labels[common.PodJuiceHashLabelKey] - // get mount point - sourcePath, _, err := util.GetMountPathOfPod(*pod) - if err != nil { - log.Error(err, "get mount point error") - return nil, err - } - oldSupportFusePass := util.SupportFusePass(pod.Spec.Containers[0].Image) - var newPod = &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: newPodName, - Namespace: pod.Namespace, - Labels: pod.Labels, - Annotations: pod.Annotations, - }, - Spec: pod.Spec, - } - 
controllerutil.AddFinalizer(newPod, common.Finalizer) - if err := p.applyConfigPatch(ctx, newPod); err != nil { - log.Error(err, "apply config patch error, will ignore") - } - if !util.SupportFusePass(newPod.Spec.Containers[0].Image) { - if oldSupportFusePass { - // old image support fuse pass and new image do not support, stop fd in csi - passfd.GlobalFds.StopFd(ctx, hashVal) - } - // umount mount point before recreate mount pod - err := util.DoWithTimeout(ctx, defaultCheckoutTimeout, func() error { - exist, _ := mount.PathExists(sourcePath) - if !exist { - return fmt.Errorf("%s not exist", sourcePath) - } - return nil - }) - if err == nil { - log.Info("start to umount", "mountPath", sourcePath) - _ = util.DoWithTimeout(ctx, defaultCheckoutTimeout, func() error { - util.UmountPath(ctx, sourcePath) - return nil - }) - } - } - err = mkrMp(ctx, *newPod) - if err != nil { - log.Error(err, "mkdir mount point of pod") - } - return newPod, nil -} diff --git a/pkg/controller/pod_driver_test.go b/pkg/controller/pod_driver_test.go index a57fadd44f..7f75513600 100644 --- a/pkg/controller/pod_driver_test.go +++ b/pkg/controller/pod_driver_test.go @@ -37,11 +37,12 @@ import ( k8sexec "k8s.io/utils/exec" "k8s.io/utils/mount" - "github.com/juicedata/juicefs-csi-driver/pkg/common" + jfsConfig "github.com/juicedata/juicefs-csi-driver/pkg/config" "github.com/juicedata/juicefs-csi-driver/pkg/driver/mocks" - "github.com/juicedata/juicefs-csi-driver/pkg/fuse/passfd" + "github.com/juicedata/juicefs-csi-driver/pkg/fuse" "github.com/juicedata/juicefs-csi-driver/pkg/k8sclient" "github.com/juicedata/juicefs-csi-driver/pkg/util" + jfsResource "github.com/juicedata/juicefs-csi-driver/pkg/util/resource" ) var ( @@ -65,9 +66,9 @@ var readyPod = &corev1.Pod{ Annotations: map[string]string{ util.GetReferenceKey(target): target}, Labels: map[string]string{ - common.PodJuiceHashLabelKey: "e11ef7a140d2e8bac9c75b1c44dcba22954402edc5015a8eae931d389b82db9", + jfsConfig.PodJuiceHashLabelKey: "e11ef7a140d2e8bac9c75b1c44dcba22954402edc5015a8eae931d389b82db9", }, - Finalizers: []string{common.Finalizer}, + Finalizers: []string{jfsConfig.Finalizer}, }, Spec: corev1.PodSpec{ Containers: []corev1.Container{{ @@ -100,9 +101,9 @@ var errCmdPod = &corev1.Pod{ Annotations: map[string]string{ util.GetReferenceKey("/mnt/abc"): "/mnt/abc"}, Labels: map[string]string{ - common.PodJuiceHashLabelKey: "e11ef7a140d2e8bac9c75b1c44dcba22954402edc5015a8eae931d389b82db9", + jfsConfig.PodJuiceHashLabelKey: "e11ef7a140d2e8bac9c75b1c44dcba22954402edc5015a8eae931d389b82db9", }, - Finalizers: []string{common.Finalizer}, + Finalizers: []string{jfsConfig.Finalizer}, }, Spec: corev1.PodSpec{ Containers: []corev1.Container{{ @@ -119,10 +120,10 @@ var deletedPod = &corev1.Pod{ Annotations: map[string]string{ util.GetReferenceKey("/mnt/abc"): "/mnt/abc"}, Labels: map[string]string{ - common.PodJuiceHashLabelKey: "e11ef7a140d2e8bac9c75b1c44dcba22954402edc5015a8eae931d389b82db9", + jfsConfig.PodJuiceHashLabelKey: "e11ef7a140d2e8bac9c75b1c44dcba22954402edc5015a8eae931d389b82db9", }, DeletionTimestamp: &metav1.Time{Time: time.Now()}, - Finalizers: []string{common.Finalizer}, + Finalizers: []string{jfsConfig.Finalizer}, }, Spec: corev1.PodSpec{ Containers: []corev1.Container{{Image: "juicedata/mount:ce-v1.2.1"}}, @@ -152,9 +153,9 @@ var errorPod1 = &corev1.Pod{ Annotations: map[string]string{ util.GetReferenceKey("/mnt/abc"): "/mnt/abc"}, Labels: map[string]string{ - common.PodJuiceHashLabelKey: "e11ef7a140d2e8bac9c75b1c44dcba22954402edc5015a8eae931d389b82db9", + 
jfsConfig.PodJuiceHashLabelKey: "e11ef7a140d2e8bac9c75b1c44dcba22954402edc5015a8eae931d389b82db9", }, - Finalizers: []string{common.Finalizer}, + Finalizers: []string{jfsConfig.Finalizer}, }, Spec: corev1.PodSpec{ Containers: []corev1.Container{ @@ -195,9 +196,9 @@ var resourceErrPod = &corev1.Pod{ Annotations: map[string]string{ util.GetReferenceKey("/mnt/abc"): "/mnt/abc"}, Labels: map[string]string{ - common.PodJuiceHashLabelKey: "e11ef7a140d2e8bac9c75b1c44dcba22954402edc5015a8eae931d389b82db9", + jfsConfig.PodJuiceHashLabelKey: "e11ef7a140d2e8bac9c75b1c44dcba22954402edc5015a8eae931d389b82db9", }, - Finalizers: []string{common.Finalizer}, + Finalizers: []string{jfsConfig.Finalizer}, }, Spec: corev1.PodSpec{ Containers: []corev1.Container{ @@ -238,9 +239,9 @@ var errorPod2 = &corev1.Pod{ Annotations: map[string]string{ util.GetReferenceKey("/mnt/abc"): "/mnt/abc"}, Labels: map[string]string{ - common.PodJuiceHashLabelKey: "e11ef7a140d2e8bac9c75b1c44dcba22954402edc5015a8eae931d389b82db9", + jfsConfig.PodJuiceHashLabelKey: "e11ef7a140d2e8bac9c75b1c44dcba22954402edc5015a8eae931d389b82db9", }, - Finalizers: []string{common.Finalizer}, + Finalizers: []string{jfsConfig.Finalizer}, }, Spec: corev1.PodSpec{ Containers: []corev1.Container{{Image: "juicedata/mount:ce-v1.2.1"}}, @@ -273,9 +274,9 @@ var errorPod3 = &corev1.Pod{ Annotations: map[string]string{ util.GetReferenceKey("/mnt/abc"): "/mnt/abc"}, Labels: map[string]string{ - common.PodJuiceHashLabelKey: "e11ef7a140d2e8bac9c75b1c44dcba22954402edc5015a8eae931d389b82db9", + jfsConfig.PodJuiceHashLabelKey: "e11ef7a140d2e8bac9c75b1c44dcba22954402edc5015a8eae931d389b82db9", }, - Finalizers: []string{common.Finalizer}, + Finalizers: []string{jfsConfig.Finalizer}, }, Spec: corev1.PodSpec{ Containers: []corev1.Container{{Image: "juicedata/mount:ce-v1.2.1"}}, @@ -299,9 +300,9 @@ var pendingPod = &corev1.Pod{ Annotations: map[string]string{ util.GetReferenceKey("/mnt/abc"): "/mnt/abc"}, Labels: map[string]string{ - common.PodJuiceHashLabelKey: "e11ef7a140d2e8bac9c75b1c44dcba22954402edc5015a8eae931d389b82db9", + jfsConfig.PodJuiceHashLabelKey: "e11ef7a140d2e8bac9c75b1c44dcba22954402edc5015a8eae931d389b82db9", }, - Finalizers: []string{common.Finalizer}, + Finalizers: []string{jfsConfig.Finalizer}, }, Spec: corev1.PodSpec{ Containers: []corev1.Container{{Image: "juicedata/mount:ce-v1.2.1"}}, @@ -325,9 +326,9 @@ var runningPod = &corev1.Pod{ Annotations: map[string]string{ util.GetReferenceKey("/mnt/abc"): "/mnt/abc"}, Labels: map[string]string{ - common.PodJuiceHashLabelKey: "e11ef7a140d2e8bac9c75b1c44dcba22954402edc5015a8eae931d389b82db9", + jfsConfig.PodJuiceHashLabelKey: "e11ef7a140d2e8bac9c75b1c44dcba22954402edc5015a8eae931d389b82db9", }, - Finalizers: []string{common.Finalizer}, + Finalizers: []string{jfsConfig.Finalizer}, }, Spec: corev1.PodSpec{ Containers: []corev1.Container{{Image: "juicedata/mount:ce-v1.2.1"}}, @@ -467,7 +468,7 @@ func genMountInfos() []mount.MountInfo { func TestPodDriver_podReadyHandler(t *testing.T) { defer func() { _ = os.RemoveAll("tmp") }() - passfd.InitTestFds() + fuse.InitTestFds() Convey("Test pod ready handler", t, FailureContinues, func() { Convey("pod ready add need recovery ", func() { d := NewPodDriver(&k8sclient.K8sClient{Interface: fake.NewSimpleClientset()}, mount.SafeFormatAndMount{ @@ -582,7 +583,7 @@ func TestPodDriver_podReadyHandler(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "juicefs-test-err-pod", Annotations: nil, - Finalizers: []string{common.Finalizer}, + Finalizers: 
[]string{jfsConfig.Finalizer}, }, Spec: corev1.PodSpec{ Containers: []corev1.Container{{ @@ -610,7 +611,7 @@ func TestPodDriver_podReadyHandler(t *testing.T) { Name: "juicefs-test-err-mount-cmd-pod", Annotations: map[string]string{ util.GetReferenceKey("/mnt/abc"): "/mnt/abc"}, - Finalizers: []string{common.Finalizer}, + Finalizers: []string{jfsConfig.Finalizer}, }, Spec: corev1.PodSpec{ Containers: []corev1.Container{{ @@ -693,7 +694,7 @@ func TestPodDriver_podReadyHandler(t *testing.T) { Name: "juicefs-test-err-mount-cmd-pod", Annotations: map[string]string{ util.GetReferenceKey("/mnt/abc"): "/mnt/abc"}, - Finalizers: []string{common.Finalizer}, + Finalizers: []string{jfsConfig.Finalizer}, }, Spec: corev1.PodSpec{ Containers: []corev1.Container{{ @@ -713,7 +714,7 @@ func TestPodDriver_podDeletedHandler(t *testing.T) { Convey("Test pod delete handler", t, func() { Convey("umount fail", func() { var tmpCmd = &exec.Cmd{} - patch1 := ApplyFunc(util.GetMountPathOfPod, func(pod corev1.Pod) (string, string, error) { + patch1 := ApplyFunc(jfsResource.GetMountPathOfPod, func(pod corev1.Pod) (string, string, error) { return "/test", "test", nil }) defer patch1.Reset() @@ -757,7 +758,7 @@ func TestPodDriver_podDeletedHandler(t *testing.T) { }) Convey("new pod create", func() { var tmpCmd = &exec.Cmd{} - patch1 := ApplyFunc(util.GetMountPathOfPod, func(pod corev1.Pod) (string, string, error) { + patch1 := ApplyFunc(jfsResource.GetMountPathOfPod, func(pod corev1.Pod) (string, string, error) { return "/test", "test", nil }) defer patch1.Reset() @@ -974,7 +975,7 @@ func TestPodDriver_podErrorHandler(t *testing.T) { default: return &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ - Finalizers: []string{common.Finalizer}, + Finalizers: []string{jfsConfig.Finalizer}, }, }, nil } diff --git a/pkg/controller/reconciler.go b/pkg/controller/reconciler.go index 87b36939c3..f0aa7fd7c0 100644 --- a/pkg/controller/reconciler.go +++ b/pkg/controller/reconciler.go @@ -29,7 +29,6 @@ import ( k8sexec "k8s.io/utils/exec" "k8s.io/utils/mount" - "github.com/juicedata/juicefs-csi-driver/pkg/common" "github.com/juicedata/juicefs-csi-driver/pkg/config" "github.com/juicedata/juicefs-csi-driver/pkg/k8sclient" ) @@ -107,7 +106,7 @@ func doReconcile(ks *k8sclient.K8sClient, kc *k8sclient.KubeletClient) { continue } // check label - if value, ok := pod.Labels[common.PodTypeKey]; !ok || value != common.PodTypeValue { + if value, ok := pod.Labels[config.PodTypeKey]; !ok || value != config.PodTypeValue { continue } crtPodStatus := getPodStatus(pod) diff --git a/pkg/controller/secret_controller.go b/pkg/controller/secret_controller.go index 7dac30df2c..9e01481707 100644 --- a/pkg/controller/secret_controller.go +++ b/pkg/controller/secret_controller.go @@ -34,7 +34,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" - "github.com/juicedata/juicefs-csi-driver/pkg/common" "github.com/juicedata/juicefs-csi-driver/pkg/config" "github.com/juicedata/juicefs-csi-driver/pkg/juicefs" "github.com/juicedata/juicefs-csi-driver/pkg/k8sclient" @@ -58,7 +57,7 @@ func checkAndCleanOrphanSecret(ctx context.Context, client *k8sclient.K8sClient, } // new version of juicefs-csi-driver has a label to identify the secret // no need to manual clean up - if _, ok := secrets.Labels[common.JuicefsSecretLabelKey]; ok { + if _, ok := secrets.Labels[config.JuicefsSecretLabelKey]; ok { return nil } if !strings.HasPrefix(secrets.Name, "juicefs-") || !strings.HasSuffix(secrets.Name, "-secret") { diff --git 
a/pkg/dashboard/api.go b/pkg/dashboard/api.go index fb1d1d3344..8e2cc298d4 100644 --- a/pkg/dashboard/api.go +++ b/pkg/dashboard/api.go @@ -78,14 +78,12 @@ func (api *API) Handle(group *gin.RouterGroup) { group.PUT("/config", api.putCSIConfig()) podGroup := group.Group("/pod/:namespace/:name", api.getPodMiddileware()) podGroup.GET("/", api.getPodHandler()) - podGroup.GET("/latestimage", api.getPodLatestImage()) podGroup.GET("/events", api.getPodEvents()) podGroup.GET("/logs/:container", api.getPodLogs()) podGroup.GET("/pvs", api.listPodPVsHandler()) podGroup.GET("/pvcs", api.listPodPVCsHandler()) podGroup.GET("/mountpods", api.listMountPodsOfAppPod()) podGroup.GET("/apppods", api.listAppPodsOfMountPod()) - podGroup.GET("/csi-nodes", api.listCSINodePod()) podGroup.GET("/node", api.getPodNode()) podGroup.GET("/downloadDebugFile", api.downloadDebugFile()) pvGroup := group.Group("/pv/:name", api.getPVMiddileware()) @@ -105,7 +103,6 @@ func (api *API) Handle(group *gin.RouterGroup) { // only for mountpod websocketAPI.GET("/pod/:namespace/:name/:container/accesslog", api.watchMountPodAccessLog()) websocketAPI.GET("/pod/:namespace/:name/:container/debug", api.debugPod()) - websocketAPI.GET("/pod/:namespace/:name/upgrade", api.smoothUpgrade()) websocketAPI.GET("/pod/:namespace/:name/:container/warmup", api.warmupPod()) websocketAPI.GET("/pod/:namespace/:name/:container/exec", api.execPod()) } diff --git a/pkg/dashboard/cm.go b/pkg/dashboard/cm.go index 4a2233211e..b1ea773c89 100644 --- a/pkg/dashboard/cm.go +++ b/pkg/dashboard/cm.go @@ -59,29 +59,6 @@ func (api *API) putCSIConfig() gin.HandlerFunc { c.JSON(500, gin.H{"error": err.Error()}) return } - s, err := metav1.LabelSelectorAsSelector(&metav1.LabelSelector{ - MatchLabels: map[string]string{ - "app.kubernetes.io/name": "juicefs-csi-driver", - "app": "juicefs-csi-node", - }, - }) - if err != nil { - c.String(500, "parse label selector error %v", err) - return - } - csiNodeList, err := api.client.CoreV1().Pods(api.sysNamespace).List(c, metav1.ListOptions{LabelSelector: s.String()}) - if err != nil { - c.String(500, "list csi node error %v", err) - return - } - for _, pod := range csiNodeList.Items { - pod.Annotations["juicefs/update-time"] = metav1.Now().Format("2006-01-02T15:04:05Z") - _, err = api.client.CoreV1().Pods(api.sysNamespace).Update(c, &pod, metav1.UpdateOptions{}) - if err != nil { - c.JSON(500, gin.H{"error": err.Error()}) - return - } - } c.JSON(200, cm) } } diff --git a/pkg/dashboard/controller.go b/pkg/dashboard/controller.go index 2c329d2e6f..79635a66db 100644 --- a/pkg/dashboard/controller.go +++ b/pkg/dashboard/controller.go @@ -24,7 +24,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/klog/v2" - "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/handler" @@ -117,13 +116,6 @@ func (c *PodController) SetupWithManager(mgr manager.Manager) error { return err } - if err := mgr.GetFieldIndexer().IndexField(context.Background(), &corev1.Pod{}, "spec.nodeName", func(rawObj client.Object) []string { - pod := rawObj.(*corev1.Pod) - return []string{pod.Spec.NodeName} - }); err != nil { - return err - } - return ctr.Watch(&source.Kind{Type: &corev1.Pod{}}, &handler.EnqueueRequestForObject{}, predicate.Funcs{ CreateFunc: func(event event.CreateEvent) bool { return true diff --git a/pkg/dashboard/pod.go b/pkg/dashboard/pod.go index ef77e650fb..f8e430d863 100644 --- 
a/pkg/dashboard/pod.go +++ b/pkg/dashboard/pod.go @@ -34,10 +34,7 @@ import ( "k8s.io/klog/v2" "sigs.k8s.io/controller-runtime/pkg/client" - "github.com/juicedata/juicefs-csi-driver/pkg/common" "github.com/juicedata/juicefs-csi-driver/pkg/config" - "github.com/juicedata/juicefs-csi-driver/pkg/k8sclient" - "github.com/juicedata/juicefs-csi-driver/pkg/util" "github.com/juicedata/juicefs-csi-driver/pkg/util/resource" ) @@ -283,13 +280,6 @@ func (api *API) listMountPod() gin.HandlerFunc { func (api *API) listCSINodePod() gin.HandlerFunc { return func(c *gin.Context) { - var targetPod *corev1.Pod - if v, ok := c.Get("pod"); ok { - targetPod = v.(*corev1.Pod) - } - if targetPod.Labels["app.kubernetes.io/name"] == "juicefs-csi-driver" { - return - } var pods corev1.PodList s, err := metav1.LabelSelectorAsSelector(&metav1.LabelSelector{ MatchLabels: map[string]string{ @@ -301,15 +291,8 @@ func (api *API) listCSINodePod() gin.HandlerFunc { c.String(500, "parse label selector error %v", err) return } - var fieldSelector fields.Selector - if targetPod != nil { - fieldSelector = fields.SelectorFromSet(fields.Set{ - "spec.nodeName": targetPod.Spec.NodeName, - }) - } err = api.cachedReader.List(c, &pods, &client.ListOptions{ LabelSelector: s, - FieldSelector: fieldSelector, }) if err != nil { c.String(500, "list pods error %v", err) @@ -389,37 +372,6 @@ func (api *API) getPodHandler() gin.HandlerFunc { } } -func (api *API) getPodLatestImage() gin.HandlerFunc { - return func(c *gin.Context) { - po, ok := c.Get("pod") - if !ok { - c.String(404, "not found") - return - } - rawPod := po.(*corev1.Pod) - // gen k8s client - k8sClient, err := k8sclient.NewClientWithConfig(api.kubeconfig) - if err != nil { - c.String(500, "Could not create k8s client: %v", err) - return - } - if rawPod.Labels[common.PodTypeKey] != common.PodTypeValue { - c.String(400, "pod %s is not a mount pod", rawPod.Name) - return - } - if err := config.LoadFromConfigMap(c, k8sClient); err != nil { - c.String(500, "Load config from configmap error: %v", err) - return - } - attr, err := config.GenPodAttrWithMountPod(c, k8sClient, rawPod) - if err != nil { - c.String(500, "generate pod attribute error: %v", err) - return - } - c.IndentedJSON(200, attr.Image) - } -} - func (api *API) getPodEvents() gin.HandlerFunc { return func(c *gin.Context) { p, ok := c.Get("pod") @@ -571,28 +523,6 @@ func (api *API) listMountPodOf(ctx context.Context, pod *corev1.Pod) ([]*corev1. 
return mountPods, nil } -func (api *API) listMountPodOfCSINode(ctx context.Context, csiNode *corev1.Pod) ([]corev1.Pod, error) { - var mountPods corev1.PodList - s, err := metav1.LabelSelectorAsSelector(&metav1.LabelSelector{ - MatchLabels: map[string]string{ - "app.kubernetes.io/name": "juicefs-mount", - }, - }) - if err != nil { - return nil, err - } - err = api.cachedReader.List(ctx, &mountPods, &client.ListOptions{ - FieldSelector: fields.SelectorFromSet(fields.Set{ - "spec.nodeName": csiNode.Spec.NodeName, - }), - LabelSelector: s, - }) - if err != nil { - return nil, err - } - return mountPods.Items, nil -} - func (api *API) listMountPodsOfAppPod() gin.HandlerFunc { return func(c *gin.Context) { obj, ok := c.Get("pod") @@ -601,15 +531,6 @@ func (api *API) listMountPodsOfAppPod() gin.HandlerFunc { return } pod := obj.(*corev1.Pod) - if isCsiNode(pod) { - mountPods, err := api.listMountPodOfCSINode(c, pod) - if err != nil { - c.String(500, "list mount pods error %v", err) - return - } - c.IndentedJSON(200, mountPods) - return - } pods, err := api.listMountPodOf(c, pod) if err != nil { c.String(500, "list mount pods error %v", err) @@ -880,7 +801,7 @@ func (api *API) watchMountPodAccessLog() gin.HandlerFunc { podLog.Error(err, "Failed to get mount pod") return } - mntPath, _, err := util.GetMountPathOfPod(*mountpod) + mntPath, _, err := resource.GetMountPathOfPod(*mountpod) if err != nil || mntPath == "" { podLog.Error(err, "Failed to get mount path") return @@ -913,7 +834,7 @@ func (api *API) debugPod() gin.HandlerFunc { podLog.Error(err, "Failed to get mount pod") return } - mntPath, _, err := util.GetMountPathOfPod(*mountpod) + mntPath, _, err := resource.GetMountPathOfPod(*mountpod) if err != nil || mntPath == "" { podLog.Error(err, "Failed to get mount path") return @@ -958,7 +879,7 @@ func (api *API) warmupPod() gin.HandlerFunc { return } rootPath := "" - volumeId := mountpod.Labels[common.PodUniqueIdLabelKey] + volumeId := mountpod.Labels[config.PodUniqueIdLabelKey] var pv corev1.PersistentVolume if err := api.cachedReader.Get(ctx, api.sysNamespaced(volumeId), &pv); err == nil { if pv.Spec.CSI != nil && pv.Spec.CSI.VolumeAttributes != nil { @@ -968,7 +889,7 @@ func (api *API) warmupPod() gin.HandlerFunc { } } - mntPath, _, err := util.GetMountPathOfPod(*mountpod) + mntPath, _, err := resource.GetMountPathOfPod(*mountpod) if err != nil || mntPath == "" { klog.Error("Failed to get mount path: ", err) return @@ -999,7 +920,7 @@ func (api *API) downloadDebugFile() gin.HandlerFunc { return func(c *gin.Context) { namespace := c.Param("namespace") name := c.Param("name") - container := common.MountContainerName + container := config.MountContainerName c.Header("Content-Disposition", "attachment; filename="+namespace+"_"+name+"_"+"debug.zip") err := resource.DownloadPodFile( api.client, api.kubeconfig, c.Writer, namespace, name, container, @@ -1010,44 +931,3 @@ func (api *API) downloadDebugFile() gin.HandlerFunc { } } } - -func (api *API) smoothUpgrade() gin.HandlerFunc { - return func(c *gin.Context) { - namespace := c.Param("namespace") - name := c.Param("name") - - mountpod, err := api.client.CoreV1().Pods(namespace).Get(c, name, metav1.GetOptions{}) - if err != nil { - klog.Error("Failed to get mount pod: ", err) - return - } - recreate := c.Query("recreate") - podLog.Info("upgrade juicefs-csi-driver", "pod", mountpod.Name, "recreate", recreate) - - csiNode, err := api.getCSINode(c, mountpod.Spec.NodeName) - if err != nil { - podLog.Error(err, "get csi node error", "node", 
mountpod.Spec.NodeName) - c.String(500, "get csi node error %v", err) - return - } - websocket.Handler(func(ws *websocket.Conn) { - defer ws.Close() - ctx, cancel := context.WithCancel(c.Request.Context()) - defer cancel() - terminal := resource.NewTerminalSession(ctx, ws, resource.EndOfText) - - podLog.Info("Start to upgrade juicefs-csi-driver", "pod", mountpod.Name, "recreate", recreate) - cmds := []string{"juicefs-csi-driver", "upgrade", mountpod.Name} - if recreate == "true" { - cmds = append(cmds, "--restart") - } - podLog.Info("cmds", "cmds", cmds) - - if err := resource.ExecInPod( - api.client, api.kubeconfig, terminal, csiNode.Namespace, csiNode.Name, "juicefs-plugin", cmds); err != nil { - podLog.Error(err, "Failed to start process") - return - } - }).ServeHTTP(c.Writer, c.Request) - } -} diff --git a/pkg/dashboard/utils.go b/pkg/dashboard/utils.go index e182ac714d..c067447ee6 100644 --- a/pkg/dashboard/utils.go +++ b/pkg/dashboard/utils.go @@ -18,16 +18,13 @@ package dashboard import ( "context" - "sort" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" + "sort" + "github.com/juicedata/juicefs-csi-driver/pkg/config" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" - - "github.com/juicedata/juicefs-csi-driver/pkg/common" - jfsConfig "github.com/juicedata/juicefs-csi-driver/pkg/config" ) func (api *API) sysNamespaced(name string) types.NamespacedName { @@ -40,11 +37,11 @@ func (api *API) sysNamespaced(name string) types.NamespacedName { func isAppPod(pod *corev1.Pod) bool { if pod.Labels != nil { // mount pod mode - if _, ok := pod.Labels[common.UniqueId]; ok { + if _, ok := pod.Labels[config.UniqueId]; ok { return true } // sidecar mode - if _, ok := pod.Labels[common.InjectSidecarDone]; ok { + if _, ok := pod.Labels[config.InjectSidecarDone]; ok { return true } } @@ -69,7 +66,7 @@ func (api *API) isAppPodShouldList(ctx context.Context, pod *corev1.Pod) bool { if err := api.cachedReader.Get(ctx, types.NamespacedName{Name: pvc.Spec.VolumeName}, &pv); err != nil { return false } - if pv.Spec.CSI != nil && pv.Spec.CSI.Driver == jfsConfig.DriverName { + if pv.Spec.CSI != nil && pv.Spec.CSI.Driver == config.DriverName { return true } } @@ -110,7 +107,7 @@ func LabelSelectorOfMount(pv corev1.PersistentVolume) labels.Selector { } sl := metav1.LabelSelector{ MatchExpressions: []metav1.LabelSelectorRequirement{{ - Key: common.PodUniqueIdLabelKey, + Key: config.PodUniqueIdLabelKey, Operator: metav1.LabelSelectorOpIn, Values: values, }}, diff --git a/pkg/driver/node.go b/pkg/driver/node.go index e827896673..f6bbfccb2d 100644 --- a/pkg/driver/node.go +++ b/pkg/driver/node.go @@ -34,7 +34,7 @@ import ( "github.com/prometheus/client_golang/prometheus" - "github.com/juicedata/juicefs-csi-driver/pkg/common" + "github.com/juicedata/juicefs-csi-driver/pkg/config" "github.com/juicedata/juicefs-csi-driver/pkg/juicefs" "github.com/juicedata/juicefs-csi-driver/pkg/k8sclient" "github.com/juicedata/juicefs-csi-driver/pkg/util" @@ -104,8 +104,8 @@ func (d *nodeService) NodeUnstageVolume(ctx context.Context, req *csi.NodeUnstag func (d *nodeService) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolumeRequest) (*csi.NodePublishVolumeResponse, error) { volCtx := req.GetVolumeContext() log := klog.NewKlogr().WithName("NodePublishVolume") - if volCtx != nil && volCtx[common.PodInfoName] != "" { - log = log.WithValues("appName", volCtx[common.PodInfoName]) + if volCtx != nil && volCtx[config.PodInfoName] != "" { + log = log.WithValues("appName", 
volCtx[config.PodInfoName]) } volumeID := req.GetVolumeId() log = log.WithValues("volumeId", volumeID) diff --git a/pkg/driver/provisioner.go b/pkg/driver/provisioner.go index 0411f817f2..aeef25f31c 100644 --- a/pkg/driver/provisioner.go +++ b/pkg/driver/provisioner.go @@ -34,7 +34,6 @@ import ( "github.com/prometheus/client_golang/prometheus" - "github.com/juicedata/juicefs-csi-driver/pkg/common" "github.com/juicedata/juicefs-csi-driver/pkg/config" "github.com/juicedata/juicefs-csi-driver/pkg/juicefs" k8s "github.com/juicedata/juicefs-csi-driver/pkg/k8sclient" @@ -171,8 +170,8 @@ func (j *provisionerService) Provision(ctx context.Context, options provisioncon FSType: "juicefs", VolumeAttributes: volCtx, NodePublishSecretRef: &corev1.SecretReference{ - Name: scParams[common.PublishSecretName], - Namespace: scParams[common.PublishSecretNamespace], + Name: scParams[config.PublishSecretName], + Namespace: scParams[config.PublishSecretNamespace], }, }, }, @@ -183,15 +182,15 @@ func (j *provisionerService) Provision(ctx context.Context, options provisioncon VolumeMode: options.PVC.Spec.VolumeMode, }, } - if scParams[common.ControllerExpandSecretName] != "" && scParams[common.ControllerExpandSecretNamespace] != "" { + if scParams[config.ControllerExpandSecretName] != "" && scParams[config.ControllerExpandSecretNamespace] != "" { pv.Spec.CSI.ControllerExpandSecretRef = &corev1.SecretReference{ - Name: scParams[common.ControllerExpandSecretName], - Namespace: scParams[common.ControllerExpandSecretNamespace], + Name: scParams[config.ControllerExpandSecretName], + Namespace: scParams[config.ControllerExpandSecretNamespace], } } if pv.Spec.PersistentVolumeReclaimPolicy == corev1.PersistentVolumeReclaimDelete && options.StorageClass.Parameters["secretFinalizer"] == "true" { - secret, err := j.K8sClient.GetSecret(ctx, scParams[common.ProvisionerSecretName], scParams[common.ProvisionerSecretNamespace]) + secret, err := j.K8sClient.GetSecret(ctx, scParams[config.ProvisionerSecretName], scParams[config.ProvisionerSecretNamespace]) if err != nil { provisionerLog.Error(err, "Get Secret error") j.metrics.provisionErrors.Inc() @@ -199,7 +198,7 @@ func (j *provisionerService) Provision(ctx context.Context, options provisioncon } provisionerLog.V(1).Info("Add Finalizer", "namespace", secret.Namespace, "name", secret.Name) - err = resource.AddSecretFinalizer(ctx, j.K8sClient, secret, common.Finalizer) + err = resource.AddSecretFinalizer(ctx, j.K8sClient, secret, config.Finalizer) if err != nil { provisionerLog.Error(err, "Fails to add a finalizer to the secret") } @@ -253,7 +252,7 @@ func (j *provisionerService) Delete(ctx context.Context, volume *corev1.Persiste } if shouldRemoveFinalizer { provisionerLog.V(1).Info("Remove Finalizer", "namespace", secretNamespace, "name", secretName) - if err = resource.RemoveSecretFinalizer(ctx, j.K8sClient, secret, common.Finalizer); err != nil { + if err = resource.RemoveSecretFinalizer(ctx, j.K8sClient, secret, config.Finalizer); err != nil { return err } } diff --git a/pkg/fuse/grace/grace.go b/pkg/fuse/grace/grace.go deleted file mode 100644 index 4847210c78..0000000000 --- a/pkg/fuse/grace/grace.go +++ /dev/null @@ -1,446 +0,0 @@ -/* - Copyright 2023 Juicedata Inc - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package grace - -import ( - "bufio" - "context" - "fmt" - "net" - "os" - "path" - "strconv" - "strings" - "time" - - corev1 "k8s.io/api/core/v1" - k8serrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/fields" - "k8s.io/klog/v2" - - "github.com/juicedata/juicefs-csi-driver/pkg/common" - "github.com/juicedata/juicefs-csi-driver/pkg/config" - "github.com/juicedata/juicefs-csi-driver/pkg/fuse/passfd" - "github.com/juicedata/juicefs-csi-driver/pkg/juicefs/mount/builder" - k8s "github.com/juicedata/juicefs-csi-driver/pkg/k8sclient" - "github.com/juicedata/juicefs-csi-driver/pkg/util" - "github.com/juicedata/juicefs-csi-driver/pkg/util/resource" -) - -var log = klog.NewKlogr().WithName("grace") - -func ServeGfShutdown(addr string) error { - _ = os.RemoveAll(addr) - - listener, err := net.Listen("unix", addr) - if err != nil { - log.Error(err, "error listening on socket") - return err - } - - log.Info("Serve gracefully shutdown is listening", "addr", addr) - - go func() { - defer listener.Close() - for { - conn, err := listener.Accept() - if err != nil { - log.Error(err, "error accepting connection") - continue - } - - log.Info("Start to graceful shutdown") - go handleShutdown(conn) - } - }() - return nil -} - -func handleShutdown(conn net.Conn) { - defer conn.Close() - - buf := make([]byte, 1024) - n, err := conn.Read(buf) - if err != nil { - log.Error(err, "error reading from connection") - return - } - - message := string(buf[:n]) - - var recreate bool - ss := strings.Split(message, " ") - name := ss[0] - if len(ss) == 2 { - recreate = true - } - - log.V(1).Info("Received shutdown message", "message", message) - - client, err := k8s.NewClient() - if err != nil { - log.Error(err, "failed to create k8s client") - return - } - ctx, cancel := context.WithTimeout(context.TODO(), 30*time.Minute) - defer cancel() - - mountPod, err := client.GetPod(ctx, name, config.Namespace) - if err != nil { - sendMessage(conn, "FAIL get pod") - log.Error(err, "get pod error", "name", name) - return - } - if mountPod.Spec.NodeName != config.NodeName { - sendMessage(conn, "FAIL pod is not on node") - return - } - ce := util.ContainSubString(mountPod.Spec.Containers[0].Command, "metaurl") - hashVal := mountPod.Labels[common.PodJuiceHashLabelKey] - if hashVal == "" { - log.Info("pod has no hash label") - return - } - log.V(1).Info("get hash val from pod", "pod", mountPod.Name, "hash", hashVal) - pu := &podUpgrade{ - client: client, - pod: mountPod, - recreate: recreate, - ce: ce, - hashVal: hashVal, - } - if err := pu.gracefulShutdown(ctx, conn); err != nil { - log.Error(err, "graceful shutdown error") - return - } -} - -type podUpgrade struct { - client *k8s.K8sClient - pod *corev1.Pod - recreate bool - ce bool - hashVal string - newVersion string -} - -func (p *podUpgrade) gracefulShutdown(ctx context.Context, conn net.Conn) error { - lock := config.GetPodLock(p.hashVal) - err := func() error { - lock.Lock() - defer lock.Unlock() - var jfsConf *util.JuiceConf - var err error - - if jfsConf, err = p.prepareShutdown(ctx, conn); err != nil { - 
sendMessage(conn, "FAIL "+err.Error()) - return err - } - - if err := p.sighup(ctx, conn, jfsConf); err != nil { - sendMessage(conn, "FAIL "+err.Error()) - return err - } - return nil - }() - if err != nil { - return err - } - - if p.recreate { - p.waitForUpgrade(ctx, conn) - } - return nil -} - -func (p *podUpgrade) sighup(ctx context.Context, conn net.Conn, jfsConf *util.JuiceConf) error { - // send SIGHUP to mount pod - for i := 0; i < 600; i++ { - log.Info("kill -s SIGHUP", "pid", jfsConf.Pid, "pod", p.pod.Name) - sendMessage(conn, "send SIGHUP to mount pod") - if stdout, stderr, err := p.client.ExecuteInContainer( - ctx, - p.pod.Name, - p.pod.Namespace, - common.MountContainerName, - []string{"kill", "-s", "SIGHUP", strconv.Itoa(jfsConf.Pid)}, - ); err != nil { - log.V(1).Info("kill -s SIGHUP", "pid", jfsConf.Pid, "stdout", stdout, "stderr", stderr, "error", err) - continue - } - upgradeEvtMsg := fmt.Sprintf("Upgrade binary to %s in %s", p.newVersion, common.MountContainerName) - if p.recreate { - upgradeEvtMsg = "Upgrade pod with recreating" - sendMessage(conn, upgradeEvtMsg) - } else { - sendMessage(conn, "SUCCESS "+upgradeEvtMsg) - } - if err := p.client.CreateEvent(ctx, *p.pod, corev1.EventTypeNormal, "Upgrade", upgradeEvtMsg); err != nil { - log.Error(err, "fail to create event") - } - return nil - } - sendMessage(conn, "FAIL to send SIGHUP to mount pod") - log.Info("mount point of mount pod is busy, stop upgrade", "podName", p.pod.Name) - return nil -} - -func (p *podUpgrade) prepareShutdown(ctx context.Context, conn net.Conn) (*util.JuiceConf, error) { - mntPath, _, err := util.GetMountPathOfPod(*p.pod) - if err != nil { - return nil, err - } - - hashVal := p.pod.Labels[common.PodJuiceHashLabelKey] - - // get pid and sid from /.config - msg := "get pid from config" - sendMessage(conn, msg) - log.V(1).Info(msg, "path", mntPath, "pod", p.pod.Name) - var conf []byte - err = util.DoWithTimeout(ctx, 2*time.Second, func() error { - conf, err = os.ReadFile(path.Join(mntPath, ".config")) - return err - }) - jfsConf, err := util.ParseConfig(conf) - if err != nil { - return nil, err - } - sendMessage(conn, fmt.Sprintf("pid in mount pod: %d", jfsConf.Pid)) - - cJob, err := builder.NewCanaryJob(ctx, p.client, p.pod, p.recreate) - if err != nil { - return nil, err - } - sendMessage(conn, fmt.Sprintf("create canary job %s", cJob.Name)) - if _, err := p.client.CreateJob(ctx, cJob); err != nil { - log.Error(err, "create canary pod error", "name", p.pod.Name) - return nil, err - } - - sendMessage(conn, "wait for canary job completed") - if err := resource.WaitForJobComplete(ctx, p.client, cJob.Name, 5*time.Minute); err != nil { - log.Error(err, "canary job is not complete, delete it.", "job", cJob.Name) - _ = p.client.DeleteJob(ctx, cJob.Name, cJob.Namespace) - return nil, err - } - - sendMessage(conn, fmt.Sprintf("new image: %s", cJob.Spec.Template.Spec.Containers[0].Image)) - sendMessage(conn, "validate new version") - v := p.validateVersion(ctx, conn) - if !v { - return nil, fmt.Errorf("new version is not supported") - } - - if p.recreate { - // set fuse fd to -1 in mount pod - - // update sid - if p.ce { - passfd.GlobalFds.UpdateSid(hashVal, jfsConf.Meta.Sid) - log.V(1).Info("update sid", "mountPod", p.pod.Name, "sid", jfsConf.Meta.Sid) - sendMessage(conn, fmt.Sprintf("sid in mount pod: %d", jfsConf.Meta.Sid)) - } - - // close fuse fd in mount pod - commPath, err := resource.GetCommPath("/tmp", *p.pod) - if err != nil { - return nil, err - } - msg = "close fuse fd in mount pod" - 
sendMessage(conn, msg) - fuseFd, _ := passfd.GetFuseFd(commPath, true) - for i := 0; i < 100 && fuseFd < 0; i++ { - time.Sleep(time.Millisecond * 100) - fuseFd, _ = passfd.GetFuseFd(commPath, true) - } - if fuseFd < 0 { - return nil, fmt.Errorf("fail to recv FUSE fd from %s", commPath) - } - log.Info("recv FUSE fd", "fd", fuseFd) - } else { - // upgrade binary - msg = "upgrade binary to mount pod" - log.V(1).Info(msg, "pod", p.pod.Name) - sendMessage(conn, msg) - if err := p.uploadBinary(ctx); err != nil { - return nil, err - } - } - return jfsConf, nil -} - -func (p *podUpgrade) validateVersion(ctx context.Context, conn net.Conn) bool { - hashVal := p.pod.Labels[common.PodJuiceHashLabelKey] - if hashVal == "" { - return false - } - // read from version file - var ( - v []byte - err error - ) - err = util.DoWithTimeout(ctx, 2*time.Second, func() error { - v, err = os.ReadFile(fmt.Sprintf("/tmp/%s/version", hashVal)) - return err - }) - if err != nil { - log.Error(err, "read version file error", "hash", hashVal) - sendMessage(conn, fmt.Sprintf("FAIL read version file error: %v", err)) - return false - } - p.newVersion = string(v) - if p.recreate { - supported := util.SupportUpgradeRecreate(p.ce, string(v)) - if !supported { - sendMessage(conn, fmt.Sprintf("FAIL new version %s is not supported", string(v))) - } - return supported - } - supported := util.SupportUpgradeBinary(p.ce, string(v)) - if !supported { - sendMessage(conn, fmt.Sprintf("FAIL new version %s is not supported", string(v))) - } - return supported -} - -func (p *podUpgrade) waitForUpgrade(ctx context.Context, conn net.Conn) { - sendMessage(conn, "wait for upgrade...") - hashVal := p.pod.Labels[common.PodJuiceHashLabelKey] - if hashVal == "" { - return - } - t := time.NewTicker(1 * time.Second) - defer t.Stop() - ctx, cancel := context.WithTimeout(ctx, 20*time.Second) - defer cancel() - reportDeleted := false - for { - select { - case <-t.C: - po, err := p.client.GetPod(ctx, p.pod.Name, p.pod.Namespace) - if err != nil && !k8serrors.IsNotFound(err) { - log.Error(err, "get pod error", "pod", p.pod.Name) - sendMessage(conn, fmt.Sprintf("WARNING get pod error: %v", err)) - continue - } - if po != nil { - if resource.IsPodComplete(po) { - sendMessage(conn, fmt.Sprintf("Mount pod %s received signal and completed", p.pod.Name)) - } - } else if !reportDeleted { - sendMessage(conn, fmt.Sprintf("Mount pod %s is deleted", p.pod.Name)) - reportDeleted = true - } - labelSelector := &metav1.LabelSelector{MatchLabels: map[string]string{ - common.PodTypeKey: common.PodTypeValue, - common.PodJuiceHashLabelKey: hashVal, - }} - fieldSelector := &fields.Set{"spec.nodeName": config.NodeName} - pods, err := p.client.ListPod(ctx, config.Namespace, labelSelector, fieldSelector) - if err != nil { - log.Error(err, "List pod error") - sendMessage(conn, fmt.Sprintf("WARNING list pod error: %v", err)) - continue - } - for _, po := range pods { - if po.DeletionTimestamp == nil && !resource.IsPodComplete(&po) && po.Name != p.pod.Name { - if resource.IsPodReady(&po) { - sendMessage(conn, fmt.Sprintf("SUCCESS Upgrade mount pod and recreate one: %s", po.Name)) - return - } else { - sendMessage(conn, fmt.Sprintf("Wait for new mount pod ready: %s", po.Name)) - } - } - } - case <-ctx.Done(): - sendMessage(conn, "FAIL Upgrade mount pod timeout") - return - } - } -} - -func (p *podUpgrade) uploadBinary(ctx context.Context) error { - if p.ce { - stdout, stderr, err := p.client.ExecuteInContainer( - ctx, - p.pod.Name, - p.pod.Namespace, - common.MountContainerName, 
- []string{"sh", "-c", "rm -rf /usr/local/bin/juicefs && mv /tmp/juicefs /usr/local/bin/juicefs"}, - ) - if err != nil { - log.Error(err, "upload binary error", "pod", p.pod.Name, "stdout", stdout, "stderr", stderr) - return err - } - return nil - } - - stdout, stderr, err := p.client.ExecuteInContainer( - ctx, - p.pod.Name, - p.pod.Namespace, - common.MountContainerName, - []string{"sh", "-c", "rm -rf /usr/bin/juicefs && mv /tmp/juicefs /usr/bin/juicefs && rm -rf /usr/local/juicefs/mount/jfsmount && mv /tmp/jfsmount /usr/local/juicefs/mount/jfsmount"}, - ) - if err != nil { - log.Error(err, "upload binary error", "pod", p.pod.Name, "stdout", stdout, "stderr", stderr) - return err - } - return nil - -} - -func TriggerShutdown(socketPath string, name string, restart bool) error { - conn, err := net.Dial("unix", socketPath) - if err != nil { - log.Error(err, "error connecting to socket") - return err - } - defer conn.Close() - - message := name - if restart { - message = fmt.Sprintf("%s RESTART", name) - } - - _, err = conn.Write([]byte(message)) - if err != nil { - log.Error(err, "error sending message") - return err - } - log.Info("trigger gracefully shutdown successfully", "name", name) - - scanner := bufio.NewScanner(conn) - for scanner.Scan() { - message = scanner.Text() - log.Info(message) - if strings.HasPrefix(message, "SUCCESS") || strings.HasPrefix(message, "FAIL") { - break - } - } - - return scanner.Err() -} - -func sendMessage(conn net.Conn, message string) { - _, err := conn.Write([]byte(message + "\n")) - if err != nil { - log.V(1).Info("error sending message", "message", message, "error", err) - } -} diff --git a/pkg/fuse/passfd/passfd.go b/pkg/fuse/passfd.go similarity index 80% rename from pkg/fuse/passfd/passfd.go rename to pkg/fuse/passfd.go index 6efd86dfb3..63170298c9 100644 --- a/pkg/fuse/passfd/passfd.go +++ b/pkg/fuse/passfd.go @@ -1,5 +1,5 @@ /* - Copyright 2023 Juicedata Inc + Copyright 2024 Juicedata Inc Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ limitations under the License. 
*/ -package passfd +package fuse import ( "context" @@ -27,22 +27,15 @@ import ( "syscall" "time" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/fields" "k8s.io/klog/v2" k8sMount "k8s.io/utils/mount" - "github.com/juicedata/juicefs-csi-driver/pkg/common" - "github.com/juicedata/juicefs-csi-driver/pkg/config" - k8s "github.com/juicedata/juicefs-csi-driver/pkg/k8sclient" "github.com/juicedata/juicefs-csi-driver/pkg/util" ) var fdLog = klog.NewKlogr().WithName("passfd") type Fds struct { - client *k8s.K8sClient globalMu sync.Mutex basePath string fds map[string]*fd @@ -50,10 +43,9 @@ type Fds struct { var GlobalFds *Fds -func InitGlobalFds(ctx context.Context, client *k8s.K8sClient, basePath string) error { +func InitGlobalFds(ctx context.Context, basePath string) error { GlobalFds = &Fds{ globalMu: sync.Mutex{}, - client: client, basePath: basePath, fds: make(map[string]*fd), } @@ -95,9 +87,8 @@ func (fs *Fds) ParseFuseFds(ctx context.Context) error { } for _, subEntry := range subEntries { if strings.HasPrefix(subEntry.Name(), "fuse_fd_comm.") { - subdir := path.Join(fs.basePath, entry.Name(), subEntry.Name()) - fdLog.V(1).Info("parse fuse fd", "path", subdir) - fs.parseFuse(ctx, entry.Name(), subdir) + fdLog.V(1).Info("parse fuse fd", "path", subEntry.Name()) + fs.parseFuse(ctx, entry.Name(), path.Join(fs.basePath, entry.Name(), subEntry.Name())) } } } @@ -125,6 +116,7 @@ func (fs *Fds) GetFdAddress(ctx context.Context, podHashVal string) (string, err } fs.globalMu.Lock() fs.fds[podHashVal] = &fd{ + fuseMu: sync.Mutex{}, done: make(chan struct{}), fuseFd: 0, fuseSetting: []byte("FUSE"), @@ -178,31 +170,9 @@ func (fs *Fds) CloseFd(podHashVal string) { } func (fs *Fds) parseFuse(ctx context.Context, podHashVal, fusePath string) { - fuseFd, fuseSetting := GetFuseFd(fusePath, false) + fuseFd, fuseSetting := getFuseFd(fusePath) if fuseFd <= 0 { - // get fuse fd error, try to get mount pod - labelSelector := &metav1.LabelSelector{MatchLabels: map[string]string{ - common.PodTypeKey: common.PodTypeValue, - common.PodJuiceHashLabelKey: podHashVal, - }} - fieldSelector := &fields.Set{"spec.nodeName": config.NodeName} - pods, err := fs.client.ListPod(ctx, config.Namespace, labelSelector, fieldSelector) - if err != nil { - fdLog.Error(err, "list pods error") - return - } - var mountPod *corev1.Pod - for _, pod := range pods { - if pod.DeletionTimestamp == nil { - mountPod = &pod - break - } - } - if mountPod == nil { - fdLog.V(1).Info("get fuse fd error and mount pod not found, ignore it", "hashVal", podHashVal, "fusePath", fusePath) - // if can not get fuse fd, do not serve for it - return - } + return } serverPath := path.Join(fs.basePath, podHashVal, "fuse_fd_csi_comm.sock") @@ -210,6 +180,7 @@ func (fs *Fds) parseFuse(ctx context.Context, podHashVal, fusePath string) { fdLog.V(1).Info("fuse fd path of pod", "hashVal", podHashVal, "fusePath", fusePath) f := &fd{ + fuseMu: sync.Mutex{}, done: make(chan struct{}), fuseFd: 0, fuseSetting: []byte("FUSE"), @@ -226,11 +197,11 @@ func (fs *Fds) parseFuse(ctx context.Context, podHashVal, fusePath string) { } type fd struct { - done chan struct{} + fuseMu sync.Mutex + done chan struct{} fuseFd int fuseSetting []byte - sid uint64 serverAddress string // server for pod serverAddressInPod string // server path in pod @@ -293,14 +264,14 @@ func (fs *Fds) handleFDRequest(podHashVal string, conn *net.UnixConn) { return } var fds = []int{0} - fs.globalMu.Lock() + f.fuseMu.Lock() if f.fuseFd > 0 { fds = 
append(fds, f.fuseFd) fdLog.V(1).Info("send FUSE fd", "fd", f.fuseFd) } err := putFd(conn, f.fuseSetting, fds...) if err != nil { - fs.globalMu.Unlock() + f.fuseMu.Unlock() fdLog.Error(err, "send fuse fds error") return } @@ -308,7 +279,7 @@ func (fs *Fds) handleFDRequest(podHashVal string, conn *net.UnixConn) { _ = syscall.Close(f.fuseFd) f.fuseFd = -1 } - fs.globalMu.Unlock() + f.fuseMu.Unlock() var msg []byte msg, fds, err = getFd(conn, 1) @@ -317,7 +288,7 @@ func (fs *Fds) handleFDRequest(podHashVal string, conn *net.UnixConn) { return } - fs.globalMu.Lock() + f.fuseMu.Lock() if string(msg) != "CLOSE" && f.fuseFd <= 0 && len(fds) >= 1 { f.fuseFd = fds[0] f.fuseSetting = msg @@ -328,41 +299,19 @@ func (fs *Fds) handleFDRequest(podHashVal string, conn *net.UnixConn) { } fdLog.V(1).Info("recv msg and fds", "msg", string(msg), "fd", fds) } - fs.fds[podHashVal] = f - fs.globalMu.Unlock() -} - -func (fs *Fds) UpdateSid(podHashVal string, sid uint64) { - f := fs.fds[podHashVal] - if f == nil { - return - } + f.fuseMu.Unlock() fs.globalMu.Lock() - f.sid = sid fs.fds[podHashVal] = f fs.globalMu.Unlock() } -func (fs *Fds) GetSid(podHashVal string) uint64 { - f := fs.fds[podHashVal] - if f == nil { - return 0 - } - - fs.globalMu.Lock() - sid := f.sid - fs.globalMu.Unlock() - return sid -} - -func GetFuseFd(path string, close bool) (int, []byte) { +func getFuseFd(path string) (int, []byte) { var exists bool if err := util.DoWithTimeout(context.TODO(), time.Second*3, func() (err error) { exists, err = k8sMount.PathExists(path) return }); err != nil { - fdLog.V(1).Info("path exists error", "path", path) return -1, nil } @@ -382,20 +331,9 @@ func GetFuseFd(path string, close bool) (int, []byte) { } fdLog.V(1).Info("get fd and msg", "fd", fds) _ = syscall.Close(fds[0]) - if close { - fdLog.V(1).Info("send close fuse fd") - _ = putFd(conn.(*net.UnixConn), []byte("CLOSE"), 0) // close it - if len(fds) > 1 { - // close it in csi also - _ = syscall.Close(fds[1]) - fdLog.Info("fd ") - return fds[1], msg - } - return fds[0], msg - } if len(fds) > 1 { - fdLog.V(1).Info("send FUSE fd", "fd", fds[1]) err = putFd(conn.(*net.UnixConn), msg, fds[1]) + fdLog.V(1).Info("send FUSE fd", "fd", fds[1]) if err != nil { fdLog.Error(err, "send FUSE error") } diff --git a/pkg/juicefs/juicefs.go b/pkg/juicefs/juicefs.go index 7b992b0706..912c5112b7 100644 --- a/pkg/juicefs/juicefs.go +++ b/pkg/juicefs/juicefs.go @@ -40,7 +40,6 @@ import ( k8sexec "k8s.io/utils/exec" "k8s.io/utils/mount" - "github.com/juicedata/juicefs-csi-driver/pkg/common" "github.com/juicedata/juicefs-csi-driver/pkg/config" podmount "github.com/juicedata/juicefs-csi-driver/pkg/juicefs/mount" "github.com/juicedata/juicefs-csi-driver/pkg/k8sclient" @@ -173,10 +172,7 @@ func (fs *jfs) BindTarget(ctx context.Context, bindSource, target string) error } // target is bind by other path, umount it log.Info("target bind mount to other path, umount it", "target", target) - _ = util.DoWithTimeout(ctx, defaultCheckTimeout, func() error { - util.UmountPath(ctx, target) - return nil - }) + util.UmountPath(ctx, target) } // bind target to mountpath log.Info("binding source at target", "source", bindSource, "target", target) @@ -521,8 +517,8 @@ func (j *juicefs) JfsUnmount(ctx context.Context, volumeId, mountPath string) er } // get pod by label labelSelector := &metav1.LabelSelector{MatchLabels: map[string]string{ - common.PodTypeKey: common.PodTypeValue, - common.PodUniqueIdLabelKey: uniqueId, + config.PodTypeKey: config.PodTypeValue, + config.PodUniqueIdLabelKey: 
uniqueId, }} fieldSelector := &fields.Set{"spec.nodeName": config.NodeName} pods, err := j.K8sClient.ListPod(ctx, config.Namespace, labelSelector, fieldSelector) @@ -541,7 +537,7 @@ func (j *juicefs) JfsUnmount(ctx context.Context, volumeId, mountPath string) er } if mountPod != nil { podName = mountPod.Name - hashVal = mountPod.Labels[common.PodJuiceHashLabelKey] + hashVal = mountPod.Labels[config.PodJuiceHashLabelKey] if hashVal == "" { return fmt.Errorf("pod %s/%s has no hash label", mountPod.Namespace, mountPod.Name) } @@ -581,10 +577,7 @@ func (j *juicefs) CreateTarget(ctx context.Context, target string) error { return os.MkdirAll(target, os.FileMode(0755)) } else if corruptedMnt = mount.IsCorruptedMnt(err); corruptedMnt { // if target is a corrupted mount, umount it - _ = util.DoWithTimeout(ctx, defaultCheckTimeout, func() error { - util.UmountPath(ctx, target) - return nil - }) + util.UmountPath(ctx, target) continue } else { return err diff --git a/pkg/juicefs/mocks/mock_jfs.go b/pkg/juicefs/mocks/mock_jfs.go index afc830b815..22ec1df9a6 100644 --- a/pkg/juicefs/mocks/mock_jfs.go +++ b/pkg/juicefs/mocks/mock_jfs.go @@ -5,12 +5,11 @@ package mocks import ( - "context" - "reflect" + context "context" + reflect "reflect" - "github.com/golang/mock/gomock" - - "github.com/juicedata/juicefs-csi-driver/pkg/config" + gomock "github.com/golang/mock/gomock" + config "github.com/juicedata/juicefs-csi-driver/pkg/config" ) // MockJfs is a mock of Jfs interface. diff --git a/pkg/juicefs/mount/builder/cci-serverless.go b/pkg/juicefs/mount/builder/cci-serverless.go index 285e58cd44..4a2940ab43 100644 --- a/pkg/juicefs/mount/builder/cci-serverless.go +++ b/pkg/juicefs/mount/builder/cci-serverless.go @@ -24,7 +24,6 @@ import ( corev1 "k8s.io/api/core/v1" utilpointer "k8s.io/utils/pointer" - "github.com/juicedata/juicefs-csi-driver/pkg/common" "github.com/juicedata/juicefs-csi-driver/pkg/config" "github.com/juicedata/juicefs-csi-driver/pkg/util/security" ) @@ -166,7 +165,7 @@ func (r *CCIBuilder) genCCIServerlessVolumes() ([]corev1.Volume, []corev1.Volume func (r *CCIBuilder) genNonPrivilegedContainer() corev1.Container { rootUser := int64(0) return corev1.Container{ - Name: common.MountContainerName, + Name: config.MountContainerName, Image: r.BaseBuilder.jfsSetting.Attr.Image, SecurityContext: &corev1.SecurityContext{ Capabilities: &corev1.Capabilities{ @@ -179,7 +178,7 @@ func (r *CCIBuilder) genNonPrivilegedContainer() corev1.Container { }, Env: []corev1.EnvVar{ { - Name: common.JfsInsideContainer, + Name: config.JfsInsideContainer, Value: "1", }, }, diff --git a/pkg/juicefs/mount/builder/common.go b/pkg/juicefs/mount/builder/common.go index eb6e07cdbb..609bd20c82 100644 --- a/pkg/juicefs/mount/builder/common.go +++ b/pkg/juicefs/mount/builder/common.go @@ -28,7 +28,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "github.com/juicedata/juicefs-csi-driver/pkg/common" "github.com/juicedata/juicefs-csi-driver/pkg/config" "github.com/juicedata/juicefs-csi-driver/pkg/util" "github.com/juicedata/juicefs-csi-driver/pkg/util/security" @@ -38,6 +37,7 @@ const ( JfsDirName = "jfs-dir" UpdateDBDirName = "updatedb" UpdateDBCfgFile = "/etc/updatedb.conf" + JfsFuseFdPathName = "jfs-fuse-fd" JfsFuseFsPathInPod = "/tmp" JfsFuseFsPathInHost = "/var/run/juicefs-csi" JfsCommEnv = "JFS_SUPER_COMM" @@ -54,8 +54,8 @@ func (r *BaseBuilder) genPodTemplate(baseCnGen func() corev1.Container) *corev1. 
ObjectMeta: metav1.ObjectMeta{ Namespace: r.jfsSetting.Attr.Namespace, Labels: map[string]string{ - common.PodTypeKey: common.PodTypeValue, - common.PodUniqueIdLabelKey: r.jfsSetting.UniqueId, + config.PodTypeKey: config.PodTypeValue, + config.PodUniqueIdLabelKey: r.jfsSetting.UniqueId, }, Annotations: make(map[string]string), }, @@ -94,7 +94,7 @@ func (r *BaseBuilder) genCommonJuicePod(cnGen func() corev1.Container) *corev1.P gracePeriod = *r.jfsSetting.Attr.TerminationGracePeriodSeconds } pod.Spec.TerminationGracePeriodSeconds = &gracePeriod - controllerutil.AddFinalizer(pod, common.Finalizer) + controllerutil.AddFinalizer(pod, config.Finalizer) volumes, volumeMounts := r._genJuiceVolumes() pod.Spec.Volumes = volumes @@ -267,8 +267,8 @@ func (r *BaseBuilder) genMetricsPort() int32 { // _genMetadata generates labels & annotations func (r *BaseBuilder) _genMetadata() (labels map[string]string, annotations map[string]string) { labels = map[string]string{ - common.PodTypeKey: common.PodTypeValue, - common.PodUniqueIdLabelKey: r.jfsSetting.UniqueId, + config.PodTypeKey: config.PodTypeValue, + config.PodUniqueIdLabelKey: r.jfsSetting.UniqueId, } annotations = map[string]string{} @@ -279,12 +279,12 @@ func (r *BaseBuilder) _genMetadata() (labels map[string]string, annotations map[ annotations[k] = v } if r.jfsSetting.DeletedDelay != "" { - annotations[common.DeleteDelayTimeKey] = r.jfsSetting.DeletedDelay + annotations[config.DeleteDelayTimeKey] = r.jfsSetting.DeletedDelay } - annotations[common.JuiceFSUUID] = r.jfsSetting.UUID - annotations[common.UniqueId] = r.jfsSetting.UniqueId + annotations[config.JuiceFSUUID] = r.jfsSetting.UUID + annotations[config.UniqueId] = r.jfsSetting.UniqueId if r.jfsSetting.CleanCache { - annotations[common.CleanCache] = "true" + annotations[config.CleanCache] = "true" } return } diff --git a/pkg/juicefs/mount/builder/container.go b/pkg/juicefs/mount/builder/container.go index b3fadf048e..8d923776df 100644 --- a/pkg/juicefs/mount/builder/container.go +++ b/pkg/juicefs/mount/builder/container.go @@ -59,13 +59,13 @@ func (r *ContainerBuilder) NewMountSidecar() *corev1.Pod { // delete fuse passfd path for i, vm := range pod.Spec.Containers[0].VolumeMounts { - if vm.Name == config.JfsFuseFdPathName { + if vm.Name == JfsFuseFdPathName { pod.Spec.Containers[0].VolumeMounts = append(pod.Spec.Containers[0].VolumeMounts[:i], pod.Spec.Containers[0].VolumeMounts[i+1:]...) break } } for i, v := range pod.Spec.Volumes { - if v.Name == config.JfsFuseFdPathName { + if v.Name == JfsFuseFdPathName { pod.Spec.Volumes = append(pod.Spec.Volumes[:i], pod.Spec.Volumes[i+1:]...) break } diff --git a/pkg/juicefs/mount/builder/job.go b/pkg/juicefs/mount/builder/job.go index 0adcb8c10c..1907470d92 100644 --- a/pkg/juicefs/mount/builder/job.go +++ b/pkg/juicefs/mount/builder/job.go @@ -17,7 +17,6 @@ limitations under the License. 
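// genCommonJuicePod above pins config.Finalizer on every mount pod, so deletion
// waits for the driver's cleanup rather than the kubelet's garbage collection.
// A minimal sketch of the controller-runtime add/check/remove pattern; the
// finalizer string is illustrative, not the driver's value:
package sketch

import (
	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)

const demoFinalizer = "example.com/mount-finalizer" // assumption

func protect(pod *corev1.Pod) {
	controllerutil.AddFinalizer(pod, demoFinalizer)
}

func guarded(pod *corev1.Pod) bool {
	return controllerutil.ContainsFinalizer(pod, demoFinalizer)
}

func release(pod *corev1.Pod) {
	controllerutil.RemoveFinalizer(pod, demoFinalizer) // pod may now be deleted
}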
package builder import ( - "context" "crypto/sha256" "fmt" "strings" @@ -25,17 +24,12 @@ import ( batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/klog/v2" - "github.com/juicedata/juicefs-csi-driver/pkg/common" "github.com/juicedata/juicefs-csi-driver/pkg/config" - k8s "github.com/juicedata/juicefs-csi-driver/pkg/k8sclient" "github.com/juicedata/juicefs-csi-driver/pkg/util" "github.com/juicedata/juicefs-csi-driver/pkg/util/security" ) -var log = klog.NewKlogr().WithName("job-builder") - const DefaultJobTTLSecond = int32(5) type JobBuilder struct { @@ -110,7 +104,7 @@ func (r *JobBuilder) newJob(jobName string) *batchv1.Job { Name: jobName, Namespace: r.jfsSetting.Attr.Namespace, Labels: map[string]string{ - common.PodTypeKey: common.JobTypeValue, + config.PodTypeKey: config.JobTypeValue, }, }, Spec: batchv1.JobSpec{ @@ -137,7 +131,7 @@ func (r *JobBuilder) newCleanJob(jobName string) *batchv1.Job { Name: jobName, Namespace: r.jfsSetting.Attr.Namespace, Labels: map[string]string{ - common.PodTypeKey: common.JobTypeValue, + config.PodTypeKey: config.JobTypeValue, }, }, Spec: batchv1.JobSpec{ @@ -225,75 +219,3 @@ func NewFuseAbortJob(mountpod *corev1.Pod, devMinor uint32) *batchv1.Job { }, } } - -// NewCanaryJob -// restart: pull image ahead -// !restart: for download binary -func NewCanaryJob(ctx context.Context, client *k8s.K8sClient, mountPod *corev1.Pod, restart bool) (*batchv1.Job, error) { - attr, err := config.GenPodAttrWithMountPod(ctx, client, mountPod) - if err != nil { - return nil, err - } - volumeId := mountPod.Labels[common.PodUniqueIdLabelKey] - name := GenJobNameByVolumeId(volumeId) + "-canary" - if _, err := client.GetJob(ctx, name, config.Namespace); err == nil { - log.Info("canary job already exists, delete it first", "name", name) - if err := client.DeleteJob(ctx, name, config.Namespace); err != nil { - log.Error(err, "delete canary job error", "name", name) - return nil, err - } - } - - log.Info("create canary job", "image", attr.Image, "name", name) - var ( - mounts []corev1.VolumeMount - volumes []corev1.Volume - ) - for _, v := range mountPod.Spec.Volumes { - if v.Name == config.JfsFuseFdPathName { - volumes = append(volumes, v) - } - } - for _, c := range mountPod.Spec.Containers[0].VolumeMounts { - if c.Name == config.JfsFuseFdPathName { - mounts = append(mounts, c) - } - } - cmd := "juicefs version > /tmp/version" - if !restart { - ce := util.ContainSubString(mountPod.Spec.Containers[0].Command, "format") - if ce { - cmd = fmt.Sprintf("%s && cp /usr/local/bin/juicefs /tmp/juicefs", cmd) - } else { - cmd = fmt.Sprintf("%s && cp /usr/bin/juicefs /tmp/juicefs && cp /usr/local/juicefs/mount/jfsmount /tmp/jfsmount", cmd) - } - } - ttl := DefaultJobTTLSecond - cJob := batchv1.Job{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: config.Namespace, - }, - Spec: batchv1.JobSpec{ - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: config.Namespace, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{{ - Image: attr.Image, - Name: "canary", - Command: []string{"sh", "-c", cmd}, - VolumeMounts: mounts, - }}, - NodeName: mountPod.Spec.NodeName, - RestartPolicy: corev1.RestartPolicyNever, - Volumes: volumes, - }, - }, - TTLSecondsAfterFinished: &ttl, - }, - } - return &cJob, nil -} diff --git a/pkg/juicefs/mount/builder/pod.go b/pkg/juicefs/mount/builder/pod.go index 24fb5b7fe9..ffdb31615e 100644 --- a/pkg/juicefs/mount/builder/pod.go +++ 
b/pkg/juicefs/mount/builder/pod.go @@ -26,9 +26,8 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "github.com/juicedata/juicefs-csi-driver/pkg/common" "github.com/juicedata/juicefs-csi-driver/pkg/config" - "github.com/juicedata/juicefs-csi-driver/pkg/fuse/passfd" + "github.com/juicedata/juicefs-csi-driver/pkg/fuse" "github.com/juicedata/juicefs-csi-driver/pkg/util" ) @@ -48,7 +47,6 @@ func NewPodBuilder(setting *config.JfsSetting, capacity int64) *PodBuilder { // NewMountPod generates a pod with juicefs client func (r *PodBuilder) NewMountPod(podName string) (*corev1.Pod, error) { pod := r.genCommonJuicePod(r.genCommonContainer) - pod.Spec.RestartPolicy = corev1.RestartPolicyOnFailure pod.Name = podName mountCmd := r.genMountCommand() @@ -65,7 +63,7 @@ func (r *PodBuilder) NewMountPod(podName string) (*corev1.Pod, error) { // inject fuse fd if podName != "" && util.SupportFusePass(pod.Spec.Containers[0].Image) { - fdAddress, err := passfd.GlobalFds.GetFdAddress(context.TODO(), r.jfsSetting.HashVal) + fdAddress, err := fuse.GlobalFds.GetFdAddress(context.TODO(), r.jfsSetting.HashVal) if err != nil { return nil, err } @@ -109,7 +107,7 @@ func (r *PodBuilder) genCommonContainer() corev1.Container { isPrivileged := true rootUser := int64(0) return corev1.Container{ - Name: common.MountContainerName, + Name: config.MountContainerName, Image: r.BaseBuilder.jfsSetting.Attr.Image, SecurityContext: &corev1.SecurityContext{ Privileged: &isPrivileged, @@ -117,7 +115,7 @@ func (r *PodBuilder) genCommonContainer() corev1.Container { }, Env: []corev1.EnvVar{ { - Name: common.JfsInsideContainer, + Name: config.JfsInsideContainer, Value: "1", }, }, @@ -256,7 +254,7 @@ func (r *PodBuilder) genPodVolumes() ([]corev1.Volume, []corev1.VolumeMount) { }, }, { - Name: config.JfsFuseFdPathName, + Name: JfsFuseFdPathName, VolumeSource: corev1.VolumeSource{ HostPath: &corev1.HostPathVolumeSource{ Path: path.Join(JfsFuseFsPathInHost, r.jfsSetting.HashVal), @@ -272,7 +270,7 @@ func (r *PodBuilder) genPodVolumes() ([]corev1.Volume, []corev1.VolumeMount) { MountPropagation: &mp, }, { - Name: config.JfsFuseFdPathName, + Name: JfsFuseFdPathName, MountPath: JfsFuseFsPathInPod, }, } @@ -327,7 +325,7 @@ func (r *PodBuilder) genCleanCachePod() *corev1.Pod { ObjectMeta: metav1.ObjectMeta{ Namespace: r.jfsSetting.Attr.Namespace, Labels: map[string]string{ - common.PodTypeKey: common.PodTypeValue, + config.PodTypeKey: config.PodTypeValue, }, Annotations: make(map[string]string), }, diff --git a/pkg/juicefs/mount/builder/pod_test.go b/pkg/juicefs/mount/builder/pod_test.go index 068b15da38..c1a09a80e1 100644 --- a/pkg/juicefs/mount/builder/pod_test.go +++ b/pkg/juicefs/mount/builder/pod_test.go @@ -28,9 +28,8 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - "github.com/juicedata/juicefs-csi-driver/pkg/common" "github.com/juicedata/juicefs-csi-driver/pkg/config" - "github.com/juicedata/juicefs-csi-driver/pkg/fuse/passfd" + "github.com/juicedata/juicefs-csi-driver/pkg/fuse" ) var ( @@ -47,14 +46,14 @@ var ( Name: "juicefs-node-test", Namespace: config.Namespace, Labels: map[string]string{ - common.PodTypeKey: common.PodTypeValue, - common.PodUniqueIdLabelKey: "", + config.PodTypeKey: config.PodTypeValue, + config.PodUniqueIdLabelKey: "", }, Annotations: map[string]string{ - common.JuiceFSUUID: "", - common.UniqueId: "", + config.JuiceFSUUID: "", + config.UniqueId: "", }, - Finalizers: []string{common.Finalizer}, + Finalizers: 
[]string{config.Finalizer}, }, Spec: corev1.PodSpec{ Volumes: []corev1.Volume{ @@ -67,7 +66,7 @@ var ( }, }, }, { - Name: config.JfsFuseFdPathName, + Name: JfsFuseFdPathName, VolumeSource: corev1.VolumeSource{ HostPath: &corev1.HostPathVolumeSource{ Path: path.Join(JfsFuseFsPathInHost, "test"), @@ -91,6 +90,9 @@ var ( Env: []corev1.EnvVar{{ Name: "JFS_FOREGROUND", Value: "1", + }, { + Name: JfsCommEnv, + Value: "tmp/fuse_fd_csi_comm.sock", }}, EnvFrom: []corev1.EnvFromSource{{ SecretRef: &corev1.SecretEnvSource{ @@ -105,7 +107,7 @@ var ( MountPath: config.PodMountBase, MountPropagation: &mp, }, { - Name: config.JfsFuseFdPathName, + Name: JfsFuseFdPathName, MountPath: "/tmp", }, { @@ -117,11 +119,6 @@ var ( Privileged: &isPrivileged, RunAsUser: &rootUser, }, - Lifecycle: &corev1.Lifecycle{ - PreStop: &corev1.Handler{ - Exec: &corev1.ExecAction{Command: []string{"sh", "-c", "+e", fmt.Sprintf("umount %s -l; rmdir %s; exit 0", "/jfs/default-imagenet", "/jfs/default-imagenet")}}, - }, - }, Ports: []corev1.ContainerPort{ { Name: "metrics", @@ -130,7 +127,7 @@ var ( }, }}, TerminationGracePeriodSeconds: &gracePeriod, - RestartPolicy: corev1.RestartPolicyOnFailure, + RestartPolicy: corev1.RestartPolicyAlways, NodeName: "node", Hostname: "test", PriorityClassName: config.JFSMountPriorityName, @@ -213,7 +210,7 @@ func Test_getCacheDirVolumes(t *testing.T) { func TestNewMountPod(t *testing.T) { defer func() { _ = os.RemoveAll("tmp") }() - passfd.InitTestFds() + fuse.InitTestFds() config.NodeName = "node" config.Namespace = "" podLabelTest := corev1.Pod{} diff --git a/pkg/juicefs/mount/builder/secret.go b/pkg/juicefs/mount/builder/secret.go index a31a69ebdc..0a08211c1e 100644 --- a/pkg/juicefs/mount/builder/secret.go +++ b/pkg/juicefs/mount/builder/secret.go @@ -23,7 +23,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "github.com/juicedata/juicefs-csi-driver/pkg/common" + "github.com/juicedata/juicefs-csi-driver/pkg/config" ) const ( @@ -104,7 +104,7 @@ func (r *BaseBuilder) NewSecret() corev1.Secret { Namespace: r.jfsSetting.Attr.Namespace, Name: r.jfsSetting.SecretName, Labels: map[string]string{ - common.JuicefsSecretLabelKey: "true", + config.JuicefsSecretLabelKey: "true", }, }, StringData: data, diff --git a/pkg/juicefs/mount/builder/vci-serverless.go b/pkg/juicefs/mount/builder/vci-serverless.go index 6ed60a2a37..2d6fd42f92 100644 --- a/pkg/juicefs/mount/builder/vci-serverless.go +++ b/pkg/juicefs/mount/builder/vci-serverless.go @@ -25,7 +25,6 @@ import ( "k8s.io/apimachinery/pkg/util/json" utilpointer "k8s.io/utils/pointer" - "github.com/juicedata/juicefs-csi-driver/pkg/common" "github.com/juicedata/juicefs-csi-driver/pkg/config" "github.com/juicedata/juicefs-csi-driver/pkg/util/security" ) @@ -198,14 +197,14 @@ func (r *VCIBuilder) genVCIServerlessVolumes() ([]corev1.Volume, []corev1.Volume func (r *VCIBuilder) genNonPrivilegedContainer() corev1.Container { rootUser := int64(0) return corev1.Container{ - Name: common.MountContainerName, + Name: config.MountContainerName, Image: r.BaseBuilder.jfsSetting.Attr.Image, SecurityContext: &corev1.SecurityContext{ RunAsUser: &rootUser, }, Env: []corev1.EnvVar{ { - Name: common.JfsInsideContainer, + Name: config.JfsInsideContainer, Value: "1", }, }, @@ -214,5 +213,5 @@ func (r *VCIBuilder) genNonPrivilegedContainer() corev1.Container { func (r *VCIBuilder) genMountContainerName() string { pvcName := r.pvc.Name - return fmt.Sprintf("%s-%s", common.MountContainerName, pvcName) + return fmt.Sprintf("%s-%s", 
config.MountContainerName, pvcName) } diff --git a/pkg/juicefs/mount/pod_mount.go b/pkg/juicefs/mount/pod_mount.go index 7209b892ca..e2356f0771 100644 --- a/pkg/juicefs/mount/pod_mount.go +++ b/pkg/juicefs/mount/pod_mount.go @@ -38,9 +38,8 @@ import ( "k8s.io/klog/v2" k8sMount "k8s.io/utils/mount" - "github.com/juicedata/juicefs-csi-driver/pkg/common" jfsConfig "github.com/juicedata/juicefs-csi-driver/pkg/config" - "github.com/juicedata/juicefs-csi-driver/pkg/fuse/passfd" + "github.com/juicedata/juicefs-csi-driver/pkg/fuse" "github.com/juicedata/juicefs-csi-driver/pkg/juicefs/mount/builder" "github.com/juicedata/juicefs-csi-driver/pkg/k8sclient" "github.com/juicedata/juicefs-csi-driver/pkg/util" @@ -228,7 +227,7 @@ func (p *PodMount) JUmount(ctx context.Context, target, podName string) error { // close socket if util.SupportFusePass(po.Spec.Containers[0].Image) { - passfd.GlobalFds.StopFd(ctx, po.Labels[common.PodJuiceHashLabelKey]) + fuse.GlobalFds.StopFd(ctx, po.Labels[jfsConfig.PodJuiceHashLabelKey]) } // delete related secret @@ -314,9 +313,9 @@ func (p *PodMount) JDeleteVolume(ctx context.Context, jfsSetting *jfsConfig.JfsS func (p *PodMount) genMountPodName(ctx context.Context, jfsSetting *jfsConfig.JfsSetting) (string, error) { log := util.GenLog(ctx, p.log, "genMountPodName") labelSelector := &metav1.LabelSelector{MatchLabels: map[string]string{ - common.PodTypeKey: common.PodTypeValue, - common.PodUniqueIdLabelKey: jfsSetting.UniqueId, - common.PodJuiceHashLabelKey: jfsSetting.HashVal, + jfsConfig.PodTypeKey: jfsConfig.PodTypeValue, + jfsConfig.PodUniqueIdLabelKey: jfsSetting.UniqueId, + jfsConfig.PodJuiceHashLabelKey: jfsSetting.HashVal, }} pods, err := p.K8sClient.ListPod(ctx, jfsConfig.Namespace, labelSelector, nil) if err != nil { @@ -375,7 +374,7 @@ func (p *PodMount) createOrAddRef(ctx context.Context, podName string, jfsSettin return err } newPod.Annotations[key] = jfsSetting.TargetPath - newPod.Labels[common.PodJuiceHashLabelKey] = jfsSetting.HashVal + newPod.Labels[jfsConfig.PodJuiceHashLabelKey] = jfsSetting.HashVal if jfsConfig.GlobalConfig.EnableNodeSelector { nodeSelector := map[string]string{ "kubernetes.io/hostname": newPod.Spec.NodeName, @@ -400,7 +399,7 @@ func (p *PodMount) createOrAddRef(ctx context.Context, podName string, jfsSettin } if util.SupportFusePass(jfsSetting.Attr.Image) { - if err := passfd.GlobalFds.ServeFuseFd(ctx, newPod.Labels[common.PodJuiceHashLabelKey]); err != nil { + if err := fuse.GlobalFds.ServeFuseFd(ctx, newPod.Labels[jfsConfig.PodJuiceHashLabelKey]); err != nil { log.Error(err, "serve fuse fd error") } } @@ -424,12 +423,6 @@ func (p *PodMount) createOrAddRef(ctx context.Context, podName string, jfsSettin if err := p.createOrUpdateSecret(ctx, &secret); err != nil { return err } - // update mount path - jfsSetting.MountPath, _, err = util.GetMountPathOfPod(*oldPod) - if err != nil { - log.Error(err, "Get mount path of pod error", "podName", podName) - return err - } return p.AddRefOfMount(ctx, jfsSetting.TargetPath, podName) } } @@ -440,17 +433,6 @@ func (p *PodMount) waitUtilMountReady(ctx context.Context, jfsSetting *jfsConfig if err == nil { return nil } - if util.SupportFusePass(jfsSetting.Attr.Image) { - logger.Error(err, "pod is not ready within 60s") - // mount pod hang probably, close fd - logger.Info("close fuse fd") - passfd.GlobalFds.CloseFd(jfsSetting.HashVal) - // umount it - _ = util.DoWithTimeout(ctx, defaultCheckTimeout, func() error { - util.UmountPath(ctx, jfsSetting.MountPath) - return nil - }) - } // mountpoint not 
ready, get mount pod log for detail log, err := p.getErrContainerLog(ctx, podName) if err != nil { @@ -531,7 +513,7 @@ func (p *PodMount) AddRefOfMount(ctx context.Context, target string, podName str } annotation[key] = target // delete deleteDelayAt when there ars refs - delete(annotation, common.DeleteDelayAtKey) + delete(annotation, jfsConfig.DeleteDelayAtKey) return resource.ReplacePodAnnotation(ctx, p.K8sClient, exist, annotation) }) if err != nil { @@ -548,8 +530,8 @@ func (p *PodMount) setUUIDAnnotation(ctx context.Context, podName string, uuid s if err != nil { return err } - logger.Info("set pod annotation", "podName", podName, "key", common.JuiceFSUUID, "uuid", uuid) - return resource.AddPodAnnotation(ctx, p.K8sClient, pod, map[string]string{common.JuiceFSUUID: uuid}) + logger.Info("set pod annotation", "podName", podName, "key", jfsConfig.JuiceFSUUID, "uuid", uuid) + return resource.AddPodAnnotation(ctx, p.K8sClient, pod, map[string]string{jfsConfig.JuiceFSUUID: uuid}) } func (p *PodMount) setMountLabel(ctx context.Context, uniqueId, mountPodName string, podName, podNamespace string) (err error) { @@ -560,7 +542,7 @@ func (p *PodMount) setMountLabel(ctx context.Context, uniqueId, mountPodName str return err } logger.Info("set mount info in pod", "podName", podName) - if err := resource.AddPodLabel(ctx, p.K8sClient, pod, map[string]string{common.UniqueId: ""}); err != nil { + if err := resource.AddPodLabel(ctx, p.K8sClient, pod, map[string]string{jfsConfig.UniqueId: ""}); err != nil { return err } diff --git a/pkg/juicefs/mount/pod_mount_test.go b/pkg/juicefs/mount/pod_mount_test.go index c185cfde81..d9ddb3db5f 100644 --- a/pkg/juicefs/mount/pod_mount_test.go +++ b/pkg/juicefs/mount/pod_mount_test.go @@ -36,10 +36,9 @@ import ( k8sexec "k8s.io/utils/exec" "k8s.io/utils/mount" - "github.com/juicedata/juicefs-csi-driver/pkg/common" jfsConfig "github.com/juicedata/juicefs-csi-driver/pkg/config" "github.com/juicedata/juicefs-csi-driver/pkg/driver/mocks" - "github.com/juicedata/juicefs-csi-driver/pkg/fuse/passfd" + "github.com/juicedata/juicefs-csi-driver/pkg/fuse" "github.com/juicedata/juicefs-csi-driver/pkg/k8sclient" "github.com/juicedata/juicefs-csi-driver/pkg/util" ) @@ -48,7 +47,7 @@ var testA = &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "juicefs-test-node-a", Labels: map[string]string{ - common.PodUniqueIdLabelKey: "a", + jfsConfig.PodUniqueIdLabelKey: "a", }, }, Spec: corev1.PodSpec{ @@ -60,7 +59,7 @@ var testB = &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "juicefs-test-node-b", Labels: map[string]string{ - common.PodUniqueIdLabelKey: "b", + jfsConfig.PodUniqueIdLabelKey: "b", }, Annotations: map[string]string{ util.GetReferenceKey("/mnt/abc"): "/mnt/abc"}, @@ -74,7 +73,7 @@ var testC = &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "juicefs-test-node-c", Labels: map[string]string{ - common.PodUniqueIdLabelKey: "c", + jfsConfig.PodUniqueIdLabelKey: "c", }, Annotations: map[string]string{ util.GetReferenceKey("/mnt/abc"): "/mnt/abc"}, @@ -88,7 +87,7 @@ var testD = &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "juicefs-test-node-d", Labels: map[string]string{ - common.PodUniqueIdLabelKey: "d", + jfsConfig.PodUniqueIdLabelKey: "d", }, Annotations: map[string]string{"a": "b", util.GetReferenceKey("/mnt/def"): "/mnt/def"}, @@ -102,7 +101,7 @@ var testE = &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "juicefs-test-node-e", Labels: map[string]string{ - common.PodUniqueIdLabelKey: "e", + jfsConfig.PodUniqueIdLabelKey: "e", }, Annotations: map[string]string{ 
util.GetReferenceKey("/mnt/abc"): "/mnt/abc", @@ -118,7 +117,7 @@ var testF = &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "juicefs-test-node-f", Labels: map[string]string{ - common.PodUniqueIdLabelKey: "f", + jfsConfig.PodUniqueIdLabelKey: "f", }, }, Spec: corev1.PodSpec{ @@ -140,7 +139,7 @@ var testG = &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "juicefs-test-node-g", Labels: map[string]string{ - common.PodUniqueIdLabelKey: "g", + jfsConfig.PodUniqueIdLabelKey: "g", }, Annotations: map[string]string{ util.GetReferenceKey("/mnt/abc"): "/mnt/abc", @@ -148,10 +147,7 @@ var testG = &corev1.Pod{ }, }, Spec: corev1.PodSpec{ - Containers: []corev1.Container{{ - Image: "juicedata/mount:ce-v1.2.1", - Command: []string{"sh", "-c", "exec mount.juicefs juicefs-test-node-j /jfs/juicefs-test-node-j"}, - }}, + Containers: []corev1.Container{{Image: "juicedata/mount:ce-v1.2.1"}}, }, Status: corev1.PodStatus{ Phase: corev1.PodRunning, @@ -169,14 +165,11 @@ var testH = &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "juicefs-test-node-h", Labels: map[string]string{ - common.PodUniqueIdLabelKey: "h", + jfsConfig.PodUniqueIdLabelKey: "h", }, }, Spec: corev1.PodSpec{ - Containers: []corev1.Container{{ - Image: "juicedata/mount:ce-v1.2.1", - Command: []string{"sh", "-c", "exec mount.juicefs juicefs-test-node-h /jfs/juicefs-test-node-h"}, - }}, + Containers: []corev1.Container{{Image: "juicedata/mount:ce-v1.2.1"}}, }, Status: corev1.PodStatus{ Phase: corev1.PodRunning, @@ -283,7 +276,7 @@ func TestAddRefOfMountWithMock(t *testing.T) { func TestJUmount(t *testing.T) { defer func() { _ = os.RemoveAll("tmp") }() fakeClientSet := fake.NewSimpleClientset() - passfd.InitTestFds() + fuse.InitTestFds() type args struct { podName string @@ -652,8 +645,8 @@ func TestWaitUntilMount(t *testing.T) { wantErr: false, wantAnno: map[string]string{ util.GetReferenceKey("/mnt/iii"): "/mnt/iii", - common.UniqueId: "", - common.JuiceFSUUID: "", + jfsConfig.UniqueId: "", + jfsConfig.JuiceFSUUID: "", }, }, } @@ -672,9 +665,9 @@ func TestWaitUntilMount(t *testing.T) { hashVal := GenHashOfSetting(klog.NewKlogr(), *tt.args.jfsSetting) tt.args.jfsSetting.HashVal = hashVal tt.pod.Labels = map[string]string{ - common.PodTypeKey: common.PodTypeValue, - common.PodUniqueIdLabelKey: tt.args.jfsSetting.UniqueId, - common.PodJuiceHashLabelKey: hashVal, + jfsConfig.PodTypeKey: jfsConfig.PodTypeValue, + jfsConfig.PodUniqueIdLabelKey: tt.args.jfsSetting.UniqueId, + jfsConfig.PodJuiceHashLabelKey: hashVal, } tt.pod.Spec.NodeName = jfsConfig.NodeName _, _ = p.K8sClient.CreatePod(context.TODO(), tt.pod) diff --git a/pkg/k8sclient/client.go b/pkg/k8sclient/client.go index d966ee5840..0b0c644af6 100644 --- a/pkg/k8sclient/client.go +++ b/pkg/k8sclient/client.go @@ -19,7 +19,6 @@ package k8sclient import ( "bytes" "context" - "fmt" "io" "net/url" "os" @@ -43,8 +42,6 @@ import ( restclient "k8s.io/client-go/rest" "k8s.io/client-go/tools/remotecommand" "k8s.io/klog/v2" - - "github.com/juicedata/juicefs-csi-driver/pkg/util" ) const ( @@ -89,14 +86,6 @@ func NewClient() (*K8sClient, error) { if config == nil { return nil, status.Error(codes.NotFound, "Can't get kube InClusterConfig") } - return newClient(config) -} - -func NewClientWithConfig(config *rest.Config) (*K8sClient, error) { - return newClient(config) -} - -func newClient(config *rest.Config) (*K8sClient, error) { config.Timeout = timeout if os.Getenv("KUBE_QPS") != "" { @@ -126,33 +115,31 @@ func newClient(config *rest.Config) (*K8sClient, error) { } func (k *K8sClient) CreatePod(ctx 
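// The k8sclient changes that follow drop the per-call util.GenLog wrapper and
// route everything through one package-level named logger. A small sketch of
// that klog pattern (demoGet is an illustrative caller):
package sketch

import "k8s.io/klog/v2"

var clientLog = klog.NewKlogr().WithName("k8sclient")

func demoGet(podName string, err error) {
	clientLog.V(1).Info("Get pod", "name", podName) // emitted only at -v=1 and above
	if err != nil {
		clientLog.Error(err, "Can't get pod", "name", podName)
	}
}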
context.Context, pod *corev1.Pod) (*corev1.Pod, error) { - log := util.GenLog(ctx, clientLog, "") if pod == nil { - log.Info("Create pod: pod is nil") + clientLog.Info("Create pod: pod is nil") return nil, nil } - log.V(1).Info("Create pod", "name", pod.Name) + clientLog.V(1).Info("Create pod", "name", pod.Name) mntPod, err := k.CoreV1().Pods(pod.Namespace).Create(ctx, pod, metav1.CreateOptions{}) if err != nil { - log.V(1).Info("Can't create pod", "name", pod.Name, "error", err) + clientLog.Info("Can't create pod", "name", pod.Name, "error", err) return nil, err } return mntPod, nil } func (k *K8sClient) GetPod(ctx context.Context, podName, namespace string) (*corev1.Pod, error) { - log := util.GenLog(ctx, clientLog, "") - log.V(1).Info("Get pod", "name", podName) + clientLog.V(1).Info("Get pod", "name", podName) mntPod, err := k.CoreV1().Pods(namespace).Get(ctx, podName, metav1.GetOptions{}) if err != nil { - log.V(1).Info("Can't get pod", "name", podName, "namespace", namespace, "error", err) + clientLog.V(1).Info("Can't get pod", "name", podName, "namespace", namespace, "error", err) return nil, err } return mntPod, nil } func (k *K8sClient) ListPod(ctx context.Context, namespace string, labelSelector *metav1.LabelSelector, filedSelector *fields.Set) ([]corev1.Pod, error) { - log := util.GenLog(ctx, clientLog, "") + clientLog.V(1).Info("List pod", "labelSelector", labelSelector.String(), "fieldSelector", filedSelector) listOptions := metav1.ListOptions{} if k.enableAPIServerListCache { // set ResourceVersion="0" means the list response is returned from apiserver cache instead of etcd @@ -164,24 +151,21 @@ func (k *K8sClient) ListPod(ctx context.Context, namespace string, labelSelector return nil, err } listOptions.LabelSelector = labelMap.String() - log.V(1).Info("List pod", "labelSelector", listOptions.LabelSelector) } if filedSelector != nil { listOptions.FieldSelector = fields.SelectorFromSet(*filedSelector).String() - log.V(1).Info("List pod", "fieldSelector", listOptions.FieldSelector) } podList, err := k.CoreV1().Pods(namespace).List(ctx, listOptions) if err != nil { - log.V(1).Info("Can't list pod", "namespace", namespace, "error", err) + clientLog.V(1).Info("Can't list pod", "namespace", namespace, "labelSelector", labelSelector.String(), "error", err) return nil, err } return podList.Items, nil } func (k *K8sClient) ListNode(ctx context.Context, labelSelector *metav1.LabelSelector) ([]corev1.Node, error) { - log := util.GenLog(ctx, clientLog, "") - log.V(1).Info("List node by labelSelector", "labelSelector", labelSelector.String()) + clientLog.V(1).Info("List node by labelSelector", "labelSelector", labelSelector.String()) listOptions := metav1.ListOptions{} if labelSelector != nil { labelMap, err := metav1.LabelSelectorAsSelector(labelSelector) @@ -193,15 +177,14 @@ func (k *K8sClient) ListNode(ctx context.Context, labelSelector *metav1.LabelSel nodeList, err := k.CoreV1().Nodes().List(ctx, listOptions) if err != nil { - log.V(1).Info("Can't list node", "labelSelector", labelSelector.String(), "error", err) + clientLog.V(1).Info("Can't list node", "labelSelector", labelSelector.String(), "error", err) return nil, err } return nodeList.Items, nil } func (k *K8sClient) GetPodLog(ctx context.Context, podName, namespace, containerName string) (string, error) { - log := util.GenLog(ctx, clientLog, "") - log.V(1).Info("Get pod log", "name", podName) + clientLog.V(1).Info("Get pod log", "name", podName) tailLines := int64(20) req := k.CoreV1().Pods(namespace).GetLogs(podName, 
&corev1.PodLogOptions{ Container: containerName, @@ -223,131 +206,119 @@ func (k *K8sClient) GetPodLog(ctx context.Context, podName, namespace, container } func (k *K8sClient) PatchPod(ctx context.Context, pod *corev1.Pod, data []byte, pt types.PatchType) error { - log := util.GenLog(ctx, clientLog, "") if pod == nil { - log.Info("Patch pod: pod is nil") + clientLog.Info("Patch pod: pod is nil") return nil } - log.V(1).Info("Patch pod", "name", pod.Name) + clientLog.V(1).Info("Patch pod", "name", pod.Name) _, err := k.CoreV1().Pods(pod.Namespace).Patch(ctx, pod.Name, pt, data, metav1.PatchOptions{}) return err } func (k *K8sClient) UpdatePod(ctx context.Context, pod *corev1.Pod) error { - log := util.GenLog(ctx, clientLog, "") if pod == nil { - log.Info("Update pod: pod is nil") + clientLog.Info("Update pod: pod is nil") return nil } - log.V(1).Info("Update pod", "name", pod.Name) + clientLog.V(1).Info("Update pod", "name", pod.Name) _, err := k.CoreV1().Pods(pod.Namespace).Update(ctx, pod, metav1.UpdateOptions{}) return err } func (k *K8sClient) DeletePod(ctx context.Context, pod *corev1.Pod) error { - log := util.GenLog(ctx, clientLog, "") if pod == nil { - log.Info("Delete pod: pod is nil") + clientLog.Info("Delete pod: pod is nil") return nil } - log.V(1).Info("Delete pod", "name", pod.Name) + clientLog.V(1).Info("Delete pod", "name", pod.Name) return k.CoreV1().Pods(pod.Namespace).Delete(ctx, pod.Name, metav1.DeleteOptions{}) } func (k *K8sClient) GetSecret(ctx context.Context, secretName, namespace string) (*corev1.Secret, error) { - log := util.GenLog(ctx, clientLog, "") - log.V(1).Info("Get secret", "name", secretName) + clientLog.V(1).Info("Get secret", "name", secretName) secret, err := k.CoreV1().Secrets(namespace).Get(ctx, secretName, metav1.GetOptions{}) if err != nil { - log.V(1).Info("Can't get secret", "name", secretName, "namespace", namespace, "error", err) + clientLog.V(1).Info("Can't get secret", "name", secretName, "namespace", namespace, "error", err) return nil, err } return secret, nil } func (k *K8sClient) CreateSecret(ctx context.Context, secret *corev1.Secret) (*corev1.Secret, error) { - log := util.GenLog(ctx, clientLog, "") if secret == nil { - log.Info("Create secret: secret is nil") + clientLog.Info("Create secret: secret is nil") return nil, nil } - log.V(1).Info("Create secret", "name", secret.Name) + clientLog.V(1).Info("Create secret", "name", secret.Name) s, err := k.CoreV1().Secrets(secret.Namespace).Create(ctx, secret, metav1.CreateOptions{}) if err != nil { - log.Info("Can't create secret", "name", secret.Name, "error", err) + clientLog.Info("Can't create secret", "name", secret.Name, "error", err) return nil, err } return s, nil } func (k *K8sClient) UpdateSecret(ctx context.Context, secret *corev1.Secret) error { - log := util.GenLog(ctx, clientLog, "") if secret == nil { - log.Info("Update secret: secret is nil") + clientLog.Info("Update secret: secret is nil") return nil } - log.V(1).Info("Update secret", "name", secret.Name) + clientLog.V(1).Info("Update secret", "name", secret.Name) _, err := k.CoreV1().Secrets(secret.Namespace).Update(ctx, secret, metav1.UpdateOptions{}) return err } func (k *K8sClient) DeleteSecret(ctx context.Context, secretName string, namespace string) error { - log := util.GenLog(ctx, clientLog, "") - log.V(1).Info("Delete secret", "name", secretName) + clientLog.V(1).Info("Delete secret", "name", secretName) return k.CoreV1().Secrets(namespace).Delete(ctx, secretName, metav1.DeleteOptions{}) } func (k *K8sClient) 
PatchSecret(ctx context.Context, secret *corev1.Secret, data []byte, pt types.PatchType) error { - log := util.GenLog(ctx, clientLog, "") if secret == nil { - log.Info("Patch secret: secret is nil") + clientLog.Info("Patch secret: secret is nil") return nil } - log.V(1).Info("Patch secret", "name", secret.Name) + clientLog.V(1).Info("Patch secret", "name", secret.Name) _, err := k.CoreV1().Secrets(secret.Namespace).Patch(ctx, secret.Name, pt, data, metav1.PatchOptions{}) return err } func (k *K8sClient) GetJob(ctx context.Context, jobName, namespace string) (*batchv1.Job, error) { - log := util.GenLog(ctx, clientLog, "") - log.V(1).Info("Get job", "name", jobName) + clientLog.V(1).Info("Get job", "name", jobName) job, err := k.BatchV1().Jobs(namespace).Get(ctx, jobName, metav1.GetOptions{}) if err != nil { - log.V(1).Info("Can't get job", "name", jobName, "namespace", namespace, "error", err) + clientLog.V(1).Info("Can't get job", "name", jobName, "namespace", namespace, "error", err) return nil, err } return job, nil } func (k *K8sClient) CreateJob(ctx context.Context, job *batchv1.Job) (*batchv1.Job, error) { - log := util.GenLog(ctx, clientLog, "") if job == nil { - log.Info("Create job: job is nil") + clientLog.Info("Create job: job is nil") return nil, nil } - log.V(1).Info("Create job", "name", job.Name) + clientLog.V(1).Info("Create job", "name", job.Name) created, err := k.BatchV1().Jobs(job.Namespace).Create(ctx, job, metav1.CreateOptions{}) if err != nil { - log.Info("Can't create job", "name", job.Name, "error", err) + clientLog.Info("Can't create job", "name", job.Name, "error", err) return nil, err } return created, nil } func (k *K8sClient) UpdateJob(ctx context.Context, job *batchv1.Job) error { - log := util.GenLog(ctx, clientLog, "") if job == nil { - log.Info("Update job: job is nil") + clientLog.Info("Update job: job is nil") return nil } - log.V(1).Info("Update job", "name", job.Name) + clientLog.V(1).Info("Update job", "name", job.Name) _, err := k.BatchV1().Jobs(job.Namespace).Update(ctx, job, metav1.UpdateOptions{}) return err } func (k *K8sClient) DeleteJob(ctx context.Context, jobName string, namespace string) error { - log := util.GenLog(ctx, clientLog, "") - log.V(1).Info("Delete job", "name", jobName) + clientLog.V(1).Info("Delete job", "name", jobName) policy := metav1.DeletePropagationBackground return k.BatchV1().Jobs(namespace).Delete(ctx, jobName, metav1.DeleteOptions{ PropagationPolicy: &policy, @@ -355,19 +326,17 @@ func (k *K8sClient) DeleteJob(ctx context.Context, jobName string, namespace str } func (k *K8sClient) GetPersistentVolume(ctx context.Context, pvName string) (*corev1.PersistentVolume, error) { - log := util.GenLog(ctx, clientLog, "") - log.V(1).Info("Get pv", "name", pvName) + clientLog.V(1).Info("Get pv", "name", pvName) pv, err := k.CoreV1().PersistentVolumes().Get(ctx, pvName, metav1.GetOptions{}) if err != nil { - log.V(1).Info("Can't get pv", "name", pvName, "error", err) + clientLog.V(1).Info("Can't get pv", "name", pvName, "error", err) return nil, err } return pv, nil } func (k *K8sClient) ListPersistentVolumes(ctx context.Context, labelSelector *metav1.LabelSelector, filedSelector *fields.Set) ([]corev1.PersistentVolume, error) { - log := util.GenLog(ctx, clientLog, "") - log.V(1).Info("List pvs", "labelSelector", labelSelector.String(), "fieldSelector", filedSelector) + clientLog.V(1).Info("List pvs", "labelSelector", labelSelector.String(), "fieldSelector", filedSelector) listOptions := metav1.ListOptions{} if labelSelector != nil 
{ labelMap, err := metav1.LabelSelectorAsMap(labelSelector) @@ -381,7 +350,7 @@ func (k *K8sClient) ListPersistentVolumes(ctx context.Context, labelSelector *me } pvList, err := k.CoreV1().PersistentVolumes().List(ctx, listOptions) if err != nil { - log.V(1).Info("Can't list pv", "error", err) + clientLog.V(1).Info("Can't list pv", "error", err) return nil, err } return pvList.Items, nil @@ -403,74 +372,67 @@ func (k *K8sClient) ListPersistentVolumesByVolumeHandle(ctx context.Context, vol } func (k *K8sClient) ListStorageClasses(ctx context.Context) ([]storagev1.StorageClass, error) { - log := util.GenLog(ctx, clientLog, "") - log.V(1).Info("List storageclass") + clientLog.V(1).Info("List storageclass") scList, err := k.StorageV1().StorageClasses().List(ctx, metav1.ListOptions{}) if err != nil { - log.V(1).Info("Can't list pv", "error", err) + clientLog.V(1).Info("Can't list pv", "error", err) return nil, err } return scList.Items, nil } func (k *K8sClient) GetPersistentVolumeClaim(ctx context.Context, pvcName, namespace string) (*corev1.PersistentVolumeClaim, error) { - log := util.GenLog(ctx, clientLog, "") - log.V(1).Info("Get pvc", "name", pvcName, "namespace", namespace) + clientLog.V(1).Info("Get pvc", "name", pvcName, "namespace", namespace) mntPod, err := k.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, pvcName, metav1.GetOptions{}) if err != nil { - log.V(1).Info("Can't get pvc", "name", pvcName, "namespace", namespace, "error", err) + clientLog.V(1).Info("Can't get pvc", "name", pvcName, "namespace", namespace, "error", err) return nil, err } return mntPod, nil } func (k *K8sClient) GetReplicaSet(ctx context.Context, rsName, namespace string) (*appsv1.ReplicaSet, error) { - log := util.GenLog(ctx, clientLog, "") - log.V(1).Info("Get replicaset", "name", rsName, "namespace", namespace) + clientLog.V(1).Info("Get replicaset", "name", rsName, "namespace", namespace) rs, err := k.AppsV1().ReplicaSets(namespace).Get(ctx, rsName, metav1.GetOptions{}) if err != nil { - log.V(1).Info("Can't get replicaset", "name", rsName, "namespace", namespace, "error", err) + clientLog.V(1).Info("Can't get replicaset", "name", rsName, "namespace", namespace, "error", err) return nil, err } return rs, nil } func (k *K8sClient) GetStatefulSet(ctx context.Context, stsName, namespace string) (*appsv1.StatefulSet, error) { - log := util.GenLog(ctx, clientLog, "") - log.V(1).Info("Get statefulset", "name", stsName, "namespace", namespace) + clientLog.V(1).Info("Get statefulset", "name", stsName, "namespace", namespace) sts, err := k.AppsV1().StatefulSets(namespace).Get(ctx, stsName, metav1.GetOptions{}) if err != nil { - log.V(1).Info("Can't get statefulset", "name", stsName, "namespace", namespace, "error", err) + clientLog.V(1).Info("Can't get statefulset", "name", stsName, "namespace", namespace, "error", err) return nil, err } return sts, nil } func (k *K8sClient) GetStorageClass(ctx context.Context, scName string) (*storagev1.StorageClass, error) { - log := util.GenLog(ctx, clientLog, "") - log.V(1).Info("Get sc", "name", scName) + clientLog.V(1).Info("Get sc", "name", scName) mntPod, err := k.StorageV1().StorageClasses().Get(ctx, scName, metav1.GetOptions{}) if err != nil { - log.V(1).Info("Can't get sc", "name", scName, "error", err) + clientLog.V(1).Info("Can't get sc", "name", scName, "error", err) return nil, err } return mntPod, nil } func (k *K8sClient) GetDaemonSet(ctx context.Context, dsName, namespace string) (*appsv1.DaemonSet, error) { - log := util.GenLog(ctx, clientLog, "") - 
log.V(1).Info("Get ds", "name", dsName) + clientLog.V(1).Info("Get ds", "name", dsName) ds, err := k.AppsV1().DaemonSets(namespace).Get(ctx, dsName, metav1.GetOptions{}) if err != nil { - log.Info("Can't get DaemonSet", "name", dsName, "error", err) + clientLog.Info("Can't get DaemonSet", "name", dsName, "error", err) return nil, err } return ds, nil } -func (k *K8sClient) ExecuteInContainer(ctx context.Context, podName, namespace, containerName string, cmd []string) (stdout string, stderr string, err error) { - log := util.GenLog(ctx, clientLog, "") - log.V(1).Info("Execute command pod", "command", cmd, "container", containerName, "pod", podName, "namespace", namespace) +func (k *K8sClient) ExecuteInContainer(podName, namespace, containerName string, cmd []string) (stdout string, stderr string, err error) { + clientLog.V(1).Info("Execute command pod", "command", cmd, "container", containerName, "pod", podName, "namespace", namespace) const tty = false req := k.CoreV1().RESTClient().Post(). @@ -511,49 +473,3 @@ func execute(method string, url *url.URL, config *restclient.Config, stdin io.Re Tty: tty, }) } - -func (k *K8sClient) GetConfigMap(ctx context.Context, cmName, namespace string) (*corev1.ConfigMap, error) { - log := util.GenLog(ctx, clientLog, "") - log.V(1).Info("Get configmap", "name", cmName) - cm, err := k.CoreV1().ConfigMaps(namespace).Get(ctx, cmName, metav1.GetOptions{}) - if err != nil { - log.V(1).Info("Can't get configMap", "name", cmName, "namespace", namespace, "error", err) - return nil, err - } - return cm, nil -} - -func (k *K8sClient) CreateEvent(ctx context.Context, pod corev1.Pod, evtType, reason, message string) error { - log := util.GenLog(ctx, clientLog, "") - log.V(1).Info("Create event", "name", pod.Name) - now := time.Now() - _, err := k.CoreV1().Events(pod.Namespace).Create(ctx, &corev1.Event{ - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("%v.%x", pod.Name, now.UnixNano()), - Namespace: pod.Namespace, - }, - InvolvedObject: corev1.ObjectReference{ - Kind: pod.Kind, - Namespace: pod.Namespace, - Name: pod.Name, - UID: pod.UID, - APIVersion: pod.APIVersion, - }, - Reason: reason, - Message: message, - Source: corev1.EventSource{ - Component: "juicefs-csi-node", - Host: pod.Spec.NodeName, - }, - FirstTimestamp: metav1.Time{Time: now}, - LastTimestamp: metav1.Time{Time: now}, - Type: evtType, - ReportingController: "juicefs-csi-node", - ReportingInstance: pod.Spec.NodeName, - }, metav1.CreateOptions{}) - if err != nil { - log.V(1).Info("Can't create event", "podName", pod.Name, "error", err) - return err - } - return nil -} diff --git a/pkg/util/resource/job.go b/pkg/util/resource/job.go index 0df00c40df..210de7466e 100644 --- a/pkg/util/resource/job.go +++ b/pkg/util/resource/job.go @@ -17,21 +17,12 @@ package resource import ( - "context" - "fmt" "time" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" - k8serrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/klog/v2" - - "github.com/juicedata/juicefs-csi-driver/pkg/config" - k8s "github.com/juicedata/juicefs-csi-driver/pkg/k8sclient" ) -var log = klog.NewKlogr().WithName("job-util") - func IsJobCompleted(job *batchv1.Job) bool { for _, cond := range job.Status.Conditions { if cond.Status == corev1.ConditionTrue && cond.Type == batchv1.JobComplete { @@ -69,30 +60,3 @@ func IsJobShouldBeRecycled(job *batchv1.Job) bool { ttlTime := job.Status.CompletionTime.Add(time.Duration(*job.Spec.TTLSecondsAfterFinished) * time.Second) return ttlTime.Before(time.Now()) } - -func WaitForJobComplete(ctx 
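// WaitForJobComplete, removed here, followed the classic poll-with-deadline
// shape: a context.WithTimeout bounds the whole wait while a ticker paces the
// API polls. A generic, self-contained sketch of that shape (waitUntil and its
// callback are illustrative names):
package sketch

import (
	"context"
	"fmt"
	"time"
)

func waitUntil(ctx context.Context, timeout time.Duration, done func(context.Context) (bool, error)) error {
	waitCtx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()
	ticker := time.NewTicker(500 * time.Millisecond)
	defer ticker.Stop() // the removed code never stopped its ticker; Stop avoids the leak
	for {
		select {
		case <-waitCtx.Done():
			return fmt.Errorf("condition not met within %s", timeout)
		case <-ticker.C:
			ok, err := done(waitCtx)
			if err != nil {
				return err
			}
			if ok {
				return nil
			}
		}
	}
}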
context.Context, client *k8s.K8sClient, name string, timeout time.Duration) error { - waitCtx, cancel := context.WithTimeout(ctx, timeout) - defer cancel() - // Wait until the mount point is ready - log.Info("waiting for job complete", "name", name) - timer := time.NewTicker(500 * time.Millisecond) - for { - select { - case <-waitCtx.Done(): - return fmt.Errorf("job %s is not complete eventually", name) - case <-timer.C: - job, err := client.GetJob(waitCtx, name, config.Namespace) - if err != nil { - if err == context.Canceled || err == context.DeadlineExceeded { - return fmt.Errorf("job %s is not complete eventually", name) - } - if k8serrors.IsNotFound(err) { - return nil - } - } - if IsJobCompleted(job) { - return nil - } - } - } -} diff --git a/pkg/util/resource/pod.go b/pkg/util/resource/pod.go index ef3f4b4472..e2700a7986 100644 --- a/pkg/util/resource/pod.go +++ b/pkg/util/resource/pod.go @@ -21,7 +21,6 @@ import ( "encoding/json" "fmt" "os" - "path" "strings" "syscall" "time" @@ -30,7 +29,6 @@ import ( k8serrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/types" - "github.com/juicedata/juicefs-csi-driver/pkg/common" "github.com/juicedata/juicefs-csi-driver/pkg/config" "github.com/juicedata/juicefs-csi-driver/pkg/k8sclient" k8s "github.com/juicedata/juicefs-csi-driver/pkg/k8sclient" @@ -64,26 +62,6 @@ func IsPodError(pod *corev1.Pod) bool { return containError(pod.Status.ContainerStatuses) } -func IsPodComplete(pod *corev1.Pod) bool { - var reason string - for i := len(pod.Status.ContainerStatuses) - 1; i >= 0; i-- { - container := pod.Status.ContainerStatuses[i] - - if container.State.Waiting != nil && container.State.Waiting.Reason != "" { - reason = container.State.Waiting.Reason - } else if container.State.Terminated != nil && container.State.Terminated.Reason != "" { - reason = container.State.Terminated.Reason - } else if container.State.Terminated != nil && container.State.Terminated.Reason == "" { - if container.State.Terminated.Signal != 0 { - reason = fmt.Sprintf("Signal:%d", container.State.Terminated.Signal) - } else { - reason = fmt.Sprintf("ExitCode:%d", container.State.Terminated.ExitCode) - } - } - } - return reason == "Completed" -} - func IsPodResourceError(pod *corev1.Pod) bool { if pod.Status.Phase == corev1.PodFailed { if strings.Contains(pod.Status.Reason, "OutOf") { @@ -261,7 +239,7 @@ func WaitUtilMountReady(ctx context.Context, podName, mntPath string, timeout ti log.Info("Mount point is ready", "podName", podName) return nil } - log.V(1).Info("Mount point is not ready, wait for it", "mountPath", mntPath, "podName", podName) + log.V(1).Info("Mount point is not ready, wait for it", "podName", podName) } time.Sleep(time.Millisecond * 500) } @@ -270,12 +248,12 @@ func WaitUtilMountReady(ctx context.Context, podName, mntPath string, timeout ti } func ShouldDelay(ctx context.Context, pod *corev1.Pod, Client *k8s.K8sClient) (shouldDelay bool, err error) { - delayStr, delayExist := pod.Annotations[common.DeleteDelayTimeKey] + delayStr, delayExist := pod.Annotations[config.DeleteDelayTimeKey] if !delayExist { // not set delete delay return false, nil } - delayAtStr, delayAtExist := pod.Annotations[common.DeleteDelayAtKey] + delayAtStr, delayAtExist := pod.Annotations[config.DeleteDelayAtKey] if !delayAtExist { // need to add delayAt annotation d, err := util.GetTimeAfterDelay(delayStr) @@ -283,7 +261,7 @@ func ShouldDelay(ctx context.Context, pod *corev1.Pod, Client *k8s.K8sClient) (s resourceLog.Error(err, "delayDelete: can't parse delay time", 
"time", d) return false, nil } - addAnnotation := map[string]string{common.DeleteDelayAtKey: d} + addAnnotation := map[string]string{config.DeleteDelayAtKey: d} resourceLog.Info("delayDelete: add annotation to pod", "annotations", addAnnotation, "podName", pod.Name) if err := AddPodAnnotation(ctx, Client, pod, addAnnotation); err != nil { resourceLog.Error(err, "delayDelete: Update pod error", "podName", pod.Name) @@ -299,6 +277,39 @@ func ShouldDelay(ctx context.Context, pod *corev1.Pod, Client *k8s.K8sClient) (s return time.Now().Before(delayAt), nil } +func GetMountPathOfPod(pod corev1.Pod) (string, string, error) { + if len(pod.Spec.Containers) == 0 { + return "", "", fmt.Errorf("pod %v has no container", pod.Name) + } + cmd := pod.Spec.Containers[0].Command + if cmd == nil || len(cmd) < 3 { + return "", "", fmt.Errorf("get error pod command:%v", cmd) + } + sourcePath, volumeId, err := parseMntPath(cmd[2]) + if err != nil { + return "", "", err + } + return sourcePath, volumeId, nil +} + +// parseMntPath return mntPath, volumeId (/jfs/volumeId, volumeId err) +func parseMntPath(cmd string) (string, string, error) { + cmds := strings.Split(cmd, "\n") + mountCmd := cmds[len(cmds)-1] + args := strings.Fields(mountCmd) + if args[0] == "exec" { + args = args[1:] + } + if len(args) < 3 || !strings.HasPrefix(args[2], config.PodMountBase) { + return "", "", fmt.Errorf("err cmd:%s", cmd) + } + argSlice := strings.Split(args[2], "/") + if len(argSlice) < 3 { + return "", "", fmt.Errorf("err mntPath:%s", args[2]) + } + return args[2], argSlice[2], nil +} + func GetPVWithVolumeHandleOrAppInfo(ctx context.Context, client *k8s.K8sClient, volumeHandle string, volCtx map[string]string) (*corev1.PersistentVolume, *corev1.PersistentVolumeClaim, error) { if client == nil { return nil, nil, fmt.Errorf("k8s client is nil") @@ -306,7 +317,7 @@ func GetPVWithVolumeHandleOrAppInfo(ctx context.Context, client *k8s.K8sClient, pv, err := client.GetPersistentVolume(ctx, volumeHandle) if k8serrors.IsNotFound(err) { // failed to get pv by volumeHandle, try to get pv by appName and appNamespace - appName, appNamespace := volCtx[common.PodInfoName], volCtx[common.PodInfoNamespace] + appName, appNamespace := volCtx[config.PodInfoName], volCtx[config.PodInfoNamespace] appPod, err := client.GetPod(ctx, appName, appNamespace) if err != nil { return nil, nil, err @@ -343,11 +354,3 @@ func GetPVWithVolumeHandleOrAppInfo(ctx context.Context, client *k8s.K8sClient, } return pv, pvc, nil } - -func GetCommPath(basePath string, pod corev1.Pod) (string, error) { - hashVal := pod.Labels[common.PodJuiceHashLabelKey] - if hashVal == "" { - return "", fmt.Errorf("pod %s/%s has no hash label", pod.Namespace, pod.Name) - } - return path.Join(basePath, hashVal, "fuse_fd_comm.1"), nil -} diff --git a/pkg/util/resource/pod_test.go b/pkg/util/resource/pod_test.go index a025886d97..0e58344176 100644 --- a/pkg/util/resource/pod_test.go +++ b/pkg/util/resource/pod_test.go @@ -500,6 +500,87 @@ func TestIsPodResourceError(t *testing.T) { } } +func TestGetMountPathOfPod(t *testing.T) { + type args struct { + pod corev1.Pod + } + var normalPod = corev1.Pod{Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "pvc-node01-xxx", + Image: "juicedata/juicefs-csi-driver:v0.10.6", + Command: []string{"sh", "-c", "/bin/mount.juicefs redis://127.0.0.1/6379 /jfs/pvc-xxx"}, + }, + }, + }} + tests := []struct { + name string + args args + want string + want1 string + wantErr bool + }{ + { + name: "get mntPath from pod cmd success", + args: 
args{pod: normalPod}, + want: "/jfs/pvc-xxx", + want1: "pvc-xxx", + wantErr: false, + }, + { + name: "nil pod ", + args: args{pod: corev1.Pod{}}, + want: "", + want1: "", + wantErr: true, + }, + { + name: "err-pod cmd <3", + //args: args{cmd: "/bin/mount.juicefs redis://127.0.0.1/6379"}, + args: args{pod: corev1.Pod{Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "pvc-node01-xxx", + Image: "juicedata/juicefs-csi-driver:v0.10.6", + Command: []string{"sh", "-c"}, + }, + }}}}, + want: "", + want1: "", + wantErr: true, + }, + { + name: "err-cmd sourcePath no MountBase prefix", + args: args{pod: corev1.Pod{Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "pvc-node01-xxx", + Image: "juicedata/juicefs-csi-driver:v0.10.6", + Command: []string{"sh", "-c", "/bin/mount.juicefs redis://127.0.0.1/6379 /err-jfs/pvc-xxx}"}, + }, + }}}}, + want: "", + want1: "", + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, got1, err := GetMountPathOfPod(tt.args.pod) + if (err != nil) != tt.wantErr { + t.Errorf("ParseMntPath() error = %v, wantErr %v", err, tt.wantErr) + return + } + if got != tt.want { + t.Errorf("ParseMntPath() got = %v, want %v", got, tt.want) + } + if got1 != tt.want1 { + t.Errorf("ParseMntPath() got1 = %v, want %v", got1, tt.want1) + } + }) + } +} + func TestGetAllRefKeys(t *testing.T) { type args struct { pod corev1.Pod @@ -563,3 +644,85 @@ func TestGetAllRefKeys(t *testing.T) { }) } } + +func TestParseMntPath(t *testing.T) { + type args struct { + cmd string + } + tests := []struct { + name string + args args + want string + want1 string + wantErr bool + }{ + { + name: "get sourcePath from pod cmd success", + args: args{cmd: "/usr/local/bin/juicefs format --storage=s3 --bucket=http://juicefs-bucket.minio.default.svc.cluster.local:9000 --access-key=minioadmin --secret-key=${secretkey} ${metaurl} ce-secret\n/bin/mount.juicefs redis://127.0.0.1/6379 /jfs/pvc-xxx"}, + want: "/jfs/pvc-xxx", + want1: "pvc-xxx", + wantErr: false, + }, + { + name: "get sourcePath from pod cmd with exec success", + args: args{cmd: "/usr/local/bin/juicefs format --storage=s3 --bucket=http://juicefs-bucket.minio.default.svc.cluster.local:9000 --access-key=minioadmin --secret-key=${secretkey} ${metaurl} ce-secret\nexec /bin/mount.juicefs redis://127.0.0.1/6379 /jfs/pvc-xxx"}, + want: "/jfs/pvc-xxx", + want1: "pvc-xxx", + wantErr: false, + }, + { + name: "without init cmd", + args: args{cmd: "/bin/mount.juicefs redis://127.0.0.1/6379 /jfs/pvc-xxx"}, + want: "/jfs/pvc-xxx", + want1: "pvc-xxx", + wantErr: false, + }, + { + name: "with create subpath", + args: args{cmd: "/usr/local/bin/juicefs format --storage=s3 --bucket=http://juicefs-bucket.minio.default.svc.cluster.local:9000 --access-key=minioadmin --secret-key=${secretkey} ${metaurl} ce-secret\n" + + "/bin/mount.juicefs ${metaurl} /mnt/jfs -o buffer-size=300,cache-size=100,enable-xattr\n" + + "if [ ! 
-d /mnt/jfs/pvc-fb2ec20c-474f-4804-9504-966da4af9b73 ]; then mkdir -m 777 /mnt/jfs/pvc-fb2ec20c-474f-4804-9504-966da4af9b73; fi;\n" + + "umount /mnt/jfs -l\n" + + "/bin/mount.juicefs redis://127.0.0.1/6379 /jfs/pvc-xxx"}, + want: "/jfs/pvc-xxx", + want1: "pvc-xxx", + wantErr: false, + }, + { + name: "err-pod cmd args <3", + args: args{cmd: "/usr/local/bin/juicefs format --storage=s3 --bucket=http://juicefs-bucket.minio.default.svc.cluster.local:9000 --access-key=minioadmin --secret-key=${secretkey} ${metaurl} ce-secret\n/bin/mount.juicefs redis://127.0.0.1/6379"}, + want: "", + want1: "", + wantErr: true, + }, + { + name: "err-cmd sourcePath no MountBase prefix", + args: args{cmd: "/usr/local/bin/juicefs format --storage=s3 --bucket=http://juicefs-bucket.minio.default.svc.cluster.local:9000 --access-key=minioadmin --secret-key=${secretkey} ${metaurl} ce-secret\n/bin/mount.juicefs redis://127.0.0.1/6379 /err-jfs/pvc-xxx"}, + want: "", + want1: "", + wantErr: true, + }, + { + name: "err-cmd sourcePath length err", + args: args{cmd: "/usr/local/bin/juicefs format --storage=s3 --bucket=http://juicefs-bucket.minio.default.svc.cluster.local:9000 --access-key=minioadmin --secret-key=${secretkey} ${metaurl} ce-secret\n/bin/mount.juicefs redis://127.0.0.1/6379 /jfs"}, + want: "", + want1: "", + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, got1, err := parseMntPath(tt.args.cmd) + if (err != nil) != tt.wantErr { + t.Errorf("parseMntPath() error = %v, wantErr %v", err, tt.wantErr) + return + } + if got != tt.want { + t.Errorf("parseMntPath() got = %v, want %v", got, tt.want) + } + if got1 != tt.want1 { + t.Errorf("parseMntPath() got1 = %v, want %v", got1, tt.want1) + } + }) + } +} diff --git a/pkg/util/resource/pvc.go b/pkg/util/resource/pvc.go index 0967d242ca..42f204424c 100644 --- a/pkg/util/resource/pvc.go +++ b/pkg/util/resource/pvc.go @@ -28,7 +28,7 @@ import ( v1 "k8s.io/api/core/v1" - "github.com/juicedata/juicefs-csi-driver/pkg/common" + "github.com/juicedata/juicefs-csi-driver/pkg/config" k8s "github.com/juicedata/juicefs-csi-driver/pkg/k8sclient" ) @@ -146,8 +146,8 @@ func (meta *ObjectMeta) ResolveSecret(str string, pvName string) string { func CheckForSecretFinalizer(ctx context.Context, client *k8s.K8sClient, volume *v1.PersistentVolume) (shouldRemoveFinalizer bool, err error) { sc := volume.Spec.StorageClassName - secretNamespace := volume.Spec.PersistentVolumeSource.CSI.VolumeAttributes[common.ProvisionerSecretNamespace] - secretName := volume.Spec.PersistentVolumeSource.CSI.VolumeAttributes[common.ProvisionerSecretName] + secretNamespace := volume.Spec.PersistentVolumeSource.CSI.VolumeAttributes[config.ProvisionerSecretNamespace] + secretName := volume.Spec.PersistentVolumeSource.CSI.VolumeAttributes[config.ProvisionerSecretName] if sc == "" || secretNamespace == "" || secretName == "" { resourceLog.Info("Cannot check for the secret", "storageClass", sc, "secretNamespace", secretNamespace, "secretName", secretName) return false, nil @@ -161,8 +161,8 @@ func CheckForSecretFinalizer(ctx context.Context, client *k8s.K8sClient, volume if pv.Name == volume.Name || pv.DeletionTimestamp != nil || pv.Spec.StorageClassName != sc { continue } - pvSecretNamespace := pv.Spec.PersistentVolumeSource.CSI.VolumeAttributes[common.ProvisionerSecretNamespace] - pvSecretName := pv.Spec.PersistentVolumeSource.CSI.VolumeAttributes[common.ProvisionerSecretName] + pvSecretNamespace := 
pv.Spec.PersistentVolumeSource.CSI.VolumeAttributes[config.ProvisionerSecretNamespace] + pvSecretName := pv.Spec.PersistentVolumeSource.CSI.VolumeAttributes[config.ProvisionerSecretName] // Cannot remove the secret if it is used by another pv if secretNamespace == pvSecretNamespace && secretName == pvSecretName { resourceLog.Info("PV uses the same secret", "pvName", pv.Name, "pv secret namespace", pvSecretNamespace, "pv secret name", pvSecretName) diff --git a/pkg/util/resource/terminal.go b/pkg/util/resource/terminal.go index e9c6f1dba8..a039f38ed4 100644 --- a/pkg/util/resource/terminal.go +++ b/pkg/util/resource/terminal.go @@ -17,7 +17,6 @@ package resource import ( - "bytes" "context" "encoding/json" "io" @@ -181,41 +180,3 @@ func DownloadPodFile(client kubernetes.Interface, cfg *rest.Config, writer io.Wr return nil } - -func SmoothUpgrade(client kubernetes.Interface, cfg *rest.Config, h Handler, csiName, name, namespace string, restart bool) error { - cmds := []string{"juicefs-csi-driver", "upgrade", name} - if restart { - cmds = append(cmds, "--restart") - } - req := client.CoreV1().RESTClient().Post(). - Resource("pods"). - Name(csiName). - Namespace(namespace).SubResource("exec") - req.VersionedParams(&corev1.PodExecOptions{ - Command: cmds, - Container: "juicefs-plugin", - Stdin: true, - Stdout: true, - Stderr: true, - TTY: true, - }, scheme.ParameterCodec) - - var sout, serr bytes.Buffer - executor, err := remotecommand.NewSPDYExecutor(cfg, "POST", req.URL()) - if err != nil { - resourceLog.Error(err, "Failed to create SPDY executor") - return err - } - if err := executor.Stream(remotecommand.StreamOptions{ - Stdin: h, - Stdout: h, - Stderr: h, - TerminalSizeQueue: h, - Tty: true, - }); err != nil { - resourceLog.Error(err, "Failed to stream", "stdout", sout, "stderr", serr) - return err - } - - return nil -} diff --git a/pkg/util/util.go b/pkg/util/util.go index 46e07380aa..3b0f2ebe02 100644 --- a/pkg/util/util.go +++ b/pkg/util/util.go @@ -19,7 +19,6 @@ package util import ( "context" "crypto/sha256" - "encoding/json" "errors" "fmt" "math" @@ -36,7 +35,6 @@ import ( "time" "github.com/prometheus/client_golang/prometheus" - corev1 "k8s.io/api/core/v1" "k8s.io/klog/v2" "k8s.io/utils/io" ) @@ -51,6 +49,10 @@ var ( utilLog = klog.NewKlogr().WithName("util") ) +func init() { + rand.Seed(time.Now().UnixNano()) +} + type mountInfo struct { // Unique ID for the mount (maybe reused after umount). 
diff --git a/pkg/util/util.go b/pkg/util/util.go
index 46e07380aa..3b0f2ebe02 100644
--- a/pkg/util/util.go
+++ b/pkg/util/util.go
@@ -19,7 +19,6 @@ package util
 import (
     "context"
     "crypto/sha256"
-    "encoding/json"
     "errors"
     "fmt"
     "math"
@@ -36,7 +35,6 @@ import (
     "time"
 
     "github.com/prometheus/client_golang/prometheus"
-    corev1 "k8s.io/api/core/v1"
     "k8s.io/klog/v2"
     "k8s.io/utils/io"
 )
@@ -51,6 +49,10 @@ var (
     utilLog = klog.NewKlogr().WithName("util")
 )
 
+func init() {
+    rand.Seed(time.Now().UnixNano())
+}
+
 type mountInfo struct {
     // Unique ID for the mount (maybe reused after umount).
     id int
@@ -246,15 +248,6 @@ func ContainsPrefix(slice []string, s string) bool {
     return false
 }
 
-func ContainSubString(slice []string, s string) bool {
-    for _, item := range slice {
-        if strings.Contains(item, s) {
-            return true
-        }
-    }
-    return false
-}
-
 func GetReferenceKey(target string) string {
     h := sha256.New()
     h.Write([]byte(target))
@@ -312,9 +305,8 @@ var letterRunes = []rune("abcdefghijklmnopqrstuvwxyz")
 
 func RandStringRunes(n int) string {
     b := make([]rune, n)
-    r := rand.New(rand.NewSource(time.Now().UnixNano()))
     for i := range b {
-        b[i] = letterRunes[r.Intn(len(letterRunes))]
+        b[i] = letterRunes[rand.Intn(len(letterRunes))]
     }
     return string(b)
 }
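Two properties of this hunk are worth spelling out. The restored init() seeds the package-level math/rand generator that RandStringRunes now draws from; that global source is safe for concurrent use, and on Go 1.20+ it is auto-seeded anyway (rand.Seed is deprecated there, so the call is redundant but harmless). The removed per-call variant built a fresh, unsynchronized generator from time.Now().UnixNano(), which can repeat output when two calls land in the same nanosecond. A small sketch of the restored shape, assuming nothing beyond the standard library:

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// Seed once at startup. On Go 1.20+ the global generator is already
// auto-seeded and rand.Seed is deprecated, so this only matters on
// older toolchains.
func init() {
	rand.Seed(time.Now().UnixNano())
}

var letterRunes = []rune("abcdefghijklmnopqrstuvwxyz")

// randStringRunes mirrors the restored helper: every call shares the
// goroutine-safe global source instead of constructing its own.
func randStringRunes(n int) string {
	b := make([]rune, n)
	for i := range b {
		b[i] = letterRunes[rand.Intn(len(letterRunes))]
	}
	return string(b)
}

func main() {
	fmt.Println(randStringRunes(6))
}
```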
@@ -367,39 +359,6 @@ func UmountPath(ctx context.Context, sourcePath string) {
     }
 }
 
-func GetMountPathOfPod(pod corev1.Pod) (string, string, error) {
-    if len(pod.Spec.Containers) == 0 {
-        return "", "", fmt.Errorf("pod %v has no container", pod.Name)
-    }
-    cmd := pod.Spec.Containers[0].Command
-    if cmd == nil || len(cmd) < 3 {
-        return "", "", fmt.Errorf("get error pod command:%v", cmd)
-    }
-    sourcePath, volumeId, err := parseMntPath(cmd[2])
-    if err != nil {
-        return "", "", err
-    }
-    return sourcePath, volumeId, nil
-}
-
-// parseMntPath return mntPath, volumeId (/jfs/volumeId, volumeId err)
-func parseMntPath(cmd string) (string, string, error) {
-    cmds := strings.Split(cmd, "\n")
-    mountCmd := cmds[len(cmds)-1]
-    args := strings.Fields(mountCmd)
-    if args[0] == "exec" {
-        args = args[1:]
-    }
-    if len(args) < 3 || !strings.HasPrefix(args[2], "/jfs") {
-        return "", "", fmt.Errorf("err cmd:%s", cmd)
-    }
-    argSlice := strings.Split(args[2], "/")
-    if len(argSlice) < 3 {
-        return "", "", fmt.Errorf("err mntPath:%s", args[2])
-    }
-    return args[2], argSlice[2], nil
-}
-
 // CheckExpectValue Check if the key has the expected value
 func CheckExpectValue(m map[string]string, key string, targetValue string) bool {
     if len(m) == 0 {
@@ -509,9 +468,6 @@ type ClientVersion struct {
 
 const ceImageRegex = `ce-v(\d+)\.(\d+)\.(\d+)`
 const eeImageRegex = `ee-(\d+)\.(\d+)\.(\d+)`
-const ceVersionRegex = `version (\d+)\.(\d+)\.(\d+)+`
-const ceDevVersionRegex = `version (\d+)\.(\d+)\.(\d+)-dev`
-const eeVersionRegex = `version (\d+)\.(\d+)\.(\d+) `
 
 func (v ClientVersion) LessThan(o ClientVersion) bool {
     if o.Dev {
@@ -521,16 +477,19 @@ func (v ClientVersion) LessThan(o ClientVersion) bool {
     if v.Dev {
         return false
     }
-    if v.Major != o.Major {
-        return v.Major < o.Major
+    if o.Major > v.Major {
+        return true
     }
-    if v.Minor != o.Minor {
-        return v.Minor < o.Minor
+    if o.Minor > v.Minor {
+        return true
     }
-    return v.Patch < o.Patch
+    if o.Patch > v.Patch {
+        return true
+    }
+    return false
 }
 
-func parseClientVersionFromImage(image string) ClientVersion {
+func parseClientVersion(image string) ClientVersion {
     if image == "" {
         return ClientVersion{}
     }
@@ -564,48 +523,8 @@ func parseClientVersionFromImage(image string) ClientVersion {
     return version
 }
 
-func parseClientVersion(ce bool, version string) ClientVersion {
-    v := ClientVersion{IsCe: ce}
-    var re *regexp.Regexp
-    if !ce {
-        re = regexp.MustCompile(eeVersionRegex)
-    } else {
-        if strings.Contains(version, "dev") {
-            re = regexp.MustCompile(ceDevVersionRegex)
-            v.Dev = true
-        } else {
-            re = regexp.MustCompile(ceVersionRegex)
-        }
-    }
-
-    matches := re.FindStringSubmatch(version)
-    if len(matches) == 4 {
-        v.Major, _ = strconv.Atoi(matches[1])
-        v.Minor, _ = strconv.Atoi(matches[2])
-        v.Patch, _ = strconv.Atoi(matches[3])
-    }
-    return v
-}
-
-func SupportUpgradeRecreate(ce bool, version string) bool {
-    v := parseClientVersion(ce, version)
-    return supportFusePass(v)
-}
-
-func SupportUpgradeBinary(ce bool, version string) bool {
-    v := parseClientVersion(ce, version)
-    return supportUpgradeBinary(v)
-}
-
 func SupportFusePass(image string) bool {
-    v := parseClientVersionFromImage(image)
-    if v.Dev {
-        return false
-    }
-    return supportFusePass(v)
-}
-
-func supportFusePass(v ClientVersion) bool {
+    v := parseClientVersion(image)
     ceFuseVersion := ClientVersion{
         IsCe:  true,
         Dev:   false,
@@ -625,42 +544,3 @@ func supportFusePass(v ClientVersion) bool {
     }
     return !v.LessThan(eeFuseVersion)
 }
-
-func supportUpgradeBinary(v ClientVersion) bool {
-    ceFuseVersion := ClientVersion{
-        IsCe:  true,
-        Dev:   false,
-        Major: 1,
-        Minor: 2,
-        Patch: 0,
-    }
-    eeFuseVersion := ClientVersion{
-        IsCe:  false,
-        Dev:   false,
-        Major: 5,
-        Minor: 0,
-        Patch: 0,
-    }
-    if v.IsCe {
-        return !v.LessThan(ceFuseVersion)
-    }
-    return !v.LessThan(eeFuseVersion)
-}
-
-type JuiceConf struct {
-    Meta struct {
-        Sid uint64
-    }
-    Pid  int
-    PPid int
-}
-
-func ParseConfig(conf []byte) (*JuiceConf, error) {
-    var juiceConf JuiceConf
-    err := json.Unmarshal(conf, &juiceConf)
-    if err != nil {
-        klog.Errorf("ParseConfig: %v", err)
-        return nil, err
-    }
-    return &juiceConf, nil
-}
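One behavioural consequence of the restored LessThan is worth noting: each field is compared with an independent `o.X > v.X` check, so the result is only reliable when the more significant fields are equal. Purely as arithmetic, v = 2.0.0 against o = 1.2.1 reports v < o, because the Major check falls through and o.Minor (2) > v.Minor (0); that behaviour is consistent with the ce-1.3.0 and ce-2.0.0 cases disappearing from the SupportFusePass test table in the next file. For contrast, a conventional lexicographic comparison gates each field on equality of the ones above it. This sketch is illustration only, not the patch's behaviour, and flattens the struct fields into plain ints:

```go
package main

import "fmt"

// lessThan orders two semantic versions lexicographically: a lower-order
// field is consulted only when all higher-order fields are equal.
func lessThan(vMajor, vMinor, vPatch, oMajor, oMinor, oPatch int) bool {
	if vMajor != oMajor {
		return vMajor < oMajor
	}
	if vMinor != oMinor {
		return vMinor < oMinor
	}
	return vPatch < oPatch
}

func main() {
	// 2.0.0 is not below 1.2.1 under lexicographic ordering.
	fmt.Println(lessThan(2, 0, 0, 1, 2, 1)) // false
}
```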
- Image: "juicedata/juicefs-csi-driver:v0.10.6", - Command: []string{"sh", "-c"}, - }, - }}}}, - want: "", - want1: "", - wantErr: true, - }, - { - name: "err-cmd sourcePath no MountBase prefix", - args: args{pod: corev1.Pod{Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "pvc-node01-xxx", - Image: "juicedata/juicefs-csi-driver:v0.10.6", - Command: []string{"sh", "-c", "/bin/mount.juicefs redis://127.0.0.1/6379 /err-jfs/pvc-xxx}"}, - }, - }}}}, - want: "", - want1: "", - wantErr: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, got1, err := GetMountPathOfPod(tt.args.pod) - if (err != nil) != tt.wantErr { - t.Errorf("ParseMntPath() error = %v, wantErr %v", err, tt.wantErr) - return - } - if got != tt.want { - t.Errorf("ParseMntPath() got = %v, want %v", got, tt.want) - } - if got1 != tt.want1 { - t.Errorf("ParseMntPath() got1 = %v, want %v", got1, tt.want1) - } - }) - } -} - -func TestParseMntPath(t *testing.T) { - type args struct { - cmd string - } - tests := []struct { - name string - args args - want string - want1 string - wantErr bool - }{ - { - name: "get sourcePath from pod cmd success", - args: args{cmd: "/usr/local/bin/juicefs format --storage=s3 --bucket=http://juicefs-bucket.minio.default.svc.cluster.local:9000 --access-key=minioadmin --secret-key=${secretkey} ${metaurl} ce-secret\n/bin/mount.juicefs redis://127.0.0.1/6379 /jfs/pvc-xxx"}, - want: "/jfs/pvc-xxx", - want1: "pvc-xxx", - wantErr: false, - }, - { - name: "get sourcePath from pod cmd with exec success", - args: args{cmd: "/usr/local/bin/juicefs format --storage=s3 --bucket=http://juicefs-bucket.minio.default.svc.cluster.local:9000 --access-key=minioadmin --secret-key=${secretkey} ${metaurl} ce-secret\nexec /bin/mount.juicefs redis://127.0.0.1/6379 /jfs/pvc-xxx"}, - want: "/jfs/pvc-xxx", - want1: "pvc-xxx", - wantErr: false, - }, - { - name: "without init cmd", - args: args{cmd: "/bin/mount.juicefs redis://127.0.0.1/6379 /jfs/pvc-xxx"}, - want: "/jfs/pvc-xxx", - want1: "pvc-xxx", - wantErr: false, - }, - { - name: "with create subpath", - args: args{cmd: "/usr/local/bin/juicefs format --storage=s3 --bucket=http://juicefs-bucket.minio.default.svc.cluster.local:9000 --access-key=minioadmin --secret-key=${secretkey} ${metaurl} ce-secret\n" + - "/bin/mount.juicefs ${metaurl} /mnt/jfs -o buffer-size=300,cache-size=100,enable-xattr\n" + - "if [ ! 
-d /mnt/jfs/pvc-fb2ec20c-474f-4804-9504-966da4af9b73 ]; then mkdir -m 777 /mnt/jfs/pvc-fb2ec20c-474f-4804-9504-966da4af9b73; fi;\n" + - "umount /mnt/jfs -l\n" + - "/bin/mount.juicefs redis://127.0.0.1/6379 /jfs/pvc-xxx"}, - want: "/jfs/pvc-xxx", - want1: "pvc-xxx", - wantErr: false, - }, - { - name: "err-pod cmd args <3", - args: args{cmd: "/usr/local/bin/juicefs format --storage=s3 --bucket=http://juicefs-bucket.minio.default.svc.cluster.local:9000 --access-key=minioadmin --secret-key=${secretkey} ${metaurl} ce-secret\n/bin/mount.juicefs redis://127.0.0.1/6379"}, - want: "", - want1: "", - wantErr: true, - }, - { - name: "err-cmd sourcePath no MountBase prefix", - args: args{cmd: "/usr/local/bin/juicefs format --storage=s3 --bucket=http://juicefs-bucket.minio.default.svc.cluster.local:9000 --access-key=minioadmin --secret-key=${secretkey} ${metaurl} ce-secret\n/bin/mount.juicefs redis://127.0.0.1/6379 /err-jfs/pvc-xxx"}, - want: "", - want1: "", - wantErr: true, - }, - { - name: "err-cmd sourcePath length err", - args: args{cmd: "/usr/local/bin/juicefs format --storage=s3 --bucket=http://juicefs-bucket.minio.default.svc.cluster.local:9000 --access-key=minioadmin --secret-key=${secretkey} ${metaurl} ce-secret\n/bin/mount.juicefs redis://127.0.0.1/6379 /jfs"}, - want: "", - want1: "", - wantErr: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, got1, err := parseMntPath(tt.args.cmd) - if (err != nil) != tt.wantErr { - t.Errorf("parseMntPath() error = %v, wantErr %v", err, tt.wantErr) - return - } - if got != tt.want { - t.Errorf("parseMntPath() got = %v, want %v", got, tt.want) - } - if got1 != tt.want1 { - t.Errorf("parseMntPath() got1 = %v, want %v", got1, tt.want1) - } - }) - } -} - -func TestSupportUpgradeRecreate(t *testing.T) { - type args struct { - ce bool - version string - } - tests := []struct { - name string - args args - want bool - }{ - { - name: "test-5.1", - args: args{ - ce: false, - version: "juicefs version 5.1.0 (2024-09-09 5a1303e2)", - }, - want: true, - }, - { - name: "test-5.0", - args: args{ - ce: false, - version: "juicefs version 5.0.0 (2024-09-09 5a1303e2)", - }, - want: false, - }, - { - name: "test-4.9", - args: args{ - ce: false, - version: "JuiceFS version 4.9.0 (2023-03-28 bfeaf6a)", - }, - want: false, - }, - { - name: "test-1.2.0", - args: args{ - ce: true, - version: "juicefs version 1.2.0+2024-06-18.873c47b9", - }, - want: false, - }, - { - name: "test-1.1.0", - args: args{ - ce: true, - version: "juicefs version 1.1.0+2023-09-04.08c4ae62", - }, - want: false, - }, - { - name: "test-dev", - args: args{ - ce: true, - version: "juicefs version 1.3.0-dev+2024-08-23.f4e98bd3", - }, - want: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := SupportUpgradeRecreate(tt.args.ce, tt.args.version); got != tt.want { - t.Errorf("SupportUpgradeRecreate() = %v, want %v", got, tt.want) - } - }) - } -} - -func TestSupportUpgradeBinary(t *testing.T) { - type args struct { - ce bool - version string - } - tests := []struct { - name string - args args - want bool - }{ - { - name: "test-5.1", - args: args{ - ce: false, - version: "juicefs version 5.1.0 (2024-09-09 5a1303e2)", - }, - want: true, - }, - { - name: "test-5.0", - args: args{ - ce: false, - version: "juicefs version 5.0.0 (2024-09-09 5a1303e2)", - }, - want: true, - }, - { - name: "test-4.9", - args: args{ - ce: false, - version: "JuiceFS version 4.9.0 (2023-03-28 bfeaf6a)", - }, - want: false, - }, - { - name: "test-1.2.0", - args: 
args{ - ce: true, - version: "juicefs version 1.2.0+2024-06-18.873c47b9", - }, - want: true, - }, - { - name: "test-1.1.0", - args: args{ - ce: true, - version: "juicefs version 1.1.0+2023-09-04.08c4ae62", - }, - want: false, - }, - { - name: "test-dev", - args: args{ - ce: true, - version: "juicefs version 1.3.0-dev+2024-08-23.f4e98bd3", - }, - want: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := SupportUpgradeBinary(tt.args.ce, tt.args.version); got != tt.want { - t.Errorf("SupportUpgradeBinary() = %v, want %v", got, tt.want) - } - }) - } -} diff --git a/pkg/webhook/handler/handler.go b/pkg/webhook/handler/handler.go index deb29a3b33..43cc6ca333 100644 --- a/pkg/webhook/handler/handler.go +++ b/pkg/webhook/handler/handler.go @@ -26,7 +26,6 @@ import ( "k8s.io/klog/v2" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" - "github.com/juicedata/juicefs-csi-driver/pkg/common" "github.com/juicedata/juicefs-csi-driver/pkg/config" "github.com/juicedata/juicefs-csi-driver/pkg/juicefs" "github.com/juicedata/juicefs-csi-driver/pkg/k8sclient" @@ -66,13 +65,13 @@ func (s *SidecarHandler) Handle(ctx context.Context, request admission.Request) } // check if pod has done label - if util.CheckExpectValue(pod.Labels, common.InjectSidecarDone, common.True) { + if util.CheckExpectValue(pod.Labels, config.InjectSidecarDone, config.True) { handlerLog.Info("skip mutating the pod because injection is done.", "name", pod.Name, "namespace", pod.Namespace) return admission.Allowed("skip mutating the pod because injection is done") } // check if pod has disable label - if util.CheckExpectValue(pod.Labels, common.InjectSidecarDisable, common.True) { + if util.CheckExpectValue(pod.Labels, config.InjectSidecarDisable, config.True) { handlerLog.Info("skip mutating the pod because injection is disabled.", "name", pod.Name, "namespace", pod.Namespace) return admission.Allowed("skip mutating the pod because injection is disabled") } diff --git a/pkg/webhook/handler/mutate/sidecar.go b/pkg/webhook/handler/mutate/sidecar.go index d7c029b8a1..fdf5615a0e 100644 --- a/pkg/webhook/handler/mutate/sidecar.go +++ b/pkg/webhook/handler/mutate/sidecar.go @@ -28,7 +28,6 @@ import ( "k8s.io/client-go/util/retry" "k8s.io/klog/v2" - "github.com/juicedata/juicefs-csi-driver/pkg/common" "github.com/juicedata/juicefs-csi-driver/pkg/config" "github.com/juicedata/juicefs-csi-driver/pkg/juicefs" "github.com/juicedata/juicefs-csi-driver/pkg/juicefs/mount/builder" @@ -258,7 +257,7 @@ func (s *SidecarMutate) injectLabel(pod *corev1.Pod) { metaObj.Labels = map[string]string{} } - metaObj.Labels[common.InjectSidecarDone] = common.True + metaObj.Labels[config.InjectSidecarDone] = config.True metaObj.DeepCopyInto(&pod.ObjectMeta) } diff --git a/pkg/webhook/handler/mutate/sidecar_test.go b/pkg/webhook/handler/mutate/sidecar_test.go index 4c3e25e133..1148680793 100644 --- a/pkg/webhook/handler/mutate/sidecar_test.go +++ b/pkg/webhook/handler/mutate/sidecar_test.go @@ -17,12 +17,11 @@ package mutate import ( + volconf "github.com/juicedata/juicefs-csi-driver/pkg/util/resource" "path/filepath" "reflect" "testing" - volconf "github.com/juicedata/juicefs-csi-driver/pkg/util/resource" - corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" diff --git a/pkg/webhook/handler/register.go b/pkg/webhook/handler/register.go index d2e2ddd58e..7e1c29dfbc 100644 --- a/pkg/webhook/handler/register.go +++ b/pkg/webhook/handler/register.go @@ -42,7 +42,7 @@ func Register(mgr manager.Manager, client 
*k8sclient.K8sClient) { server.Register(SidecarPath, &webhook.Admission{Handler: NewSidecarHandler(client, false)}) webhookLog.Info("Registered webhook handler for sidecar", "path", SidecarPath) server.Register(ServerlessPath, &webhook.Admission{Handler: NewSidecarHandler(client, true)}) - webhookLog.Info("Registered webhook handler path for serverless", "path", ServerlessPath) + webhookLog.Info("Registered webhook handler path %s for serverless", "path", ServerlessPath) if config.ValidatingWebhook { server.Register(SecretPath, &webhook.Admission{Handler: NewSecretHandler(client)}) server.Register(PVPath, &webhook.Admission{Handler: NewPVHandler(client)}) diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 1613f35dd7..18db8bc83e 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -823,6 +823,12 @@ packages: '@types/debug@4.1.12': resolution: {integrity: sha512-vIChWdVG3LG1SMxEvI/AK+FWJthlrqlTu7fbrlywTkkaONwk/UAGaULXRlf8vkzFBLVm0zkMdCquhL5aOjhXPQ==} + '@types/eslint-scope@3.7.7': + resolution: {integrity: sha512-MzMFlSLBqNF2gcHWO0G1vP/YQyfvrxZ0bF+u7mzUdZ1/xK4A4sru+nraZz5i3iEIk1l1uyicaDVTB4QbbEkAYg==} + + '@types/eslint@8.56.10': + resolution: {integrity: sha512-Shavhk87gCtY2fhXDctcfS3e6FdxWkCx1iUZ9eEUbh7rTqlZT0/IzOkCOVt0fCjcFuZ9FPYfuezTBImfHCDBGQ==} + '@types/estree@1.0.5': resolution: {integrity: sha512-/kYRxGDLWzHOB7q+wtSUQlFrtcdUccpfy+X+9iMBpHK8QLLhx2wIPYuS5DYtR9Wa/YlZAbIovy7qVdB1Aq6Lyw==} @@ -1167,8 +1173,8 @@ packages: resolution: {integrity: sha512-/kyM18EfinwXZbno9FyUGeFh87KC8HRQBQGildHZbEuRyWFOmv1U10o9BBp8XVZDVNNuQKyIGIu5ZYAAXJ0V2Q==} engines: {node: '>= 4'} - enhanced-resolve@5.17.1: - resolution: {integrity: sha512-LMHl3dXhTcfv8gM4kEzIUeTQ+7fpdA0l2tUf34BddXPkz2A5xJ5L/Pchd5BL6rdccM9QGvu0sWZzK1Z1t4wwyg==} + enhanced-resolve@5.17.0: + resolution: {integrity: sha512-dwDPwZL0dmye8Txp2gzFmA6sxALaSvdRDjPH0viLcKrtlOL3tw62nWWweVD1SdILDTJrbrL6tdWVN58Wo6U3eA==} engines: {node: '>=10.13.0'} entities@2.2.0: @@ -2103,8 +2109,8 @@ packages: resolution: {integrity: sha512-/DyMEOrDgLKKIG0fmvtz+4dUX/3Ghozwgm6iPp8KRhvn+eQf9+Q7GWxVNMk3+uCPWfdXYC4ExGBckIXdFEfH1w==} engines: {node: '>=10.13.0'} - webpack@5.94.0: - resolution: {integrity: sha512-KcsGn50VT+06JH/iunZJedYGUJS5FGjow8wb9c0v5n1Om8O1g4L6LjtfxwlXIATopoQu+vOXXa7gYisWxCoPyg==} + webpack@5.92.0: + resolution: {integrity: sha512-Bsw2X39MYIgxouNATyVpCNVWBCuUwDgWtN78g6lSdPJRLaQ/PUVm/oXcaRAyY/sMFoKFQrsPeqvTizWtq7QPCA==} engines: {node: '>=10.13.0'} hasBin: true peerDependencies: @@ -3010,7 +3016,7 @@ snapshots: '@docusaurus/logger': 2.4.3 '@svgr/webpack': 6.5.1 escape-string-regexp: 4.0.0 - file-loader: 6.2.0(webpack@5.94.0) + file-loader: 6.2.0(webpack@5.92.0) fs-extra: 10.1.0 github-slugger: 1.5.0 globby: 11.1.0 @@ -3021,8 +3027,8 @@ snapshots: resolve-pathname: 3.0.0 shelljs: 0.8.5 tslib: 2.6.3 - url-loader: 4.1.1(file-loader@6.2.0(webpack@5.94.0))(webpack@5.94.0) - webpack: 5.94.0 + url-loader: 4.1.1(file-loader@6.2.0(webpack@5.92.0))(webpack@5.92.0) + webpack: 5.92.0 transitivePeerDependencies: - '@swc/core' - esbuild @@ -3195,6 +3201,16 @@ snapshots: dependencies: '@types/ms': 0.7.34 + '@types/eslint-scope@3.7.7': + dependencies: + '@types/eslint': 8.56.10 + '@types/estree': 1.0.5 + + '@types/eslint@8.56.10': + dependencies: + '@types/estree': 1.0.5 + '@types/json-schema': 7.0.15 + '@types/estree@1.0.5': {} '@types/is-empty@1.2.3': {} @@ -3554,7 +3570,7 @@ snapshots: emojis-list@3.0.0: {} - enhanced-resolve@5.17.1: + enhanced-resolve@5.17.0: dependencies: graceful-fs: 4.2.11 tapable: 2.2.1 @@ -3622,11 +3638,11 @@ snapshots: dependencies: format: 
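A note on the log line restored in Register: klog's structured logger (the same klog.NewKlogr() API this diff uses elsewhere) treats its first argument as a literal message followed by key/value pairs, so a %s verb is never substituted; it is printed verbatim, and the path is already carried by the "path" key. A sketch of the difference, in which the path literal is a placeholder rather than the package's real ServerlessPath value:

```go
package main

import "k8s.io/klog/v2"

func main() {
	log := klog.NewKlogr().WithName("webhook")
	path := "/serverless/inject-v1-pod" // placeholder; ServerlessPath is defined in the real package

	// Structured form: the path travels as a key/value pair.
	log.Info("Registered webhook handler path for serverless", "path", path)

	// Info performs no printf-style expansion, so the "%s" below appears
	// literally in the emitted log line; the value still only arrives via
	// the "path" key.
	log.Info("Registered webhook handler path %s for serverless", "path", path)

	klog.Flush()
}
```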
diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml
index 1613f35dd7..18db8bc83e 100644
--- a/pnpm-lock.yaml
+++ b/pnpm-lock.yaml
@@ -823,6 +823,12 @@ packages:
   '@types/debug@4.1.12':
     resolution: {integrity: sha512-vIChWdVG3LG1SMxEvI/AK+FWJthlrqlTu7fbrlywTkkaONwk/UAGaULXRlf8vkzFBLVm0zkMdCquhL5aOjhXPQ==}
 
+  '@types/eslint-scope@3.7.7':
+    resolution: {integrity: sha512-MzMFlSLBqNF2gcHWO0G1vP/YQyfvrxZ0bF+u7mzUdZ1/xK4A4sru+nraZz5i3iEIk1l1uyicaDVTB4QbbEkAYg==}
+
+  '@types/eslint@8.56.10':
+    resolution: {integrity: sha512-Shavhk87gCtY2fhXDctcfS3e6FdxWkCx1iUZ9eEUbh7rTqlZT0/IzOkCOVt0fCjcFuZ9FPYfuezTBImfHCDBGQ==}
+
   '@types/estree@1.0.5':
     resolution: {integrity: sha512-/kYRxGDLWzHOB7q+wtSUQlFrtcdUccpfy+X+9iMBpHK8QLLhx2wIPYuS5DYtR9Wa/YlZAbIovy7qVdB1Aq6Lyw==}
 
@@ -1167,8 +1173,8 @@
     resolution: {integrity: sha512-/kyM18EfinwXZbno9FyUGeFh87KC8HRQBQGildHZbEuRyWFOmv1U10o9BBp8XVZDVNNuQKyIGIu5ZYAAXJ0V2Q==}
     engines: {node: '>= 4'}
 
-  enhanced-resolve@5.17.1:
-    resolution: {integrity: sha512-LMHl3dXhTcfv8gM4kEzIUeTQ+7fpdA0l2tUf34BddXPkz2A5xJ5L/Pchd5BL6rdccM9QGvu0sWZzK1Z1t4wwyg==}
+  enhanced-resolve@5.17.0:
+    resolution: {integrity: sha512-dwDPwZL0dmye8Txp2gzFmA6sxALaSvdRDjPH0viLcKrtlOL3tw62nWWweVD1SdILDTJrbrL6tdWVN58Wo6U3eA==}
     engines: {node: '>=10.13.0'}
 
   entities@2.2.0:
@@ -2103,8 +2109,8 @@
     resolution: {integrity: sha512-/DyMEOrDgLKKIG0fmvtz+4dUX/3Ghozwgm6iPp8KRhvn+eQf9+Q7GWxVNMk3+uCPWfdXYC4ExGBckIXdFEfH1w==}
     engines: {node: '>=10.13.0'}
 
-  webpack@5.94.0:
-    resolution: {integrity: sha512-KcsGn50VT+06JH/iunZJedYGUJS5FGjow8wb9c0v5n1Om8O1g4L6LjtfxwlXIATopoQu+vOXXa7gYisWxCoPyg==}
+  webpack@5.92.0:
+    resolution: {integrity: sha512-Bsw2X39MYIgxouNATyVpCNVWBCuUwDgWtN78g6lSdPJRLaQ/PUVm/oXcaRAyY/sMFoKFQrsPeqvTizWtq7QPCA==}
     engines: {node: '>=10.13.0'}
     hasBin: true
     peerDependencies:
@@ -3010,7 +3016,7 @@ snapshots:
       '@docusaurus/logger': 2.4.3
       '@svgr/webpack': 6.5.1
       escape-string-regexp: 4.0.0
-      file-loader: 6.2.0(webpack@5.94.0)
+      file-loader: 6.2.0(webpack@5.92.0)
       fs-extra: 10.1.0
       github-slugger: 1.5.0
      globby: 11.1.0
@@ -3021,8 +3027,8 @@
       resolve-pathname: 3.0.0
       shelljs: 0.8.5
       tslib: 2.6.3
-      url-loader: 4.1.1(file-loader@6.2.0(webpack@5.94.0))(webpack@5.94.0)
-      webpack: 5.94.0
+      url-loader: 4.1.1(file-loader@6.2.0(webpack@5.92.0))(webpack@5.92.0)
+      webpack: 5.92.0
     transitivePeerDependencies:
       - '@swc/core'
       - esbuild
@@ -3195,6 +3201,16 @@ snapshots:
     dependencies:
      '@types/ms': 0.7.34
 
+  '@types/eslint-scope@3.7.7':
+    dependencies:
+      '@types/eslint': 8.56.10
+      '@types/estree': 1.0.5
+
+  '@types/eslint@8.56.10':
+    dependencies:
+      '@types/estree': 1.0.5
+      '@types/json-schema': 7.0.15
+
   '@types/estree@1.0.5': {}
 
   '@types/is-empty@1.2.3': {}
@@ -3554,7 +3570,7 @@ snapshots:
   emojis-list@3.0.0: {}
 
-  enhanced-resolve@5.17.1:
+  enhanced-resolve@5.17.0:
     dependencies:
       graceful-fs: 4.2.11
       tapable: 2.2.1
@@ -3622,11 +3638,11 @@ snapshots:
     dependencies:
       format: 0.2.2
 
-  file-loader@6.2.0(webpack@5.94.0):
+  file-loader@6.2.0(webpack@5.92.0):
     dependencies:
       loader-utils: 2.0.4
       schema-utils: 3.3.0
-      webpack: 5.94.0
+      webpack: 5.92.0
 
   fill-range@7.1.1:
     dependencies:
@@ -4409,14 +4425,14 @@ snapshots:
   tapable@2.2.1: {}
 
-  terser-webpack-plugin@5.3.10(webpack@5.94.0):
+  terser-webpack-plugin@5.3.10(webpack@5.92.0):
     dependencies:
       '@jridgewell/trace-mapping': 0.3.25
       jest-worker: 27.5.1
       schema-utils: 3.3.0
       serialize-javascript: 6.0.2
       terser: 5.31.1
-      webpack: 5.94.0
+      webpack: 5.92.0
 
   terser@5.31.1:
     dependencies:
@@ -4545,14 +4561,14 @@ snapshots:
     dependencies:
       punycode: 2.3.1
 
-  url-loader@4.1.1(file-loader@6.2.0(webpack@5.94.0))(webpack@5.94.0):
+  url-loader@4.1.1(file-loader@6.2.0(webpack@5.92.0))(webpack@5.92.0):
     dependencies:
       loader-utils: 2.0.4
       mime-types: 2.1.35
       schema-utils: 3.3.0
-      webpack: 5.94.0
+      webpack: 5.92.0
     optionalDependencies:
-      file-loader: 6.2.0(webpack@5.94.0)
+      file-loader: 6.2.0(webpack@5.92.0)
 
   util-deprecate@1.0.2: {}
@@ -4605,8 +4621,9 @@ snapshots:
   webpack-sources@3.2.3: {}
 
-  webpack@5.94.0:
+  webpack@5.92.0:
     dependencies:
+      '@types/eslint-scope': 3.7.7
       '@types/estree': 1.0.5
       '@webassemblyjs/ast': 1.12.1
       '@webassemblyjs/wasm-edit': 1.12.1
@@ -4615,7 +4632,7 @@ snapshots:
       acorn-import-attributes: 1.9.5(acorn@8.12.0)
       browserslist: 4.23.1
       chrome-trace-event: 1.0.4
-      enhanced-resolve: 5.17.1
+      enhanced-resolve: 5.17.0
       es-module-lexer: 1.5.3
       eslint-scope: 5.1.1
       events: 3.3.0
@@ -4627,7 +4644,7 @@
       neo-async: 2.6.2
       schema-utils: 3.3.0
       tapable: 2.2.1
-      terser-webpack-plugin: 5.3.10(webpack@5.94.0)
+      terser-webpack-plugin: 5.3.10(webpack@5.92.0)
       watchpack: 2.4.1
       webpack-sources: 3.2.3
     transitivePeerDependencies:
diff --git a/scripts/juicefs-csi-webhook-install.sh b/scripts/juicefs-csi-webhook-install.sh
index f1b34a8b2f..b0bd1c2384 100755
--- a/scripts/juicefs-csi-webhook-install.sh
+++ b/scripts/juicefs-csi-webhook-install.sh
@@ -102,6 +102,7 @@ rules:
 - apiGroups:
   - ""
   resources:
+  - pods
   - persistentvolumes
   - persistentvolumeclaims
   - persistentvolumeclaims/status
@@ -110,15 +111,6 @@ rules:
   verbs:
   - get
   - list
   - watch
-- apiGroups:
-  - ""
-  resources:
-  - pods
-  verbs:
-  - get
-  - list
-  - watch
-  - update
 - apiGroups:
   - storage.k8s.io
   resources:
@@ -854,6 +846,7 @@ rules:
 - apiGroups:
   - ""
   resources:
+  - pods
   - persistentvolumes
   - persistentvolumeclaims
   - persistentvolumeclaims/status
@@ -862,15 +855,6 @@ rules:
   verbs:
   - get
   - list
   - watch
-- apiGroups:
-  - ""
-  resources:
-  - pods
-  verbs:
-  - get
-  - list
-  - watch
-  - update
 - apiGroups:
   - storage.k8s.io
   resources: