Skip to content

Commit

Permalink
Change CVMFS chart to access CVMFS via CSI driver
Browse files Browse the repository at this point in the history
  • Loading branch information
PMax5 committed Aug 2, 2024
1 parent dd28649 commit a50c3fa
Show file tree
Hide file tree
Showing 7 changed files with 74 additions and 234 deletions.
10 changes: 5 additions & 5 deletions swan-cern/templates/hadoop-token-generator.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -59,17 +59,17 @@ spec:
securityContext:
allowPrivilegeEscalation: false
volumeMounts:
- mountPath: /cvmfs
name: cvmfs
- name: cvmfs
mountPath: /cvmfs
mountPropagation: HostToContainer
- name: swan-secrets
mountPath: /hadoop-token-generator/hadoop.cred
subPath: hadoop.cred

volumes:
- name: cvmfs
hostPath:
path: /var/cvmfs
type: Directory
persistentVolumeClaim:
claimName: cvmfs
- name: swan-secrets
secret:
defaultMode: 400
Expand Down
127 changes: 14 additions & 113 deletions swan-cern/values.yaml
Original file line number Diff line number Diff line change
@@ -1,66 +1,15 @@
swan:
cvmfs:
deployDaemonSet: &cvmfsDeployDS true
deployCsiDriver: &cvmfsDeployCSI false
useCsiDriver: &cvmfsUseCSI false
prefetcher:
enabled: true
jobs:
# ROOT
cron_opennotebook_root_kernel:
command: >-
source /cvmfs/sft.cern.ch/lcg/views/LCG_105a_swan/x86_64-centos7-gcc11-opt/setup.sh &&
(timeout 20s python3 -m JupyROOT.kernel.rootkernel > /dev/null 2>&1 || true)
minute: '*/15'
# NXCALS
cron_opennotebook_nxcals:
command: >-
source /cvmfs/sft.cern.ch/lcg/views/LCG_105a_nxcals_pro/x86_64-centos7-gcc11-opt/setup.sh &&
(timeout 20s python3 -m ipykernel > /dev/null 2>&1 || true) &&
(timeout 20s python3 -c 'import pyspark' || true)
minute: '*/15'
# CUDA
cron_opennotebook_cuda:
command: >-
(lsmod | grep nvidia) &&
source /cvmfs/sft.cern.ch/lcg/views/LCG_105a_cuda/x86_64-centos7-gcc11-opt/setup.sh &&
(timeout 20s python3 -c 'import tensorflow' || true) &&
(timeout 20s python3 -c 'import torch' || true)
minute: '*/15'
repositories:
- cvmfs-config.cern.ch
- sft.cern.ch
- sft-nightlies.cern.ch
- alice.cern.ch
- alice-ocdb.cern.ch
- alice-nightlies.cern.ch
- alpha.cern.ch
- ams.cern.ch
- atlas.cern.ch
- atlas-condb.cern.ch
- atlas-nightlies.cern.ch
- clicbp.cern.ch
- cms.cern.ch
- cms-ib.cern.ch
- cms-bril.cern.ch
- compass.cern.ch
- compass-condb.cern.ch
- fcc.cern.ch
- ganga.cern.ch
- geant4.cern.ch
- grid.cern.ch
- lhcb.cern.ch
- lhcb-condb.cern.ch
- lhcbdev.cern.ch
- na61.cern.ch
- na62.cern.ch
- projects.cern.ch
- ship.cern.ch
- sw.hsf.org
- sndlhc.cern.ch
resources:
requests:
memory: 1.5G
cvmfs-csi:
extraConfigMaps:
cvmfs-csi-default-local:
default.local: |
CVMFS_HTTP_PROXY="http://ca-proxy.cern.ch:3128"
CVMFS_QUOTA_LIMIT=20000
CVMFS_CACHE_BASE=/cvmfs-localcache
automountDaemonUnmountTimeout: 1800
cvmfs-csi-config-d:
sft.cern.ch.conf: |
CVMFS_HTTP_PROXY='http://ca-proxy-sft.cern.ch:3128;http://ca-proxy.cern.ch:3128'
eos:
deployDaemonSet: &eosDeployDS false
deployCsiDriver: &eosDeployCSI true
Expand Down Expand Up @@ -102,11 +51,11 @@ swan:
subPath: sparkk8s.cred
- name: cvmfs
mountPath: /cvmfs
mountPropagation: HostToContainer
extraVolumes:
- name: cvmfs
hostPath:
path: /var/cvmfs
type: Directory
persistentVolumeClaim:
claimName: cvmfs
- name: swan-jh
configMap:
name: swan-scripts
Expand Down Expand Up @@ -195,54 +144,6 @@ swan:
timeout: 14400
checkEosAuth: true
hooksDir: /srv/jupyterhub/culler
cvmfs:
deployDaemonSet: *cvmfsDeployDS
deployCsiDriver: *cvmfsDeployCSI
useCsiDriver: *cvmfsUseCSI
repositories:
- mount: cvmfs-config.cern.ch
- mount: sft.cern.ch
proxy: 'http://ca-proxy-sft.cern.ch:3128;http://ca-proxy.cern.ch:3128'
- mount: sft-nightlies.cern.ch
proxy: 'http://ca-proxy-sft.cern.ch:3128;http://ca-proxy.cern.ch:3128'
- mount: alice.cern.ch
proxy: 'http://ca-proxy-alice.cern.ch:3128;http://ca-proxy.cern.ch:3128'
- mount: alice-ocdb.cern.ch
proxy: 'http://ca-proxy-alice.cern.ch:3128;http://ca-proxy.cern.ch:3128'
- mount: alice-nightlies.cern.ch
proxy: 'http://ca-proxy-alice.cern.ch:3128;http://ca-proxy.cern.ch:3128'
- mount: alpha.cern.ch
- mount: ams.cern.ch
- mount: atlas.cern.ch
proxy: 'http://ca-proxy-atlas.cern.ch:3128;http://ca-proxy.cern.ch:3128'
- mount: atlas-condb.cern.ch
proxy: 'http://ca-proxy-atlas.cern.ch:3128;http://ca-proxy.cern.ch:3128'
- mount: atlas-nightlies.cern.ch
proxy: 'http://ca-proxy-atlas.cern.ch:3128;http://ca-proxy.cern.ch:3128'
- mount: clicbp.cern.ch
- mount: cms.cern.ch
proxy: 'http://cmsmeyproxy.cern.ch:3128;http://ca-proxy.cern.ch:3128'
- mount: cms-ib.cern.ch
proxy: 'http://cmsmeyproxy.cern.ch:3128;http://ca-proxy.cern.ch:3128'
- mount: cms-bril.cern.ch
proxy: 'http://cmsmeyproxy.cern.ch:3128;http://ca-proxy.cern.ch:3128'
- mount: compass.cern.ch
proxy: 'http://ca-proxy-compass.cern.ch:3128;http://ca-proxy.cern.ch:3128'
- mount: compass-condb.cern.ch
proxy: 'http://ca-proxy-compass.cern.ch:3128;http://ca-proxy.cern.ch:3128'
- mount: fcc.cern.ch
- mount: ganga.cern.ch
- mount: geant4.cern.ch
- mount: lhcb.cern.ch
proxy: 'http://ca-proxy-lhcb.cern.ch:3128;http://ca-proxy.cern.ch:3128'
- mount: lhcb-condb.cern.ch
proxy: 'http://ca-proxy-lhcb.cern.ch:3128;http://ca-proxy.cern.ch:3128'
- mount: lhcbdev.cern.ch
proxy: 'http://ca-proxy-lhcb.cern.ch:3128;http://ca-proxy.cern.ch:3128'
- mount: na61.cern.ch
- mount: na62.cern.ch
- mount: projects.cern.ch
- mount: sw.hsf.org
eos:
deployDaemonSet: *eosDeployDS
deployCsiDriver: *eosDeployCSI
Expand Down
11 changes: 3 additions & 8 deletions swan/Chart.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -24,12 +24,7 @@ dependencies:
version: 5.1.27-1
repository: http://registry.cern.ch/chartrepo/cern
condition: eos.deployCsiDriver

- name: cvmfs
version: 0.0.8
repository: oci://registry.cern.ch/sciencebox/charts
condition: cvmfs.deployDaemonSet
- name: cvmfs-csi
version: 0.1.0
repository: http://registry.cern.ch/chartrepo/cern
condition: cvmfs.deployCsiDriver
version: 2.4.1
repository: oci://registry.cern.ch/kubernetes/charts
condition: cvmfs-csi.deployCsiDriver
53 changes: 12 additions & 41 deletions swan/files/swan_config.py
Original file line number Diff line number Diff line change
Expand Up @@ -217,50 +217,21 @@ def swan_pod_hook(spawner, pod):
pass

# Manage CVMFS access
if get_config("custom.cvmfs.deployDaemonSet", False):
# Access via bind-mount from the host
logging.info("CVMFS access via DaemonSet")
c.SwanKubeSpawner.volumes.append(
V1Volume(
name='cvmfs',
host_path=V1HostPathVolumeSource(
path='/var/cvmfs'
)
c.SwanKubeSpawner.volumes.append(
V1Volume(
name='cvmfs',
persistent_volume_claim=V1PersistentVolumeClaimVolumeSource(
claim_name='cvmfs'
)
)
c.SwanKubeSpawner.volume_mounts.append(
V1VolumeMount(
name='cvmfs',
mount_path='/cvmfs',
mount_propagation='HostToContainer'
)
)
c.SwanKubeSpawner.volume_mounts.append(
V1VolumeMount(
name='cvmfs',
mount_path='/cvmfs',
mount_propagation='HostToContainer'
)
elif (get_config("custom.cvmfs.deployCsiDriver", False) or \
get_config("custom.cvmfs.useCsiDriver", False)):
# Access via CSI driver (persistent volume claims)
logging.info("CVMFS access via CSI driver")
cvmfs_repos = get_config('custom.cvmfs.repositories', [])
for cvmfs_repo_path in cvmfs_repos:
cvmfs_repo_id = cvmfs_repo_path['mount'].replace('.', '-')
c.SwanKubeSpawner.volumes.append(
V1Volume(
name='cvmfs-'+cvmfs_repo_id,
persistent_volume_claim=V1PersistentVolumeClaimVolumeSource(
claim_name='cvmfs-'+cvmfs_repo_id+'-pvc'
)
)
)
c.SwanKubeSpawner.volume_mounts.append(
V1VolumeMount(
name='cvmfs-'+cvmfs_repo_id,
mount_path='/cvmfs/'+cvmfs_repo_path['mount'],
read_only=True
)
)
else:
# No access to CVMFS provided -- Nothing will work.
logging.warning("CVMFS access not provided -- singleuser session will fail. Please review your configuration.")
pass
)

# Required for swan systemuser.sh
c.SwanKubeSpawner.cmd = None
19 changes: 19 additions & 0 deletions swan/templates/cvmfs/storage-class.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
---
# StorageClass handled by the CVMFS CSI driver; PVCs referencing this
# class get automounted read-only access to /cvmfs repositories.
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: cvmfs
provisioner: cvmfs.csi.cern.ch
---
# Shared claim that pods mount to gain access to CVMFS via the CSI driver.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: cvmfs
spec:
  # CVMFS is read-only by nature and can be mounted by many pods at once.
  accessModes:
    - ReadOnlyMany
  resources:
    requests:
      # Volume size value has no effect and is ignored
      # by the driver, but must be non-zero.
      storage: 1
  storageClassName: cvmfs
28 changes: 0 additions & 28 deletions swan/templates/storageclasses.yaml

This file was deleted.

60 changes: 21 additions & 39 deletions swan/values.yaml
Original file line number Diff line number Diff line change
@@ -1,45 +1,32 @@
#
# CVMFS access
# - deployDaemonSet deploys CVMFS pods exposing `/cvmfs` path on the host.
# Access to CVMFS is provided by bind-mounting `/cvmfs` from the host.
# - deployCsiDriver deploys a cluster-wide storage driver for CVMFS.
# Access to CVMFS is provided by persistent volume claims.
# - useCsiDriver has to be used in case the hosting infrastructure provides
# a CSI driver to access CVMFS (i.e., it is not needed to deploy additional pods).
# Access to CVMFS is provided by persistent volume claims (identical to deployCsiDriver).
# - repositories defines which CVMFS repos have to be mounted into singleusers' pods.
# The value passed depends on the method used, DaemonSet VS CSI driver.
#
# Defaults best support a stand-alone small deployment:
# - Deploy a DaemonSet pod with the CVMFS client running inside
# - Chunks from upstream are fetched connecting to the CERN Stratum 1 server
# - We deploy a cluster-wide storage driver for CVMFS, provided by the
# Kubernetes team at CERN (https://gitlab.cern.ch/kubernetes/storage/cvmfs-csi).
# This solution is based on automounting.
#
# Warning:
# - It is discouraged to enable more than one at once.
# - By setting all to false, access to CVMFS will not be possible
# and singleuser's session will not be able to start.
#
cvmfs:
deployDaemonSet: &cvmfsDeployDS true
deployCsiDriver: &cvmfsDeployCSI false
useCsiDriver: &cvmfsUseCSI false
repositories: &cvmfsRepos
- cvmfs-config.cern.ch
- sft.cern.ch
- sft-nightlies.cern.ch
mountOptions:
hostMountpoint: /var/cvmfs
# Prefetcher is provided only by the daemonSet
cvmfs-csi:
automountHostPath: /var/cvmfs-blue
deployCsiDriver: true
prefetcher:
enabled: true
# Jobs to warmup ROOT, NXCALS and CUDA stacks respectively
jobs:
# Python3 kernel
cron_opennotebook_python3_kernel:
command: >-
- name: cron_warmup_lcg_releases
schedule: "*/15 * * * *"
script: |-
#!/bin/bash
source /cvmfs/sft.cern.ch/lcg/views/LCG_105a_swan/x86_64-centos7-gcc11-opt/setup.sh &&
(timeout 20s python3 -m ipykernel > /dev/null 2>&1 || true)
minute: '*/15'
(timeout 20s python3 -m JupyROOT.kernel.rootkernel > /dev/null 2>&1 || true)
source /cvmfs/sft.cern.ch/lcg/views/LCG_105a_nxcals_pro/x86_64-centos7-gcc11-opt/setup.sh &&
(timeout 20s python3 -m ipykernel > /dev/null 2>&1 || true) &&
(timeout 20s python3 -c 'import pyspark' || true)
(lsmod | grep nvidia) &&
source /cvmfs/sft.cern.ch/lcg/views/LCG_105a_cuda/x86_64-centos7-gcc11-opt/setup.sh &&
(timeout 20s python3 -c 'import tensorflow' || true) &&
(timeout 20s python3 -c 'import torch' || true)
#
# EOS access
# - deployDaemonSet deploys EOS fusex pods exposing `/eos` path on the host.
Expand Down Expand Up @@ -231,11 +218,6 @@ jupyterhub:
timeout: 7200
users: true
checkEosAuth: false
cvmfs:
deployDaemonSet: *cvmfsDeployDS
deployCsiDriver: *cvmfsDeployCSI
useCsiDriver: *cvmfsUseCSI
repositories: *cvmfsRepos
eos:
deployDaemonSet: *eosDeployDS
deployCsiDriver: *eosDeployCSI
Expand Down

0 comments on commit a50c3fa

Please sign in to comment.