diff --git a/.github/workflows/site_deploy.yml b/.github/workflows/site_deploy.yml
index 62d1518bad..aa10a49234 100644
--- a/.github/workflows/site_deploy.yml
+++ b/.github/workflows/site_deploy.yml
@@ -15,13 +15,17 @@ on:
         required: true
         default: '23'
       vcell_site:
-        description: 'rel or alpha'
+        description: 'rel or alpha or test'
         required: true
         default: 'alpha'
       server_only:
         description: 'Deploy only the server components?'
         required: true
         default: 'false'
+      deployment_type:
+        description: 'swarm or kubernetes (choose kubernetes for vcell_site="test")'
+        required: true
+        default: 'swarm'
 jobs:
   build:
     name: Build client installers
@@ -195,29 +199,67 @@ jobs:
           singularity remote login -u ${{ secrets.ACTION_USER }} -p ${{ secrets.ACTION_TOKEN }} oras://ghcr.io
           singularity pull $BATCH_SINGULARITY_FILENAME oras://${VCELL_REPO_NAMESPACE}/vcell-batch-singularity:${{ github.event.inputs.vcell_version }}.${{ github.event.inputs.vcell_build }}
           singularity pull $OPT_SINGULARITY_FILENAME oras://${VCELL_REPO_NAMESPACE}/vcell-opt-singularity:${{ github.event.inputs.vcell_version }}.${{ github.event.inputs.vcell_build }}
-      - name: deploy to site
+      - name: deploy to kubernetes site
+        if: ${{ github.event.inputs.deployment_type == 'kubernetes' }}
+        run: |
+          set -ux
+          cd docker/swarm
+          ssh -t ${{ secrets.CD_FULL_USER }}@${VCELL_MANAGER_NODE} sudo docker login -u ${{ secrets.ACTION_USER }} -p ${{ secrets.ACTION_TOKEN }} ghcr.io
+          if ${{ github.event.inputs.server_only != 'true' }}; then
+            # build and install the client installers and the singularity images (kubernetes cluster deployments are separate)
+            ./deploy-action-kubernetes.sh \
+              --ssh-user ${{ secrets.CD_FULL_USER }} \
+              --install-singularity \
+              --build-installers \
+              --installer-deploy-dir $VCELL_INSTALLER_REMOTE_DIR \
+              ${VCELL_MANAGER_NODE} \
+              ./${VCELL_CONFIG_FILE_NAME}
+            export VCELL_SITE_CAMEL=`cat $VCELL_CONFIG_FILE_NAME | grep VCELL_SITE_CAMEL | cut -d"=" -f2`
+            ssh ${{ secrets.CD_FULL_USER }}@${VCELL_MANAGER_NODE} \
+              installer_deploy_dir=$VCELL_INSTALLER_REMOTE_DIR vcell_siteCamel=$VCELL_SITE_CAMEL vcell_version=$VCELL_VERSION vcell_build=$VCELL_BUILD \
+              'bash -s' < link-installers.sh
+          else
+            # build and install only the singularity images (kubernetes cluster deployments are separate)
+            ./deploy-action-kubernetes.sh \
+              --ssh-user ${{ secrets.CD_FULL_USER }} \
+              --install-singularity \
+              ${VCELL_MANAGER_NODE} \
+              ./${VCELL_CONFIG_FILE_NAME}
+          fi
+      - name: deploy to swarm site
+        if: ${{ github.event.inputs.deployment_type == 'swarm' }}
         run: |
           set -ux
           cd docker/swarm
           ssh -t ${{ secrets.CD_FULL_USER }}@${VCELL_MANAGER_NODE} sudo docker login -u ${{ secrets.ACTION_USER }} -p ${{ secrets.ACTION_TOKEN }} ghcr.io
           if ${{ github.event.inputs.server_only != 'true' }}; then
-            ./deploy-action.sh \
-              --ssh-user ${{ secrets.CD_FULL_USER }} --install-singularity --build-installers --installer-deploy-dir $VCELL_INSTALLER_REMOTE_DIR \
-              ${VCELL_MANAGER_NODE} \
-              ./${VCELL_CONFIG_FILE_NAME} ${VCELL_DEPLOY_REMOTE_DIR}/config/${VCELL_CONFIG_FILE_NAME} \
-              ./docker-compose.yml ${VCELL_DEPLOY_REMOTE_DIR}/config/docker-compose_${VCELL_TAG}.yml \
-              vcell${VCELL_SITE}
+            # build and install the client installers, singularity images, and docker swarm configuration
+            ./deploy-action-swarm.sh \
+              --ssh-user ${{ secrets.CD_FULL_USER }} \
+              --install-singularity \
+              --build-installers \
+              --installer-deploy-dir $VCELL_INSTALLER_REMOTE_DIR \
+              ${VCELL_MANAGER_NODE} \
+              ./${VCELL_CONFIG_FILE_NAME} \
+              ${VCELL_DEPLOY_REMOTE_DIR}/config/${VCELL_CONFIG_FILE_NAME} \
+              ./docker-compose.yml \
+              ${VCELL_DEPLOY_REMOTE_DIR}/config/docker-compose_${VCELL_TAG}.yml \
+              vcell${VCELL_SITE}
             export VCELL_SITE_CAMEL=`cat $VCELL_CONFIG_FILE_NAME | grep VCELL_SITE_CAMEL | cut -d"=" -f2`
             ssh ${{ secrets.CD_FULL_USER }}@${VCELL_MANAGER_NODE} \
               installer_deploy_dir=$VCELL_INSTALLER_REMOTE_DIR vcell_siteCamel=$VCELL_SITE_CAMEL vcell_version=$VCELL_VERSION vcell_build=$VCELL_BUILD \
               'bash -s' < link-installers.sh
           else
-            ./deploy-action.sh \
-              --ssh-user ${{ secrets.CD_FULL_USER }} --install-singularity \
-              ${VCELL_MANAGER_NODE} \
-              ./${VCELL_CONFIG_FILE_NAME} ${VCELL_DEPLOY_REMOTE_DIR}/config/${VCELL_CONFIG_FILE_NAME} \
-              ./docker-compose.yml ${VCELL_DEPLOY_REMOTE_DIR}/config/docker-compose_${VCELL_TAG}.yml \
-              vcell${VCELL_SITE}
+            # build and install only the singularity images and docker swarm configuration
+            ./deploy-action-swarm.sh \
+              --ssh-user ${{ secrets.CD_FULL_USER }} \
+              --install-singularity \
+              ${VCELL_MANAGER_NODE} \
+              ./${VCELL_CONFIG_FILE_NAME} \
+              ${VCELL_DEPLOY_REMOTE_DIR}/config/${VCELL_CONFIG_FILE_NAME} \
+              ./docker-compose.yml \
+              ${VCELL_DEPLOY_REMOTE_DIR}/config/docker-compose_${VCELL_TAG}.yml \
+              vcell${VCELL_SITE}
           fi
       - name: Setup tmate session 3
         uses: mxschmitt/action-tmate@v3
diff --git a/docker/swarm/deploy-action-kubernetes.sh b/docker/swarm/deploy-action-kubernetes.sh
new file mode 100755
index 0000000000..527a125578
--- /dev/null
+++ b/docker/swarm/deploy-action-kubernetes.sh
@@ -0,0 +1,177 @@
+#!/usr/bin/env bash
+
+set -ux
+
+show_help() {
+  echo "Deploys or updates a deployment of VCell on remote Kubernetes cluster"
+  echo ""
+  echo "usage: deploy-action-kubernetes.sh [OPTIONS] REQUIRED-ARGUMENTS"
+  echo ""
+  echo "  REQUIRED-ARGUMENTS"
+  echo "    manager-node       any node for ssh access ( vcellapi-test.cam.uchc.edu )"
+  echo ""
+  echo "    local-config-file  local config file for setting environment"
+  echo ""
+  echo "  [OPTIONS]"
+  echo "    -h | --help        show this message"
+  echo ""
+  echo "    --ssh-user user    user for ssh to node [defaults to current user id using whoami]"
+  echo "                       (user must have passwordless sudo for docker commands on manager-node)"
+  echo ""
+  echo "    --build-installers optionally build client installers and place in ./generated_installers dir"
+  echo ""
+  echo "    --installer-deploy-dir /path/to/installer/dir"
+  echo "                       directory for installers accessible to users"
+  echo "                       typically a web-accessible location to download the client installers for each platform"
+  echo ""
+  echo "    --install-singularity  optionally install batch and opt singularity images on each compute node in 'vcell' SLURM partition"
+  echo ""
+  echo ""
+  echo "example:"
+  echo ""
+  echo "deploy-action-kubernetes.sh \\"
+  echo "  --ssh-user vcell \\"
+  echo "  --install-singularity \\"
+  echo "  --build-installers --installer-deploy-dir /share/apps/vcell3/apache_webroot/htdocs/webstart/Test \\"
+  echo "  vcellapi-test.cam.uchc.edu \\"
+  echo "  ./server.config"
+  exit 1
+}
+
+if [[ $# -lt 2 ]]; then
+  show_help
+fi
+
+ssh_user=$(whoami)
+installer_deploy_dir=
+build_installers=false
+install_singularity=false
+while :; do
+  case $1 in
+    -h|--help)
+      show_help
+      exit
+      ;;
+    --ssh-user)
+      shift
+      ssh_user=$1
+      ;;
+    --installer-deploy-dir)
+      shift
+      installer_deploy_dir=$1
+      ;;
+    --install-singularity)
+      install_singularity=true
+      ;;
+    --build-installers)
+      build_installers=true
+      ;;
+    -?*)
+      printf 'ERROR: Unknown option: %s\n' "$1" >&2
+      echo ""
+      show_help
+      ;;
+    *)  # Default case: No more options, so break out of the loop.
+      break
+  esac
+  shift
+done
+
+if [[ $# -ne 2 ]] ; then
+  show_help
+fi
+
+manager_node=$1
+local_config_file=$2
+
+# get settings from config file
+vcell_siteCamel=$(grep VCELL_SITE_CAMEL "$local_config_file" | cut -d"=" -f2)
+vcell_version=$(grep VCELL_VERSION_NUMBER "$local_config_file" | cut -d"=" -f2)
+vcell_build=$(grep VCELL_BUILD_NUMBER "$local_config_file" | cut -d"=" -f2)
+batch_singularity_filename=$(grep VCELL_BATCH_SINGULARITY_FILENAME "$local_config_file" | cut -d"=" -f2)
+opt_singularity_filename=$(grep VCELL_OPT_SINGULARITY_FILENAME "$local_config_file" | cut -d"=" -f2)
+slurm_singularity_central_dir=$(grep VCELL_SLURM_CENTRAL_SINGULARITY_DIR "$local_config_file" | cut -d"=" -f2)
+
+
+#
+# install the singularity images on the cluster nodes
+#
+if [ "$install_singularity" == "true" ]; then
+
+  echo ""
+  pushd ../build/singularity-vm || (echo "pushd ../build/singularity-vm failed"; exit 1)
+  echo ""
+  echo "CURRENT DIRECTORY IS $PWD"
+
+  #
+  # get configuration from config file and load into current bash environment
+  #
+  echo ""
+
+  if [ ! -e "./${batch_singularity_filename}" ]; then
+    echo "failed to find local batch singularity image file $batch_singularity_filename in ./singularity-vm directory"
+    exit 1
+  fi
+
+  if ! scp "./${batch_singularity_filename}" "$ssh_user@$manager_node:${slurm_singularity_central_dir}"; then
+    echo "failed to copy batch singularity image to server"
+    exit 1
+  fi
+
+  if [ ! -e "./${opt_singularity_filename}" ]; then
+    echo "failed to find local opt singularity image file $opt_singularity_filename in ./singularity-vm directory"
+    exit 1
+  fi
+
+  if ! scp "./${opt_singularity_filename}" "$ssh_user@$manager_node:${slurm_singularity_central_dir}"; then
+    echo "failed to copy opt singularity image to server"
+    exit 1
+  fi
+
+  echo "popd"
+  popd || (echo "popd failed"; exit 1)
+fi
+
+
+#
+# if --build-installers, then generate client installers, placing them in ./generated_installers
+# if --installer-deploy-dir, then also copy installers to $installer_deploy_dir
+# *** unimplemented *** (if --link-installers, then also link installers to version independent installer names for each platform)
+#
+if [ "$build_installers" == "true" ]; then
+  #
+  # if --installer-deploy-dir, then copy the installers from ./generated_installers directory to the installer deploy directory
+  #
+  if [ ! -z "$installer_deploy_dir" ]; then
+    # vcell_siteCamel=Alpha
+    # vcell_version=7.0.0
+    # vcell_build=19
+    # version=7_0_0_19
+    version=$(echo "${vcell_version}_${vcell_build}" | tr '.' _)
+    if ! scp "./generated_installers/VCell_${vcell_siteCamel}_windows-x64_${version}_64bit.exe" \
+        "./generated_installers/VCell_${vcell_siteCamel}_unix_${version}_32bit.sh" \
+        "./generated_installers/VCell_${vcell_siteCamel}_macos_${version}_64bit.dmg" \
+        "./generated_installers/VCell_${vcell_siteCamel}_windows-x32_${version}_32bit.exe" \
+        "./generated_installers/VCell_${vcell_siteCamel}_unix_${version}_64bit.sh" \
+        "./generated_installers/updates.xml" \
+        "./generated_installers/updates_linux32.xml" \
+        "./generated_installers/updates_linux64.xml" \
+        "./generated_installers/updates_mac64.xml" \
+        "./generated_installers/updates_win32.xml" \
+        "./generated_installers/updates_win64.xml" \
+        "./generated_installers/output.txt" \
+        "./generated_installers/md5sums" \
+        "./generated_installers/sha256sums" \
+        "$ssh_user@$manager_node:${installer_deploy_dir}";
+    then
+      echo "failed to copy installers";
+      exit 1;
+    fi
+
+  fi
+fi
+
+
+echo "exited normally"
+
+exit 0
diff --git a/docker/swarm/deploy-action.sh b/docker/swarm/deploy-action-swarm.sh
similarity index 92%
rename from docker/swarm/deploy-action.sh
rename to docker/swarm/deploy-action-swarm.sh
index 5c9201a405..9764af92b9 100755
--- a/docker/swarm/deploy-action.sh
+++ b/docker/swarm/deploy-action-swarm.sh
@@ -5,7 +5,7 @@ set -ux
 show_help() {
   echo "Deploys or updates a deployment of VCell on remote Docker swarm cluster"
   echo ""
-  echo "usage: deploy.sh [OPTIONS] REQUIRED-ARGUMENTS"
+  echo "usage: deploy-action-swarm.sh [OPTIONS] REQUIRED-ARGUMENTS"
   echo ""
   echo "  REQUIRED-ARGUMENTS"
   echo "    manager-node       swarm node with manager role ( vcellapi.cam.uchc.edu or vcellapi-beta.cam.uchc.edu )"
@@ -36,23 +36,26 @@ show_help() {
   echo ""
   echo "    --build-installers optionally build client installers and place in ./generated_installers dir"
   echo ""
-  echo "    --installer-deploy-dir /path/to/intstaller/dir"
+  echo "    --installer-deploy-dir /path/to/installer/dir"
   echo "                       directory for installers accessible to users"
   echo "                       typically a web-accessible location to download the client installers for each platform"
   echo ""
-#  echo "    --link-installers  optionally create symbolic links for newly created client installers"
-#  echo "                       for permanent 'latest' web links fr each platform"
-#  echo ""
   echo "    --install-singularity  optionally install batch and opt singularity images on each compute node in 'vcell' SLURM partition"
   echo ""
   echo ""
   echo "example:"
   echo ""
-  echo "deploy.sh --ssh-user vcell --ssh-key ~/.ssh/schaff_rsa \\"
-  echo "   vcell-service.cam.uchc.edu \\"
-  echo "   ./server.config /usr/local/deploy/Test/server_01.config \\"
-  echo "   ./docker-compose.yml /usr/local/deploy/Test/docker-compose_01.yml \\"
-  echo "   vcelltest"
+  echo "deploy-action-swarm.sh \\"
+  echo "  --ssh-user vcell \\"
+  echo "  --ssh-key ~/.ssh/schaff_rsa \\"
+  echo "  --install-singularity \\"
+  echo "  --build-installers --installer-deploy-dir /share/apps/vcell3/apache_webroot/htdocs/webstart/Rel \\"
+  echo "  vcellapi.cam.uchc.edu \\"
+  echo "  ./server.config \\"
+  echo "  /usr/local/deploy/Test/server_01.config \\"
+  echo "  ./docker-compose.yml \\"
+  echo "  /usr/local/deploy/Test/docker-compose_01.yml \\"
+  echo "  vcellrel"
   exit 1
 }
@@ -90,9 +93,6 @@ while :; do
     --build-installers)
       build_installers=true
       ;;
-#    --link-installers)
-#      link_installers=true
-#      ;;
     -?*)
       printf 'ERROR: Unknown option: %s\n' "$1" >&2
       echo ""
       show_help
       ;;
@@ -235,5 +235,3 @@ fi
 echo "exited normally"
 
 exit 0
-
-
diff --git a/docker/swarm/deploy.sh b/docker/swarm/deploy.sh
deleted file mode 100755
index 5e68d061a7..0000000000
--- a/docker/swarm/deploy.sh
+++ /dev/null
@@ -1,264 +0,0 @@
-#!/usr/bin/env bash
-
-shopt -s -o nounset
-
-show_help() {
-  echo "Deploys or updates a deployment of VCell on remote Docker swarm cluster"
-  echo ""
-  echo "usage: deploy.sh [OPTIONS] REQUIRED-ARGUMENTS"
-  echo ""
-  echo "  REQUIRED-ARGUMENTS"
-  echo "    manager-node       swarm node with manager role ( vcellapi.cam.uchc.edu or vcellapi-beta.cam.uchc.edu )"
-  echo ""
-  echo "    local-config-file  local config file for deployment, copied to manager-node including:"
-  echo "                       VCELL_REPO_NAMESPACE=(repo/namespace | namespace)"
-  echo "                       (e.g. schaff or vcell-docker.cam.uchc.edu:5000/schaff )"
-  echo "                       (must be reachable from swarm nodes and include namespace)"
-  echo "                       VCELL_TAG=tag (e.g. dev | 7.0.0-alpha.4 | f98dfe3)"
-  echo ""
-  echo "    remote-config-file absolute path of target config file on remote manager-node"
-  echo "                       WARNING: will overwrite remote file"
-  echo ""
-  echo "    local-compose-file local docker-compose.yml file for deployment, copied to manager-node"
-  echo ""
-  echo "    remote-compose-file absolute path of target docker-compose.yml file on remote manager-node"
-  echo "                       WARNING: will overwrite remote file"
-  echo ""
-  echo "    stack-name         name of Docker swarm stack"
-  echo ""
-  echo "  [OPTIONS]"
-  echo "    -h | --help        show this message"
-  echo ""
-  echo "    --ssh-user user    user for ssh to node [defaults to current user id using whoami]"
-  echo "                       (user must have passwordless sudo for docker commands on manager-node)"
-  echo ""
-  echo "    --ssh-key keyfile  ssh key for passwordless ssh to node"
-  echo ""
-  echo "    --build-installers optionally build client installers and place in ./generated_installers dir"
-  echo ""
-  echo "    --installer-deploy-dir /path/to/intstaller/dir"
-  echo "                       directory for installers accessible to users"
-  echo "                       typically a web-accessible location to download the client installers for each platform"
-  echo ""
-  echo "    --link-installers  optionally create symbolic links for newly created client installers"
-  echo "                       for permanent 'latest' web links fr each platform"
-  echo ""
-  echo "    --install-singularity  optionally install singularity image on each compute node in 'vcell' SLURM partition"
-  echo ""
-  echo ""
-  echo "example:"
-  echo ""
-  echo "deploy.sh --ssh-user vcell --ssh-key ~/.ssh/schaff_rsa \\"
-  echo "   vcell-service.cam.uchc.edu \\"
-  echo "   ./server.config /usr/local/deploy/Test/server_01.config \\"
-  echo "   ./docker-compose.yml /usr/local/deploy/Test/docker-compose_01.yml \\"
-  echo "   vcelltest"
-  exit 1
-}
-
-if [[ $# -lt 6 ]]; then
-  show_help
-fi
-
-ssh_user=$(whoami)
-ssh_key=
-installer_deploy_dir=
-build_installers=false
-link_installers=false
-install_singularity=false
-while :; do
-  case $1 in
-    -h|--help)
-      show_help
-      exit
-      ;;
-    --ssh-user)
-      shift
-      ssh_user=$1
-      ;;
-    --ssh-key)
-      shift
-      ssh_key="-i $1"
-      ;;
-    --installer-deploy-dir)
-      shift
-      installer_deploy_dir=$1
-      ;;
-    --install-singularity)
-      install_singularity=true
-      ;;
-    --build-installers)
-      build_installers=true
-      ;;
-    --link-installers)
-      link_installers=true
-      ;;
-    -?*)
-      printf 'ERROR: Unknown option: %s\n' "$1" >&2
-      echo ""
-      show_help
-      ;;
-    *)  # Default case: No more options, so break out of the loop.
-      break
-  esac
-  shift
-done
-
-if [[ $# -ne 6 ]] ; then
-  show_help
-fi
-
-manager_node=$1
-local_config_file=$2
-remote_config_file=$3
-local_compose_file=$4
-remote_compose_file=$5
-stack_name=$6
-
-# get settings from config file
-vcell_siteCamel=$(grep VCELL_SITE_CAMEL "$local_config_file" | cut -d"=" -f2)
-vcell_version=$(grep VCELL_VERSION_NUMBER "$local_config_file" | cut -d"=" -f2)
-vcell_build=$(grep VCELL_BUILD_NUMBER "$local_config_file" | cut -d"=" -f2)
-batch_singularity_filename=$(grep VCELL_BATCH_SINGULARITY_FILENAME "$local_config_file" | cut -d"=" -f2)
-batch_singularity_image_external=$(grep VCELL_BATCH_SINGULARITY_IMAGE_EXTERNAL "$local_config_file" | cut -d"=" -f2)
-opt_singularity_filename=$(grep VCELL_OPT_SINGULARITY_FILENAME "$local_config_file" | cut -d"=" -f2)
-opt_singularity_image_external=$(grep VCELL_OPT_SINGULARITY_IMAGE_EXTERNAL "$local_config_file" | cut -d"=" -f2)
-#partitionName=$(grep VCELL_SLURM_PARTITION "$local_config_file" | cut -d"=" -f2)
-batchHost=$(grep VCELL_BATCH_HOST $local_config_file | cut -d"=" -f2)
-slurm_singularity_central_dir=$(grep VCELL_SLURM_CENTRAL_SINGULARITY_DIR "$local_config_file" | cut -d"=" -f2)
-
-
-echo ""
-echo "coping $local_config_file to $manager_node:$remote_config_file as user $ssh_user"
-cmd="scp $ssh_key $local_config_file $ssh_user@$manager_node:$remote_config_file"
-echo $cmd
-($cmd) || (echo "failed to upload config file" && exit 1)
-
-echo ""
-echo "coping $local_compose_file to $manager_node:$remote_compose_file as user $ssh_user"
-cmd="scp $ssh_key $local_compose_file $ssh_user@$manager_node:$remote_compose_file"
-echo "$cmd"
-($cmd) || (echo "failed to upload docker-compose file" && exit 1)
-
-
-#
-# install the singularity image on the cluster nodes
-#
-if [ "$install_singularity" == "true" ]; then
-
-  echo ""
-  cmd="pushd ../build/singularity-vm"
-  pushd ../build/singularity-vm || (echo "failed to pushd to ../build/singularity-vm"; exit 1)
-  echo ""
-  echo "CURRENT DIRECTORY IS $PWD"
-
-  #
-  # get configuration from config file and load into current bash environment
-  #
-  echo ""
-
-  if [ ! -e "./${batch_singularity_filename}" ]; then
-    echo "failed to find batch local singularity image file $batch_singularity_filename in ./singularity-vm directory"
-    exit 1
-  fi
-  if [ ! -e "./${opt_singularity_filename}" ]; then
-    echo "failed to find opt local singularity image file $opt_singularity_filename in ./singularity-vm directory"
-    exit 1
-  fi
-
-  echo "mkdir -p ${slurm_singularity_central_dir}"
-  mkdir -p "${slurm_singularity_central_dir}"
-  echo "cp ./${batch_singularity_filename} ${slurm_singularity_central_dir}"
-  cp "./${batch_singularity_filename}" "${slurm_singularity_central_dir}"
-  echo "cp ./${opt_singularity_filename} ${slurm_singularity_central_dir}"
-  cp "./${opt_singularity_filename}" "${slurm_singularity_central_dir}"
-
-  echo "popd"
-  popd || (echo "popd failed"; exit 1)
-fi
-
-
-#
-# deploy the stack on remote cluster
-#
-echo ""
-echo "deploying stack $stack_name to $manager_node using config in $manager_node:$remote_config_file"
-localmachine=$(hostname)
-if [ "$localmachine" == "$manager_node" ]; then
-  echo "env \$(cat $remote_config_file | xargs) docker stack deploy -c $remote_compose_file $stack_name"
-  if ! env $(xargs < "$remote_config_file") docker stack deploy -c "$remote_compose_file" "$stack_name";
-  then echo "failed to deploy stack" && exit 1; fi
-else
-  cmd="ssh $ssh_key -t $ssh_user@$manager_node sudo env \$(cat $remote_config_file | xargs) docker stack deploy -c $remote_compose_file $stack_name"
-  echo "$cmd"
-  ($cmd) || (echo "failed to deploy stack" && exit 1)
-fi
-
-#
-# if --build-installers, then generate client installers, placing then in ./generated_installers
-# if --installer-deploy-dir, then also copy installers to $installer_deploy_dir
-# if --link-installers, then also link installers to version independent installer names for each platform
-#
-if [ "$build_installers" == "true" ]; then
-  #
-  # if --installer-deploy-dir, then copy the installers from ./generated_installers directory to the installer deploy directory
-  #
-  if [ ! -z "$installer_deploy_dir" ]; then
-    # vcell_siteCamel=Alpha
-    # vcell_version=7.0.0
-    # vcell_build=19
-    # version=7_0_0_19
-    version=$(echo "${vcell_version}_${vcell_build}" | tr '.' _)
-    if ! cp "./generated_installers/VCell_${vcell_siteCamel}_windows-x64_${version}_64bit.exe" \
-        "./generated_installers/VCell_${vcell_siteCamel}_windows-x64_${version}_64bit.dat"/* \
-        "./generated_installers/VCell_${vcell_siteCamel}_unix_${version}_32bit.sh" \
-        "./generated_installers/VCell_${vcell_siteCamel}_macos_${version}_64bit.dmg" \
-        "./generated_installers/VCell_${vcell_siteCamel}_windows_${version}_32bit.exe" \
-        "./generated_installers/VCell_${vcell_siteCamel}_unix_${version}_64bit.sh" \
-        "./generated_installers/updates.xml" \
-        "./generated_installers/output.txt" \
-        "./generated_installers/md5sums" \
-        "${installer_deploy_dir}";
-    then
-      echo "failed to copy installers";
-      exit 1;
-    fi
-
-    #
-    # if --link-installers, then create symbolic links from permanent paths to most recent installers (for durable web urls).
-    #
-    if [ "$link_installers" == "true" ]; then
-
-      pushd "${installer_deploy_dir}" || (echo "pushd to ${installer_deploy_dir} failed"; exit 1)
-
-      if ! ln -sf "VCell_${vcell_siteCamel}_windows-x64_${version}_64bit.exe" \
-          "VCell_${vcell_siteCamel}_windows-x64_latest_64bit.exe";
-      then echo "failed to create symbolic link for Win64 installer"; exit 1; fi
-
-      if ! ln -sf "VCell_${vcell_siteCamel}_unix_${version}_32bit.sh" \
-          "VCell_${vcell_siteCamel}_unix_latest_32bit.sh";
-      then echo "failed to create symbolic link for Linux32 installer"; exit 1; fi
-
-      if ! ln -sf "VCell_${vcell_siteCamel}_macos_${version}_64bit.dmg" \
-          "VCell_${vcell_siteCamel}_macos_latest_64bit.dmg";
-      then echo "failed to create symbolic link for Macos installer"; exit 1; fi
-
-      if ! ln -sf "VCell_${vcell_siteCamel}_windows_${version}_32bit.exe" \
-          "VCell_${vcell_siteCamel}_windows_latest_32bit.exe";
-      then echo "failed to create symbolic link for Win32 installer"; exit 1; fi
-
-      if ! ln -sf "VCell_${vcell_siteCamel}_unix_${version}_64bit.sh" \
-          "VCell_${vcell_siteCamel}_unix_latest_64bit.sh";
-      then echo "failed to create symbolic link for Linux64 installer"; exit 1; fi
-
-      popd || (echo "popd failed"; exit 1)
-    fi
-  fi
-fi
-
-
-echo "exited normally"
-
-exit 0
-
-
diff --git a/docker/swarm/serverconfig-uch.sh b/docker/swarm/serverconfig-uch.sh
index 6f09306fd0..5b852ee94a 100755
--- a/docker/swarm/serverconfig-uch.sh
+++ b/docker/swarm/serverconfig-uch.sh
@@ -173,7 +173,7 @@ VCELL_SSH_CMD_RESTORE_TIMEOUT=5
 #
 # write out the environment file to be for:
-# 1. deployment actions (e.g. deploy-action.sh)
+# 1. deployment actions (e.g. deploy-action-swarm.sh or deploy-action-kubernetes.sh)
 # 2. runtime environment for the docker stack run command
 #
 cat <<EOF >"$_outputfile"
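
Note on usage: the new `deployment_type` input selects between the `deploy to swarm site` and `deploy to kubernetes site` steps above. A minimal sketch of triggering the workflow manually with the GitHub CLI follows; the input names are taken from the hunks above, while the version and build values are purely illustrative:

    # hypothetical manual dispatch of .github/workflows/site_deploy.yml
    gh workflow run site_deploy.yml \
      -f vcell_version=7.5.0 \
      -f vcell_build=23 \
      -f vcell_site=test \
      -f server_only=false \
      -f deployment_type=kubernetes

For `vcell_site=rel` or `alpha`, `deployment_type` stays `swarm`; only the swarm path uploads the server config and docker-compose stack files, since kubernetes cluster deployments are managed separately from this workflow.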