diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 00000000..20fddc72 --- /dev/null +++ b/.gitmodules @@ -0,0 +1,3 @@ +[submodule "src/tools/pyfirecrest"] + path = src/tools/pyfirecrest + url = https://github.com/ekouts/pyfirecrest.git diff --git a/CONTRIBUTORS.md b/CONTRIBUTORS.md new file mode 100644 index 00000000..bdfd1c46 --- /dev/null +++ b/CONTRIBUTORS.md @@ -0,0 +1,9 @@ +# Contributors + +- Aliaga, Tomas. ETH Zurich - CSCS +- Cruz, Felipe. ETH Zurich - CSCS +- Dabin, Alejandro. ETH Zurich - CSCS +- Dorsch, Juan Pablo. ETH Zurich - CSCS +- Klein, Mark. ETH Zurich - CSCS +- Koutsaniti, Eirini. ETH Zurich - CSCS +- Lezcano, Facundo. UNL-CONICET - CIMEC \ No newline at end of file diff --git a/ci/dev/Jenkinsfile b/ci/dev/Jenkinsfile new file mode 100644 index 00000000..6da2d87f --- /dev/null +++ b/ci/dev/Jenkinsfile @@ -0,0 +1,108 @@ +#!groovy +// +// Copyright (c) 2019-2021, ETH Zurich. All rights reserved. +// +// Please, refer to the LICENSE file in the root directory. 
+// SPDX-License-Identifier: BSD-3-Clause +// +pipeline { + agent any + stages { + stage('Info') { + steps { + echo "Branch Name: ${env.BRANCH_NAME}" + echo "BUILD_NUMBER : ${env.BUILD_NUMBER}" + echo "BUILD_ID : ${env.BUILD_ID}" + echo "JOB_NAME: ${env.JOB_NAME}" + echo "BUILD_TAG : ${env.BUILD_TAG}" + echo "EXECUTOR_NUMBER : ${env.EXECUTOR_NUMBER}" + echo "NODE_NAME: ${env.NODE_NAME}" + echo "NODE_LABELS : ${env.NODE_LABELS}" + echo "WORKSPACE : ${env.WORKSPACE}" + echo "JENKINS_HOME : ${env.JENKINS_HOME}" + echo "GIT COMMIT: ${env.GIT_COMMIT}" + sh "docker --version" + sh "docker-compose --version" + sh "bash --version" + sh "git --version" + sh "pwd" + sh "ls -la" + sh "git status" + } + } + + stage('Refresh') { + when { + anyOf { + // For now, we use these branches to build everythig from scratch + // See refresh.sh + branch 'master'; + branch 'dev' + } + } + steps { + sh "ci/dev/refresh.sh" + } + } + + stage('Setup') { + steps { + sh "ci/dev/setup.sh" + } + } + + stage('Tests') { + steps { + sh "ci/dev/test.sh" + } + } + } + + post { + always { + script { + // Save the log files + try { + sh "mkdir -p /var/log/jenkins/jobs/${env.JOB_NAME}/builds/${env.BUILD_NUMBER}" + sh "cp -r deploy/test-build/logs/firecrest/* /var/log/jenkins/jobs/${env.JOB_NAME}/builds/${env.BUILD_NUMBER}/." 
+ } catch (errCpLogs) { + echo 'Error while saving log files: ' + errCpLogs.toString() + } + } + } + + success { + script { + // Notify Github on success + withCredentials([string(credentialsId: 'firecrestci_access_token', variable: 'accessToken')]) { + sh 'curl -H "Authorization: token ' + "${accessToken}" + '" "https://api.github.com/repos/eth-cscs/firecrest/statuses/' + "${env.GIT_COMMIT}" + '" \\' + + '-H "Content-Type: application/json" \\' + + '-X POST \\' + + '-d "{\\"state\\": \\"success\\",\\"context\\": \\"continuous-integration/jenkins\\", \\"description\\": \\"Jenkins\\", \\"target_url\\": \\"' + "${env.BUILD_URL}" + '/console\\"}"' + } + } + } + + unsuccessful{ + script { + // Notify Github on failure + withCredentials([string(credentialsId: 'firecrestci_access_token', variable: 'accessToken')]) { + sh 'curl -H "Authorization: token ' + "${accessToken}" + '" "https://api.github.com/repos/eth-cscs/firecrest/statuses/' + "${env.GIT_COMMIT}" + '" \\' + + '-H "Content-Type: application/json" \\' + + '-X POST \\' + + '-d "{\\"state\\": \\"failure\\",\\"context\\": \\"continuous-integration/jenkins\\", \\"description\\": \\"Jenkins\\", \\"target_url\\": \\"' + "${env.BUILD_URL}" + '/console\\"}"' + } + } + } + + cleanup { + script { + try { + sh "ci/dev/clean.sh" + } catch (errCpLogs) { + echo 'Error while trying to clean: ' + errCpLogs.toString() + } + } + } + } +} \ No newline at end of file diff --git a/ci/dev/README.md b/ci/dev/README.md new file mode 100644 index 00000000..90e66c00 --- /dev/null +++ b/ci/dev/README.md @@ -0,0 +1,41 @@ +# FirecREST Testing + +## Requirements + +You can run the tests on any linux machine with... + +- [bash](https://www.gnu.org/software/bash/) >= `4` +- [docker](https://docs.docker.com/engine/install/) >= `20.10` +- [docker-compose](https://docs.docker.com/compose/install/) >= `1.28` + +## Usage + +Clone this repo and cd into it... 
+ +``` +git clone https://github.com/eth-cscs/firecrest +cd firecrest +``` + +To run all tests for the first time simply run... + +``` +ci/dev/run.sh +``` + +Have a look at that script. It will setup and build everything from scratch and run ALL the dev tests. + +If you have already setup everything and just want to re-run some tests without recreating everything +from scratch, you can call the scripts that `ci/dev/test.sh` is calling. + +``` +ci/dev/test.sh +``` + +## Debugging + +If you want to re-test something specific, you can customize the docker calls of the end of `ci/dev/test.sh` +to make an ad-hoc pytest call. + +If the stdout information is not enough, remember to check either the logs in the logs folder created in `ci/dev/setup.sh` +or the container logs with [docker logs](https://docs.docker.com/engine/reference/commandline/logs/). \ No newline at end of file diff --git a/ci/dev/clean.sh b/ci/dev/clean.sh new file mode 100755 index 00000000..6a6a0a61 --- /dev/null +++ b/ci/dev/clean.sh @@ -0,0 +1,16 @@ +#!/bin/bash +## +## Copyright (c) 2019-2021, ETH Zurich. All rights reserved. +## +## Please, refer to the LICENSE file in the root directory. +## SPDX-License-Identifier: BSD-3-Clause +## +set -euo pipefail + +echo "starting" $0 +WORKSPACE=$(cd $(dirname "${BASH_SOURCE[0]}") && pwd)/../../ + +rm -rfv ${WORKSPACE}/deploy/test-build/logs/firecrest/* || true +docker-compose -f ${WORKSPACE}/deploy/test-build/docker-compose.yml down -v + +echo "finished" $0 \ No newline at end of file diff --git a/ci/dev/refresh.sh b/ci/dev/refresh.sh new file mode 100755 index 00000000..b0bc73c4 --- /dev/null +++ b/ci/dev/refresh.sh @@ -0,0 +1,25 @@ +#!/bin/bash +## +## Copyright (c) 2019-2021, ETH Zurich. All rights reserved. +## +## Please, refer to the LICENSE file in the root directory. 
+## SPDX-License-Identifier: BSD-3-Clause +## +set -euo pipefail + +echo "starting" $0 +WORKSPACE=$(cd $(dirname "${BASH_SOURCE[0]}") && pwd)/../../ + +echo "removing potential leftovers" +rm -rfv ${WORKSPACE}/deploy/test-build/logs/firecrest || true +docker-compose -f ${WORKSPACE}/deploy/test-build/docker-compose.yml down -v --rmi all --remove-orphans || true +docker rmi f7t-base f7t-tester || echo "no base image to delete, no problem!" + +echo "building images from scratch (no caches)" +# Building from scratch is slower, but prevents cache-invalidation issues, typical of cached CI machines. +# You may want to do this only once a while, not for every feature branch. +docker build -f ${WORKSPACE}/deploy/docker/base/Dockerfile -t f7t-base --no-cache --pull ${WORKSPACE} +docker build -f ${WORKSPACE}/deploy/docker/tester/Dockerfile -t f7t-tester --no-cache --pull ${WORKSPACE} +docker-compose -f ${WORKSPACE}/deploy/test-build/docker-compose.yml build --no-cache + +echo "finished" $0 diff --git a/ci/dev/retest.sh b/ci/dev/retest.sh new file mode 100755 index 00000000..3b495707 --- /dev/null +++ b/ci/dev/retest.sh @@ -0,0 +1,29 @@ +#!/bin/bash +## +## Copyright (c) 2019-2021, ETH Zurich. All rights reserved. +## +## Please, refer to the LICENSE file in the root directory. +## SPDX-License-Identifier: BSD-3-Clause +## +set -euo pipefail + +echo "starting" $0 +WORKSPACE=$(cd $(dirname "${BASH_SOURCE[0]}") && pwd)/../../ + +echo "sleeping..." +sleep 120 + +# We start with the reservation tests because other tests still need a proper cleanup step. +echo "running reservation tests..." +docker run --rm -u $(id -u):$(id -g) -v ${WORKSPACE}:/firecrest --network f7t-frontend f7t-tester bash \ + -c 'pytest -m reservations -c test-build.ini' + +echo "running unit tests..." +docker run --rm -u $(id -u):$(id -g) -v ${WORKSPACE}:/firecrest --network f7t-frontend f7t-tester bash \ + -c 'pytest -m "not reservations" -c test-build.ini unit' + +echo "running integration tests..." 
+docker run --rm -u $(id -u):$(id -g) -v ${WORKSPACE}:/firecrest --network f7t-frontend f7t-tester bash \ + -c 'pytest -m "not reservations" -c test-build.ini integration' + +echo "finished" $0 \ No newline at end of file diff --git a/ci/dev/run.sh b/ci/dev/run.sh new file mode 100755 index 00000000..6d12cc1b --- /dev/null +++ b/ci/dev/run.sh @@ -0,0 +1,17 @@ +#!/bin/bash +## +## Copyright (c) 2019-2021, ETH Zurich. All rights reserved. +## +## Please, refer to the LICENSE file in the root directory. +## SPDX-License-Identifier: BSD-3-Clause +## +set -euo pipefail + +echo "starting" $0 +WORKSPACE=$(cd $(dirname "${BASH_SOURCE[0]}") && pwd) + +$WORKSPACE/refresh.sh +$WORKSPACE/setup.sh +$WORKSPACE/test.sh + +echo "finished" $0 \ No newline at end of file diff --git a/ci/dev/setup.sh b/ci/dev/setup.sh new file mode 100755 index 00000000..3749c403 --- /dev/null +++ b/ci/dev/setup.sh @@ -0,0 +1,26 @@ +#!/bin/bash +## +## Copyright (c) 2019-2021, ETH Zurich. All rights reserved. +## +## Please, refer to the LICENSE file in the root directory. +## SPDX-License-Identifier: BSD-3-Clause +## +set -euo pipefail + +echo "starting" $0 +WORKSPACE=$(cd $(dirname "${BASH_SOURCE[0]}") && pwd)/../../ + +echo "prepare fresh logs folder..." +mkdir -pv ${WORKSPACE}/deploy/test-build/logs/firecrest +chmod 775 ${WORKSPACE}/deploy/test-build/logs/firecrest + +echo "adjusting keys..." 
+chmod 400 ${WORKSPACE}/deploy/test-build/environment/keys/ca-key +chmod 400 ${WORKSPACE}/deploy/test-build/environment/keys/user-key + +echo "building images (with caches)" +docker build -f ${WORKSPACE}/deploy/docker/base/Dockerfile -t f7t-base ${WORKSPACE} +docker build -f ${WORKSPACE}/deploy/docker/tester/Dockerfile -t f7t-tester ${WORKSPACE} +docker-compose -f ${WORKSPACE}/deploy/test-build/docker-compose.yml build + +echo "finished" $0 \ No newline at end of file diff --git a/ci/dev/test.sh b/ci/dev/test.sh new file mode 100755 index 00000000..9f8b4391 --- /dev/null +++ b/ci/dev/test.sh @@ -0,0 +1,35 @@ +#!/bin/bash +## +## Copyright (c) 2019-2021, ETH Zurich. All rights reserved. +## +## Please, refer to the LICENSE file in the root directory. +## SPDX-License-Identifier: BSD-3-Clause +## +set -euo pipefail + +echo "starting" $0 +WORKSPACE=$(cd $(dirname "${BASH_SOURCE[0]}") && pwd)/../../ + +${WORKSPACE}/ci/dev/clean.sh + +echo "starting containers..." +docker-compose -f ${WORKSPACE}/deploy/test-build/docker-compose.yml up --build -d + +# TODO: Complete the missing endpoints (readinessProbe like) to allow this kind of wait +# and remove the sleeps from retest.sh +# echo "waiting for Firecrest stack to be ready..." +# attempts=0 +# while [[ "$attempts" -lt 9 && "$(curl -s -o /dev/null -w ''%{http_code}'' localhost:9000)" == "000" ]]; do +# let "attempts+=1" +# echo "API NOT ready, next attempt in 10 seconds" +# sleep 10 +# done +# if [[ "$attempts" -ge 9 ]]; then +# echo "TIMEOUT waiting API. Shutting down cluster..." 
+# docker-compose -f ${WORKSPACE}/deploy/test-build/docker-compose.yml down -v +# exit 1 +# fi + +${WORKSPACE}/ci/dev/retest.sh + +echo "finished" $0 \ No newline at end of file diff --git a/ci/pre-prod/Jenkinsfile b/ci/pre-prod/Jenkinsfile new file mode 100644 index 00000000..6cf49da8 --- /dev/null +++ b/ci/pre-prod/Jenkinsfile @@ -0,0 +1,239 @@ +#!groovy + +node { + def gitHubUser = 'eth-cscs' + def gitHubRepo = 'firecrest' + + // $after contains the commit id obtained trough github webhook + def longCommit = "$after" + def shortCommit = longCommit.substring(0, 7) + + try { + stage('Checkout source code') { + echo 'Checking out source code' + + git branch: 'master', + url: 'https://github.com/' + gitHubUser + '/' + gitHubRepo + '.git' + + // checkout the specified commit + sh('git checkout ' + longCommit) + } + + stage('Print Env After source checkout') { + echo "Branch Name: ${env.BRANCH_NAME}" + echo "BUILD_NUMBER : ${env.BUILD_NUMBER}" + echo "BUILD_ID : ${env.BUILD_ID}" + echo "JOB_NAME: ${env.JOB_NAME}" + echo "BUILD_TAG : ${env.BUILD_TAG}" + echo "EXECUTOR_NUMBER : ${env.EXECUTOR_NUMBER}" + echo "NODE_NAME: ${env.NODE_NAME}" + echo "NODE_LABELS : ${env.NODE_LABELS}" + echo "WORKSPACE : ${env.WORKSPACE}" + echo "JENKINS_HOME : ${env.JENKINS_HOME}" + } + + stage('Sync ansible playbooks from github repo') { + + ansibleTowerProjectSync( + towerServer: 'awx-local', + async: false, + importTowerLogs: true, + project: 'Pre-Prod Project', + removeColor: false, + throwExceptionWhenFail: true, + verbose: false + ) + } + + stage('Build firecrest images in registry') { + + ansibleTower( + towerServer: 'awx-local', + templateType: 'job', + jobTemplate: 'Pre-Prod Docker Build', + towerLogLevel: 'full', + removeColor: false, + verbose: true, + extraVars: """ + build_tag: $shortCommit + commit_id: $longCommit + """, + async: false, + throwExceptionWhenFail: true + ) + } + + /* + // This should be executed once from awx itself + // since the test server is a permanent host 
(not dynamically created) + stage('Provisioning of test environment') { + + // Install docker, python etc + ansibleTower( + towerServer: 'awx-local', + templateType: 'job', + jobTemplate: 'Test server provisioning', + towerLogLevel: 'full', + removeColor: false, + verbose: true + async: false + ) + }*/ + + stage('Deploy firecrest in test environment') { + + ansibleTower( + towerServer: 'awx-local', + templateType: 'job', + jobTemplate: 'Deploy firecrest demo', + towerLogLevel: 'full', + removeColor: false, + verbose: true, + extraVars: """ + docker_registry_host: 148.187.97.229:5000 + build_tag: $shortCommit + commit_id: $longCommit + """, + async: false, + throwExceptionWhenFail: true + ) + } + + sh '''#!/bin/sh + sleep 300 + echo "Waiting for containers to get ready..." + ''' + + stage('Run tests against test environment') { + + ansibleTower( + towerServer: 'awx-local', + templateType: 'job', + jobTemplate: 'Run firecrest automated tests', + towerLogLevel: 'full', + removeColor: false, + verbose: true, + async: false, + throwExceptionWhenFail: true + ) + } + + } + catch (e) { + // If there was an exception, the build failed + currentBuild.result = 'FAILED' + throw e + } + finally { + + stage('Fetch firecrest log files') { + + ansibleTower( + towerServer: 'awx-local', + templateType: 'job', + jobTemplate: 'Save firecrest demo deploy log files', + towerLogLevel: 'full', + removeColor: false, + verbose: true, + async: false, + throwExceptionWhenFail: false + ) + } + + // Save Log files + try { + sh "mkdir -p /var/log/jenkins/jobs/${env.JOB_NAME}/builds/${env.BUILD_NUMBER}" + + // retrieve the full path of logs folder inside awx container + // since 'docker cp' command does not allow wildcards + LOGS_PATH_INSIDE_AWX_CONTAINER = sh ( + script: 'docker exec awx_task bash -c "echo /tmp/firecrest_demo_deploy_logs/*/home/centos/firecrest/deploy/demo/logs/firecrest/."', + returnStdout: true + ).trim() + echo "Logs path inside awx_task container: 
${LOGS_PATH_INSIDE_AWX_CONTAINER}" + + // copy log files from awx_task container to the job log folder + sh "docker cp awx_task:${LOGS_PATH_INSIDE_AWX_CONTAINER} /var/log/jenkins/jobs/${env.JOB_NAME}/builds/${env.BUILD_NUMBER}/." + + // remove log files in awx container + sh "docker exec awx_task rm -rf ${LOGS_PATH_INSIDE_AWX_CONTAINER.substring(0, LOGS_PATH_INSIDE_AWX_CONTAINER.length()-2)}" + + } catch (errCpLogs) { + echo 'Error while saving log files: ' + errCpLogs.toString() + } + + stage('Stop and remove firecrest containers in test server') { + + ansibleTower( + towerServer: 'awx-local', + templateType: 'job', + jobTemplate: 'Stop and remove firecrest containers', + towerLogLevel: 'full', + removeColor: false, + verbose: true, + async: false, + throwExceptionWhenFail: false + ) + } + + notifyBuildStatusToGitHub(currentBuild.result, gitHubUser, gitHubRepo, longCommit) + } + + if (currentBuild.result == 'FAILED') { + return + } + + stage('Ask for commit promotion') { + + def INPUT_ID = 'Proceed1' + def msg = "" + slackSend(color: 'good', message: msg) + + def userInput = false + timeout(time: 7, unit: 'DAYS') { + userInput = input(id: INPUT_ID, message: "Promote commit $shortCommit", parameters: [[$class: 'BooleanParameterDefinition', defaultValue: false, description: '', name: 'Do you accept?']]) + } + + echo 'userInput: ' + userInput + if (userInput == true) { + echo 'Promotion accepted' + stage('Tag & Push firecrest images to firecrest-preprod and firecrest-tds') { + // Run template to tag docker images + ansibleTower( + towerServer: 'awx-local', + templateType: 'job', + jobTemplate: 'Tag docker images', + towerLogLevel: 'full', + inventory: 'docker_registry', + removeColor: false, + verbose: true, + credential: 'docker_registry_credentials', + extraVars: """ + commit_id: $shortCommit + """, + async: false + ) + } + } else { + echo 'Promotion was rejected.' 
+ } + } + +} + +def notifyBuildStatusToGitHub(String buildStatus, String gitHubUser, String gitHubRepo, String longCommit) { + buildStatus = buildStatus ?: 'SUCCESS' + def status = '' + if (buildStatus == 'SUCCESS') { + status = 'success' + } else { + status = 'failure' + } + + withCredentials([string(credentialsId: 'firecrestci_access_token', variable: 'accessToken')]) { + sh 'curl -H "Authorization: token ' + "${accessToken}" + '" "https://api.github.com/repos/' + gitHubUser + '/' + gitHubRepo + '/statuses/' + longCommit + '" \\' + + '-H "Content-Type: application/json" \\' + + '-X POST \\' + + '-d "{\\"state\\": \\"' + status + '\\",\\"context\\": \\"continuous-integration/jenkins\\", \\"description\\": \\"Jenkins\\", \\"target_url\\": \\"' + "${env.BUILD_URL}" + '/console\\"}"' + } +} diff --git a/ci/pre-prod/build_image_role.yml b/ci/pre-prod/build_image_role.yml new file mode 100644 index 00000000..55188c66 --- /dev/null +++ b/ci/pre-prod/build_image_role.yml @@ -0,0 +1,12 @@ +## +## Copyright (c) 2019-2021, ETH Zurich. All rights reserved. +## +## Please, refer to the LICENSE file in the root directory. +## SPDX-License-Identifier: BSD-3-Clause +## +--- +- name: Build firecrest microservices images + hosts: all + gather_facts: No + roles: + - { role: build_image_role } diff --git a/ci/pre-prod/build_image_role/tasks/main.yml b/ci/pre-prod/build_image_role/tasks/main.yml new file mode 100644 index 00000000..f5cf2ed3 --- /dev/null +++ b/ci/pre-prod/build_image_role/tasks/main.yml @@ -0,0 +1,95 @@ +## +## Copyright (c) 2019-2021, ETH Zurich. All rights reserved. +## +## Please, refer to the LICENSE file in the root directory. 
+## SPDX-License-Identifier: BSD-3-Clause +## +- name: create build directory + file: + path: /home/firecrest/awx-firecrest-build + state: directory + owner: firecrest + group: firecrest + mode: "0755" + +- name: Clone firecrest git repository + git: + repo: "https://github.com/eth-cscs/firecrest.git" + dest: /home/firecrest/awx-firecrest-build + version: "{{ commit_id }}" + +- name: Build base image with build tag + docker_image: + name: "localhost:5000/f7t-base:{{ build_tag }}" + build: + path: /home/firecrest/awx-firecrest-build + dockerfile: ./deploy/docker/base/Dockerfile + pull: yes + source: build + state: present + push: yes + +# TODO: Building because tagging didn't seem to be properly tagging +- name: Build base image with latest tag + docker_image: + name: "localhost:5000/f7t-base:latest" + build: + path: /home/firecrest/awx-firecrest-build + dockerfile: ./deploy/docker/base/Dockerfile + pull: yes + source: build + state: present + push: yes + +- name: Build base image locally + docker_image: + name: "f7t-base" + build: + path: /home/firecrest/awx-firecrest-build + dockerfile: ./deploy/docker/base/Dockerfile + pull: yes + source: build + state: present + +- name: Build container image + docker_image: + name: "localhost:5000/{{ item.key }}:{{ build_tag }}" + build: + path: /home/firecrest/awx-firecrest-build + dockerfile: ./deploy/docker/{{ item.key }}/Dockerfile + # Pull no, because base image is built locally for now + pull: no + source: build + state: present + push: yes + with_dict: "{{ image_definitions }}" + +- name: Build certificator image + docker_image: + name: "localhost:5000/certificator:{{ build_tag }}" + build: + path: /home/firecrest/awx-firecrest-build + dockerfile: ./deploy/docker/certificator/Dockerfile + pull: no + source: build + state: present + push: yes + +- name: build client image + docker_image: + name: "localhost:5000/client:{{ build_tag }}" + build: + path: /home/firecrest/awx-firecrest-build/src/tests/template_client + 
dockerfile: ./Dockerfile + pull: yes + source: build + state: present + push: yes + +- name: Pull redis and push to local registry + docker_image: + name: redis:5 + repository: "localhost:5000/taskpersistence:{{ build_tag }}" + push: yes + source: pull + diff --git a/ci/pre-prod/build_image_role/vars/main.yml b/ci/pre-prod/build_image_role/vars/main.yml new file mode 100644 index 00000000..dac33e97 --- /dev/null +++ b/ci/pre-prod/build_image_role/vars/main.yml @@ -0,0 +1,21 @@ +## +## Copyright (c) 2019-2021, ETH Zurich. All rights reserved. +## +## Please, refer to the LICENSE file in the root directory. +## SPDX-License-Identifier: BSD-3-Clause +## +image_definitions: + compute: + build_path: /home/firecrest/awx-firecrest-build + reservations: + build_path: /home/firecrest/awx-firecrest-build + status: + build_path: /home/firecrest/awx-firecrest-build + storage: + build_path: /home/firecrest/awx-firecrest-build + tasks: + build_path: /home/firecrest/awx-firecrest-build + utilities: + build_path: /home/firecrest/awx-firecrest-build + openapi: + build_path: /home/firecrest/awx-firecrest-build diff --git a/ci/pre-prod/deploy_demo_playbook.yml b/ci/pre-prod/deploy_demo_playbook.yml new file mode 100644 index 00000000..a49d919c --- /dev/null +++ b/ci/pre-prod/deploy_demo_playbook.yml @@ -0,0 +1,278 @@ +## +## Copyright (c) 2019-2021, ETH Zurich. All rights reserved. +## +## Please, refer to the LICENSE file in the root directory. 
+## SPDX-License-Identifier: BSD-3-Clause +## + +--- +- name: Deploy Firecrest + gather_facts: No + hosts: all + vars: + firecrest_dir: "/home/centos/firecrest" + + tasks: + + - name: Clean firecrest deploy directory + file: + state: absent + path: "{{ firecrest_dir }}/" + + - name: create firecrest directory + file: + path: "{{ firecrest_dir }}" + state: directory + owner: centos + group: centos + mode: "0755" + + - name: Clone firecrest git repository + git: + repo: "https://github.com/eth-cscs/firecrest.git" + dest: "{{ firecrest_dir }}" + version: "{{ commit_id }}" + + - name: Set ssh keys permissions + shell: chmod 400 ca-key user-key + args: + chdir: "{{ firecrest_dir }}/deploy/test-build/environment/keys" + + - name: Create containers network + community.general.docker_network: + name: firecrest-internal + driver: "bridge" + ipam_config: + - subnet: "192.168.220.0/24" + + - name: Run Taskpersistence + community.general.docker_container: + name: taskpersistence + #image: "{{ docker_registry_host }}/taskpersistence:{{build_tag}}" + image: "redis:5" + command: redis-server /redis.conf + volumes: + - "{{ firecrest_dir }}/deploy/demo/taskpersistence/redis.conf:/redis.conf:ro" + - "{{ firecrest_dir }}/deploy/demo/taskpersistence-data:/data:delegated" + - "{{ firecrest_dir }}/deploy/demo/logs:/var/log:delegated" + networks: + - name: firecrest-internal + ipv4_address: "192.168.220.13" + # force to recreate the container + recreate: yes + + - name: Run Tasks + community.general.docker_container: + name: tasks + image: "{{ docker_registry_host }}/tasks:{{build_tag}}" + env_file: "{{ firecrest_dir }}/deploy/demo/common/common.env" + env: + F7T_PERSIST_PORT: "6379" + F7T_PERSIST_PWD: "rediS2200" + F7T_DEBUG_MODE: "True" + F7T_COMPUTE_TASK_EXP_TIME: "86400" + F7T_STORAGE_TASK_EXP_TIME: "2678400" + networks: + - name: firecrest-internal + ipv4_address: "192.168.220.6" + published_ports: + - "5003:5003" + volumes: + - "{{ firecrest_dir 
}}/deploy/demo/logs/firecrest:/var/log:delegated" + - "{{ firecrest_dir }}/deploy/demo/ssl:/ssl" + # force to recreate the container + recreate: yes + + - name: Run Certificator + community.general.docker_container: + name: certificator + image: "{{ docker_registry_host }}/certificator:{{build_tag}}" + env_file: "{{ firecrest_dir }}/deploy/demo/common/common.env" + networks: + - name: firecrest-internal + ipv4_address: "192.168.220.11" + published_ports: + - "5010:5010" + volumes: + - "{{ firecrest_dir }}/deploy/demo/logs/firecrest:/var/log:delegated" + - "{{ firecrest_dir }}/deploy/test-build/environment/keys/user-key.pub:/user-key.pub:ro" + - "{{ firecrest_dir }}/deploy/test-build/environment/keys/ca-key:/ca-key:ro" + - "{{ firecrest_dir }}/deploy/demo/ssl:/ssl" + # force to recreate the container + recreate: yes + + + - name: Run Compute + community.general.docker_container: + name: compute + image: "{{ docker_registry_host }}/compute:{{build_tag}}" + env_file: "{{ firecrest_dir }}/deploy/demo/common/common.env" + networks: + - name: firecrest-internal + ipv4_address: "192.168.220.9" + published_ports: + - "5006:5006" + volumes: + - "{{ firecrest_dir }}/deploy/demo/logs/firecrest:/var/log:delegated" + - "{{ firecrest_dir }}/deploy/test-build/environment/keys/user-key:/user-key:ro" + - "{{ firecrest_dir }}/deploy/demo/ssl:/ssl" + # force to recreate the container + recreate: yes + + - name: Run Reservations + community.general.docker_container: + name: reservations + image: "{{ docker_registry_host }}/reservations:{{build_tag}}" + env_file: "{{ firecrest_dir }}/deploy/demo/common/common.env" + networks: + - name: firecrest-internal + ipv4_address: "192.168.220.8" + published_ports: + - "5005:5005" + volumes: + - "{{ firecrest_dir }}/deploy/demo/logs/firecrest:/var/log:delegated" + - "{{ firecrest_dir }}/deploy/test-build/environment/keys/user-key:/user-key:ro" + - "{{ firecrest_dir }}/deploy/demo/ssl:/ssl" + # force to recreate the container + recreate: yes + + - 
name: Run Status + community.general.docker_container: + name: status + image: "{{ docker_registry_host }}/status:{{build_tag}}" + env_file: "{{ firecrest_dir }}/deploy/demo/common/common.env" + networks: + - name: firecrest-internal + ipv4_address: "192.168.220.4" + published_ports: + - "5001:5001" + volumes: + - "{{ firecrest_dir }}/deploy/demo/logs/firecrest:/var/log:delegated" + - "{{ firecrest_dir }}/deploy/demo/ssl:/ssl" + # force to recreate the container + recreate: yes + + - name: Run Storage + community.general.docker_container: + name: storage + image: "{{ docker_registry_host }}/storage:{{build_tag}}" + env_file: "{{ firecrest_dir }}/deploy/demo/common/common.env" + env: + F7T_S3_URL: "http://192.168.220.19:9000" + F7T_S3_ACCESS_KEY: "storage_access_key" + F7T_S3_SECRET_KEY: "storage_secret_key" + F7T_STORAGE_POLLING_INTERVAL: "60" + F7T_CERT_CIPHER_KEY: 'Df6UZuoPoJ2u5yRwxNfFQ46Nwy8eW1OGTcuhlqn4ONo=' + networks: + - name: firecrest-internal + ipv4_address: "192.168.220.5" + volumes: + - "{{ firecrest_dir }}/deploy/demo/logs/firecrest:/var/log:delegated" + - "{{ firecrest_dir }}/deploy/test-build/environment/keys/user-key:/user-key:ro" + - "{{ firecrest_dir }}/deploy/demo/ssl:/ssl" + # force to recreate the container + recreate: yes + + - name: Run Utilities + community.general.docker_container: + name: utilities + image: "{{ docker_registry_host }}/utilities:{{build_tag}}" + env_file: "{{ firecrest_dir }}/deploy/demo/common/common.env" + networks: + - name: firecrest-internal + ipv4_address: "192.168.220.7" + published_ports: + - "5004:5004" + volumes: + - "{{ firecrest_dir }}/deploy/demo/logs/firecrest:/var/log:delegated" + - "{{ firecrest_dir }}/deploy/test-build/environment/keys/user-key:/user-key:ro" + - "{{ firecrest_dir }}/deploy/demo/ssl:/ssl" + # force to recreate the container + recreate: yes + + - name: Build demo cluster image + community.general.docker_image: + name: demo_cluster + build: + path: "{{ firecrest_dir }}/deploy/test-build" + 
dockerfile: ./cluster/Dockerfile + source: build + state: present + # force to rebuild image + force_source: yes + + - name: Run demo cluster + community.general.docker_container: + name: cluster + image: "demo_cluster" + networks: + - name: firecrest-internal + ipv4_address: "192.168.220.12" + hostname: cluster + volumes: + - "{{ firecrest_dir }}/deploy/demo/logs/cluster/:/var/log/slurm/:delegated" + # force to recreate the container + recreate: yes + + - name: Keycloack + community.general.docker_container: + name: fckeycloak + image: "jboss/keycloak:4.8.3.Final" + env_file: "{{ firecrest_dir }}/deploy/demo/keycloak/keycloak.env" + env: + KEYCLOAK_IMPORT: "/var/tmp/config.json" + Dkeycloak.migration.realmName: "kcrealm" + networks: + - name: firecrest-internal + ipv4_address: "192.168.220.20" + published_ports: + - "{{ lookup('env', 'KEYCLOAK_PORT') | default('8080', True) }}:8080" + + volumes: + - "{{ firecrest_dir }}/deploy/demo/keycloak/config.json:/var/tmp/config.json:ro" + - "{{ firecrest_dir }}/deploy/demo/logs/keycloak:/opt/jboss/keycloak/standalone/log/:delegated" + + - name: Kong + community.general.docker_container: + name: kong + image: "kong:latest" + env: + KONG_DATABASE: "off" + KONG_DECLARATIVE_CONFIG: "/kong.yml" + networks: + - name: firecrest-internal + ipv4_address: "192.168.220.21" + published_ports: + - "8000:8000" + volumes: + - "{{ firecrest_dir }}/deploy/demo/kong/kong.yml:/kong.yml:ro" + + - name: Minio + community.general.docker_container: + name: minio + image: "minio/minio" + command: minio server /data + env: + MINIO_ACCESS_KEY: "storage_access_key" + MINIO_SECRET_KEY: "storage_secret_key" + networks: + - name: firecrest-internal + ipv4_address: "192.168.220.19" + published_ports: + - "9000:9000" + volumes: + - "{{ firecrest_dir }}/deploy/demo/minio:/data:delegated" + + - name: Opa + community.general.docker_container: + name: opa + image: "openpolicyagent/opa:0.22.0" + command: run --server --log-level=debug --log-format=json-pretty 
--tls-cert-file=/ssl/f7t_internal.crt --tls-private-key-file=/ssl/f7t_internal.key /opa-files/data.json /opa-files/policy.rego + networks: + - name: firecrest-internal + ipv4_address: "192.168.220.40" + published_ports: + - "8181:8181" + volumes: + - "{{ firecrest_dir }}/deploy/demo/opa:/opa-files" + - "{{ firecrest_dir }}/deploy/demo/ssl:/ssl" \ No newline at end of file diff --git a/ci/pre-prod/provision_test_server.yml b/ci/pre-prod/provision_test_server.yml new file mode 100644 index 00000000..9dd2b665 --- /dev/null +++ b/ci/pre-prod/provision_test_server.yml @@ -0,0 +1,67 @@ +## +## Copyright (c) 2019-2021, ETH Zurich. All rights reserved. +## +## Please, refer to the LICENSE file in the root directory. +## SPDX-License-Identifier: BSD-3-Clause +## +--- +- name: Install docker + gather_facts: Yes + hosts: all + + tasks: + - name: Install yum utils + yum: + name: yum-utils + state: latest + + - name: Install device-mapper-persistent-data + yum: + name: device-mapper-persistent-data + state: latest + + - name: Install lvm2 + yum: + name: lvm2 + state: latest + + - name: Add Docker repo + get_url: + url: https://download.docker.com/linux/centos/docker-ce.repo + dest: /etc/yum.repos.d/docer-ce.repo + become: yes + + - name: Install Docker + package: + name: docker-ce + state: latest + become: yes + + - name: Start Docker service + service: + name: docker + state: started + enabled: yes + become: yes + + - name: Install Docker Compose + get_url: + url: https://github.com/docker/compose/releases/download/1.26.0/docker-compose-{{ ansible_system }}-{{ ansible_userspace_architecture }} + dest: /usr/local/bin/docker-compose + mode: 'u+x,g+x' + #mode: 0755 + + - name: Install additional packages + yum: + name: + - epel-release + - python2-pip + - python3-pip + - git + state: present + + - name: Install python packages with pip + pip: + name: + - docker + state: present \ No newline at end of file diff --git a/ci/pre-prod/remove_demo_containers.yml 
b/ci/pre-prod/remove_demo_containers.yml new file mode 100644 index 00000000..7594648e --- /dev/null +++ b/ci/pre-prod/remove_demo_containers.yml @@ -0,0 +1,57 @@ +## +## Copyright (c) 2019-2021, ETH Zurich. All rights reserved. +## +## Please, refer to the LICENSE file in the root directory. +## SPDX-License-Identifier: BSD-3-Clause +## + + +--- +- name: Remove firecrest containers + gather_facts: No + hosts: all + vars: + firecrest_dir: "/home/centos/firecrest" + + tasks: + - name: Stop and remove containers + docker_container: + name: "{{ item }}" + state: absent + with_items: + - taskpersistence + - tasks + - certificator + - compute + - reservations + - status + - storage + - utilities + - cluster + - fckeycloak + - kong + - opa + - minio + + - name: Delete network demo_firecrest-internal + community.general.docker_network: + name: demo_firecrest-internal + state: absent + force: yes + + - name: Delete network firecrest-internal + community.general.docker_network: + name: firecrest-internal + state: absent + force: yes + + - name: Delete network demo_default + community.general.docker_network: + name: demo_default + state: absent + force: yes + + - name: Clean firecrest deploy folder + file: + state: absent + path: "{{ firecrest_dir }}/" \ No newline at end of file diff --git a/ci/pre-prod/run_tests.yml b/ci/pre-prod/run_tests.yml new file mode 100644 index 00000000..e16cb563 --- /dev/null +++ b/ci/pre-prod/run_tests.yml @@ -0,0 +1,43 @@ +## +## Copyright (c) 2019-2021, ETH Zurich. All rights reserved. +## +## Please, refer to the LICENSE file in the root directory. 
+## SPDX-License-Identifier: BSD-3-Clause +## +--- +- name: Run tests + gather_facts: no + hosts: all + vars: + # This play will have to start using containerized tests soon, as done in dev + firecrest_tester_dir: "/home/centos/firecrest/deploy/docker/tester" + firecrest_tests_dir: "/home/centos/firecrest/src/tests/automated_tests" + pytest_bin: "/home/centos/.local/bin" + + # Shell module always looks for commands at /bin/sh + # We have to set the full path for pytest in shell tasks. + # Setting PATH variable doesn't work in this case... + #environment: + # PATH: "{{ ansible_env.PATH }}:/home/centos/.local/bin" + tasks: + - name: Install pytest modules + shell: pip3 install --user -r requirements.txt + args: + chdir: "{{ firecrest_tester_dir }}" + + - name: Run unit tests (marker='reservations') + shell: "{{ pytest_bin }}/pytest -m 'reservations' -c demo.ini unit" + args: + chdir: "{{ firecrest_tests_dir }}" + + - name: Run unit tests (marker='not reservations') + shell: "{{ pytest_bin }}/pytest -m 'not reservations' -c demo.ini unit" + args: + chdir: "{{ firecrest_tests_dir }}" + + + - name: Run integration tests + shell: "{{ pytest_bin }}/pytest -m 'not reservations' -c demo.ini integration" + args: + chdir: "{{ firecrest_tests_dir }}" + diff --git a/ci/pre-prod/save_log_files.yml b/ci/pre-prod/save_log_files.yml new file mode 100644 index 00000000..93e4b6c4 --- /dev/null +++ b/ci/pre-prod/save_log_files.yml @@ -0,0 +1,41 @@ +## +## Copyright (c) 2019-2021, ETH Zurich. All rights reserved. +## +## Please, refer to the LICENSE file in the root directory. 
+## SPDX-License-Identifier: BSD-3-Clause +## +--- + +# Log files will be copied from testing server to the logs_dest_dir folder located +# at master node (the awx container) + +- name: Save log files of firecrest demo deploy + gather_facts: No + hosts: all + vars: + firecrest_dir: "/home/centos/firecrest" + logs_dest_dir: "/tmp/firecrest_demo_deploy_logs" #temp folder in awx_tasks container + + tasks: + - name: fetch log files + fetch: + src: "{{ firecrest_dir }}/deploy/demo/logs/firecrest/{{ item }}.log" + dest: "{{ logs_dest_dir }}" + register: fetch_output + loop: + - compute + - utilities + - certificator + - status + - tasks + - storage + - reservations + + - debug: var=fetch_output + + # Synchronizes: src an dest are folders in the remote host + #- name: Synchronization + # ansible.posix.synchronize: + # mode: pull + # src: "{{ firecrest_dir }}/deploy/demo/logs/firecrest" + # dest: "/home/centos/tmp/" \ No newline at end of file diff --git a/ci/pre-prod/tag_image_role.yml b/ci/pre-prod/tag_image_role.yml new file mode 100644 index 00000000..895619a8 --- /dev/null +++ b/ci/pre-prod/tag_image_role.yml @@ -0,0 +1,12 @@ +## +## Copyright (c) 2019-2021, ETH Zurich. All rights reserved. +## +## Please, refer to the LICENSE file in the root directory. +## SPDX-License-Identifier: BSD-3-Clause +## +--- +- name: tag firecrest microservices images + hosts: all + gather_facts: No + roles: + - { role: tag_image_role } \ No newline at end of file diff --git a/ci/pre-prod/tag_image_role/tasks/main.yml b/ci/pre-prod/tag_image_role/tasks/main.yml new file mode 100644 index 00000000..21cf5487 --- /dev/null +++ b/ci/pre-prod/tag_image_role/tasks/main.yml @@ -0,0 +1,15 @@ +## +## Copyright (c) 2019-2021, ETH Zurich. All rights reserved. +## +## Please, refer to the LICENSE file in the root directory. 
+## SPDX-License-Identifier: BSD-3-Clause +## + +- name: Add firecrest-tds tag to firecrest image + docker_image: + name: "localhost:5000/{{ item.key }}:{{ commit_id }}" + repository: "localhost:5000/{{ item.key }}:firecrest-tds" + force_tag: yes + push: yes + source: local + with_dict: "{{ image_definitions }}" \ No newline at end of file diff --git a/ci/pre-prod/tag_image_role/vars/main.yml b/ci/pre-prod/tag_image_role/vars/main.yml new file mode 100644 index 00000000..f35e1d97 --- /dev/null +++ b/ci/pre-prod/tag_image_role/vars/main.yml @@ -0,0 +1,19 @@ +## +## Copyright (c) 2019-2021, ETH Zurich. All rights reserved. +## +## Please, refer to the LICENSE file in the root directory. +## SPDX-License-Identifier: BSD-3-Clause +## +image_definitions: + compute: + reservations: + status: + storage: + tasks: + utilities: + openapi: + client: + taskpersistence: + #minio: + + diff --git a/deploy/demo/common/common.env b/deploy/demo/common/common.env index 65117c3f..a907e875 100644 --- a/deploy/demo/common/common.env +++ b/deploy/demo/common/common.env @@ -21,6 +21,8 @@ F7T_REALM_RSA_TYPE=RS256 F7T_FIRECREST_SERVICE='firecrest.some.place' # AUTHENTICATION ROLE for FirecREST Service Accounts F7T_AUTH_ROLE='firecrest-sa' +# DEBUG FLAG +F7T_DEBUG_MODE=True #------- # microservices IPs F7T_CERTIFICATOR_IP=192.168.220.11 @@ -31,6 +33,7 @@ F7T_STATUS_IP=192.168.220.4 F7T_STORAGE_IP=192.168.220.5 F7T_TASKS_IP=192.168.220.6 F7T_UTILITIES_IP=192.168.220.7 +F7T_RESERVATIONS_IP=192.168.220.8 #----- ports: F7T_CERTIFICATOR_PORT=5010 F7T_COMPUTE_PORT=5006 @@ -38,18 +41,20 @@ F7T_TASKS_PORT=5003 F7T_STATUS_PORT=5001 F7T_STORAGE_PORT=5002 F7T_UTILITIES_PORT=5004 +F7T_RESERVATIONS_PORT=5005 #------- # microservices urls: used by Kong and between microservices -F7T_CERTIFICATOR_URL=http://192.168.220.11:5010 -F7T_COMPUTE_URL=http://192.168.220.9:5006 -F7T_STATUS_URL=http://192.168.220.4:5001 -F7T_STORAGE_URL=http://192.168.220.5:5002 -F7T_TASKS_URL=http://192.168.220.6:5003 
-F7T_UTILITIES_URL=http://192.168.220.7:5004 +F7T_CERTIFICATOR_URL=https://192.168.220.11:5010 +F7T_COMPUTE_URL=https://192.168.220.9:5006 +F7T_STATUS_URL=https://192.168.220.4:5001 +F7T_STORAGE_URL=https://192.168.220.5:5002 +F7T_TASKS_URL=https://192.168.220.6:5003 +F7T_UTILITIES_URL=https://192.168.220.7:5004 +F7T_RESERVATIONS_URL=https://192.168.220.8:5005 # kong_url: used by microservices when return URL to clients F7T_KONG_URL=http://192.168.220.10:8000 #------- -# list of systems +# list of systems #public name for systems, where users except to submit jobs and get files (list with ';') F7T_SYSTEMS_PUBLIC='cluster;cluster' # filesystems mounted in each system @@ -78,12 +83,14 @@ F7T_STORAGE_MAX_FILE_SIZE=5120 F7T_OBJECT_STORAGE='s3v4' # partition for internal transfer F7T_XFER_PARTITION=xfer +# set if account is needed for SLURM job submission +F7T_USE_SLURM_ACCOUNT=True # machine for external transfers F7T_EXT_TRANSFER_MACHINE_PUBLIC='cluster' F7T_EXT_TRANSFER_MACHINE_INTERNAL='192.168.220.12:22' #------- # STATUS: microservices & systems to pool: -F7T_STATUS_SERVICES='certificator;utilities;compute;tasks;storage' +F7T_STATUS_SERVICES='certificator;utilities;compute;tasks;storage;reservations' F7T_STATUS_SYSTEMS='192.168.220.12:22;192.168.220.12:22' #------- # UTILITIES: max size of file for download/upload from filesystem in MB @@ -92,7 +99,7 @@ F7T_UTILITIES_MAX_FILE_SIZE=5 F7T_UTILITIES_TIMEOUT=5 #------ # if enabled FirecREST sends a certificate as command, requires a serverside ssh ForceCommand wrapper -#F7T_SSH_CERTIFICATE_WRAPPER= +F7T_SSH_CERTIFICATE_WRAPPER=True #------ # KONG internal URLs for services F7T_KONG_COMPUTE_URL=http://192.168.220.9:5006 @@ -100,9 +107,13 @@ F7T_KONG_STATUS_URL=http://192.168.220.4:5001 F7T_KONG_STORAGE_URL=http://192.168.220.5:5002 F7T_KONG_TASKS_URL=http://192.168.220.6:5003 F7T_KONG_UTILITIES_URL=http://192.168.220.7:5004 +F7T_KONG_RESERVATIONS_URL=http://192.168.220.8:5005 #------ -#F7T_DEBUG_MODE=True # OPA Vars 
F7T_OPA_USE=True -F7T_OPA_URL=http://192.168.220.40:8181 -F7T_POLICY_PATH=v1/data/f7t/authz \ No newline at end of file +F7T_OPA_URL=https://192.168.220.40:8181 +F7T_POLICY_PATH=v1/data/f7t/authz +# SSL vars +F7T_USE_SSL=True +F7T_SSL_CRT=/ssl/f7t_internal.crt +F7T_SSL_KEY=/ssl/f7t_internal.key \ No newline at end of file diff --git a/deploy/demo/demo_client/config.py b/deploy/demo/demo_client/config.py index 430fdba4..32a24c16 100644 --- a/deploy/demo/demo_client/config.py +++ b/deploy/demo/demo_client/config.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2019-2020, ETH Zurich. All rights reserved. +# Copyright (c) 2019-2021, ETH Zurich. All rights reserved. # # Please, refer to the LICENSE file in the root directory. # SPDX-License-Identifier: BSD-3-Clause diff --git a/deploy/demo/docker-compose.yml b/deploy/demo/docker-compose.yml index f24fdb33..65f12344 100644 --- a/deploy/demo/docker-compose.yml +++ b/deploy/demo/docker-compose.yml @@ -1,3 +1,9 @@ +## +## Copyright (c) 2019-2021, ETH Zurich. All rights reserved. +## +## Please, refer to the LICENSE file in the root directory. 
+## SPDX-License-Identifier: BSD-3-Clause +## version: '3.4' networks: @@ -28,6 +34,7 @@ services: - ./logs/firecrest:/var/log:delegated - ../test-build/environment/keys/user-key.pub:/user-key.pub:ro - ../test-build/environment/keys/ca-key:/ca-key:ro + - ./ssl:/ssl compute: container_name: compute @@ -45,6 +52,7 @@ services: volumes: - ./logs/firecrest:/var/log:delegated - ../test-build/environment/keys/user-key:/user-key:ro + - ./ssl:/ssl status: container_name: status @@ -61,6 +69,7 @@ services: - 5001:5001 volumes: - ./logs/firecrest:/var/log:delegated + - ./ssl:/ssl storage: container_name: storage @@ -83,6 +92,7 @@ services: volumes: - ./logs/firecrest:/var/log:delegated - ../test-build/environment/keys/user-key:/user-key:ro + - ./ssl:/ssl tasks: container_name: tasks @@ -107,6 +117,7 @@ services: - 5003:5003 volumes: - ./logs/firecrest:/var/log:delegated + - ./ssl:/ssl utilities: container_name: utilities @@ -124,6 +135,25 @@ services: volumes: - ./logs/firecrest:/var/log:delegated - ../test-build/environment/keys/user-key:/user-key:ro + - ./ssl:/ssl + + reservations: + container_name: reservations + build: + context: ../../ + dockerfile: deploy/docker/reservations/Dockerfile + network: host + env_file: + - ./common/common.env + networks: + firecrest-internal: + ipv4_address: 192.168.220.8 + ports: + - 5005:5005 + volumes: + - ./logs/firecrest:/var/log:delegated + - ../test-build/environment/keys/user-key:/user-key:ro + - ./ssl:/ssl # web client client: @@ -174,7 +204,7 @@ services: - ./logs/keycloak:/opt/jboss/keycloak/standalone/log/:delegated kong: - image: kong:latest + image: kong:2.3 container_name: kong environment: - KONG_DATABASE=off @@ -193,7 +223,7 @@ services: - 8000:8000 minio: - image: minio/minio + image: minio/minio:RELEASE.2021-02-01T22-56-52Z command: minio server /data container_name: minio environment: @@ -209,7 +239,7 @@ services: taskpersistence: container_name: taskpersistence - image: redis:latest + image: redis:5 command: redis-server 
/redis.conf networks: firecrest-internal: @@ -221,7 +251,7 @@ services: opa: image: openpolicyagent/opa:0.22.0 - command: run --server --log-level=debug --log-format=json-pretty /opa-files/data.json /opa-files/policy.rego + command: run --server --log-level=debug --log-format=json-pretty --tls-cert-file=/ssl/f7t_internal.crt --tls-private-key-file=/ssl/f7t_internal.key /opa-files/data.json /opa-files/policy.rego networks: firecrest-internal: ipv4_address: 192.168.220.40 @@ -229,6 +259,7 @@ services: - "8181:8181" volumes: - ./opa:/opa-files + - ./ssl:/ssl openapi: # image: swaggerapi/swagger-ui:v3.22.0 diff --git a/deploy/demo/kong/kong.yml b/deploy/demo/kong/kong.yml index a18cc947..cb4b55a7 100644 --- a/deploy/demo/kong/kong.yml +++ b/deploy/demo/kong/kong.yml @@ -1,3 +1,9 @@ +## +## Copyright (c) 2019-2021, ETH Zurich. All rights reserved. +## +## Please, refer to the LICENSE file in the root directory. +## SPDX-License-Identifier: BSD-3-Clause +## _format_version: "1.1" plugins: @@ -28,39 +34,45 @@ routes: services: - name: compute - url: http://192.168.220.9:5006 + url: https://192.168.220.9:5006 routes: - name: compute methods: [DELETE,GET,POST] paths: - /compute/ - name: status - url: http://192.168.220.4:5001 + url: https://192.168.220.4:5001 routes: - name: status methods: [GET] paths: - /status/ - name: storage - url: http://192.168.220.5:5002 + url: https://192.168.220.5:5002 routes: - name: storage methods: [GET,POST] paths: - /storage/ - name: tasks - url: http://192.168.220.6:5003 + url: https://192.168.220.6:5003 routes: - name: tasks methods: [GET] paths: - /tasks/ - name: utilities - url: http://192.168.220.7:5004 + url: https://192.168.220.7:5004 routes: - name: utilities methods: [DELETE,GET,POST,PUT] paths: - /utilities/ - +- name: reservations + url: https://192.168.220.8:5005 + routes: + - name: reservations + methods: [DELETE,GET,POST,PUT] + paths: + - /reservations diff --git a/deploy/demo/opa/data.json b/deploy/demo/opa/data.json index 
8ef9f32c..d8918487 100644 --- a/deploy/demo/opa/data.json +++ b/deploy/demo/opa/data.json @@ -2,7 +2,7 @@ { "systems": { "cluster": { - "users": ["test1"] + "users": ["test1", "service-account-firecrest-sample"] }, "not_a_system": { "users": ["testuser"] diff --git a/deploy/demo/source/kong/update_kong_config.sh b/deploy/demo/source/kong/update_kong_config.sh index 9a2df5e4..81543c4b 100644 --- a/deploy/demo/source/kong/update_kong_config.sh +++ b/deploy/demo/source/kong/update_kong_config.sh @@ -1,6 +1,6 @@ #!/bin/bash ## -## Copyright (c) 2019-2020, ETH Zurich. All rights reserved. +## Copyright (c) 2019-2021, ETH Zurich. All rights reserved. ## ## Please, refer to the LICENSE file in the root directory. ## SPDX-License-Identifier: BSD-3-Clause diff --git a/deploy/demo/ssl/f7t_internal.crt b/deploy/demo/ssl/f7t_internal.crt new file mode 100644 index 00000000..3d8c4477 --- /dev/null +++ b/deploy/demo/ssl/f7t_internal.crt @@ -0,0 +1,33 @@ +-----BEGIN CERTIFICATE----- +MIIFojCCA4qgAwIBAgIUPPBK8HvPTkdgfjFWQq8DVOPyTb0wDQYJKoZIhvcNAQEL +BQAwZjELMAkGA1UEBhMCQ0gxDzANBgNVBAgMBlRpY2lubzEPMA0GA1UEBwwGTHVn +YW5vMQ0wCwYDVQQKDARDU0NTMRIwEAYDVQQLDAlGaXJlY1JFU1QxEjAQBgNVBAMM +CTEyNy4wLjAuMTAeFw0yMDExMTMxNjEwMjRaFw0zMDExMTExNjEwMjRaMGYxCzAJ +BgNVBAYTAkNIMQ8wDQYDVQQIDAZUaWNpbm8xDzANBgNVBAcMBkx1Z2FubzENMAsG +A1UECgwEQ1NDUzESMBAGA1UECwwJRmlyZWNSRVNUMRIwEAYDVQQDDAkxMjcuMC4w +LjEwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC/dGgWlhOE2NL22+zU +UeCa2sio1psnYZfEPB9A1sctC8oH9xxvpCEeGI7PpF6+Jga0I9Zms8iLqoewVvHM +KERhD9r+b2wqwYPiIHTVBUriwMCtJv3Gw7RJXjvbwNHs6fuw3jnKJSkZX5s7OEvs +vy0OZnuNqPzj3XbeKAYoC+eQvkJEVwQM2JsbaV4j/fjFKH3irgsXutszbqmqjpDk +3nslN+9G6wL+D+JQoHLhZcVA6pGZpZgcEK7K6BX7rNex7f8Gl71fGEV7X0B+fNxP +dlQppcBVVx9/NDGmwJYiYjtC5jRHorZ1pukZ/OtmP8zj604fNCMgrLyErXkL3FFU +Rs7C/UlfdcDR29XieE/JmPXF3ihqqlXwi/7UcGr/W8tAzqTh5JFoXYY+23P1NITp +GjWiij2O8kcZ58x7q+Y43t5u3xTyl03gVPHliyjVpC78f5AlEH40cGUSyTztb8c5 +OQ2Y9t3R2XiN6w+fa7SuxyDuHCLIcP4qKBaDgB1kbFCf+cFMNC1ze810pkFTNX2J 
+0G6zHuWcAXPVVCaKkf1QrKOgBzoCwJmvYBQ2QSn5/M9tVCLhyA8jiP2+a1eKWEVJ +sW/L8Sx5ur1Cy6wGkA3L1m2DHi/gXNYNU5TSIDGX+y5JYCSpXVVOvcPxSwx3m4BC +CmaczBdQn6bF0q0nAsgQJq1qowIDAQABo0gwRjBEBgNVHREEPTA7gglsb2NhbGhv +c3SHBMCo3AuHBMCo3AmHBMCo3ASHBMCo3AWHBMCo3AaHBMCo3AeHBMCo3CiHBH8A +AAEwDQYJKoZIhvcNAQELBQADggIBADbmLjRBLtAKYgOabjo120nI0rtXOja6Na5t +2hgnlce/h4/Ir79761Ox3UkFF9D8vQSDibvWUyOqWoAqS7UgZ6wk0JZhk+3Xig4a +z0ArkzTneDl/M7C1e02GAx4JWxcZb+ET0sSKDWGOJATZCWsXaE6GhCZO0FDHH512 +zI0/DUUAfPtTZqBmwdzcdE1QRO6cO/gNUJHIRdi+yTuCMB0Mlj1t2nYrF7JPCetL +SkmSvpAtAgqE3D0oOs0GqlyzY2BNwwMVRT02wnEvPCY4lHtXgOtRcD1W2GUZ+1e4 +7xD+WvIc+BS+gRGYjPdB3j+yBo2xb2Nbbb6LUdeNBUqovRrB77IxXBLICwx2tgip +fFPQPAFtSrHbrbLm4GwMGqjPs8bEDJtdWggDThoE6gD9QrYa74YY0thX8CXKy2wn +NYbhF2ICSp3wn4mfxHgmntT394sLi5Aah4Pk33gN1qv/fOj2SMT0B2NEtNIL4MQQ +WkoSy6bDugUNRinTqMLvUqgmp7sNOhAZdN/7tjIsezWj4Ykvq8p/BKSs2nhnuFtw +27wcdHJEvFfAcnK21WNQQ5hKQMfxYrWnZ2VNQ0VK4kr6X5SxEwvLf4eu6A8D5EI7 +Yru1VRH1edGjWQ8P9HTllhhrC7/QaZJYDO4Bf90YRLNqF7TCCp8kDHd6OiDJHWng +MoxMs8E+ +-----END CERTIFICATE----- diff --git a/deploy/demo/ssl/f7t_internal.csr b/deploy/demo/ssl/f7t_internal.csr new file mode 100644 index 00000000..e571a847 --- /dev/null +++ b/deploy/demo/ssl/f7t_internal.csr @@ -0,0 +1,29 @@ +-----BEGIN CERTIFICATE REQUEST----- +MIIE6jCCAtICAQAwZjELMAkGA1UEBhMCQ0gxDzANBgNVBAgMBlRpY2lubzEPMA0G +A1UEBwwGTHVnYW5vMQ0wCwYDVQQKDARDU0NTMRIwEAYDVQQLDAlGaXJlY1JFU1Qx +EjAQBgNVBAMMCTEyNy4wLjAuMTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoC +ggIBAL90aBaWE4TY0vbb7NRR4JrayKjWmydhl8Q8H0DWxy0Lygf3HG+kIR4Yjs+k +Xr4mBrQj1mazyIuqh7BW8cwoRGEP2v5vbCrBg+IgdNUFSuLAwK0m/cbDtEleO9vA +0ezp+7DeOcolKRlfmzs4S+y/LQ5me42o/OPddt4oBigL55C+QkRXBAzYmxtpXiP9 ++MUofeKuCxe62zNuqaqOkOTeeyU370brAv4P4lCgcuFlxUDqkZmlmBwQrsroFfus +17Ht/waXvV8YRXtfQH583E92VCmlwFVXH380MabAliJiO0LmNEeitnWm6Rn862Y/ +zOPrTh80IyCsvISteQvcUVRGzsL9SV91wNHb1eJ4T8mY9cXeKGqqVfCL/tRwav9b +y0DOpOHkkWhdhj7bc/U0hOkaNaKKPY7yRxnnzHur5jje3m7fFPKXTeBU8eWLKNWk +Lvx/kCUQfjRwZRLJPO1vxzk5DZj23dHZeI3rD59rtK7HIO4cIshw/iooFoOAHWRs 
+UJ/5wUw0LXN7zXSmQVM1fYnQbrMe5ZwBc9VUJoqR/VCso6AHOgLAma9gFDZBKfn8 +z21UIuHIDyOI/b5rV4pYRUmxb8vxLHm6vULLrAaQDcvWbYMeL+Bc1g1TlNIgMZf7 +LklgJKldVU69w/FLDHebgEIKZpzMF1CfpsXSrScCyBAmrWqjAgMBAAGgPzA9Bgkq +hkiG9w0BCQ4xMDAuMAkGA1UdEwQCMAAwCwYDVR0PBAQDAgXgMBQGA1UdEQQNMAuC +CTEyNy4wLjAuMTANBgkqhkiG9w0BAQsFAAOCAgEAcO7T4vpzee0K3CpjbTu6qEg/ +Vt2XNSLZ2+5+l8SNwWtUmaTxk1L2z2UMceHPByyFbuekiPfnSuaF4BZ7QGtFme+1 +Wbp+XGthmhWaoWjKK0pEEwlqo9KvXlwdYycX+pPC6Fnl6dIjjuAIkA4jGRhSlGxw +YiyyduyI21b6S6v3FLCK7tQjPu1J07XAEYCzHHpHRahg1wDlzVQp0hEPueNvnXG3 +c6BHpFKw9fBsniw5fHv+FnFICsIOfI5GL56yy0CDQ3R65DsedOEWWE6wq3/wjm80 +YEbpHmM+91/uRQf5RcAwVS/hhl2uUVsiosc/BdAMAvhqpQ/3GnR1xr9wpW4y4scL +EyE58byHs7FsKMfB3iIi0WEe/ajFiSvIyqye9r83Og7agDq9HFN0G0A75m9YR6b1 +zcAwlThhIIsBnZ/tZJnLzNoy+qzu2tx1cwM2K8sy9V6PB5G86VbfACcJhS0KD5TI +IDw1sdxw3QK9WstP4tSD5Ibtpk8U/qHqISupJBBQHTAzropflMn+Rzk6om+8jr6L +qtVyrIlBHXFqlfL4zNZRgXPL5W4NZqmBVoJtcTxGwlLjT3VedorkvObDDGbpxSQe +6NHO3zwvSeItmvV7vOeEmzl+VQUifpgAErCQOTqPzK6rsk/V7IqJqAO6la9v551H +7D1++vJJk9oBlyxRm9k= +-----END CERTIFICATE REQUEST----- diff --git a/deploy/demo/ssl/f7t_internal.key b/deploy/demo/ssl/f7t_internal.key new file mode 100644 index 00000000..eb768fbc --- /dev/null +++ b/deploy/demo/ssl/f7t_internal.key @@ -0,0 +1,51 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIJKAIBAAKCAgEAv3RoFpYThNjS9tvs1FHgmtrIqNabJ2GXxDwfQNbHLQvKB/cc +b6QhHhiOz6ReviYGtCPWZrPIi6qHsFbxzChEYQ/a/m9sKsGD4iB01QVK4sDArSb9 +xsO0SV4728DR7On7sN45yiUpGV+bOzhL7L8tDmZ7jaj849123igGKAvnkL5CRFcE +DNibG2leI/34xSh94q4LF7rbM26pqo6Q5N57JTfvRusC/g/iUKBy4WXFQOqRmaWY +HBCuyugV+6zXse3/Bpe9XxhFe19AfnzcT3ZUKaXAVVcffzQxpsCWImI7QuY0R6K2 +dabpGfzrZj/M4+tOHzQjIKy8hK15C9xRVEbOwv1JX3XA0dvV4nhPyZj1xd4oaqpV +8Iv+1HBq/1vLQM6k4eSRaF2GPttz9TSE6Ro1ooo9jvJHGefMe6vmON7ebt8U8pdN +4FTx5Yso1aQu/H+QJRB+NHBlEsk87W/HOTkNmPbd0dl4jesPn2u0rscg7hwiyHD+ +KigWg4AdZGxQn/nBTDQtc3vNdKZBUzV9idBusx7lnAFz1VQmipH9UKyjoAc6AsCZ +r2AUNkEp+fzPbVQi4cgPI4j9vmtXilhFSbFvy/Esebq9QsusBpANy9Ztgx4v4FzW 
+DVOU0iAxl/suSWAkqV1VTr3D8UsMd5uAQgpmnMwXUJ+mxdKtJwLIECataqMCAwEA +AQKCAgAn9TWcMSpVuaMeHnxpO8VyzGLAyjmLmJTCQgIL2EetoR5DV1G6AzlePKXh +tF6JtQoFIs2mlbMvYyH6rcq9X1IWJ+b+32Zl7gmGcluv8fDVLoGIIfEpQMMp5N79 +WFr35UXAfkD1bZgMUaXmUDOYb7YJoV38DN7q99fNOfE+/m0QCoVuGfMgczy8Fduu +C12WWIMdglGSXE9PhrMmg6a9/akZ9XidBoOTXuFPDo2reUAX8lz9AlCi2uCAPLwJ +0OMboZ5uU9EUPc6SlyiwvYKFmx6pqoxfw1UxiUUrllo/Rt0zczRKFy2ZlIJmCCFh +L8OldgDSBmGmeibGUiZHUW4v9q9EKoE/C95snvoEzBJad+QmWrZ0DpeOZkflBf6I +IAJLMPyHVDVX/WlYUygGu0ZZ+w3qDjKCAZgTyP1r0qC7KAab6kYW94rt/1qe61kk +ZtBESfZn1j0UihmJeR45Ajy1jDqZE5I+1o0i0+UuZ7t0AMFaMR+ny6C/t23iZf4i +w0FkC2aCS1GUAMFPTac5NiBlgLB8uzZXfb2ING28pj3/bhqUXnd3ZQ4Jq0yJQp4A +QA/5PfV2ucJyAPfxoAEb5phhdVFY86E8epmkQ64HyfYlutB9FW3MESLJzr7l2j0I +msPAQuaLU44lyXTlvVrLvzb4a3jlaGxC5uBZenMfLrRcoptn0QKCAQEA5ANclrEy +x0io+S/XK1N229FzS45ILwS4iv7QjK0zoo9KYaMM5InJU7b/40wzonDE2z7E6OtB +9/140EcIymv+yKbp/zpIrVzDLkA1ppc2Ecm4vQMD7hM9bLcncAG6zOH4MwiwSE0i +1One9eF28FPfNb5ywJPZCXSbB/gqLYKWxPUGVIGfecR4Z/kQggMlBdje2u5tMQ4Q +mWiAEtck5PL+19dxM4TMk+f3W7IoxrwH1xYg5MKfkENbySblWhEX3+Qd+4LYQONu +EUOGiiXp/8rWeo9GR6bWVXbWlIJjNkdMPE2OLMLrn4K5WC4xOojL7Q+v2/0cjZAH +1wu0E2tFaU8RJQKCAQEA1vROA8ExALzq7KFgxjmReQ9hlXA8CXW5Q5LF/QBrg+fr +Hx+wgTvWSJVINKPt2iSsC9Enw+uO2UoJQgbEaQLaYQhOfLcknp/o3gyaJufcGQ15 +Omv6NcTfAbehiAfT+bMSQbr1Tp2GF+gYVZkz8TPxK3Uf6Ybj2eoBjSKOQUXB/rw4 +XSWbvai1SnmxSdntlhrzE/UJOvxcJObSdfdP01TMlkoqTYgzlspZfTuSgsVAK28c +6UQilWIu+lRbVORZsI14ozunBN9a0tiVIpSGrC7cSsip+VsuIUC+/wjNryW+ZCJN +uLkkWfvoqYc5WiP5rl2I20I7Mu5YkE0CJ46W4LQ2JwKCAQBr02slmAZ7V1ERBh6q +zn2W1iW/xTk9gu4CTR4yaHXSmhCCu6iNKtuaSyAXTSHd35Y4WCamPxqjO6q2U+FE +WG3UVZqyp81Wrldet96nOTrIjqxeNd7V5yNVFggAX9EGspW9DI7QoimmpjJZ9JVa +cU9NCxMZHtG0NwBZdHH9s5O48GZd8C6KVu7ZvF6iHPqVGUnKHpZkbFJNOa3XOwan +foIlcCyIQHS+PSQsqMO/t4u9mylTBPaQMkplhY9QWGUB686B8g5yIRsb7uLg0a0m +2bUzoC2MFMmnANvYvxBLysuyeLqTUFo8xIUBvlZpZ+eH47M2RSwLheEvQMmXpVLd +yhZBAoIBAQCwUgnEbq5BirWxkNaPAx3fDczgBgZZRCXJ1hVt6PBIbO6iwx3HhEvL +khh+hZFPhRFTSCe0P/8XIfHiJSbrdJmpSmUHTDtPmqArtG6mRz51lmXAlF3TyT+c 
+awKnULSCeG8bV75s1C3wYkPDwz2mNxM2sIaSykEQuTC4koXZ6NyulbouPfE4a7D5 +/ER+7bJDlKubzT3bErKK2oP8m/bwxPhkt0Vn/7dBk45AeaM+fYqbljNC7M7pdnZO +hO+9/OnNNt6kFwRzrLsVhO8gRHELwAyXk25Jyo9WW4PQM2p0pyM7hB/4zHWTEl3Q +4ckbtbLTYoomQHfK2VPwWhrQU9hTTb+1AoIBAHyQsCNO6Y22QS6mM6wK11D86Nmy +L8sNXqZh4V5UNELQQEQEgBEKswLrD9qCrJbUnrRPwmI8xPtyWZ9T8rc7Ib+SGD5q +a09hh0PtLqyWP4EzB0wIA7ydWMMrvI4HJ1plAaFKGKCdNkHI+MrxQeEyiluBX3Gy +fqhlpTx0uEcEX+j2OE5eiNf43896QsfKYVvaW/HZitg1iAKIIIZwEoISMkpt7Soq +N8ZNd2Ab9zZQvKZ4bc1K1zsW1DilXMxXanccGW/BYDM2il2sp2q99GCDS7q/cDlC +MOITrc6TyDNVhTMaA0a8H4ZtYkvIPvqUUAgCB4sJmGT9qAUO9gcZLkEzxEk= +-----END RSA PRIVATE KEY----- diff --git a/deploy/demo/taskpersistence/redis.conf b/deploy/demo/taskpersistence/redis.conf index e816682b..eedf7085 100644 --- a/deploy/demo/taskpersistence/redis.conf +++ b/deploy/demo/taskpersistence/redis.conf @@ -1,3 +1,10 @@ +# +# Copyright (c) 2019-2021, ETH Zurich. All rights reserved. +# +# Please, refer to the LICENSE file in the root directory. +# SPDX-License-Identifier: BSD-3-Clause +# + # Redis configuration file example. # # Note that in order to read the configuration file, Redis must be diff --git a/deploy/docker/base/Dockerfile b/deploy/docker/base/Dockerfile new file mode 100644 index 00000000..3745fe6d --- /dev/null +++ b/deploy/docker/base/Dockerfile @@ -0,0 +1,16 @@ +## +## Copyright (c) 2019-2021, ETH Zurich. All rights reserved. +## +## Please, refer to the LICENSE file in the root directory. 
+## SPDX-License-Identifier: BSD-3-Clause +## +from centos:7 as f7t-base + +# install epel repo for python-pip package +RUN yum install -y epel-release +RUN yum -y update +RUN yum install -y python3-pip + +RUN pip3 install --upgrade pip + +ADD deploy/docker/base/requirements.txt base/requirements.txt \ No newline at end of file diff --git a/deploy/docker/base/requirements.txt b/deploy/docker/base/requirements.txt new file mode 100644 index 00000000..3b9675b2 --- /dev/null +++ b/deploy/docker/base/requirements.txt @@ -0,0 +1,4 @@ +cryptography==2.8 +Flask==1.1.2 +PyJWT==1.7.1 +requests==2.22.0 \ No newline at end of file diff --git a/deploy/docker/certificator/Dockerfile b/deploy/docker/certificator/Dockerfile index 9fa1d88a..4349944c 100644 --- a/deploy/docker/certificator/Dockerfile +++ b/deploy/docker/certificator/Dockerfile @@ -1,23 +1,17 @@ -from centos:7 +## +## Copyright (c) 2019-2021, ETH Zurich. All rights reserved. +## +## Please, refer to the LICENSE file in the root directory. 
+## SPDX-License-Identifier: BSD-3-Clause +## +from f7t-base -# install epel repo for python-pip package -RUN yum install -y epel-release -# update yum -RUN yum -y update +RUN yum install -y openssh-7.4p1 -# install python-pip from repo -RUN yum install -y python3-pip - -# upgrade -RUN pip3 install --upgrade pip - -RUN yum install -y openssh - -RUN pip3 install Flask cryptography pyjwt requests +ADD deploy/docker/certificator/requirements.txt deps/requirements.txt +RUN pip3 install -r deps/requirements.txt ADD src/certificator/certificator.py certificator.py -# ADD src/common/cscs_api_common.py cscs_api_common.py ENTRYPOINT ["python3"] CMD ["certificator.py"] - diff --git a/deploy/docker/certificator/requirements.txt b/deploy/docker/certificator/requirements.txt new file mode 100644 index 00000000..a5338281 --- /dev/null +++ b/deploy/docker/certificator/requirements.txt @@ -0,0 +1 @@ +-r ../base/requirements.txt \ No newline at end of file diff --git a/deploy/docker/compute/Dockerfile b/deploy/docker/compute/Dockerfile index 9425217b..5ab4a3f4 100644 --- a/deploy/docker/compute/Dockerfile +++ b/deploy/docker/compute/Dockerfile @@ -1,12 +1,13 @@ -from centos:7 +## +## Copyright (c) 2019-2021, ETH Zurich. All rights reserved. +## +## Please, refer to the LICENSE file in the root directory. 
+## SPDX-License-Identifier: BSD-3-Clause +## +from f7t-base -RUN yum install -y epel-release -RUN yum -y update -RUN yum install -y python3-pip - -RUN pip3 install --upgrade pip - -RUN pip3 install Flask paramiko cryptography pyjwt requests +ADD deploy/docker/compute/requirements.txt deps/requirements.txt +RUN pip3 install -r deps/requirements.txt ADD src/compute/compute.py compute.py ADD src/common/async_task.py async_task.py @@ -15,4 +16,3 @@ ADD src/common/job_time.py job_time.py ENTRYPOINT ["python3"] CMD ["compute.py"] - diff --git a/deploy/docker/compute/requirements.txt b/deploy/docker/compute/requirements.txt new file mode 100644 index 00000000..499053fc --- /dev/null +++ b/deploy/docker/compute/requirements.txt @@ -0,0 +1,2 @@ +-r ../base/requirements.txt +paramiko==2.6.0 \ No newline at end of file diff --git a/deploy/docker/openapi/Dockerfile b/deploy/docker/openapi/Dockerfile index 3dbbe334..49995375 100644 --- a/deploy/docker/openapi/Dockerfile +++ b/deploy/docker/openapi/Dockerfile @@ -1,3 +1,9 @@ +## +## Copyright (c) 2019-2021, ETH Zurich. All rights reserved. +## +## Please, refer to the LICENSE file in the root directory. 
+## SPDX-License-Identifier: BSD-3-Clause +## FROM swaggerapi/swagger-ui:v3.22.0 COPY doc/openapi/firecrest-developers-api.yaml /tmp/openapi.yaml \ No newline at end of file diff --git a/deploy/docker/reservations/Dockerfile b/deploy/docker/reservations/Dockerfile new file mode 100644 index 00000000..126d444f --- /dev/null +++ b/deploy/docker/reservations/Dockerfile @@ -0,0 +1,10 @@ +from f7t-base + +ADD deploy/docker/reservations/requirements.txt deps/requirements.txt +RUN pip3 install -r deps/requirements.txt + +ADD src/reservations/reservations.py reservations.py +ADD src/common/cscs_api_common.py cscs_api_common.py + +ENTRYPOINT ["python3"] +CMD ["reservations.py"] \ No newline at end of file diff --git a/deploy/docker/reservations/requirements.txt b/deploy/docker/reservations/requirements.txt new file mode 100644 index 00000000..499053fc --- /dev/null +++ b/deploy/docker/reservations/requirements.txt @@ -0,0 +1,2 @@ +-r ../base/requirements.txt +paramiko==2.6.0 \ No newline at end of file diff --git a/deploy/docker/status/Dockerfile b/deploy/docker/status/Dockerfile index 019470c4..fe1975d9 100644 --- a/deploy/docker/status/Dockerfile +++ b/deploy/docker/status/Dockerfile @@ -1,23 +1,16 @@ -FROM centos:7 +## +## Copyright (c) 2019-2021, ETH Zurich. All rights reserved. +## +## Please, refer to the LICENSE file in the root directory. 
+## SPDX-License-Identifier: BSD-3-Clause +## +FROM f7t-base -# install epel repo for python-pip package -RUN yum install -y epel-release -# update yum -RUN yum -y update - -# install python-pip from repo -# this install also python 3.6 -RUN yum install -y python3-pip -# RUN yum remove -y cyrus-sasl-gssapi python-gssapi - -# upgrade -RUN pip3 install --upgrade pip - -RUN pip3 install Flask paramiko cryptography pyjwt requests +ADD deploy/docker/status/requirements.txt deps/requirements.txt +RUN pip3 install -r deps/requirements.txt ADD src/status/status.py status.py ADD src/common/cscs_api_common.py cscs_api_common.py ENTRYPOINT ["python3"] -CMD ["status.py"] - +CMD ["status.py"] \ No newline at end of file diff --git a/deploy/docker/status/requirements.txt b/deploy/docker/status/requirements.txt new file mode 100644 index 00000000..499053fc --- /dev/null +++ b/deploy/docker/status/requirements.txt @@ -0,0 +1,2 @@ +-r ../base/requirements.txt +paramiko==2.6.0 \ No newline at end of file diff --git a/deploy/docker/storage/Dockerfile b/deploy/docker/storage/Dockerfile index bb0cfca6..cacba021 100644 --- a/deploy/docker/storage/Dockerfile +++ b/deploy/docker/storage/Dockerfile @@ -1,18 +1,13 @@ -from centos:7 - -# install epel repo for python-pip package -RUN yum install -y epel-release -# update yum -RUN yum -y update - -# install python-pip from repo -RUN yum install -y python3-pip - -# upgrade -RUN pip3 install --upgrade pip - -RUN pip3 install Flask paramiko lxml cryptography pyjwt keystoneauth1 python-keystoneclient +## +## Copyright (c) 2019-2021, ETH Zurich. All rights reserved. +## +## Please, refer to the LICENSE file in the root directory. 
+## SPDX-License-Identifier: BSD-3-Clause +## +from f7t-base +ADD deploy/docker/storage/requirements.txt deps/requirements.txt +RUN pip3 install -r deps/requirements.txt ADD src/storage/storage.py storage.py ADD src/storage/keystone.py keystone.py @@ -25,6 +20,5 @@ ADD src/common/async_task.py async_task.py ADD src/common/job_time.py job_time.py ADD src/common/cscs_api_common.py cscs_api_common.py - ENTRYPOINT ["python3"] -CMD ["storage.py"] +CMD ["storage.py"] \ No newline at end of file diff --git a/deploy/docker/storage/requirements.txt b/deploy/docker/storage/requirements.txt new file mode 100644 index 00000000..49669081 --- /dev/null +++ b/deploy/docker/storage/requirements.txt @@ -0,0 +1,5 @@ +-r ../base/requirements.txt +keystoneauth1==4.3.0 +lxml==4.6.2 +paramiko==2.6.0 +python-keystoneclient==4.2.0 \ No newline at end of file diff --git a/deploy/docker/tasks/Dockerfile b/deploy/docker/tasks/Dockerfile index 0e7a49d3..e8ca32c6 100644 --- a/deploy/docker/tasks/Dockerfile +++ b/deploy/docker/tasks/Dockerfile @@ -1,17 +1,13 @@ -from centos:7 +## +## Copyright (c) 2019-2021, ETH Zurich. All rights reserved. +## +## Please, refer to the LICENSE file in the root directory. 
+## SPDX-License-Identifier: BSD-3-Clause +## +from f7t-base -# install epel repo for python-pip package -RUN yum install -y epel-release -# update yum -RUN yum -y update - -# install python-pip from repo -RUN yum install -y python3-pip - -# upgrade -RUN pip3 install --upgrade pip - -RUN pip3 install Flask cryptography pyjwt redis requests +ADD deploy/docker/tasks/requirements.txt deps/requirements.txt +RUN pip3 install -r deps/requirements.txt ADD src/tasks/tasks.py tasks.py ADD src/common/async_task.py async_task.py @@ -19,4 +15,4 @@ ADD src/common/cscs_api_common.py cscs_api_common.py ADD src/common/tasks_persistence.py tasks_persistence.py ENTRYPOINT ["python3"] -CMD ["tasks.py"] +CMD ["tasks.py"] \ No newline at end of file diff --git a/deploy/docker/tasks/requirements.txt b/deploy/docker/tasks/requirements.txt new file mode 100644 index 00000000..beb26f51 --- /dev/null +++ b/deploy/docker/tasks/requirements.txt @@ -0,0 +1,2 @@ +-r ../base/requirements.txt +redis==3.5.3 \ No newline at end of file diff --git a/deploy/docker/tester/Dockerfile b/deploy/docker/tester/Dockerfile new file mode 100644 index 00000000..d6bb2ce7 --- /dev/null +++ b/deploy/docker/tester/Dockerfile @@ -0,0 +1,25 @@ +## +## Copyright (c) 2019-2021, ETH Zurich. All rights reserved. +## +## Please, refer to the LICENSE file in the root directory. +## SPDX-License-Identifier: BSD-3-Clause +## +## +## Usage: +## # (from repository root level) +## docker build -t f7t-tester -f deploy/docker/tester/Dockerfile . +## docker run -ti --rm -v $PWD:/firecrest f7t-tester +## # (now inside the container run pytest as you want) +## See scripts in ci folder. 
+from python:3.8.5-slim + +ENV PYTHONDONTWRITEBYTECODE 1 +ENV PYTHONUNBUFFERED 1 + +ADD deploy/docker/base/requirements.txt base/requirements.txt +ADD deploy/docker/tester/requirements.txt deps/requirements.txt +RUN pip3 install -r deps/requirements.txt + +WORKDIR /firecrest/src/tests/automated_tests + +CMD [ "python3" ] diff --git a/deploy/docker/tester/requirements.txt b/deploy/docker/tester/requirements.txt new file mode 100644 index 00000000..c032e193 --- /dev/null +++ b/deploy/docker/tester/requirements.txt @@ -0,0 +1,4 @@ +-r ../base/requirements.txt +# dev-specific below: +pytest==6.2.1 +pytest-dotenv==0.5.2 \ No newline at end of file diff --git a/deploy/docker/utilities/Dockerfile b/deploy/docker/utilities/Dockerfile index 73cb9858..a6419e7a 100644 --- a/deploy/docker/utilities/Dockerfile +++ b/deploy/docker/utilities/Dockerfile @@ -1,21 +1,16 @@ -from centos:7 +## +## Copyright (c) 2019-2021, ETH Zurich. All rights reserved. +## +## Please, refer to the LICENSE file in the root directory. 
+## SPDX-License-Identifier: BSD-3-Clause +## +from f7t-base -# install epel repo for python-pip package -RUN yum install -y epel-release -# update yum -RUN yum -y update - -# install python-pip from repo -RUN yum install -y python3-pip - -# upgrade -RUN pip3 install --upgrade pip - -RUN pip3 install Flask paramiko cryptography pyjwt requests +ADD deploy/docker/utilities/requirements.txt deps/requirements.txt +RUN pip3 install -r deps/requirements.txt ADD src/utilities/utilities.py utilities.py ADD src/common/cscs_api_common.py cscs_api_common.py ENTRYPOINT ["python3"] -CMD ["utilities.py"] - +CMD ["utilities.py"] \ No newline at end of file diff --git a/deploy/docker/utilities/requirements.txt b/deploy/docker/utilities/requirements.txt new file mode 100644 index 00000000..499053fc --- /dev/null +++ b/deploy/docker/utilities/requirements.txt @@ -0,0 +1,2 @@ +-r ../base/requirements.txt +paramiko==2.6.0 \ No newline at end of file diff --git a/deploy/test-build/cluster/Dockerfile b/deploy/test-build/cluster/Dockerfile index 27136b4c..df30482a 100644 --- a/deploy/test-build/cluster/Dockerfile +++ b/deploy/test-build/cluster/Dockerfile @@ -1,3 +1,10 @@ +## +## Copyright (c) 2019-2021, ETH Zurich. All rights reserved. +## +## Please, refer to the LICENSE file in the root directory. 
+## SPDX-License-Identifier: BSD-3-Clause +## + # partially based on https://github.com/giovtorres/slurm-docker-cluster FROM centos:7 @@ -22,7 +29,9 @@ RUN set -ex \ perl \ python-pip \ psmisc \ - wget \ + rsyslog \ + sudo \ + wget \ && yum clean all \ && rm -rf /var/cache/yum > /dev/null @@ -78,12 +87,26 @@ RUN /usr/libexec/mariadb-prepare-db-dir 2>/dev/null ADD cluster/ssh/ssh_command_wrapper.sh /ssh_command_wrapper.sh RUN chmod 555 /ssh_command_wrapper.sh +## add rsyslog configuration files +ADD cluster/rsyslog/rsyslog.conf /etc/ +ADD cluster/rsyslog/listen.conf /etc/rsyslog.d + +## advance reservation tool files +ADD cluster/rsvsvc/usr/local/bin/* /usr/local/bin/ +ADD cluster/rsvsvc/usr/local/lib/* /usr/local/lib/ +ADD cluster/rsvsvc/usr/local/sbin/* /usr/local/sbin/ +ADD cluster/rsvsvc/rsvmgmt_users /etc/sudoers.d/ + # add sbatch scripts for testing purposes RUN mkdir /srv/f7t ADD cluster/test_sbatch.sh /srv/f7t/. ADD cluster/test_sbatch.sh /srv/f7t/test_sbatch_forbidden.sh +ADD cluster/test_sbatch.sh /srv/f7t/test_sbatch_rm.sh +ADD cluster/test_sbatch.sh /srv/f7t/test_sbatch_mv.sh RUN chmod 777 /srv/f7t RUN chmod 555 /srv/f7t/test_sbatch.sh +RUN chmod 777 /srv/f7t/test_sbatch_rm.sh +RUN chmod 777 /srv/f7t/test_sbatch_mv.sh RUN chmod 700 /srv/f7t/test_sbatch_forbidden.sh ENTRYPOINT ["/usr/bin/supervisord"] diff --git a/deploy/test-build/cluster/rsvsvc/rsvmgmt_users b/deploy/test-build/cluster/rsvsvc/rsvmgmt_users new file mode 100644 index 00000000..d6d758fd --- /dev/null +++ b/deploy/test-build/cluster/rsvsvc/rsvmgmt_users @@ -0,0 +1,3 @@ +User_Alias RSVMGMT = test1, testuser, service-account-firecrest-sample +Cmnd_Alias RSV_CMD = /usr/local/sbin/rsvmgmt_priv +RSVMGMT ALL=(ALL) NOPASSWD:RSV_CMD diff --git a/deploy/test-build/cluster/rsvsvc/usr/local/bin/rsvmgmt b/deploy/test-build/cluster/rsvsvc/usr/local/bin/rsvmgmt new file mode 100755 index 00000000..6890f809 --- /dev/null +++ b/deploy/test-build/cluster/rsvsvc/usr/local/bin/rsvmgmt @@ -0,0 +1,3 @@ 
+#!/bin/sh + +sudo /usr/local/sbin/rsvmgmt_priv $@ diff --git a/deploy/test-build/cluster/rsvsvc/usr/local/lib/rsvmgmt.conf b/deploy/test-build/cluster/rsvsvc/usr/local/lib/rsvmgmt.conf new file mode 100644 index 00000000..35dacabc --- /dev/null +++ b/deploy/test-build/cluster/rsvsvc/usr/local/lib/rsvmgmt.conf @@ -0,0 +1,34 @@ +#!/bin/bash + +#Name +ProgName=rsvmgmt + +#Hardcode paths to avoid priv enhancements +scontrol=/usr/bin/scontrol +sacctmgr=/usr/bin/sacctmgr +hostlist=/apps/common/system/bin/hostlist +datecmd=/usr/bin/date + +VERSION_MAJOR=1 # Major version number +VERSION_MINOR=1 # Minor version number +VERSION_FIX=0 # Fix number + + +# Reservation time limit, min 1 hour, max 72 hours, can change +MIN_TIME=1 +MAX_TIME=72 + +# Maximum Number of Reservations that can exist at a time +RESV_MAX_NUM=5 + +# Constraints for reservation sizing +MIN_NODE=1 +NODE_LIMIT=1000 + + +#how far in advance do we allow reservations +FUTURE=$(${datecmd} -d 'next Year' +"%s") + + +#don't modify, current time +NOW=$(${datecmd} +"%s") diff --git a/deploy/test-build/cluster/rsvsvc/usr/local/lib/rsvmgmt.funcs b/deploy/test-build/cluster/rsvsvc/usr/local/lib/rsvmgmt.funcs new file mode 100755 index 00000000..82aa6703 --- /dev/null +++ b/deploy/test-build/cluster/rsvsvc/usr/local/lib/rsvmgmt.funcs @@ -0,0 +1,231 @@ +#!/bin/bash +. 
/usr/local/lib/rsvmgmt.conf + +display_help() { + echo "rsvmgmt: the reservation management tool" + echo "Subcommands:" + echo + echo "* lists current project reservations" + echo "-l" + echo "" + echo "* adds reservation" + echo "-a project #nodes NodeType StartTime EndTime [optionalname]" + echo "example: 'rsvmgmt -a csstaff 10 knl 2020-12-24T08:00:00 2020-12-25T12:30:00 csstaff-testing" + echo "creates csstaff-testing reservation for 10 knl nodes 10 hours" + echo "" + echo "* update/extend reservation" + echo "-u reservation #nodes NodeType StartTime EndTime" + echo "example: 'rsvmgmt -u csstaff_36 15 knl 2020-12-24T08:00:00 2020-12-25T12:50:00" + echo " modifies csstaff_36 reservation to be 15 knl nodes and go 20 minutes later" + echo " Note: can not change the features of an existing reservation" + echo "" + echo "* delete reservation" + echo "-d [reservation name]" + echo "" + echo "* displays version number" + echo "-V" + echo + exit 1 +} +# list_accounts returns just the accounts the user belongs to +# Example: "cstaff,cray" +list_accounts() { + for i in `${sacctmgr} show user ${SUDO_USER} where cluster=$(cat /etc/hostname) WithAssoc format=account --noheader` + do + echo -n "$i," + done +} + +#Print the reservation info +resv_print() { + ${scontrol} show reservation -o|grep "Accounts=$1 Licenses" +} + +# Loops through the accounts and prints current reservations owned by accounts +list_allowed() { + echo "${ProgName}: Current Reservations" + echo --------------------------- + ACCOUNTS=$(list_accounts) + for i in ${ACCOUNTS//,/ } + do + resv_print $i + done + echo --------------------------- +} + +# Checks if account is member of reservation +test_resv() +{ + # We can only process one account at a time, if there's a comma: error + if [[ $1 =~ "," ]] + then + echo "${ProgName}: Error: only one reservation allowed per call" + exit -1 + fi + + # If scontrol show doesn't show, error + if [[ -z $(${scontrol} show res=${1}) ]] + then + echo "${ProgName}: Error: 
$1 doesn't seem to be a valid reservation" + exit -1 + fi + RESV=$(${scontrol} show res=${1}|awk '/Accounts/{print $2}') + MEMBER=0 + ACCOUNTS=$(list_accounts) + for i in ${ACCOUNTS//,/ } + do + if [[ "${RESV}" == "Accounts=${i}" ]] + then + MEMBER=1 + fi + done + if [[ $MEMBER -ne 1 ]] + then + echo "${ProgName}: Error: You are not an owner of the $1 reservation" + exit -1 + fi +} + + +# Creates reservation +# Input Parameter: project #nodes nodespec starttime endtime [optionalresvname] +add_resv() { + # Check to make sure account is valid + feature_check $3 + project_check $1 + node_check $2 + start_check $4 + end_check $5 + CLEANNAME=${6//[^a-zA-Z0-9\-]/} + ${scontrol} create Res=${CLEANNAME} Accounts=${1} NodeCnt=$2 start=${4} end=${5} Feature=${3} +} + +# Update Reservation +# Input: resvname #nodes nodespec starttime endtime +update_resv() { + test_resv $1 + feature_check $3 + node_check $2 + start_check $4 + end_check $5 + ${scontrol} update res=${1} NodeCnt=$2 start=${4} end=${5} +} + +# Verify the node count requested +node_check() { + +if [[ ! -z $FEATURE ]] +then +MAX_NODE=$(${scontrol} show nodes|grep AvailableFeatures|grep -c $FEATURE) +fi + +if [[ $1 -lt ${MIN_NODE} ]] +then +echo "${ProgName}: Error: NodeCnt less than ${MIN_NODE}" +exit -1 +fi +if [[ $1 -gt ${MAX_NODE} ]] +then +echo "${ProgName}: Error: NodeCnt greater than ${MAX_NODE} available ${FEATURE} nodes" +exit -1 +fi +if [[ $1 -gt ${NODE_LIMIT} ]] +then +echo "${ProgName}: Error: NodeCnt greater than ${NODE_LIMIT} reservation limit" +exit -1 +fi + +} + +#The Features we allow are here +feature_check() { + +if !
[[ $1 =~ ^(f7t)$ ]] +then +echo "${ProgName}: Error: only f7t feature type are supported" +exit -1 +fi + +if [[ -z $FEATURE ]] +then +export FEATURE=$1 +fi + +} + + +#Is calling user a member of account they want to create a reservation for +project_check() { + MEMBER=0 + ACCOUNTS=$(list_accounts) + for i in ${ACCOUNTS//,/ } + do + if [[ "${i}" == "$1" ]] + then + MEMBER=1 + fi + done + if [[ $MEMBER -ne 1 ]] + then + echo "${ProgName}: Error: You are not a member of the $1 project" + exit -1 + fi + if [[ $(${scontrol} show res -o|grep "Accounts=$1 Licenses" -c) -ge ${RESV_MAX_NUM} ]] + then + echo "${ProgName}: Error: Only ${RESV_MAX_NUM} reservations are allowed at one time." + exit -1 + fi + +} + +#validate start time +start_check(){ +START=$(${datecmd} -d $1 +"%s" 2> /dev/null) +if [[ $? -ne 0 ]] +then + echo "${ProgName}: Error: invalid start time reservation" + exit 1 +fi + +if [[ $START -lt $NOW ]] +then + echo "${ProgName}: Error: start time for reservation is in the past" + exit 1 +fi +if [[ $START -gt $FUTURE ]] +then + echo "${ProgName}: Error: reservations can only be made for up to 1 year in the future" + exit 1 +fi +} + +#validate end time +end_check(){ +END=$(${datecmd} -d $1 +"%s" 2> /dev/null) +if [[ $? 
-ne 0 ]] +then + echo "${ProgName}: Error: invalid end time reservation" + exit 1 +fi + +if [[ $END -lt $NOW ]] +then + echo "${ProgName}: Error: end time for reservation is in the past" + exit 1 +fi +if [[ $END -gt $FUTURE ]] +then + echo "${ProgName}: Error: reservations can only be made for up to 1 year in the future" + exit 1 +fi +} + +#delete reservation +del_account() { + # Check to make sure resv is valid + test_resv $1 + $scontrol delete Res=${1} + echo "${ProgName}: Reservation $1 removed" +} + + diff --git a/deploy/test-build/cluster/rsvsvc/usr/local/sbin/rsvmgmt_priv b/deploy/test-build/cluster/rsvsvc/usr/local/sbin/rsvmgmt_priv new file mode 100755 index 00000000..44cfcfe5 --- /dev/null +++ b/deploy/test-build/cluster/rsvsvc/usr/local/sbin/rsvmgmt_priv @@ -0,0 +1,58 @@ +#!/bin/bash + +#Load the config +. /usr/local/lib/rsvmgmt.conf +#Load the functions +. /usr/local/lib/rsvmgmt.funcs + +#handle the inputs +case "$1" in + -h) + display_help + exit 0 + ;; + -a) + # If reservation name is included, pass it, otherwise automatic + if [[ $# -eq 7 ]] + then + add_resv $2 $3 $4 $5 $6 $7 + exit 0 + fi + if [[ $# -eq 6 ]] + then + add_resv $2 $3 $4 $5 $6 "" + exit 0 + fi + echo "${ProgName}: Error: Wrong Number of Arguments" + exit 1 + ;; + -u) + + if [[ $# -eq 6 ]] + then + update_resv $2 $3 $4 $5 $6 + exit 0 + fi + echo "${ProgName}: Error: Wrong Number of Arguments" + exit 1 + ;; + -d) + del_account $2 + exit 0 + ;; + -l) + list_allowed + exit 0 + ;; + -V) + echo "${ProgName}, version ${VERSION_MAJOR}.${VERSION_MINOR}.${VERSION_FIX}" + exit 0; + ;; + *) + echo "${ProgName}: invalid option -- \"${1}\"" + echo + display_help + exit 0 + ;; +esac + diff --git a/deploy/test-build/cluster/rsvsvc/usr/local/tests/rsvmgmt_test b/deploy/test-build/cluster/rsvsvc/usr/local/tests/rsvmgmt_test new file mode 100755 index 00000000..b28c2d28 --- /dev/null +++ b/deploy/test-build/cluster/rsvsvc/usr/local/tests/rsvmgmt_test @@ -0,0 +1,144 @@ +#!/bin/bash + +#Load the configs 
and functions + +oneTimeSetUp() { + +# need to manually set feature for tests +if [[ $(cat /etc/xthostname) == "tave" ]] +then +FEATURE=knl + if [[ $(cat /etc/xthostname) == "daint" ]] + then + FEATURE=[mc,gpu] + if [[ $(cat /etc/xthostname) == "dom" ]] + then + FEATURE=[mc,gpu] + fi + fi +fi +#When running tests outside SUDO, this variable isn't set + +if [[ -z $SUDO_USER ]] +then +export SUDO_USER=`whoami` +fi + +. /usr/local/lib/rsvmgmt.conf +. /usr/local/lib/rsvmgmt.funcs +} + + + +testTooSoonStart() { +toosoon=`start_check "$(${datecmd} --date=yesterday +%FT%T)"` +assertContains 'Start time too early should produce an Error' "$toosoon" "Error" +} +testNowStart() { +now=`start_check "$(${datecmd} +%FT%T)"` +assertNotContains 'Start time now should not error' "$now" "Error" +} + +testTomorrowStart() { +tomorrow=`start_check "$(${datecmd} --date=tomorrow +%FT%T)"` +assertNotContains 'Start time tomorrow should not error' "$tomorrow" "Error" +} +testNextYearStart() { +oneyear=`start_check "$(${datecmd} --date='next year-10 seconds' +%FT%T)"` +assertNotContains 'Start time up to one year in future should not error' "$oneyear" "Error" +} +testTooLateStart() { +toolate=`start_check "$(${datecmd} --date='next year+1 day' +%FT%T)"` +assertContains 'Start time over 1 year in future should error' "$toolate" "Error" +} + +testTooSoonEnd() { +toosoon=`end_check "$(${datecmd} --date=yesterday +%FT%T)"` +assertContains 'End time too early should produce an Error' "$toosoon" "Error" +} +testNowEnd() { +now=`end_check "$(${datecmd} +%FT%T)"` +assertNotContains 'End time now should not error' "$now" "Error" +} + +testTomorrowEnd() { +tomorrow=`end_check "$(${datecmd} --date=tomorrow +%FT%T)"` +assertNotContains 'End time tomorrow should not error' "$tomorrow" "Error" +} +testNextYearEnd() { +oneyear=`end_check "$(${datecmd} --date='next year-10 seconds' +%FT%T)"` +assertNotContains 'End time up to one year in future should not error' "$oneyear" "Error" +} +testTooLateEnd() { 
+toolate=`end_check "$(${datecmd} --date='next year+1 day' +%FT%T)"` +assertContains 'End time over 1 year in future should error' "$toolate" "Error" +} + + +testAccounts() { + +accounts=`list_accounts` +assertContains "Account list should not be null" "$accounts" "," +} + + +testHelp() { +helpout=`display_help` +assertNotNull "Help should display something" helpout +} + +testReservationMultiple() { +multitest=`test_resv "csstaff,test"` +assertContains 'More than one reservation at a time should error' "$multitest" "Error" +} +testReservationInvalid() { +invalidresv=`test_resv "uniquereservationsomeonebetternotusethisplease"` +assertContains 'Invalid reservation name should error' "$multitest" "Error" +} +testReservationPermission() { +notmember=`test_resv "maintenance"` +assertContains 'Not a member of reservation should error' "$multitest" "Error" +} + +testFeaturesMC() { +mc=`feature_check "mc"` +assertNotContains 'Feature mc should be valid' "$mc" "Error" +} + +testFeaturesGPU() { +gpu=`feature_check "gpu"` +assertNotContains 'Feature gpu should be valid' "$gpu" "Error" +} +testFeaturesKNL() { +knl=`feature_check "knl"` +assertNotContains 'Feature knl should be valid' "$knl" "Error" +} +testFeaturesInvalid() { +invalid=`feature_check "invalid"` +assertContains 'Feature invalid should be invalid' "$invalid" "Error" +} + +testNodeCountsTooFew() { +toofew=`node_check -1` +assertContains 'Too few nodes should error' "$toofew" "Error" +} + +testNodeCountsWayTooMany() { +waytoomany=`node_check 10000` +assertContains 'Way too many nodes should error' "$waytoomany" "Error" +} + +testNodeCountsJustRight() { +justright=`node_check ${MIN_NODE}` +assertNotContains "Node Count of ${MIN_NODE} should not error" "$justright" "Error" +} + +testProjCheckGood() { +good=`project_check csstaff` +assertNotContains "test user should be member of csstaff" "$good" "Error" +} +testProjCheckBad() { +bad=`project_check madeupproject` +assertContains "made up project name should error" 
"$bad" "Error" +} +. shunit2/shunit2 diff --git a/deploy/test-build/cluster/rsyslog/listen.conf b/deploy/test-build/cluster/rsyslog/listen.conf new file mode 100644 index 00000000..8f7f3ae8 --- /dev/null +++ b/deploy/test-build/cluster/rsyslog/listen.conf @@ -0,0 +1,7 @@ +# +# Copyright (c) 2019-2021, ETH Zurich. All rights reserved. +# +# Please, refer to the LICENSE file in the root directory. +# SPDX-License-Identifier: BSD-3-Clause +# +$SystemLogSocketName /dev/log \ No newline at end of file diff --git a/deploy/test-build/cluster/rsyslog/rsyslog.conf b/deploy/test-build/cluster/rsyslog/rsyslog.conf new file mode 100644 index 00000000..a3f94251 --- /dev/null +++ b/deploy/test-build/cluster/rsyslog/rsyslog.conf @@ -0,0 +1,98 @@ +# +# Copyright (c) 2019-2021, ETH Zurich. All rights reserved. +# +# Please, refer to the LICENSE file in the root directory. +# SPDX-License-Identifier: BSD-3-Clause +# + +# rsyslog configuration file + +# For more information see /usr/share/doc/rsyslog-*/rsyslog_conf.html +# If you experience problems, see http://www.rsyslog.com/doc/troubleshoot.html + +#### MODULES #### + +# The imjournal module bellow is now used as a message source instead of imuxsock. +$ModLoad imuxsock # provides support for local system logging (e.g. via logger command) +#$ModLoad imklog # reads kernel messages (the same are read from journald) +#$ModLoad immark # provides --MARK-- message capability + +# Provides UDP syslog reception +#$ModLoad imudp +#$UDPServerRun 514 + +# Provides TCP syslog reception +#$ModLoad imtcp +#$InputTCPServerRun 514 + + +#### GLOBAL DIRECTIVES #### + +# Where to place auxiliary files +$WorkDirectory /var/lib/rsyslog + +# Use default timestamp format +$ActionFileDefaultTemplate RSYSLOG_TraditionalFileFormat + +# File syncing capability is disabled by default. 
This feature is usually not required, +# not useful and an extreme performance hit +#$ActionFileEnableSync on + +# Include all config files in /etc/rsyslog.d/ +$IncludeConfig /etc/rsyslog.d/*.conf + +# Turn off message reception via local log socket; +# local messages are retrieved through imjournal now. +$OmitLocalLogging off + +# File to store the position in the journal +#$IMJournalStateFile imjournal.state + + +#### RULES #### + +# Log all kernel messages to the console. +# Logging much else clutters up the screen. +#kern.* /dev/console + +# Log anything (except mail) of level info or higher. +# Don't log private authentication messages! +*.info;mail.none;authpriv.none;cron.none /var/log/messages + +# The authpriv file has restricted access. +authpriv.* /var/log/secure + +# Log all the mail messages in one place. +mail.* -/var/log/maillog + + +# Log cron stuff +cron.* /var/log/cron + +# Everybody gets emergency messages +*.emerg :omusrmsg:* + +# Save news errors of level crit and higher in a special file. +uucp,news.crit /var/log/spooler + +# Save boot messages also to boot.log +local7.* /var/log/boot.log + + +# ### begin forwarding rule ### +# The statement between the begin ... end define a SINGLE forwarding +# rule. They belong together, do NOT split them. If you create multiple +# forwarding rules, duplicate the whole block! +# Remote Logging (we use TCP for reliable delivery) +# +# An on-disk queue is created for this action. If the remote host is +# down, messages are spooled to disk and sent when it is up again. +#$ActionQueueFileName fwdRule1 # unique name prefix for spool files +#$ActionQueueMaxDiskSpace 1g # 1gb space limit (use as much as possible) +#$ActionQueueSaveOnShutdown on # save messages to disk on shutdown +#$ActionQueueType LinkedList # run asynchronously +#$ActionResumeRetryCount -1 # infinite retries if host is down +# remote host is: name/ip:port, e.g. 
192.168.0.1:514, port optional +#*.* @@remote-host:514 +# ### end of the forwarding rule ### + diff --git a/deploy/test-build/cluster/slurm/slurm.conf b/deploy/test-build/cluster/slurm/slurm.conf index 22e046b0..c756b7fe 100644 --- a/deploy/test-build/cluster/slurm/slurm.conf +++ b/deploy/test-build/cluster/slurm/slurm.conf @@ -1,3 +1,10 @@ +# +# Copyright (c) 2019-2021, ETH Zurich. All rights reserved. +# +# Please, refer to the LICENSE file in the root directory. +# SPDX-License-Identifier: BSD-3-Clause +# + # slurm.conf # # See the slurm.conf man page for more information. @@ -81,7 +88,7 @@ AccountingStorageHost=127.0.0.1 #AccountingStorageLoc=/var/log/slurm/accounting # COMPUTE NODES -NodeName=cluster RealMemory=1000 State=UNKNOWN CPUs=2 +NodeName=cluster RealMemory=1000 State=UNKNOWN CPUs=2 Feature=f7t # # PARTITIONS PartitionName=part01 Default=yes Nodes=cluster Shared=YES MaxNodes=1 MaxTime=5-00:00:00 DefaultTime=5-00:00:00 State=UP diff --git a/deploy/test-build/cluster/slurm/slurmdbd.conf b/deploy/test-build/cluster/slurm/slurmdbd.conf index 204e80cf..f35d62c1 100644 --- a/deploy/test-build/cluster/slurm/slurmdbd.conf +++ b/deploy/test-build/cluster/slurm/slurmdbd.conf @@ -1,3 +1,10 @@ +# +# Copyright (c) 2019-2021, ETH Zurich. All rights reserved. +# +# Please, refer to the LICENSE file in the root directory. +# SPDX-License-Identifier: BSD-3-Clause +# + # See the slurmdbd.conf man page for more information. # Archive info #ArchiveJobs=yes diff --git a/deploy/test-build/cluster/slurm/start_db.sh b/deploy/test-build/cluster/slurm/start_db.sh index cfc5da7f..3cbd0ff5 100644 --- a/deploy/test-build/cluster/slurm/start_db.sh +++ b/deploy/test-build/cluster/slurm/start_db.sh @@ -1,6 +1,6 @@ #!/bin/bash ## -## Copyright (c) 2019-2020, ETH Zurich. All rights reserved. +## Copyright (c) 2019-2021, ETH Zurich. All rights reserved. ## ## Please, refer to the LICENSE file in the root directory. 
## SPDX-License-Identifier: BSD-3-Clause diff --git a/deploy/test-build/cluster/slurm/start_slurmctld.sh b/deploy/test-build/cluster/slurm/start_slurmctld.sh index 89323ea5..e4104c7f 100644 --- a/deploy/test-build/cluster/slurm/start_slurmctld.sh +++ b/deploy/test-build/cluster/slurm/start_slurmctld.sh @@ -1,6 +1,6 @@ #!/bin/bash ## -## Copyright (c) 2019-2020, ETH Zurich. All rights reserved. +## Copyright (c) 2019-2021, ETH Zurich. All rights reserved. ## ## Please, refer to the LICENSE file in the root directory. ## SPDX-License-Identifier: BSD-3-Clause @@ -19,6 +19,11 @@ done sleep 1 echo "Slurmdbd ready, create cluster" sacctmgr --immediate create cluster cluster +echo "Slurmdbd, create account 'test' and add users" +sacctmgr --immediate create account name=test +sacctmgr --immediate create user name=test1 account=test +sacctmgr --immediate create user name=testuser account=test +sacctmgr --immediate create user name=service-account-firecrest-sample account=test echo "Starting slurmctld" /usr/sbin/slurmctld -D diff --git a/deploy/test-build/cluster/slurm/start_slurmdbd.sh b/deploy/test-build/cluster/slurm/start_slurmdbd.sh index 23a252cc..bbfac5c8 100644 --- a/deploy/test-build/cluster/slurm/start_slurmdbd.sh +++ b/deploy/test-build/cluster/slurm/start_slurmdbd.sh @@ -1,6 +1,6 @@ #!/bin/bash ## -## Copyright (c) 2019-2020, ETH Zurich. All rights reserved. +## Copyright (c) 2019-2021, ETH Zurich. All rights reserved. ## ## Please, refer to the LICENSE file in the root directory. ## SPDX-License-Identifier: BSD-3-Clause diff --git a/deploy/test-build/cluster/ssh/ssh_command_wrapper.sh b/deploy/test-build/cluster/ssh/ssh_command_wrapper.sh index e9a03450..a871d753 100644 --- a/deploy/test-build/cluster/ssh/ssh_command_wrapper.sh +++ b/deploy/test-build/cluster/ssh/ssh_command_wrapper.sh @@ -1,9 +1,10 @@ #!/bin/bash -# Copyright (c) 2019-2020, ETH Zurich. All rights reserved. -# -# Please, refer to the LICENSE file in the root directory. 
-# SPDX-License-Identifier: BSD-3-Clause -# +## +## Copyright (c) 2019-2021, ETH Zurich. All rights reserved. +## +## Please, refer to the LICENSE file in the root directory. +## SPDX-License-Identifier: BSD-3-Clause +## # Required OpenSSH >= 7.2 for ssh-keygen to read from stdin # Optional OpenSSH >= 7.6 for direct access to certificates (via ExposeAuthInfo) @@ -18,7 +19,8 @@ SOC="${SSH_ORIGINAL_COMMAND}" set -u # -e (abort on command error), -u (undefined var are errors), -o pipefail (pipe errors) -msg="$(date +%Y-%m-%dT%H:%M:%S) - "${UID}" -" +# msg="$(date +%Y-%m-%dT%H:%M:%S) - "${UID}" -" +msg="FirecREST command execution user $USER ($UID) -" cert_type=${SOC%%-cert-v01@openssh.com *} # remove all after first space @@ -28,14 +30,14 @@ case "$cert_type" in tmp2=$(grep "^ *${CA_signature}" <<< "$tmp1") sig="Signing CA:"${tmp2## *Signing CA:} # remove left spaces if [ "$sig" != "$CA_signature" ]; then - echo "${msg} error - Wrong CA: ${sig}" >> ${log_file} + logger -p user.error "${msg} error - Wrong CA: ${sig}" exit 118 fi c=$(grep "^ *force-command " <<< "$tmp1") SSH_EXECUTE=${c#*force-command *} # remove " force-command " and spaces ;; *) - echo "${msg} error - Unknown certificate type: $cert_type" >> ${log_file} + logger -p user.error "${msg} error - Unknown certificate type: $cert_type" exit 118 ;; esac @@ -52,10 +54,13 @@ case "$command" in tmp2=${tmp1#* } command2=${tmp2%% *} # remove options case "$command2" in - base64|chmod|chown|cp|file|head|ln|ls|mkdir|mv|rm|sbatch|scontrol|sha256sum|squeue|stat|tail|wget) + base64|chmod|chown|cp|curl|id|file|head|ln|ls|mkdir|mv|rm|sbatch|scontrol|sha256sum|squeue|stat|tail) + ;; + rsvmgmt) + # advance reservation command ;; *) - echo "${msg} error - Unhandled timeout command: ${command2}" >> ${log_file} + logger -p user.error "${msg} error - Unhandled timeout command: ${command2}" exit 118 ;; esac @@ -63,17 +68,17 @@ case "$command" in sacct|sbatch|scancel|scontrol|squeue) # valid Slurm commands ;; - wget) - # from 
object storage + curl) + # storage ;; *) - echo "${msg} error - Unhandled command: ${command}" >> ${log_file} + logger -p user.error "${msg} error - Unhandled command: ${command}" exit 118 ;; esac # all ok, log command -echo "${msg} ok - ${SSH_EXECUTE}" >> ${log_file} +logger -p user.info "${msg} ok - ${SSH_EXECUTE}" # execute command eval ${SSH_EXECUTE} diff --git a/deploy/test-build/cluster/ssh/sshd_config b/deploy/test-build/cluster/ssh/sshd_config index 60717ae6..ac8c134d 100644 --- a/deploy/test-build/cluster/ssh/sshd_config +++ b/deploy/test-build/cluster/ssh/sshd_config @@ -107,6 +107,6 @@ Subsystem sftp /usr/libexec/openssh/sftp-server #AllowUsers user1 MaxAuthTries 1 AllowTcpForwarding no - #ForceCommand /ssh_command_wrapper.sh + ForceCommand /ssh_command_wrapper.sh PermitTTY no PermitTunnel no diff --git a/deploy/test-build/cluster/supervisord.conf b/deploy/test-build/cluster/supervisord.conf index cb6406bb..90477c8d 100644 --- a/deploy/test-build/cluster/supervisord.conf +++ b/deploy/test-build/cluster/supervisord.conf @@ -1,8 +1,16 @@ +# +# Copyright (c) 2019-2021, ETH Zurich. All rights reserved. +# +# Please, refer to the LICENSE file in the root directory. +# SPDX-License-Identifier: BSD-3-Clause +# [supervisord] nodaemon=true user=root +[program:rsyslog] +command=/usr/sbin/rsyslogd -n [program:mariadb] command=/usr/bin/mysqld_safe diff --git a/deploy/test-build/cluster/test_sbatch.sh b/deploy/test-build/cluster/test_sbatch.sh index 4647df17..927e3b36 100644 --- a/deploy/test-build/cluster/test_sbatch.sh +++ b/deploy/test-build/cluster/test_sbatch.sh @@ -1,4 +1,11 @@ #!/bin/bash +## +## Copyright (c) 2019-2021, ETH Zurich. All rights reserved. +## +## Please, refer to the LICENSE file in the root directory. 
+## SPDX-License-Identifier: BSD-3-Clause +## + #SBATCH --job-name=testsbatch #SBATCH --ntasks=1 #SBATCH --tasks-per-node=1 diff --git a/deploy/test-build/docker-compose.yml b/deploy/test-build/docker-compose.yml index 4d015e94..17c8d175 100644 --- a/deploy/test-build/docker-compose.yml +++ b/deploy/test-build/docker-compose.yml @@ -1,17 +1,26 @@ +## +## Copyright (c) 2019-2021, ETH Zurich. All rights reserved. +## +## Please, refer to the LICENSE file in the root directory. +## SPDX-License-Identifier: BSD-3-Clause +## version: '3' services: certificator: - build: + build: context: ../../ dockerfile: deploy/docker/certificator/Dockerfile env_file: - ./environment/common.env - network_mode: "host" + networks: + - backend + - frontend volumes: - "./environment/keys/ca-key:/ca-key:ro" - "./environment/keys/user-key.pub:/user-key.pub:ro" - ./logs/firecrest:/var/log:delegated + - ./ssl:/ssl compute: build: @@ -19,10 +28,13 @@ services: dockerfile: deploy/docker/compute/Dockerfile env_file: - ./environment/common.env - network_mode: "host" + networks: + - backend + - frontend volumes: - "./environment/keys/user-key:/user-key:ro" - ./logs/firecrest:/var/log:delegated + - ./ssl:/ssl status: build: @@ -30,9 +42,12 @@ services: dockerfile: deploy/docker/status/Dockerfile env_file: - ./environment/common.env - network_mode: "host" + networks: + - backend + - frontend volumes: - ./logs/firecrest:/var/log:delegated + - ./ssl:/ssl storage: build: @@ -41,7 +56,9 @@ services: env_file: - ./environment/common.env - ./environment/storage.env - network_mode: "host" + networks: + - backend + - frontend depends_on: - "tasks" - "minio" @@ -50,9 +67,10 @@ services: # custom host file - "./environment/hosts:/etc/hosts:ro" - ./logs/firecrest:/var/log:delegated + - ./ssl:/ssl tasks: - build: + build: context: ../../ dockerfile: deploy/docker/tasks/Dockerfile env_file: @@ -65,65 +83,103 @@ services: - F7T_STORAGE_TASK_EXP_TIME=2678400 depends_on: - "taskpersistence" - network_mode: "host" - 
volumes: + networks: + - backend + - frontend + volumes: - ./logs/firecrest:/var/log:delegated + - ./ssl:/ssl utilities: - build: + build: context: ../../ dockerfile: deploy/docker/utilities/Dockerfile env_file: - ./environment/common.env - network_mode: "host" + networks: + - backend + - frontend volumes: - "./environment/keys/user-key:/user-key:ro" - ./logs/firecrest:/var/log:delegated + - ./ssl:/ssl + + reservations: + build: + context: ../../ + dockerfile: deploy/docker/reservations/Dockerfile + env_file: + - ./environment/common.env + networks: + - backend + - frontend + volumes: + - "./environment/keys/user-key:/user-key:ro" + - ./logs/firecrest:/var/log:delegated + - ./ssl:/ssl # auxiliary containers cluster: # runs on private network to avoid conflict with a local SSH server - build: + build: context: ./ dockerfile: ./cluster/Dockerfile hostname: cluster - ports: - - "2223:22" + networks: + - backend + - frontend minio: # runs on private network so "cluster" can reach it container_name: minio_test_build - image: minio/minio + image: minio/minio:RELEASE.2021-02-01T22-56-52Z command: minio server /data environment: MINIO_ACCESS_KEY: storage_access_key MINIO_SECRET_KEY: storage_secret_key + networks: + - backend + - frontend ports: - "9000:9000" - + taskpersistence: image: redis:5 command: redis-server /redis.conf - network_mode: "host" + networks: + - backend + - frontend volumes: - ./taskpersistence/redis.conf:/redis.conf:ro - ./logs/firecrest:/var/log:delegated opa: image: openpolicyagent/opa:0.22.0 - command: run --server --log-level=debug --log-format=json-pretty /opa-files/data.json /opa-files/policy.rego - network_mode: "host" - ports: - - "8181:8181" - volumes: + command: run --server --log-level=debug --log-format=json-pretty --tls-cert-file=/ssl/f7t_internal.crt --tls-private-key-file=/ssl/f7t_internal.key /opa-files/data.json /opa-files/policy.rego --addr http://0.0.0.0:8282 + networks: + - backend + - frontend + volumes: - ./opa:/opa-files + - 
./ssl:/ssl openapi: - # image: swaggerapi/swagger-ui:v3.22.0 build: context: ../../ dockerfile: ./deploy/docker/openapi/Dockerfile + networks: + - backend + - frontend ports: - "9090:8080" environment: - SWAGGER_JSON: /tmp/openapi.yaml \ No newline at end of file + SWAGGER_JSON: /tmp/openapi.yaml + +# For now all containers are attached to both networks. +# Next step is to split microservices to the +# correct networks to reflect production reality. +networks: + backend: + name: f7t-backend + frontend: + name: f7t-frontend \ No newline at end of file diff --git a/deploy/test-build/environment/common.env b/deploy/test-build/environment/common.env index d120efad..b86e037f 100644 --- a/deploy/test-build/environment/common.env +++ b/deploy/test-build/environment/common.env @@ -21,16 +21,14 @@ F7T_REALM_RSA_TYPE=RS256 F7T_FIRECREST_SERVICE='' # AUTHENTICATION ROLE for FirecREST Service Accounts F7T_AUTH_ROLE='' +# DEBUG FLAG +F7T_DEBUG_MODE=True #------- # microservices IPs, also defined on each 'env_make' inside containers -F7T_CERTIFICATOR_IP=127.0.0.1 -F7T_COMPUTE_IP=127.0.0.1 +F7T_COMPUTE_IP=compute #TaskPersistence (redis) -F7T_PERSISTENCE_IP=127.0.0.1 -F7T_TASKS_IP=127.0.0.1 -F7T_STATUS_IP=127.0.0.1 -F7T_STORAGE_IP=127.0.0.1 -F7T_UTILITIES_IP=127.0.0.1 +F7T_PERSISTENCE_IP=taskpersistence +F7T_STORAGE_IP=storage #----- ports: F7T_CERTIFICATOR_PORT=5010 F7T_COMPUTE_PORT=5000 @@ -38,18 +36,20 @@ F7T_TASKS_PORT=5003 F7T_STATUS_PORT=5001 F7T_STORAGE_PORT=5002 F7T_UTILITIES_PORT=5004 +F7T_RESERVATIONS_PORT=5005 #------- # microservices urls: used by Kong and between microservices. 
Must replace $SERVICE_PORT with the port number -F7T_CERTIFICATOR_URL=http://127.0.0.1:5010 -F7T_TASKS_URL=http://127.0.0.1:5003 -F7T_COMPUTE_URL=http://127.0.0.1:5000 -F7T_STORAGE_URL=http://127.0.0.1:5002 -F7T_UTILITIES_URL=http://127.0.0.1:5004 -F7T_STATUS_URL=http://127.0.0.1:5001 +F7T_CERTIFICATOR_URL=http://certificator:5010 +F7T_TASKS_URL=http://tasks:5003 +F7T_COMPUTE_URL=http://compute:5000 +F7T_STORAGE_URL=http://storage:5002 +F7T_UTILITIES_URL=http://utilities:5004 +F7T_STATUS_URL=http://status:5001 +F7T_RESERVATIONS_URL=http://reservations:5005 # kong_url: used by microservices when return URL to clients F7T_KONG_URL= #------- -# list of systems +# list of systems #public name for systems, where users expect to submit jobs and get files (list with ';') F7T_SYSTEMS_PUBLIC='system01;system02' # filesystems mounted in each system @@ -58,9 +58,9 @@ F7T_SYSTEMS_PUBLIC='system01;system02' # FILESYSTEMS = "/home,/scratch;/home" F7T_FILESYSTEMS="/home;/home" #internal machines that microservices connect to (in correlation with SYSTEMS_PUBLIC) -F7T_SYSTEMS_INTERNAL_COMPUTE='127.0.0.1:2223;127.0.0.1:2224' -F7T_SYSTEMS_INTERNAL_STORAGE='127.0.0.1:9000;127.0.0.1:2222' -F7T_SYSTEMS_INTERNAL_UTILITIES='127.0.0.1:2223;127.0.0.1:2222' +F7T_SYSTEMS_INTERNAL_COMPUTE='cluster' +F7T_SYSTEMS_INTERNAL_STORAGE='cluster' +F7T_SYSTEMS_INTERNAL_UTILITIES='cluster' #------- # COMPUTE option # Base filesystem where job submission files will be stored.
@@ -76,10 +76,12 @@ F7T_STORAGE_TEMPURL_EXP_TIME=604800 F7T_STORAGE_MAX_FILE_SIZE=512000 # Storage technology used for staging area (swift or s3v2 or s3v4, unset to disable) F7T_OBJECT_STORAGE='s3v4' +# set if account is needed for SLURM job submission +F7T_USE_SLURM_ACCOUNT=True #------- # STATUS: microservices & systems to pool: -F7T_STATUS_SERVICES='certificator;utilities;compute;tasks;storage' -F7T_STATUS_SYSTEMS='127.0.0.1;127.0.0.1;127.0.0.1;127.0.0.1;127.0.0.1;' +F7T_STATUS_SERVICES='certificator;utilities;compute;tasks;storage;reservations' +F7T_STATUS_SYSTEMS='certificator;utilities;compute;tasks;storage;reservations;' #------- # UTILITIES: max size of file for download/upload from filesystem in MB F7T_UTILITIES_MAX_FILE_SIZE=5 @@ -87,7 +89,7 @@ F7T_UTILITIES_MAX_FILE_SIZE=5 F7T_UTILITIES_TIMEOUT=5 #------ # if enabled FirecREST sends a certificate as command, requires a serverside ssh ForceCommand wrapper -#F7T_SSH_CERTIFICATE_WRAPPER= +F7T_SSH_CERTIFICATE_WRAPPER=True #------ # KONG internal URLs for services F7T_KONG_JOBS_URL= @@ -97,5 +99,10 @@ F7T_KONG_TASKS_URL= F7T_KONG_UTILITIES_URL= # OPA Vars F7T_OPA_USE=True -F7T_OPA_URL=http://localhost:8181 +F7T_OPA_URL=http://opa:8282 F7T_POLICY_PATH=v1/data/f7t/authz +# SSL vars +#F7T_USE_SSL=False +F7T_SSL_CRT=/ssl/f7t_internal.crt +F7T_SSL_KEY=/ssl/f7t_internal.key +# F7T_SSL_SIGNED=False diff --git a/deploy/test-build/environment/hosts b/deploy/test-build/environment/hosts index fad92be0..fe95ea66 100644 --- a/deploy/test-build/environment/hosts +++ b/deploy/test-build/environment/hosts @@ -3,4 +3,3 @@ fe00::0 ip6-localnet ff00::0 ip6-mcastprefix ff02::1 ip6-allnodes ff02::2 ip6-allrouters -127.0.0.1 minio_test_build diff --git a/deploy/test-build/environment/storage.env b/deploy/test-build/environment/storage.env index 7dcda511..e0a853d7 100644 --- a/deploy/test-build/environment/storage.env +++ b/deploy/test-build/environment/storage.env @@ -9,7 +9,6 @@ F7T_SWIFT_PASS= F7T_SECRET_KEY= # for S3 
-#S3_URL=http://127.0.0.1:9000 F7T_S3_URL=http://minio_test_build:9000 F7T_S3_ACCESS_KEY=storage_access_key F7T_S3_SECRET_KEY=storage_secret_key @@ -28,9 +27,9 @@ F7T_XFER_PARTITION=xfer # external transfers machine (PUBLIC: name, INTERNAL=url or IP internal) F7T_EXT_TRANSFER_MACHINE_PUBLIC='system01' -F7T_EXT_TRANSFER_MACHINE_INTERNAL=127.0.0.1:2223 +F7T_EXT_TRANSFER_MACHINE_INTERNAL=cluster # Storage polling interval for uploads in seconds -F7T_STORAGE_POLLING_INTERVAL=60 +F7T_STORAGE_POLLING_INTERVAL=60 # Cipher key to encrypt/decrypt certificates F7T_CERT_CIPHER_KEY='Df6UZuoPoJ2u5yRwxNfFQ46Nwy8eW1OGTcuhlqn4ONo=' diff --git a/deploy/test-build/ssl/f7t_internal.crt b/deploy/test-build/ssl/f7t_internal.crt new file mode 100644 index 00000000..b7777996 --- /dev/null +++ b/deploy/test-build/ssl/f7t_internal.crt @@ -0,0 +1,32 @@ +-----BEGIN CERTIFICATE----- +MIIFeDCCA2CgAwIBAgIUXiMJo/fWZowJcEPpZl12gWU1TPMwDQYJKoZIhvcNAQEL +BQAwZjELMAkGA1UEBhMCQ0gxDzANBgNVBAgMBlRpY2lubzEPMA0GA1UEBwwGTHVn +YW5vMQ0wCwYDVQQKDARDU0NTMRIwEAYDVQQLDAlGaXJlY1JFU1QxEjAQBgNVBAMM +CTEyNy4wLjAuMTAeFw0yMDExMTMxMTE4MjlaFw0zMDExMTExMTE4MjlaMGYxCzAJ +BgNVBAYTAkNIMQ8wDQYDVQQIDAZUaWNpbm8xDzANBgNVBAcMBkx1Z2FubzENMAsG +A1UECgwEQ1NDUzESMBAGA1UECwwJRmlyZWNSRVNUMRIwEAYDVQQDDAkxMjcuMC4w +LjEwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDHP9KNr2hg3V4tLAVe +1xEA57k3d6ND8ildegKhxX68wpwVAsw/ZYorQ9c66nEnJzCNnKR3i3w9OFoOikch +RtVucCzeBTUUum31K7IaBZd88NPN8XZ18RqtfP/Fb2o+yymGkPWlFNWerNNVhc1J +7lkg6rR7Sfei6Oe2WBaJHFYOHqESEG7jAVXas2PP98yqbpTflFYXdqp9WOBjBUAg +1vtYKMX985B+SwWc0vHOKN6hMWRyYO7ZBMX1xrvfk9MMFXBq7eQo+luL+LyKT2Sm +O/DhzPFGJCjefHLyzR5IpmT0Om1KB5/sZhRcAkig9XPxpAdENC+PaKdh/bI7lAoQ +Tk2O0i130mWCMN6lJU7o/HsbnvQdbtoiBp7ean/PDBRIvQpigPY6bFikp2g4dokY +JnjKgaUpzJfalqjXuVlkwztghkBBEzLN/NY3IG8uDE3HEd0djiCsaJxIy4fwbPTv +O6ksVhNV9x703ufyUONdtbu2m/HEvbO9VeLCprtePFpys7k7r4AvRLMbBlCldIh/ +FTmPsV4nIqWplu4pSVhK5x0OjmDiJetZwvgWb9HDcAq0b887Bl/m/X87pIuQdaOT +ADAHybF0nbpU2MwmFlKJ7pYyqHMFTv9SOfrFjWf3u9gb5wc7BjHj9w+HJXywXZDq 
++ymUMqapij4zPnGzkzwBJeibFQIDAQABox4wHDAaBgNVHREEEzARgglsb2NhbGhv +c3SHBH8AAAEwDQYJKoZIhvcNAQELBQADggIBAK3R8tIVqh5v9k/YR5m1RynTbhtI +uTC9igpQ6Mim54BbWjySD+uyzBd96z+XXq7OqrVEPEpaadd/MZ6aYng2oymfyvfG +3ncDHzPlO1sPxQPdJX8qOdiIwJxH1Jo6bDXqJl/F7IPKU9lKRZpaSS4WmCMUT2pk +eRO4iHcnp3mKYiXrws9fOti5mjfzBvjC9c1kPPKjIH695cGPwJWnAQPaaHy38puo +XGpDYvEufi8qwtPFBKJISCKa7gp8RLqYjp4rkyCohJv9F8TM3vCeFzRqHbLbvUNI +/4wQhNtLvhbjLqCUOmOmYPEay5G0SbQLxymOTTKVExsFQw9Gz+0TPGCCDp1wdnhr +6TV7X0crctaJL+LLQP+hWqCLWHqOsXbzlesbqHUj/XV/1vb571VHg0fqg6CQBIIm +FLffspPCFYhdgDXuVi5FA0NOYW3HkOD3T1ah8ahwY1/ajmhZtBgxiwThqHHHlcp4 +i40XHUj/5/JCC55NzV32Tx3EIwJryPR2Q6h9ZP2oLaeFLb1jTXIoGqxo/NB56dyi +BgJXOGQKAkxbxZwg0VK0KYUF7peA4xo28cqO9k/dd0Egymz+b30YFBf9QZBJGKun +1TcLjHeXX2fb6cSvt2+TgONNJ9yh/boyg+bIEx6B/f0m+Cb3S1k8tdTCO4GRxxqE +nrBctmp27B82Yyig +-----END CERTIFICATE----- diff --git a/deploy/test-build/ssl/f7t_internal.csr b/deploy/test-build/ssl/f7t_internal.csr new file mode 100644 index 00000000..2deedd79 --- /dev/null +++ b/deploy/test-build/ssl/f7t_internal.csr @@ -0,0 +1,29 @@ +-----BEGIN CERTIFICATE REQUEST----- +MIIE6jCCAtICAQAwZjELMAkGA1UEBhMCQ0gxDzANBgNVBAgMBlRpY2lubzEPMA0G +A1UEBwwGTHVnYW5vMQ0wCwYDVQQKDARDU0NTMRIwEAYDVQQLDAlGaXJlY1JFU1Qx +EjAQBgNVBAMMCTEyNy4wLjAuMTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoC +ggIBAMc/0o2vaGDdXi0sBV7XEQDnuTd3o0PyKV16AqHFfrzCnBUCzD9liitD1zrq +cScnMI2cpHeLfD04Wg6KRyFG1W5wLN4FNRS6bfUrshoFl3zw083xdnXxGq18/8Vv +aj7LKYaQ9aUU1Z6s01WFzUnuWSDqtHtJ96Lo57ZYFokcVg4eoRIQbuMBVdqzY8/3 +zKpulN+UVhd2qn1Y4GMFQCDW+1goxf3zkH5LBZzS8c4o3qExZHJg7tkExfXGu9+T +0wwVcGrt5Cj6W4v4vIpPZKY78OHM8UYkKN58cvLNHkimZPQ6bUoHn+xmFFwCSKD1 +c/GkB0Q0L49op2H9sjuUChBOTY7SLXfSZYIw3qUlTuj8exue9B1u2iIGnt5qf88M +FEi9CmKA9jpsWKSnaDh2iRgmeMqBpSnMl9qWqNe5WWTDO2CGQEETMs381jcgby4M +TccR3R2OIKxonEjLh/Bs9O87qSxWE1X3HvTe5/JQ4121u7ab8cS9s71V4sKmu148 +WnKzuTuvgC9EsxsGUKV0iH8VOY+xXicipamW7ilJWErnHQ6OYOIl61nC+BZv0cNw +CrRvzzsGX+b9fzuki5B1o5MAMAfJsXSdulTYzCYWUonuljKocwVO/1I5+sWNZ/e7 
+2BvnBzsGMeP3D4clfLBdkOr7KZQypqmKPjM+cbOTPAEl6JsVAgMBAAGgPzA9Bgkq +hkiG9w0BCQ4xMDAuMAkGA1UdEwQCMAAwCwYDVR0PBAQDAgXgMBQGA1UdEQQNMAuC +CTEyNy4wLjAuMTANBgkqhkiG9w0BAQsFAAOCAgEAdVoyluU6wZZoXLSUVJZIuzQ4 +QDxAtrq55GG5wQvQ6/P442B1mL9xttlE/j+q2LgG0z3nM3Y8FUwISDPoMWBtxaUA +0GxD6w/KFmzRcuAWVcq7pXG2TifKhp25XQyrX8HW4ErJwED0Jx/+twfqEGtqbbCo +Kbv+A6xJyDIAqUTX+nHdvLPU1PoQLbuV2Dd0qSIBXp3HXOtaBo1pDrR3O3iprz2E +E0DrZLTwVpwdE6XtnyVIBUNKni0S693NQ5zLLepNrB9kJNPIUjUHJzsKAz9BF+Qs +T9v+8m7ndltsL84V0sdOmBousqX4jdbXuJC8uCEb/w+udmPQ9zdrFD4B17XSHqrf +JOmPM71r7by63MYvY1Xj528Bjbfw3GR8YfVWNoajP26zjT2lmVHfeiJYeD2YNIKW ++weBVMoG/03cHsu7gVwkEug8W16fNRiswjo8PCbF72A2hnaJ6Th5ZtFKUGe+T9Qo +ef/P6PmKvfNIu55bBHe8RkHUVmL+oxz9CaQbQcyqG4iBaAv3BgoZcJdKyR+4fj6W +qZzG0xr9ufaf/avm9xo3gMCzNNHGt7jGBrVRAoTgu7uA6IxArPq9VUadeTkgMmhy +U5dT+B5P+HCvi95f66GliOQzjEA6/U2L0PCdT1fJY3UtW65kTAJXP8TnFmXf3ELI +9Bp/QffXl4/hlyeJ4uI= +-----END CERTIFICATE REQUEST----- diff --git a/deploy/test-build/ssl/f7t_internal.key b/deploy/test-build/ssl/f7t_internal.key new file mode 100644 index 00000000..429a364a --- /dev/null +++ b/deploy/test-build/ssl/f7t_internal.key @@ -0,0 +1,51 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIJKQIBAAKCAgEAxz/Sja9oYN1eLSwFXtcRAOe5N3ejQ/IpXXoCocV+vMKcFQLM +P2WKK0PXOupxJycwjZykd4t8PThaDopHIUbVbnAs3gU1FLpt9SuyGgWXfPDTzfF2 +dfEarXz/xW9qPssphpD1pRTVnqzTVYXNSe5ZIOq0e0n3oujntlgWiRxWDh6hEhBu +4wFV2rNjz/fMqm6U35RWF3aqfVjgYwVAINb7WCjF/fOQfksFnNLxzijeoTFkcmDu +2QTF9ca735PTDBVwau3kKPpbi/i8ik9kpjvw4czxRiQo3nxy8s0eSKZk9DptSgef +7GYUXAJIoPVz8aQHRDQvj2inYf2yO5QKEE5NjtItd9JlgjDepSVO6Px7G570HW7a +Igae3mp/zwwUSL0KYoD2OmxYpKdoOHaJGCZ4yoGlKcyX2pao17lZZMM7YIZAQRMy +zfzWNyBvLgxNxxHdHY4grGicSMuH8Gz07zupLFYTVfce9N7n8lDjXbW7tpvxxL2z +vVXiwqa7XjxacrO5O6+AL0SzGwZQpXSIfxU5j7FeJyKlqZbuKUlYSucdDo5g4iXr +WcL4Fm/Rw3AKtG/POwZf5v1/O6SLkHWjkwAwB8mxdJ26VNjMJhZSie6WMqhzBU7/ +Ujn6xY1n97vYG+cHOwYx4/cPhyV8sF2Q6vsplDKmqYo+Mz5xs5M8ASXomxUCAwEA +AQKCAgBE8mrTqHz/99oc23zIQreZjpXjAb03R9bUh+HdRNAEVXtZMeyni2VybzlN 
+I88f80mz1x56V4QYUyWmnmbatv4oBx0ffsHNeG2QRxyMMJ8pFtTUENgjG9lVa0m+ +cj1bmUW3UPYPt9Pr4nYGxBQznwysfUsS1peggeOcj4LM00db/3FRJOqaFt40nZrO +XjsuRrGxZE0eNWvtkBGlci2sr3euG/3XUK8RZLh+eeg+pJnVQZg2lCBSbHpr/+cH +CMohBlIuTj+Dho6jMuFbIvn7JAbIu/hJgz25L2s/ZYczMJECii1DTrwiO3iUFh43 +xHfoiBATo9NJVreFCxrfzuxklBP12ZLSj4smUmPECVwH+Y4U5La/taQAsPMwECG4 +Lhxknt5CRpp4hMPjwWzlkEkkSnHgEA+Z+Dl5r3YTm2PVDQZEQt+w98laagvPA5v+ +h+Z/GwGoPcox4DIMDdkrduqrO/uPc3Biqrl4+/c0fE+JVu4+2eZiFP3kp7rwSVSF +BqvybOo2FMOKYBMvsLsy4IA8Ul2JxKagpOjsmr1UWaVQcaCOf8S1mdbCMruv1nzT +zRyzgSSuB57LqxhUOMEZoIJfPOD2pAoTeUHVdFWTloZxrsVkP3PRR+AaXDOhnbl9 +gPxk4+xRpJfnjuP+rS3JxhpGHJLTLEZY9a55o+kEhn7vNvDpyQKCAQEA6c66EYTg +VrHDgyBC1OOFREQOwhIiq4nalL882pbJNgGSNYYxPua60/GAZctiv+2RQTmhPVpt +fqtZOAQYvunEKV9W97vFWazEggszSukV+cxmkpHwPgmpybcAvJbyIcmiZl9zedPH +6d2H4RrIyHAcMiA825kRgCCOjImhAvL2xI4zl/+aq18t4SuyToaEIKj+2P7qtKIG +NzgmypS0QiNNBJYZz8BbgMn1CVUWqOpjBY1sVwlwDaFD3e/DtGDULKUNAaLx5k2Q +ruu4X1TlfgwweOIMTIotaSgaOxt5xDNWI+q/qUT7nfi/JwogMzb9P9x+a/UyS+Yh +OKtw6pukSlq4awKCAQEA2ileKJOfsKfDTMuiUj1GQ8x6F3XnadlJmvylEc7sySE6 +n1M80dK3qvQOF8EoB0RqppWjm9JSdwTut/nJoBW44x8jadk39yVIFx7keSKwIPl9 +2L0t+OeB86WHUJJXyQnsESne4x02VT0p5QZ93t+TL+UJmEchFNITLp4JaHGQSfcj +d3VkWzk+OEeQju7NnTGJtXDmMOc96rfNuMpiEx4DXnQMjtT/R7xEuzX17YYiEq3T +JtnNf4v54sULKXnWBXVPqNMdvMLGwg5lZ8SWioNWOvHAdTNaGAfc0UJRjZnS91Fu +Any+XlT2iMs2SbADGjoCqeP03IB9pHujfyieLJTafwKCAQEA0W8Va3X8QowvlvjC +BNLdYF4aXitClSgB/TYku6VQDalwn7NZz398oTd2DTkXdtbGQ4fs7/ldlZbFb5Dz +3aemyGyXiYfQatiAmgCQbKzHu6AlzWseqWOcjIz6AvSvZiao60zbMqjLLaj7+18z +tZ//EcOg4BCIBZ6kMi1iZjLfawf6HVCf6pxyi33J1d9W4a6fwh7ufOmZ6cLJtLNp +mMjoR5BxBJDJJl9BuJ9YkeyK0MFy57ATiJIJKfeI9b26sVcThIDCcMrQLHAIEpUI +kVU34kCSRcmMjeKXRJUDCQX3RmsUTfr17PaRu9WtLFM4nQjU0b2s1SIKZinkFrNe +giAriwKCAQAHe7PHIIkxvg9s6u8+cuueF714YIr3joR/1Hq1CawFmiYwoOJNMDUs +oAJ7vhxpY2dlH2zlQ2mgu4KUvPS1kQkugTrtvo205Ip8gIrFvPLdrrEqZhLVnttO +SDdEBEEOnFCEpjMHB1F9ywNM39QhePnLgse5eXchB7OwvasE8iBI6TlGN7ihiG0h +8RJ8VKSo57IZjy3WL5hoHLXpyMdkcHsdGk3z+EppY4PYCBnPc/JnQZ8kjgQGg/6m 
+UKv5WDyHI2cAG33Bo8y9i1DfJlcjmS20B+baHlDQ0jQ42VWCzev2n8xuOWkuQyvR +tWa/UFJK7AJ7YV5c0et9YOBS1EnYraJxAoIBAQDLPEuOfNf/N9S/zWYXOy4PJZ8R +9tmObqT6z/Wc0mjmG+2xNGw/3qcj7LbD9jBJLerosE8gSEQxi4ZD1H4YfHlvPCEj +Llbfp82maUc7MZT3QlJneurpi9gWhXPmOaBI1HvS6E6AbvgbXU25cQorrv3o3XIy +GYwmmRU/Y75nI9ir3+YecAS38RxvwDFCnZvG77WCnXsVz+E5fSTnSB+ZYHN6KRNz +FeC937mGzQLI4Ir0dGP2X9vmQKJkvYgmNvjoEc451TQ1CE2a0fg1ch0S6UKB1P8v +sxPsQ7exMrDpHjddozRY6cBVCEj9JfIinKpEiti1ZLcVg3yHgtfL5rjCAn6k +-----END RSA PRIVATE KEY----- diff --git a/deploy/test-build/taskpersistence/redis.conf b/deploy/test-build/taskpersistence/redis.conf index e816682b..eedf7085 100644 --- a/deploy/test-build/taskpersistence/redis.conf +++ b/deploy/test-build/taskpersistence/redis.conf @@ -1,3 +1,10 @@ +# +# Copyright (c) 2019-2021, ETH Zurich. All rights reserved. +# +# Please, refer to the LICENSE file in the root directory. +# SPDX-License-Identifier: BSD-3-Clause +# + # Redis configuration file example. # # Note that in order to read the configuration file, Redis must be diff --git a/doc/openapi/firecrest-api.yaml b/doc/openapi/firecrest-api.yaml index fdde7fc5..a99ebdb4 100644 --- a/doc/openapi/firecrest-api.yaml +++ b/doc/openapi/firecrest-api.yaml @@ -1,9 +1,15 @@ +## +## Copyright (c) 2019-2021, ETH Zurich. All rights reserved. +## +## Please, refer to the LICENSE file in the root directory. +## SPDX-License-Identifier: BSD-3-Clause +## openapi: 3.0.0 servers: - url: 'http://FIRECREST_URL' - url: 'https://FIRECREST_URL' info: - version: 1.0.0-RC1 + version: 1.7.0 title: FirecREST Developers API description: > This API specification is intended for FirecREST developers only. There're some endpoints that are not available in the public version for client developers. @@ -846,7 +852,7 @@ paths: description: 'Calculate the SHA256 (256-bit) checksum of a specified file in {targetPath} on the {X-Machine-Name} filesystem.' 
tags: - Utilities - parameters: + parameters: - name: targetPath in: query description: Path to the file to calculate checksum @@ -905,7 +911,7 @@ paths: description: 'View the content of a specified file in {targetPath} on the {X-Machine-Name} filesystem.' tags: - Utilities - parameters: + parameters: - name: targetPath in: query description: Path to the file to view @@ -1309,6 +1315,10 @@ paths: type: string description: Move data after job with id {stageOutJobId} is completed default: null + account: + type: string + description: Name of the bank account to be used in SLURM. If not set, system default is taken. + default: null required: - sourcePath - targetPath @@ -1389,6 +1399,10 @@ paths: type: string description: Move data after job with id {stageOutJobId} is completed default: null + account: + type: string + description: Name of the bank account to be used in SLURM. If not set, system default is taken. + default: null required: - sourcePath - targetPath @@ -1469,6 +1483,10 @@ paths: type: string description: Copy data after job with id {stageOutJobId} is completed default: null + account: + type: string + description: Name of the bank account to be used in SLURM. If not set, system default is taken. + default: null required: - sourcePath - targetPath @@ -1546,6 +1564,10 @@ paths: type: string description: Delete data after job with id {stageOutJobId} is completed default: null + account: + type: string + description: Name of the bank account to be used in SLURM. If not set, system default is taken. + default: null required: - targetPath example: @@ -1748,7 +1770,7 @@ paths: post: summary: Creates a new task description: Creates a new task. 
- parameters: + parameters: - in: header name: X-Firecrest-Service description: Name of the service for which the task will be created ("compute" or "storage") @@ -1757,7 +1779,7 @@ paths: type: string tags: - Tasks - responses: + responses: '201': description: Task created content: @@ -1808,11 +1830,11 @@ paths: application/json: schema: $ref: '#/components/schemas/Task' - + put: summary: Updates a task description: Updates a task entry that keeps track of progress - parameters: + parameters: - in: header name: X-Firecrest-Service description: Name of the service for which the task will be created ("compute"/"storage") @@ -1909,9 +1931,332 @@ paths: schema: $ref: '#/components/schemas/Expiration-404' - - + '/reservations': + parameters: + - in: header + name: X-Machine-Name + description: The system name + required: true + schema: + type: string + get: + summary: Returns all reservations + description: List all active reservations and their status + tags: + - Reservation + responses: + '200': + description: Reservation list returned + content: + application/json: + schema: + $ref: '#/components/schemas/Reservations' + '400': + description: Error listing reservations + content: + application/json: + schema: + $ref: '#/components/schemas/Upload-notok' + headers: + X-Machine-Does-Not-Exist: + description: Machine does not exist + schema: + type: integer + X-Machine-Not-Available: + description: Machine is not available + schema: + type: integer + X-Timeout: + description: Command has finished with timeout signal + schema: + type: integer + '404': + description: Error listing reservations + content: + application/json: + schema: + $ref: '#/components/schemas/Upload-notok' + headers: + X-Permission-Denied: + description: User does not have permissions to access machine + schema: + type: integer + + post: + summary: Creates a new reservation + description: Creates a new reservation with {reservation} name for a given SLURM groupname + tags: + - Reservation + 
requestBody: + required: true + content: + 'multipart/form-data': + schema: + type: object + properties: + reservation: + type: string + description: name of the reservation + account: + type: string + description: name of the account in SLURM to which the reservation is made for + numberOfNodes: + type: string + description: number of nodes needed for the reservation + nodeType: + type: string + description: type of node + default: knl + starttime: + type: string + description: start time for reservation (YYYY-MM-DDTHH:MM:SS) + endtime: + type: string + description: end time for reservation (YYYY-MM-DDTHH:MM:SS) + required: + - reservation + - account + - numberOfNodes + - nodeType + - starttime + - endtime + example: + reservation: selvedas + project: psigroup + numberOfNodes: 10 + nodeType: knl + starttime: '2020-12-24T00:00:00' + endtime: '2020-12-24T12:30:00' + responses: + '201': + description: Reservation succesfully created + content: + application/json: + schema: + type: object + properties: + success: + type: string + example: "Reservation created: {reservation}" + '400': + description: Error creating reservation + content: + 'application/json': + schema: + type: object + properties: + error: + type: string + example: "Error creating reservation {reservation}" + description: + type: string + example: "Error: invalid start time reservation" + headers: + X-Machine-Does-Not-Exist: + description: Machine does not exist + schema: + type: integer + X-Machine-Not-Available: + description: Machine is not available + schema: + type: integer + X-Error: + description: Error + schema: + type: integer + X-Timeout: + description: Command has finished with timeout signal + schema: + type: integer + '404': + description: Error creating reservation + content: + 'application/json': + schema: + type: object + properties: + error: + type: string + example: "Error creating reservation {reservation}" + headers: + X-Permission-Denied: + description: User does not have 
permissions to access machine + schema: + type: integer + '/reservations/{reservation}': + parameters: + - in: header + name: X-Machine-Name + description: The system name + required: true + schema: + type: string + - in: path + name: reservation + description: reservation name + required: true + schema: + type: string + put: + summary: Updates reservation {reservation} + description: Updates an already created reservation named {reservation} + tags: + - Reservation + requestBody: + required: true + content: + 'multipart/form-data': + schema: + type: object + properties: + numberOfNodes: + type: string + description: number of nodes needed for the reservation + nodeType: + type: string + description: type of node + default: knl + starttime: + type: string + description: start time for reservation (YYYY-MM-DDTHH:MM:SS) + endtime: + type: string + description: end time for reservation (YYYY-MM-DDTHH:MM:SS) + required: + - reservation + - numberOfNodes + - nodeType + - starttime + - endtime + example: + reservation: selvedas + project: psigroup + numberOfNodes: 10 + nodeType: knl + starttime: '2020-12-24T00:00:00' + endtime: '2020-12-24T12:30:00' + responses: + '200': + description: Reservation succesfully updated + content: + application/json: + schema: + type: object + properties: + success: + type: string + example: "Reservation {reservation} updated" + '400': + description: Error updating reservation + content: + application/json: + schema: + type: object + properties: + error: + type: string + example: "Error updating reservation {reservation}" + description: + type: string + example: "Error: {reservation} doesn't seem to be a valid reservation" + headers: + X-Machine-Does-Not-Exist: + description: Machine does not exist + schema: + type: integer + X-Machine-Not-Available: + description: Machine is not available + schema: + type: integer + X-Error: + description: Error + schema: + type: integer + X-Timeout: + description: Command has finished with timeout signal 
+ schema: + type: integer + '404': + description: Error updating reservation + content: + application/json: + schema: + type: object + properties: + error: + type: string + example: "Error updating reservation {reservation}" + headers: + X-Permission-Denied: + description: User does not have permissions to access machine + schema: + type: integer + + delete: + summary: Deletes reservation {reservation} + description: Deletes an already created reservation named {reservation} + tags: + - Reservation + responses: + '204': + description: Reservation succesfully deleted + content: + application/json: + schema: + type: object + properties: + success: + type: string + example: "Reservation {reservation} removed" + '400': + description: Error deleting reservation + content: + application/json: + schema: + type: object + properties: + error: + type: string + example: "Error deleting reservation {reservation}" + description: + type: string + example: "Error: You are not an owner of the reservation" + headers: + X-Machine-Does-Not-Exist: + description: Machine does not exist + schema: + type: integer + X-Machine-Not-Available: + description: Machine is not available + schema: + type: integer + X-Error: + description: Error + schema: + type: integer + X-Timeout: + description: Command has finished with timeout signal + schema: + type: integer + '404': + description: Error deleting reservation + content: + application/json: + schema: + type: object + properties: + error: + type: string + example: "Error deleting reservation {reservation}" + headers: + X-Permission-Denied: + description: User does not have permissions to access machine + schema: + type: integer + + + '/certificator': @@ -1934,7 +2279,7 @@ paths: application/json: schema: $ref: '#/components/schemas/Certificator-notok' - + components: securitySchemes: @@ -2116,13 +2461,13 @@ components: properties: success: type: string - description: Success description + description: Success description Expiration-400: type: 
object properties: error: type: string - description: Error description + description: Error description Expiration-403: type: object properties: @@ -2227,6 +2572,46 @@ components: properties: success: type: string + Reservation: + type: object + properties: + reservationname: + type: string + starttime: + type: string + endtime: + type: string + duration: + type: string + nodes: + type: string + nodecnt: + type: string + corecnt: + type: string + features: + type: string + partitionname: + type: string + flags: + type: string + users: + type: string + accounts: + type: string + licenses: + type: string + state: + type: string + burstbuffer: + type: string + watts: + type: string + + Reservations: + type: array + items: + $ref: '#/components/schemas/Reservation' tags: - name: Status description: Status information of infrastructure and services. diff --git a/doc/openapi/firecrest-developers-api.yaml b/doc/openapi/firecrest-developers-api.yaml index 36c2ebad..6c582ddd 100644 --- a/doc/openapi/firecrest-developers-api.yaml +++ b/doc/openapi/firecrest-developers-api.yaml @@ -1,9 +1,15 @@ +## +## Copyright (c) 2019-2021, ETH Zurich. All rights reserved. +## +## Please, refer to the LICENSE file in the root directory. +## SPDX-License-Identifier: BSD-3-Clause +## openapi: 3.0.0 servers: - url: 'http://FIRECREST_URL' - url: 'https://FIRECREST_URL' info: - version: 1.0.0-RC1 + version: 1.7.0 title: FirecREST API description: > FirecREST platform, a RESTful Services Gateway to HPC resources, is a @@ -858,7 +864,7 @@ paths: description: 'Calculate the SHA256 (256-bit) checksum of a specified file in {targetPath} on the {X-Machine-Name} filesystem.' tags: - Utilities - parameters: + parameters: - name: targetPath in: query description: Path to the file to calculate checksum @@ -917,7 +923,7 @@ paths: description: 'View the content of a specified file in {targetPath} on the {X-Machine-Name} filesystem.' 
tags: - Utilities - parameters: + parameters: - name: targetPath in: query description: Path to the file to view @@ -1321,6 +1327,10 @@ paths: type: string description: Move data after job with id {stageOutJobId} is completed default: null + account: + type: string + description: Name of the bank account to be used in SLURM. If not set, system default is taken. + default: null required: - sourcePath - targetPath @@ -1401,6 +1411,10 @@ paths: type: string description: Move data after job with id {stageOutJobId} is completed default: null + account: + type: string + description: Name of the bank account to be used in SLURM. If not set, system default is taken. + default: null required: - sourcePath - targetPath @@ -1481,6 +1495,10 @@ paths: type: string description: Copy data after job with id {stageOutJobId} is completed default: null + account: + type: string + description: Name of the bank account to be used in SLURM. If not set, system default is taken. + default: null required: - sourcePath - targetPath @@ -1558,6 +1576,10 @@ paths: type: string description: Delete data after job with id {stageOutJobId} is completed default: null + account: + type: string + description: Name of the bank account to be used in SLURM. If not set, system default is taken. 
+ default: null required: - targetPath example: @@ -1804,6 +1826,329 @@ paths: # '400': # description: Failed to delete task + '/reservations': + parameters: + - in: header + name: X-Machine-Name + description: The system name + required: true + schema: + type: string + get: + summary: Returns all reservations + description: List all active reservations and their status + tags: + - Reservation + responses: + '200': + description: Reservation list returned + content: + application/json: + schema: + $ref: '#/components/schemas/Reservations' + '400': + description: Error listing reservations + content: + application/json: + schema: + $ref: '#/components/schemas/Upload-notok' + headers: + X-Machine-Does-Not-Exist: + description: Machine does not exist + schema: + type: integer + X-Machine-Not-Available: + description: Machine is not available + schema: + type: integer + X-Timeout: + description: Command has finished with timeout signal + schema: + type: integer + '404': + description: Error listing reservations + content: + application/json: + schema: + $ref: '#/components/schemas/Upload-notok' + headers: + X-Permission-Denied: + description: User does not have permissions to access machine + schema: + type: integer + + post: + summary: Creates a new reservation + description: Creates a new reservation with {reservation} name for a given SLURM groupname + tags: + - Reservation + requestBody: + required: true + content: + 'multipart/form-data': + schema: + type: object + properties: + reservation: + type: string + description: name of the reservation + account: + type: string + description: name of the account in SLURM to which the reservation is made for + numberOfNodes: + type: string + description: number of nodes needed for the reservation + nodeType: + type: string + description: type of node + default: knl + starttime: + type: string + description: start time for reservation (YYYY-MM-DDTHH:MM:SS) + endtime: + type: string + description: end time for reservation 
(YYYY-MM-DDTHH:MM:SS) + required: + - reservation + - account + - numberOfNodes + - nodeType + - starttime + - endtime + example: + reservation: selvedas + project: psigroup + numberOfNodes: 10 + nodeType: knl + starttime: '2020-12-24T00:00:00' + endtime: '2020-12-24T12:30:00' + responses: + '201': + description: Reservation succesfully created + content: + application/json: + schema: + type: object + properties: + success: + type: string + example: "Reservation created: {reservation}" + '400': + description: Error creating reservation + content: + 'application/json': + schema: + type: object + properties: + error: + type: string + example: "Error creating reservation {reservation}" + description: + type: string + example: "Error: invalid start time reservation" + headers: + X-Machine-Does-Not-Exist: + description: Machine does not exist + schema: + type: integer + X-Machine-Not-Available: + description: Machine is not available + schema: + type: integer + X-Error: + description: Error + schema: + type: integer + X-Timeout: + description: Command has finished with timeout signal + schema: + type: integer + '404': + description: Error creating reservation + content: + 'application/json': + schema: + type: object + properties: + error: + type: string + example: "Error creating reservation {reservation}" + headers: + X-Permission-Denied: + description: User does not have permissions to access machine + schema: + type: integer + '/reservations/{reservation}': + parameters: + - in: header + name: X-Machine-Name + description: The system name + required: true + schema: + type: string + - in: path + name: reservation + description: reservation name + required: true + schema: + type: string + put: + summary: Updates reservation {reservation} + description: Updates an already created reservation named {reservation} + tags: + - Reservation + requestBody: + required: true + content: + 'multipart/form-data': + schema: + type: object + properties: + numberOfNodes: + type: 
string + description: number of nodes needed for the reservation + nodeType: + type: string + description: type of node + default: knl + starttime: + type: string + description: start time for reservation (YYYY-MM-DDTHH:MM:SS) + endtime: + type: string + description: end time for reservation (YYYY-MM-DDTHH:MM:SS) + required: + - reservation + - numberOfNodes + - nodeType + - starttime + - endtime + example: + reservation: selvedas + project: psigroup + numberOfNodes: 10 + nodeType: knl + starttime: '2020-12-24T00:00:00' + endtime: '2020-12-24T12:30:00' + responses: + '200': + description: Reservation succesfully updated + content: + application/json: + schema: + type: object + properties: + success: + type: string + example: "Reservation {reservation} updated" + '400': + description: Error updating reservation + content: + application/json: + schema: + type: object + properties: + error: + type: string + example: "Error updating reservation {reservation}" + description: + type: string + example: "Error: {reservation} doesn't seem to be a valid reservation" + headers: + X-Machine-Does-Not-Exist: + description: Machine does not exist + schema: + type: integer + X-Machine-Not-Available: + description: Machine is not available + schema: + type: integer + X-Error: + description: Error + schema: + type: integer + X-Timeout: + description: Command has finished with timeout signal + schema: + type: integer + '404': + description: Error updating reservation + content: + application/json: + schema: + type: object + properties: + error: + type: string + example: "Error updating reservation {reservation}" + headers: + X-Permission-Denied: + description: User does not have permissions to access machine + schema: + type: integer + + delete: + summary: Deletes reservation {reservation} + description: Deletes an already created reservation named {reservation} + tags: + - Reservation + responses: + '204': + description: Reservation succesfully deleted + content: + application/json: 
+ schema: + type: object + properties: + success: + type: string + example: "Reservation {reservation} removed" + '400': + description: Error deleting reservation + content: + application/json: + schema: + type: object + properties: + error: + type: string + example: "Error deleting reservation {reservation}" + description: + type: string + example: "Error: You are not an owner of the reservation" + headers: + X-Machine-Does-Not-Exist: + description: Machine does not exist + schema: + type: integer + X-Machine-Not-Available: + description: Machine is not available + schema: + type: integer + X-Error: + description: Error + schema: + type: integer + X-Timeout: + description: Command has finished with timeout signal + schema: + type: integer + '404': + description: Error deleting reservation + content: + application/json: + schema: + type: object + properties: + error: + type: string + example: "Error deleting reservation {reservation}" + headers: + X-Permission-Denied: + description: User does not have permissions to access machine + schema: + type: integer + components: securitySchemes: bearerAuth: @@ -1964,6 +2309,16 @@ components: type: string nodelist: type: string + job_file: + type: string + job_file_out: + type: string + job_file_err: + type: string + job_data_out: + type: string + job_data_err: + type: string Jobs: type: array items: @@ -2046,6 +2401,46 @@ components: properties: success: type: string + Reservation: + type: object + properties: + reservationname: + type: string + starttime: + type: string + endtime: + type: string + duration: + type: string + nodes: + type: string + nodecnt: + type: string + corecnt: + type: string + features: + type: string + partitionname: + type: string + flags: + type: string + users: + type: string + accounts: + type: string + licenses: + type: string + state: + type: string + burstbuffer: + type: string + watts: + type: string + + Reservations: + type: array + items: + $ref: '#/components/schemas/Reservation' tags: - 
name: Status description: Status information of infrastructure and services. diff --git a/src/certificator/certificator.py b/src/certificator/certificator.py index 5e2ed856..8b7af330 100644 --- a/src/certificator/certificator.py +++ b/src/certificator/certificator.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2019-2020, ETH Zurich. All rights reserved. +# Copyright (c) 2019-2021, ETH Zurich. All rights reserved. # # Please, refer to the LICENSE file in the root directory. # SPDX-License-Identifier: BSD-3-Clause @@ -14,7 +14,6 @@ import base64 import requests -STATUS_IP = os.environ.get("F7T_STATUS_IP") AUTH_HEADER_NAME = 'Authorization' AUTH_AUDIENCE = os.environ.get("F7T_AUTH_TOKEN_AUD", '').strip('\'"') @@ -30,6 +29,11 @@ OPA_URL = os.environ.get("F7T_OPA_URL","http://localhost:8181").strip('\'"') POLICY_PATH = os.environ.get("F7T_POLICY_PATH","v1/data/f7t/authz").strip('\'"') +### SSL parameters +USE_SSL = os.environ.get("F7T_USE_SSL", False) +SSL_CRT = os.environ.get("F7T_SSL_CRT", "") +SSL_KEY = os.environ.get("F7T_SSL_KEY", "") + realm_pubkey=os.environ.get("F7T_REALM_RSA_PUBLIC_KEY", '') if realm_pubkey != '': # headers are inserted here, must not be present @@ -44,32 +48,43 @@ # check user authorization on endpoint # using Open Policy Agent -# +# # use: # check_user_auth(username,system) def check_user_auth(username,system): # check if OPA is active if OPA_USE: - try: - input = {"input":{"user": f"{username}", "system": f"{system}"}} - #resp_opa = requests.post(f"{OPA_URL}/{POLICY_PATH}", json=input) - logging.info(f"{OPA_URL}/{POLICY_PATH}") + logging.info(f"{OPA_URL}/{POLICY_PATH}") + input = {"input":{"user": f"{username}", "system": f"{system}"}} - resp_opa = requests.post(f"{OPA_URL}/{POLICY_PATH}", json=input) + try: + resp_opa = requests.post(f"{OPA_URL}/{POLICY_PATH}", json=input, verify= (SSL_CRT if USE_SSL else False)) + msg = "{} {}".format(resp_opa.status_code, resp_opa.text) + logging.info(f"resp_opa: {msg}") - logging.info(resp_opa.content) + if not 
resp_opa.ok: + return {"allow": False, "description":f"Server error: {msg}", "status_code": resp_opa.status_code} if resp_opa.json()["result"]["allow"]: logging.info(f"User {username} authorized by OPA") return {"allow": True, "description":f"User {username} authorized", "status_code": 200 } else: logging.error(f"User {username} NOT authorized by OPA") - return {"allow": False, "description":f"Permission denied for user {username} in {system}", "status_code": 401} + return {"allow": False, "description":f"Permission denied for user {username} in {system}", "status_code": 401} + + except requests.exceptions.SSLError as e: + logging.error("Exception: {}".format(e)) + return {"allow": False, "description":"Authorization server error: SSL error.", "status_code": 404} + except requests.exceptions.RequestException as e: - logging.error(e.args) - return {"allow": False, "description":"Authorization server error", "status_code": 404} - + logging.error("Exception: {}".format(e)) + return {"allow": False, "description":"Authorization server error: RequestException", "status_code": 404} + + except Exception as e: + logging.error("Exception: {}".format(e)) + return {"allow": False, "description":"Authorization server error: Unexpected", "status_code": 404} + return {"allow": True, "description":"Authorization method not active", "status_code": 200 } # checks JWT from Keycloak, optionally validates signature. 
It only receives the content of header's auth pair (not key:content) @@ -92,7 +107,7 @@ def check_header(header): # if AUTH_REQUIRED_SCOPE != '': # if not (AUTH_REQUIRED_SCOPE in decoded['realm_access']['roles']): # return False - + # {"scope": "openid profile firecrest email"} if AUTH_REQUIRED_SCOPE != "": if AUTH_REQUIRED_SCOPE not in decoded["scope"].split(): @@ -127,7 +142,7 @@ def get_username(header): decoded = jwt.decode(header[7:], verify=False) else: decoded = jwt.decode(header[7:], realm_pubkey, algorithms=realm_pubkey_type, options={'verify_aud': False}) - + # check if it's a service account token try: if AUTH_ROLE in decoded["realm_access"]["roles"]: @@ -202,11 +217,9 @@ def receive(): # Check if user is authorized in OPA cluster = request.args.get("cluster","") - if not cluster: return jsonify(description='No cluster specified'), 404 - auth_result = check_user_auth(username,cluster) if not auth_result["allow"]: return jsonify(description=auth_result["description"]), auth_result["status_code"] @@ -218,10 +231,8 @@ def receive(): force_command = base64.urlsafe_b64decode(request.args.get("command", '')).decode("utf-8") if force_command: force_opt = base64.urlsafe_b64decode(request.args.get("option", '')).decode("utf-8") - if force_command == 'wget': - force_command = '/usr/bin/wget' - ssh_expire = "+30m" #change to '+7d' - exp_time = request.args.get("exptime",'') + if force_command == 'curl': + exp_time = request.args.get("exptime", '') if exp_time: ssh_expire = f"+{exp_time}s" else: @@ -260,15 +271,9 @@ def receive(): # get status for status microservice -# only used by STATUS_IP otherwise forbidden @app.route("/status", methods=["GET"]) def status(): app.logger.info("Test status of service") - - # if request.remote_addr != STATUS_IP: - # app.logger.warning("Invalid remote address: {addr}".format(addr=request.remote_addr)) - # return jsonify(error="Invalid access"), 403 - return jsonify(success="ack"), 200 @@ -303,5 +308,9 @@ def status(): # run app # 
debug = False, so output redirects to log files - app.run(debug=debug, host='0.0.0.0', port=CERTIFICATOR_PORT) + if USE_SSL: + app.run(debug=debug, host='0.0.0.0', port=CERTIFICATOR_PORT, ssl_context=(SSL_CRT, SSL_KEY)) + else: + app.run(debug=debug, host='0.0.0.0', port=CERTIFICATOR_PORT) + diff --git a/src/common/async_task.py b/src/common/async_task.py index b9311ef9..5b7ee069 100644 --- a/src/common/async_task.py +++ b/src/common/async_task.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2019-2020, ETH Zurich. All rights reserved. +# Copyright (c) 2019-2021, ETH Zurich. All rights reserved. # # Please, refer to the LICENSE file in the root directory. # SPDX-License-Identifier: BSD-3-Clause diff --git a/src/common/common.env.template b/src/common/common.env.template index 9d0d864f..80219cee 100644 --- a/src/common/common.env.template +++ b/src/common/common.env.template @@ -45,6 +45,10 @@ F7T_STATUS_URL= F7T_STORAGE_URL= F7T_TASKS_URL= F7T_UTILITIES_URL= +# use SSL between microservices +F7T_USE_SSL=False +#F7T_SSL_CRT= +#F7T_SSL_KEY= # kong_url: used by microservices when return URL to clients F7T_KONG_URL= #------- diff --git a/src/common/cscs_api_common.py b/src/common/cscs_api_common.py index e4e1a3e0..24d9a221 100644 --- a/src/common/cscs_api_common.py +++ b/src/common/cscs_api_common.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2019-2020, ETH Zurich. All rights reserved. +# Copyright (c) 2019-2021, ETH Zurich. All rights reserved. # # Please, refer to the LICENSE file in the root directory. 
# SPDX-License-Identifier: BSD-3-Clause @@ -48,6 +48,11 @@ OPA_URL = os.environ.get("F7T_OPA_URL","http://localhost:8181").strip('\'"') POLICY_PATH = os.environ.get("F7T_POLICY_PATH","v1/data/f7t/authz").strip('\'"') +### SSL parameters +USE_SSL = os.environ.get("F7T_USE_SSL", False) +SSL_CRT = os.environ.get("F7T_SSL_CRT", "") +SSL_KEY = os.environ.get("F7T_SSL_KEY", "") + logging.getLogger().setLevel(logging.INFO) logging.basicConfig(format='%(asctime)s,%(msecs)d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s',datefmt='%Y-%m-%d:%H:%M:%S',level=logging.INFO) @@ -68,11 +73,11 @@ def check_header(header): decoded = jwt.decode(header[7:], realm_pubkey, algorithms=realm_pubkey_type, options={'verify_aud': False}) else: decoded = jwt.decode(header[7:], realm_pubkey, algorithms=realm_pubkey_type, audience=AUTH_AUDIENCE) - + if AUTH_REQUIRED_SCOPE != "": if AUTH_REQUIRED_SCOPE not in decoded["scope"].split(): return False - + return True except jwt.exceptions.InvalidSignatureError: @@ -100,7 +105,7 @@ def get_username(header): decoded = jwt.decode(header[7:], realm_pubkey, algorithms=realm_pubkey_type, options={'verify_aud': False}) # check if it's a service account token try: - if AUTH_ROLE in decoded["realm_access"]["roles"]: + if AUTH_ROLE in decoded["realm_access"]["roles"]: clientId = decoded["clientId"] username = decoded["resource_access"][clientId]["roles"][0] @@ -165,7 +170,7 @@ def create_certificate(auth_header, cluster_name, cluster_addr, command=None, o logging.info(f"Request: {reqURL}") try: - resp = requests.get(reqURL, headers={AUTH_HEADER_NAME: auth_header}) + resp = requests.get(reqURL, headers={AUTH_HEADER_NAME: auth_header}, verify= (SSL_CRT if USE_SSL else False) ) if not resp.ok: return [None, resp.status_code, resp.json()["description"]] @@ -185,9 +190,9 @@ def create_certificate(auth_header, cluster_name, cluster_addr, command=None, o # keys: [pub_cert, pub_key, priv_key, temp_dir] return [td + "/user-key-cert.pub", td + "/user-key.pub", td 
+ "/user-key", td] - except URLError as ue: - logging.error(f"({ue.errno}) -> {ue.strerror}", exc_info=True) - return [None, ue.errno, ue.strerror] + except requests.exceptions.SSLError as ssle: + logging.error(f"(-2) -> {ssle.strerror}") + return [None, -2, ssle.strerror] except IOError as ioe: logging.error(f"({ioe.errno}) -> {ioe.strerror}", exc_info=True) return [None, ioe.errno, ioe.strerror] @@ -271,12 +276,12 @@ def exec_remote_command(auth_header, system_name, system_addr, action, file_tran stderr_errda = "" stdout_errda = "" - + # poll process status since directly using recv_exit_status() could result - # in a permanent hang when remote output is larger than the current Transport or session’s window_size + # in a permanent hang when remote output is larger than the current Transport or session’s window_size while True: if stderr.channel.exit_status_ready(): - logging.info("stderr channel exit status ready") + logging.info("stderr channel exit status ready") stderr_errno = stderr.channel.recv_exit_status() endtime = time.time() + 30 eof_received = True @@ -298,7 +303,7 @@ def exec_remote_command(auth_header, system_name, system_addr, action, file_tran #for i in range(0,10): while True: if stdout.channel.exit_status_ready(): - logging.info("stdout channel exit status ready") + logging.info("stdout channel exit status ready") stdout_errno = stdout.channel.recv_exit_status() endtime = time.time() + 30 eof_received = True @@ -308,7 +313,7 @@ def exec_remote_command(auth_header, system_name, system_addr, action, file_tran stdout.channel.close() eof_received = False break - if eof_received: + if eof_received: output = "".join(stdout.readlines()) # error = stderr.read() it hangs # clean "tput: No ..." 
lines at error output @@ -317,7 +322,6 @@ def exec_remote_command(auth_header, system_name, system_addr, action, file_tran # else: # time.sleep(5) - if file_transfer == "download": outlines = output else: @@ -330,12 +334,18 @@ def exec_remote_command(auth_header, system_name, system_addr, action, file_tran # TODO: change precedence of error, because in /xfer-external/download this gives error and it s not an error if stderr_errno == 0: - if stderr_errda and not in_str(stderr_errda,"Could not chdir to home directory"): - result = {"error": 0, "msg": stderr_errda} + if stderr_errda and not (in_str(stderr_errda,"Could not chdir to home directory") or in_str(stderr_errda,"scancel: Terminating job")): + result = {"error": 1, "msg": stderr_errda} + elif in_str(stdout_errda, "No such file"): # in case that error is 0 and the msg is on the stdout (like with some file) + result = {"error": 1, "msg": stdout_errda} + elif in_str(stdout_errda, "no read permission"): # in case that error is 0 and the msg is on the stdout (like with some file) + result = {"error": 1, "msg": stdout_errda} + elif in_str(stdout_errda, "cannot open"): # in case that error is 0 and the msg is on the stdout (like with some file) + result = {"error": 1, "msg": stdout_errda} else: result = {"error": 0, "msg": outlines} elif stderr_errno > 0: - result = {"error": stderr_errno, "msg": stderr_errda} + result = {"error": stderr_errno, "msg": stderr_errda or stdout_errda} elif len(stderr_errda) > 0: result = {"error": 1, "msg": stderr_errda} elif stdout_errno == -2: @@ -343,6 +353,7 @@ def exec_remote_command(auth_header, system_name, system_addr, action, file_tran elif stderr_errno == -1: result = {"error": -1, "msg": "No exit status was provided by the server"} + # first if paramiko exception raise except paramiko.ssh_exception.NoValidConnectionsError as e: logging.error(type(e), exc_info=True) @@ -381,7 +392,7 @@ def exec_remote_command(auth_header, system_name, system_addr, action, file_tran 
os.remove(priv_key) os.rmdir(temp_dir) - logging.info(f"Result returned: {result['msg']}") + logging.info(f"Result: status_code {result['error']} -> {result['msg']}") return result @@ -435,7 +446,7 @@ def create_task(auth_header,service=None): try: # X-Firecrest-Service: service that created the task req = requests.post(f"{TASKS_URL}/", - headers={AUTH_HEADER_NAME: auth_header, "X-Firecrest-Service":service}) + headers={AUTH_HEADER_NAME: auth_header, "X-Firecrest-Service":service}, verify=(SSL_CRT if USE_SSL else False)) except requests.exceptions.ConnectionError as e: logging.error(type(e), exc_info=True) @@ -455,16 +466,16 @@ def create_task(auth_header,service=None): # function to call update task entry API in Queue FS def update_task(task_id, auth_header, status, msg = None, is_json=False): - logging.info(f"Update {TASKS_URL}/{task_id} -> status: {status}") + logging.info(f"Update {TASKS_URL}/{task_id} -> status: {status}") if is_json: data = {"status": status, "msg": msg} req = requests.put(f"{TASKS_URL}/{task_id}", - json=data, headers={AUTH_HEADER_NAME: auth_header}) + json=data, headers={AUTH_HEADER_NAME: auth_header}, verify=(SSL_CRT if USE_SSL else False)) else: data = {"status": status, "msg": msg} req = requests.put(f"{TASKS_URL}/{task_id}", - data=data, headers={AUTH_HEADER_NAME: auth_header}) + data=data, headers={AUTH_HEADER_NAME: auth_header}, verify=(SSL_CRT if USE_SSL else False)) resp = json.loads(req.content) @@ -477,7 +488,7 @@ def expire_task(task_id,auth_header,service): req = requests.post(f"{TASKS_URL}/expire/{task_id}", - headers={AUTH_HEADER_NAME: auth_header, "X-Firecrest-Service": service}) + headers={AUTH_HEADER_NAME: auth_header, "X-Firecrest-Service": service}, verify=(SSL_CRT if USE_SSL else False)) # resp = json.loads(req.content) @@ -487,8 +498,8 @@ def expire_task(task_id,auth_header,service): return True - - + + # function to check task status: def get_task_status(task_id,auth_header): @@ -497,7 +508,7 @@ def 
get_task_status(task_id,auth_header): try: retval = requests.get(f"{TASKS_URL}/{task_id}", - headers={AUTH_HEADER_NAME: auth_header}) + headers={AUTH_HEADER_NAME: auth_header}, verify=(SSL_CRT if USE_SSL else False)) if retval.status_code != 200: return -1 @@ -532,30 +543,30 @@ def is_valid_file(path, auth_header, system_name, system_addr): if retval["error"] == 113: return {"result":False, "headers":{"X-Machine-Not-Available":"Machine is not available"} } - + if retval["error"] == 124: return {"result":False, "headers":{"X-Timeout": "Command has finished with timeout signal"}} - + # error no such file if in_str(error_str,"No such file"): return {"result":False, "headers":{"X-Invalid-Path": "{path} is an invalid path.".format(path=path)}} - + # permission denied if in_str(error_str,"Permission denied") or in_str(error_str,"OPENSSH"): return {"result":False, "headers":{"X-Permission-Denied": "User does not have permissions to access machine or path"}} if in_str(error_str, "directory"): - return {"result":False, "headers":{"X-A-Directory": "{path} is a directory".format(path=path)}} + return {"result":False, "headers":{"X-A-Directory": "{path} is a directory".format(path=path)}} return {"result":False, "headers":{"X-Error": retval["msg"]}} return {"result":True} - + # checks if {path} is a valid directory # 'path' should exists and be accesible to the user (write permissions) # @@ -565,9 +576,9 @@ def is_valid_dir(path, auth_header, system_name, system_addr): # test file is a hidden file and has a timestamp in order to not overwrite other files created by user # after this, file should be deleted - + timestamp = datetime.datetime.today().strftime("%Y-%m-%dT%H:%M:%S.%f") - # using a hash + # using a hash hashedTS = hashlib.md5() hashedTS.update(timestamp.encode("utf-8")) @@ -600,7 +611,7 @@ def is_valid_dir(path, auth_header, system_name, system_addr): if in_str(error_str,"Not a directory"): return {"result":False, "headers":{"X-Not-A-Directory": "{path} is not a 
directory".format(path=path)}} - return {"result":False, "headers":{"X-Error": retval["msg"]}} + return {"result":False, "headers":{"X-Error": retval["msg"]}} # delete test file created action = f"rm -- {path}/{tempFileName}" @@ -634,14 +645,14 @@ def wrapper_check_auth_header(*args, **kwargs): # check user authorization on endpoint # using Open Policy Agent -# +# # use: # check_user_auth(username,system) def check_user_auth(username,system): # check if OPA is active if OPA_USE: - try: + try: input = {"input":{"user": f"{username}", "system": f"{system}"}} #resp_opa = requests.post(f"{OPA_URL}/{POLICY_PATH}", json=input) logging.info(f"{OPA_URL}/{POLICY_PATH}") @@ -655,11 +666,11 @@ def check_user_auth(username,system): return {"allow": True, "description":f"User {username} authorized", "status_code": 200 } else: logging.error(f"User {username} NOT authorized by OPA") - return {"allow": False, "description":f"User {username} not authorized in {system}", "status_code": 401} + return {"allow": False, "description":f"User {username} not authorized in {system}", "status_code": 401} except requests.exceptions.RequestException as e: logging.error(e.args) - return {"allow": False, "description":"Authorization server error", "status_code": 404} - + return {"allow": False, "description":"Authorization server error", "status_code": 404} + return {"allow": True, "description":"Authorization method not active", "status_code": 200 } @@ -681,6 +692,10 @@ def check_command_error(error_str, error_code, service_msg): header = {"X-Timeout": "Command has finished with timeout signal"} return {"description": service_msg, "status_code": 400, "header": header} + if error_code == 118: + header = {"X-Error": "Command execution is not allowed in machine"} + return {"description": service_msg, "status_code": 400, "header": header} + # When certificate doesn't match SSH configuration if in_str(error_str,"OPENSSH"): header = {"X-Permission-Denied": "User does not have permissions to access 
machine"} @@ -690,10 +705,6 @@ def check_command_error(error_str, error_code, service_msg): header={"X-Invalid-Path":"path is an invalid path"} return {"description": service_msg, "status_code": 400, "header": header} - if in_str(error_str,"cannot open"): - header = {"X-Permission-Denied": "User does not have permissions to access path"} - return {"description":service_msg, "status_code": 400, "header": header} - if in_str(error_str,"No such file"): if in_str(error_str,"cannot stat"): header={"X-Not-Found":"sourcePath not found"} @@ -711,6 +722,10 @@ def check_command_error(error_str, error_code, service_msg): header={"X-Invalid-Path":"path is an invalid path"} return {"description": service_msg, "status_code": 400, "header": header} + if in_str(error_str,"cannot open"): + header = {"X-Permission-Denied": "User does not have permissions to access path"} + return {"description":service_msg, "status_code": 400, "header": header} + if in_str(error_str,"Permission denied"): header = {"X-Permission-Denied": "User does not have permissions to access path"} return {"description": service_msg, "status_code": 400, "header": header} @@ -718,7 +733,7 @@ def check_command_error(error_str, error_code, service_msg): if in_str(error_str,"directory"): header = {"X-A-Directory": "path is a directory, can't checksum directories"} return {"description": service_msg, "status_code": 400, "header": header} - + # if already exists, not overwrite (-i) if in_str(error_str,"overwrite"): header = {"X-Exists": "targetPath already exists"} @@ -735,7 +750,7 @@ def check_command_error(error_str, error_code, service_msg): if in_str(error_str,"invalid user"): header = {"X-Invalid-Owner": "owner is an invalid user"} return {"description": service_msg, "status_code": 400, "header": header} - + if in_str(error_str, "invalid mode"): header = {"X-Invalid-Mode": "mode is an invalid mode"} return {"description": service_msg, "status_code": 400, "header": header} diff --git a/src/common/job_time.py 
b/src/common/job_time.py index 53f63bae..1fe713d2 100644 --- a/src/common/job_time.py +++ b/src/common/job_time.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2019-2020, ETH Zurich. All rights reserved. +# Copyright (c) 2019-2021, ETH Zurich. All rights reserved. # # Please, refer to the LICENSE file in the root directory. # SPDX-License-Identifier: BSD-3-Clause diff --git a/src/common/tasks_persistence.py b/src/common/tasks_persistence.py index aaf6884f..06fa3fcb 100644 --- a/src/common/tasks_persistence.py +++ b/src/common/tasks_persistence.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2019-2020, ETH Zurich. All rights reserved. +# Copyright (c) 2019-2021, ETH Zurich. All rights reserved. # # Please, refer to the LICENSE file in the root directory. # SPDX-License-Identifier: BSD-3-Clause diff --git a/src/compute/compute.py b/src/compute/compute.py index 9c818568..2c069ff2 100644 --- a/src/compute/compute.py +++ b/src/compute/compute.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2019-2020, ETH Zurich. All rights reserved. +# Copyright (c) 2019-2021, ETH Zurich. All rights reserved. # # Please, refer to the LICENSE file in the root directory. 
# SPDX-License-Identifier: BSD-3-Clause @@ -32,11 +32,15 @@ CERTIFICATOR_URL= os.environ.get("F7T_CERTIFICATOR_URL") TASKS_URL = os.environ.get("F7T_TASKS_URL") -STATUS_IP = os.environ.get("F7T_STATUS_IP") KONG_URL = os.environ.get("F7T_KONG_URL") COMPUTE_PORT = os.environ.get("F7T_COMPUTE_PORT", 5000) +### SSL parameters +USE_SSL = os.environ.get("F7T_USE_SSL", False) +SSL_CRT = os.environ.get("F7T_SSL_CRT", "") +SSL_KEY = os.environ.get("F7T_SSL_KEY", "") + # SYSTEMS_PUBLIC: list of allowed systems # remove quotes and split into array @@ -74,7 +78,7 @@ def is_jobid(jobid): return True app.logger.error("Wrong SLURM sbatch return string") app.logger.error(f"{jobid} isn't > 0") - + except ValueError as e: app.logger.error("Wrong SLURM sbatch return string") app.logger.error("Couldn't convert to int") @@ -100,7 +104,7 @@ def extract_jobid(outline): if not is_jobid(list_line[-1]): # for compatibility reasons if error, returns original string return outline - + # all clear, conversion is OK jobid = int(list_line[-1]) @@ -182,7 +186,7 @@ def submit_job_task(auth_header, system_name, system_addr, job_file, job_dir, ta jobid = extract_jobid(outlines) - msg = {"result" : "Job submitted", "jobid" : jobid, "jobfile" : f"{job_dir}/{job_file['filename']}"} + msg = {"result" : "Job submitted", "jobid" : jobid} # now look for log and err files location job_extra_info = get_slurm_files(auth_header, system_name, system_addr, task_id, msg) @@ -198,7 +202,7 @@ def submit_job_task(auth_header, system_name, system_addr, job_file, job_dir, ta app.logger.error(e) update_task(task_id, auth_header, async_task.ERROR, e.message) - + #app.logger.info(result) return @@ -273,7 +277,7 @@ def get_slurm_files(auth_header, system_name, system_addr, task_id,job_info,outp return control_info def submit_job_path_task(auth_header,system_name, system_addr,fileName,job_dir, task_id): - + try: # get scopes from token decoded = jwt.decode(auth_header[7:], verify=False) @@ -296,12 +300,12 @@ def 
submit_job_path_task(auth_header,system_name, system_addr,fileName,job_dir, app.logger.info("scope parameters: " + scopes_parameters) - + except Exception as e: app.logger.error(type(e)) - + app.logger.error(e.args) - + action=f"sbatch --chdir={job_dir} {scopes_parameters} -- {fileName}" @@ -323,18 +327,18 @@ def submit_job_path_task(auth_header,system_name, system_addr,fileName,job_dir, return err_msg = resp["msg"] update_task(task_id, auth_header, async_task.ERROR, err_msg) - + jobid = extract_jobid(resp["msg"]) msg = {"result":"Job submitted", "jobid":jobid} - + # now looking for log and err files location job_extra_info = get_slurm_files(auth_header, system_name, system_addr, task_id,msg) update_task(task_id, auth_header,async_task.SUCCESS, job_extra_info,True) - + ## error handler for files above SIZE_LIMIT -> app.config['MAX_CONTENT_LENGTH'] @app.errorhandler(413) @@ -348,9 +352,9 @@ def request_entity_too_large(error): @app.route("/jobs/upload",methods=["POST"]) @check_auth_header def submit_job_upload(): - + auth_header = request.headers[AUTH_HEADER_NAME] - + try: system_name = request.headers["X-Machine-Name"] except KeyError as e: @@ -429,7 +433,7 @@ def submit_job_upload(): args=(auth_header, system_name, system_addr, job_file, job_dir, task_id)) aTask.start() - retval = update_task(task_id, auth_header,async_task.QUEUED, TASKS_URL) + retval = update_task(task_id, auth_header,async_task.QUEUED) task_url = f"{KONG_URL}/tasks/{task_id}" data = jsonify(success="Task created", task_id=task_id, task_url=task_url) @@ -476,8 +480,12 @@ def submit_job_path(): header = {"X-Permission-Denied": "User does not have permissions to access machine or path"} return jsonify(description="Failed to submit job"), 404, header - targetPath = request.form["targetPath"] - + try: + targetPath = request.form["targetPath"] + except KeyError as e: + data = jsonify(description="Failed to submit job", error="'targetPath' parameter not set in request") + return data, 400 + if 
targetPath == None: data = jsonify(description="Failed to submit job", error="'targetPath' parameter not set in request") return data, 400 @@ -486,7 +494,7 @@ def submit_job_path(): data = jsonify(description="Failed to submit job", error="'targetPath' parameter value is empty") return data, 400 - + # checks if targetPath is a valid path for this user in this machine check = is_valid_file(targetPath, auth_header, system_name, system_addr) @@ -498,7 +506,7 @@ def submit_job_path(): # if error in creating task: if task_id == -1: return jsonify(description="Failed to submit job",error='Error creating task'), 400 - + # if targetPath = "/home/testuser/test/sbatch.sh/" # split by / and discard last element (the file name): ['', 'home', 'testuser', 'test'] job_dir_splitted = targetPath.split("/")[:-1] @@ -508,7 +516,7 @@ def submit_job_path(): job_dir_splitted = job_dir_splitted[:-1] job_dir = "/".join(job_dir_splitted) - + try: # asynchronous task creation @@ -598,7 +606,7 @@ def list_jobs(): for jobid in job_aux_list: if not is_jobid(jobid): - return jsonify(error=f"{jobid} is not a valid job ID", description="Failed to retrieve job information"), 400 + return jsonify(error=f"{jobid} is not a valid job ID", description="Failed to retrieve job information"), 400 job_list="--job={jobs}".format(jobs=jobs) except: @@ -615,7 +623,7 @@ def list_jobs(): # if error in creating task: if task_id == -1: return jsonify(description="Failed to retrieve job information",error='Error creating task'), 400 - + update_task(task_id, auth_header, async_task.QUEUED) # asynchronous task creation @@ -705,14 +713,14 @@ def list_job_task(auth_header,system_name, system_addr,action,task_id,pageSize,p data = jobs update_task(task_id, auth_header, async_task.SUCCESS, data, True) - + # Retrieves information from a jobid @app.route("/jobs/",methods=["GET"]) @check_auth_header def list_job(jobid): - + auth_header = request.headers[AUTH_HEADER_NAME] try: @@ -763,7 +771,7 @@ def list_job(jobid): # if 
error in creating task: if task_id == -1: return jsonify(description="Failed to retrieve job information",error='Error creating task'), 400 - + update_task(task_id, auth_header, async_task.QUEUED) # asynchronous task creation @@ -814,7 +822,10 @@ def cancel_job_task(auth_header,system_name, system_addr,action,task_id): # if "error" word appears: if in_str(data,"error"): - update_task(task_id, auth_header, async_task.ERROR, data) + # error message: "scancel: error: Kill job error on job id 5: Invalid job id specified" + # desired output: "Kill job error on job id 5: Invalid job id specified" + err_msg = data[(data.index("error")+7):] + update_task(task_id, auth_header, async_task.ERROR, err_msg) return # otherwise @@ -827,7 +838,7 @@ def cancel_job_task(auth_header,system_name, system_addr,action,task_id): def cancel_job(jobid): auth_header = request.headers[AUTH_HEADER_NAME] - + try: system_name = request.headers["X-Machine-Name"] except KeyError as e: @@ -869,7 +880,7 @@ def cancel_job(jobid): # if error in creating task: if task_id == -1: return jsonify(description="Failed to delete job",error='Error creating task'), 400 - + # asynchronous task creation aTask = threading.Thread(target=cancel_job_task, args=(auth_header, system_name, system_addr, action, task_id)) @@ -1019,8 +1030,8 @@ def acct(): # if error in creating task: if task_id == -1: return jsonify(description="Failed to retrieve account information",error='Error creating task'), 400 - - + + update_task(task_id, auth_header, async_task.QUEUED) # asynchronous task creation @@ -1037,17 +1048,10 @@ def acct(): data = jsonify(description="Failed to retrieve account information",error=e) return data, 400 -# get status for status microservice -# only used by STATUS_IP otherwise forbidden @app.route("/status",methods=["GET"]) def status(): - app.logger.info("Test status of service") - - if request.remote_addr != STATUS_IP: - app.logger.warning("Invalid remote address: {addr}".format(addr=request.remote_addr)) - 
return jsonify(error="Invalid access"), 403 - + # TODO: check compute reservation binary to truthfully respond this request return jsonify(success="ack"), 200 @@ -1069,4 +1073,7 @@ def status(): logger.addHandler(logHandler) # set debug = False, so output goes to log files - app.run(debug=debug, host='0.0.0.0', port=COMPUTE_PORT) + if USE_SSL: + app.run(debug=debug, host='0.0.0.0', port=COMPUTE_PORT, ssl_context=(SSL_CRT, SSL_KEY)) + else: + app.run(debug=debug, host='0.0.0.0', port=COMPUTE_PORT) diff --git a/src/reservations/reservations.py b/src/reservations/reservations.py new file mode 100644 index 00000000..fdd7a871 --- /dev/null +++ b/src/reservations/reservations.py @@ -0,0 +1,544 @@ +# +# Copyright (c) 2019-2021, ETH Zurich. All rights reserved. +# +# Please, refer to the LICENSE file in the root directory. +# SPDX-License-Identifier: BSD-3-Clause +# +from flask import Flask, request, jsonify + +from werkzeug.exceptions import BadRequestKeyError, InternalServerError, MethodNotAllowed + +# task states +import os +import logging +from logging.handlers import TimedRotatingFileHandler +from cscs_api_common import check_auth_header, exec_remote_command, in_str + +import re +import datetime + + +AUTH_HEADER_NAME = 'Authorization' + +RESERVATIONS_PORT = os.environ.get("F7T_RESERVATIONS_PORT", 5050) + +# SYSTEMS: list of ; separated systems allowed +SYSTEMS_PUBLIC = os.environ.get("F7T_SYSTEMS_PUBLIC").strip('\'"').split(";") +# internal machines for file operations +SYS_INTERNALS = os.environ.get("F7T_SYSTEMS_INTERNAL_COMPUTE").strip('\'"').split(";") + +# time out for rsvmgmt command +TIMEOUT = os.environ.get("F7T_UTILITIES_TIMEOUT", 5) + +### SSL parameters +USE_SSL = os.environ.get("F7T_USE_SSL", False) +SSL_CRT = os.environ.get("F7T_SSL_CRT", "") +SSL_KEY = os.environ.get("F7T_SSL_KEY", "") + +RESERVATION_CMD = os.environ.get("F7T_RESERVATION_CMD", "rsvmgmt") + +debug = os.environ.get("F7T_DEBUG_MODE", None) + + +app = Flask(__name__) + +# checks if 
reservation/account name are valid +# accepts identifier names format and includes dash and underscore names. +def check_name(name): + regex="^[a-z_$][-a-z_$0-9]*$" + + # can start with alphabetics letters in caps or not, or underscore, + # can have (after first char) numbers. + + match = re.compile(regex).match(name) + + return bool(match) + + +# parse a string to a int, in order to check if it's valid +# positive number > 0 (also, it should be <= 1000, for number of nodes, but not checking) +def check_number(number): + try: + n=int(number) + if n<1: + return False + except ValueError: + return False + return True + + +# checks if reservation times (endtime/starttime) are correct: +def check_dateTime(dateTime): + try: + # the one format that is valid is YYYY-MM-DDTHH:MM:SS + datetime.datetime.strptime(dateTime, "%Y-%m-%dT%H:%M:%S") + return True + except ValueError: + return False + + +# compare dates to see if d1 0 (value entered:'{numberOfNodes}')"), 400 + except BadRequestKeyError: + return jsonify(error="Error creating reservation", description="'numberOfNodes' form data input missing"), 400 + + # getting nodeType from request form + try: + nodeType = request.form["nodeType"] + if not check_name(nodeType): + return jsonify(error="Error creating reservation", description=f"'nodeType' parameter format is not valid (value entered:'{nodeType}')"), 400 + except BadRequestKeyError: + return jsonify(error="Error creating reservation", description="'nodeType' form data input missing"), 400 + + # getting starttime from request form + try: + starttime = request.form["starttime"] + if not check_dateTime(starttime): + return jsonify(error="Error creating reservation", description=f"'starttime' parameter format is not valid. 
It should be YYYY-MM-DDTHH:MM:SS (value entered:'{starttime}')"), 400 + except BadRequestKeyError: + return jsonify(error="Error creating reservation", description="'starttime' form data input missing"), 400 + + # getting endtime from request form + try: + endtime = request.form["endtime"] + if not check_dateTime(endtime): + return jsonify(error="Error creating reservation", description=f"'endtime' parameter format is not valid. It should be YYYY-MM-DDTHH:MM:SS (value entered:'{endtime}')"), 400 + except BadRequestKeyError: + return jsonify(error="Error creating reservation", description="'endtime' form data input missing"), 400 + + if not check_dateDiff(starttime,endtime): + return jsonify(error="Error creating reservation", description=f"'endtime' occurs before 'starttime' (values entered: endtime='{endtime}' <= starttime='{starttime}')"), 400 + + if not check_actualDate(starttime): + return jsonify(error="Error creating reservation", description=f"'starttime' is in the past (values entered: starttime='{starttime}')"), 400 + + # create a reservation + # rsvmgmt -a unixGroupName numberOfNodes NodeType startDateTime endDateTime [optional reservationName] + action = f"timeout {TIMEOUT} {RESERVATION_CMD} -a {account} {numberOfNodes} {nodeType} {starttime} {endtime} {reservation}" + + #execute command + retval = exec_remote_command(auth_header, system_name, system_addr, action) + + error_str = retval["msg"] + + if retval["error"] != 0: + if retval["error"] == -2: + header = {"X-Machine-Not-Available": "Machine is not available"} + return jsonify(error="Error creating reservation"), 400, header + + if retval["error"] == 124: + header = {"X-Timeout": "Command has finished with timeout signal"} + return jsonify(error="Error creating reservation"), 400, header + + #in case of permission for other user + if in_str(error_str,"Permission") or in_str(error_str,"SystemAdministrator"): + header = {"X-Permission-Denied": "User does not have permissions to access machine or
path"} + return jsonify(error="Error creating reservation"), 404, header + + # otherwise, generic error + error_str = cleanup_rsvmgmt_error(error_str) + + return jsonify(error="Error creating reservation", description=error_str), 400 + + output = retval["msg"] + # Reservation created: {reservation} + + data = jsonify(success=output) + return data, 201 + + + + + +# update status of the task with task_id = id +@app.route("/",methods=["PUT"]) +@check_auth_header +def put(reservation): + + auth_header = request.headers[AUTH_HEADER_NAME] + + # checks if machine name is set + try: + system_name = request.headers["X-Machine-Name"] + except KeyError as e: + app.logger.error("No machinename given") + return jsonify(error="Error updating reservation", description="No machine name given"), 400 + + # PUBLIC endpoints from Kong to users + if system_name not in SYSTEMS_PUBLIC: + header = {"X-Machine-Does-Not-Exist": "Machine does not exist"} + return jsonify(error="Error updating reservation"), 400, header + + # select index in the list corresponding with machine name + system_idx = SYSTEMS_PUBLIC.index(system_name) + system_addr = SYS_INTERNALS[system_idx] + + # checking input data + if not check_name(reservation): + return jsonify(error="Error updating reservation", description=f"'reservation' parameter format is not valid (value entered:'{reservation}')"), 400 + + # getting numberOfNodes from request form + try: + numberOfNodes = request.form["numberOfNodes"] + if not check_number(numberOfNodes): + return jsonify(error="Error updating reservation", description=f"'numberOfNodes' parameter is not valid. 
It should be an integer > 0 (value entered:'{numberOfNodes}')"), 400 + except BadRequestKeyError: + return jsonify(error="Error updating reservation", description="'numberOfNodes' form data input missing"), 400 + + # getting nodeType from request form + try: + nodeType = request.form["nodeType"] + if not check_name(nodeType): + return jsonify(error="Error updating reservation", description=f"'nodeType' parameter format is not valid (value entered:'{nodeType}')"), 400 + except BadRequestKeyError: + return jsonify(error="Error updating reservation", description="'nodeType' form data input missing"), 400 + + # getting starttime from request form + try: + starttime = request.form["starttime"] + if not check_dateTime(starttime): + return jsonify(error="Error updating reservation", description=f"'starttime' parameter format is not valid. It should be YYYY-MM-DDTHH:MM:SS (value entered:'{starttime}')"), 400 + except BadRequestKeyError: + return jsonify(error="Error updating reservation", description="'starttime' form data input missing"), 400 + + # getting endtime from request form + try: + endtime = request.form["endtime"] + if not check_dateTime(endtime): + return jsonify(error="Error updating reservation", description=f"'endtime' parameter format is not valid. 
It should be YYYY-MM-DDTHH:MM:SS (value entered:'{endtime}')"), 400 + except BadRequestKeyError: + return jsonify(error="Error updating reservation", description="'endtime' form data input missing"), 400 + + if not check_dateDiff(starttime,endtime): + return jsonify(error="Error updating reservation", description=f"'endtime' occurs before 'starttime' (values entered: endtime='{endtime}' <= starttime='{starttime}')"), 400 + + if not check_actualDate(starttime): + return jsonify(error="Error updating reservation", description=f"'starttime' is in the past (values entered: starttime='{starttime}')"), 400 + + # Update a reservation + # rsvmgmt -u reservationName numberOfNodes NodeType StartDateTime EndDateTime + action = f"timeout {TIMEOUT} {RESERVATION_CMD} -u {reservation} {numberOfNodes} {nodeType} {starttime} {endtime}" + + #execute command + retval = exec_remote_command(auth_header, system_name, system_addr, action) + error_str = retval["msg"] + + if retval["error"] != 0: + if retval["error"] == -2: + header = {"X-Machine-Not-Available": "Machine is not available"} + return jsonify(error="Error updating reservation"), 400, header + + if retval["error"] == 124: + header = {"X-Timeout": "Command has finished with timeout signal"} + return jsonify(error="Error updating reservation"), 400, header + + #in case of permission for other user + if in_str(error_str,"Permission") or in_str(error_str,"SystemAdministrator"): + header = {"X-Permission-Denied": "User does not have permissions to access machine or path"} + return jsonify(error="Error updating reservation"), 404, header + + # otherwise, generic error + # First cleanup "timeout:" error string.
+ # Then if it comes from rsvmgmt this is the format + # rsvmgmt: Error: You are not a member of the $1 project" + # let's extract "rsvmgmt: Error: " string so it reports "You are not a member of the $1 project" + + error_str = error_str.lstrip("timeout:") + error_str = error_str.lstrip("rsvmgmt:") + error_str = error_str.lstrip("Error: ") + + return jsonify(error="Error updating reservation", description=error_str), 400 + + output = retval["msg"] + # Reservation updated + + data = jsonify(success=output) + return data, 200 + + +def cleanup_rsvmgmt_error(error_msg): + """ + Helper to cleanup errors from rsvmgmt output + """ + # in lack of builtin remove_prefix (python >= 3.9) + if error_msg.startswith("timeout:"): + error_msg = error_msg[len("timeout:"):].strip() + if error_msg.startswith("rsvmgmt:"): + error_msg = error_msg[len("rsvmgmt:"):].strip() + if error_msg.startswith("Error:"): + error_msg = error_msg[len("Error:"):].strip() + return error_msg + + +@app.route("/",methods=["DELETE"]) +@check_auth_header +def delete(reservation): + + auth_header = request.headers[AUTH_HEADER_NAME] + + # checks if machine name is set + try: + system_name = request.headers["X-Machine-Name"] + except KeyError as e: + app.logger.error("No machinename given") + return jsonify(error="Error deleting reservation", description="No machine name given"), 400 + + # PUBLIC endpoints from Kong to users + if system_name not in SYSTEMS_PUBLIC: + header = {"X-Machine-Does-Not-Exist": "Machine does not exist"} + return jsonify(error="Error deleting reservation"), 400, header + + # select index in the list corresponding with machine name + system_idx = SYSTEMS_PUBLIC.index(system_name) + system_addr = SYS_INTERNALS[system_idx] + + # checking input data + if not check_name(reservation): + return jsonify(error="Error deleting reservation", description=f"'reservation' parameter format is not valid (value entered:'{reservation}')"), 400 + + # Update a reservation + # rsvmgmt -d reservationName + 
action = f"timeout {TIMEOUT} {RESERVATION_CMD} -d {reservation}" + + #execute command + retval = exec_remote_command(auth_header, system_name, system_addr, action) + + error_str = retval["msg"] + + if retval["error"] != 0: + if retval["error"] == -2: + header = {"X-Machine-Not-Available": "Machine is not available"} + return jsonify(error="Error deleting reservation"), 400, header + + if retval["error"] == 124: + header = {"X-Timeout": "Command has finished with timeout signal"} + return jsonify(error="Error deleting reservation"), 400, header + + #in case of permission for other user + if in_str(error_str,"Permission") or in_str(error_str,"SystemAdministrator"): + header = {"X-Permission-Denied": "User does not have permissions to access machine or path"} + return jsonify(error="Error deleting reservation"), 404, header + + # otherwise, generic error + error_str = cleanup_rsvmgmt_error(error_str) + + return jsonify(error="Error deleting reservation", description=error_str), 400 + + output = retval["msg"] + # "rsvmgmt: Reservation csstaff_32 removed", removing "rsvmgmt: " + + output = output.lstrip("rsvmgmt: ") + + data = jsonify(success=output) + return data, 204 + +@app.route("/status",methods=["GET"]) +def status(): + app.logger.info("Test status of service") + # TODO: check backend reservation binary to truthfully respond this request + return jsonify(success="ack"), 200 + +@app.errorhandler(MethodNotAllowed) +def page_not_found(e): + return jsonify (error='Method not allowed', description=e.description), 405 + +@app.errorhandler(InternalServerError) +def internal_error(e): + app.logger.error(e.description) + app.logger.error(e.original_exception) + return jsonify(error='FirecREST Internal error', description=e.description), 500 + + +if __name__ == "__main__": + # log handler definition + # timed rotation: 1 (interval) rotation per day (when="D") + logHandler = TimedRotatingFileHandler('/var/log/reservations.log', when='D', interval=1) + + logFormatter = 
logging.Formatter('%(asctime)s,%(msecs)d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s', + '%Y-%m-%d:%H:%M:%S') + logHandler.setFormatter(logFormatter) + logHandler.setLevel(logging.DEBUG) + + # get app log (Flask+werkzeug+python) + logger = logging.getLogger() + + # set handler to logger + logger.addHandler(logHandler) + + # set to debug = False, so stderr and stdout go to log file + + # run app + if USE_SSL: + app.run(debug=debug, host='0.0.0.0', use_reloader=False, port=RESERVATIONS_PORT, ssl_context=(SSL_CRT, SSL_KEY)) + else: + app.run(debug=debug, host='0.0.0.0', use_reloader=False, port=RESERVATIONS_PORT) diff --git a/src/status/status.py b/src/status/status.py index e02a4929..0feea74e 100644 --- a/src/status/status.py +++ b/src/status/status.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2019-2020, ETH Zurich. All rights reserved. +# Copyright (c) 2019-2021, ETH Zurich. All rights reserved. # # Please, refer to the LICENSE file in the root directory. # SPDX-License-Identifier: BSD-3-Clause @@ -33,6 +33,12 @@ SERVICES_DICT = {} +### SSL parameters +USE_SSL = os.environ.get("F7T_USE_SSL", False) +SSL_CRT = os.environ.get("F7T_SSL_CRT", "") +SSL_KEY = os.environ.get("F7T_SSL_KEY", "") + + ### parameters UTILITIES_MAX_FILE_SIZE = os.environ.get("F7T_UTILITIES_MAX_FILE_SIZE") UTILITIES_TIMEOUT = os.environ.get("F7T_UTILITIES_TIMEOUT") @@ -63,7 +69,7 @@ def test_service(servicename, status_list): try: serviceurl = SERVICES_DICT[servicename] #timeout set to 5 seconds - req = requests.get("{url}/status".format(url=serviceurl), timeout=5) + req = requests.get("{url}/status".format(url=serviceurl), timeout=5, verify= (SSL_CRT if USE_SSL else False)) app.logger.info("Return code: {status_code}".format(status_code=req.status_code)) @@ -379,4 +385,7 @@ def parameters(): logger.addHandler(logHandler) # run app - app.run(debug=debug, host='0.0.0.0', port=STATUS_PORT) + if USE_SSL: + app.run(debug=debug, host='0.0.0.0', port=STATUS_PORT, ssl_context=(SSL_CRT, SSL_KEY)) + 
else: + app.run(debug=debug, host='0.0.0.0', port=STATUS_PORT) diff --git a/src/storage/keystone.py b/src/storage/keystone.py index 0f93f19f..b746609f 100644 --- a/src/storage/keystone.py +++ b/src/storage/keystone.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2019-2020, ETH Zurich. All rights reserved. +# Copyright (c) 2019-2021, ETH Zurich. All rights reserved. # # Please, refer to the LICENSE file in the root directory. # SPDX-License-Identifier: BSD-3-Clause diff --git a/src/storage/objectstorage.py b/src/storage/objectstorage.py index 3e7dd369..ea329d6d 100644 --- a/src/storage/objectstorage.py +++ b/src/storage/objectstorage.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2019-2020, ETH Zurich. All rights reserved. +# Copyright (c) 2019-2021, ETH Zurich. All rights reserved. # # Please, refer to the LICENSE file in the root directory. # SPDX-License-Identifier: BSD-3-Clause diff --git a/src/storage/s3v2OS.py b/src/storage/s3v2OS.py index a5ea9af9..3db9d5d2 100644 --- a/src/storage/s3v2OS.py +++ b/src/storage/s3v2OS.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2019-2020, ETH Zurich. All rights reserved. +# Copyright (c) 2019-2021, ETH Zurich. All rights reserved. # # Please, refer to the LICENSE file in the root directory. 
# SPDX-License-Identifier: BSD-3-Clause @@ -307,35 +307,32 @@ def create_upload_form(self, sourcepath, containername, prefix, ttl, max_file_si # signature will be Bytes type in Pytho3, so it needs to be decoded to str again sig = sig.decode('latin-1') - - # sig = base64.b64encode(hmac.new(self.passwd, string_to_sign, hashlib.sha1).digest()) - - # print("(result sig: {})".format(sig)) - - # msg = "curl -i -X POST {url}/{containername}/{prefix} -F AWSAccessKeyId={awsAccessKeyId} -F Signature={signature} " \ - # "-F Expires={expires} -F file=@{file}".format( - # url=self.url, containername=containername, prefix=prefix, - # awsAccessKeyId=self.user, signature=sig, expires=expires,file=sourcepath) - + url = "{url}/{containername}/{prefix}/{objectname}".format( url=self.url, containername=containername, prefix=prefix, objectname=objectname) - data = {} - # - data["url"] = url - data["method"] = httpVerb - data["AWSAccessKeyId"] = self.user - data["Signature"] = sig - data["Expires"] = expires - data["sourcepath"] = sourcepath - - command = "curl -i -X {httpVerb} '{url}?AWSAccessKeyId={AWSAccessKeyId}&Signature={Signature}&Expires={Expires}' -T {sourcepath}".format( - httpVerb=httpVerb, sourcepath=sourcepath, url=url, AWSAccessKeyId=data["AWSAccessKeyId"], - Signature=urllib.parse.quote(data["Signature"]), Expires=data["Expires"]) - - data["command"] = command + retval = {} + + retval["parameters"] = { + + "url": url, + "method": httpVerb, + "params": { + "AWSAccessKeyId": self.user, + "Signature": sig, + "Expires": expires + }, + "files": sourcepath, + "json": {}, + "data": {}, + "headers": {} + } + + command = f"curl -i -X {httpVerb} '{url}?AWSAccessKeyId={self.user}&Signature={sig}&Expires={expires}' -T {sourcepath}" + + retval["command"] = command - return data + return retval def create_temp_url(self, containername, prefix, objectname, ttl): diff --git a/src/storage/s3v4OS.py b/src/storage/s3v4OS.py index 107e6a05..213f07c3 100644 --- a/src/storage/s3v4OS.py +++ 
b/src/storage/s3v4OS.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2019-2020, ETH Zurich. All rights reserved. +# Copyright (c) 2019-2021, ETH Zurich. All rights reserved. # # Please, refer to the LICENSE file in the root directory. # SPDX-License-Identifier: BSD-3-Clause @@ -380,28 +380,38 @@ def create_upload_form(self, sourcepath, containername, prefix, ttl, max_file_si signature = hmac.new(signing_key, base64Policy.encode('utf-8'), hashlib.sha256).hexdigest() - fields = { - "key": prefix + "/" + objectname, - "x-amz-algorithm": algorithm, - "x-amz-credential": credentials, - "x-amz-date": amzdate, - "policy": base64Policy, - "x-amz-signature" : signature + + retval = {} + + retval["parameters"] = { + + "method": httpVerb, + "url": f"{endpoint_url}/{containername}", + "data": { + "key": prefix + "/" + objectname, + "x-amz-algorithm": algorithm, + "x-amz-credential": credentials, + "x-amz-date": amzdate, + "policy": base64Policy, + "x-amz-signature" : signature + }, + "files": sourcepath, + "json" : {}, + "params": {}, + "headers": {} } command = f"curl -i -X {httpVerb} {endpoint_url}/{containername}" - for k,v in fields.items(): + for k,v in retval["parameters"]["data"].items(): command += f" -F '{k}={v}'" - command+=f" -F file=@{sourcepath}" - - fields["command"] = command - fields["method"] = httpVerb - fields["url"] = f"{endpoint_url}/{containername}" + command+=f" -F file=@{retval['parameters']['files']}" - return fields + retval["command"] = command + return retval + def create_temp_url(self, containername, prefix, objectname, ttl): diff --git a/src/storage/storage.py b/src/storage/storage.py index c74b87d3..158d72fd 100644 --- a/src/storage/storage.py +++ b/src/storage/storage.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2019-2020, ETH Zurich. All rights reserved. +# Copyright (c) 2019-2021, ETH Zurich. All rights reserved. # # Please, refer to the LICENSE file in the root directory. 
# SPDX-License-Identifier: BSD-3-Clause @@ -21,7 +21,7 @@ from cscs_api_common import exec_remote_command from cscs_api_common import create_certificate from cscs_api_common import in_str -from cscs_api_common import is_valid_file, is_valid_dir +from cscs_api_common import is_valid_file, is_valid_dir, check_command_error # job_time_checker for correct SLURM job time in /xfer-internal tasks import job_time @@ -41,7 +41,6 @@ CERTIFICATOR_URL = os.environ.get("F7T_CERTIFICATOR_URL") TASKS_URL = os.environ.get("F7T_TASKS_URL") COMPUTE_URL = os.environ.get("F7T_COMPUTE_URL") -STATUS_IP = os.environ.get("F7T_STATUS_IP") KONG_URL = os.environ.get("F7T_KONG_URL") STORAGE_PORT = os.environ.get("F7T_STORAGE_PORT", 5000) @@ -66,6 +65,9 @@ # Scheduller partition used for internal transfers XFER_PARTITION = os.environ.get("F7T_XFER_PARTITION", "").strip('\'"') +# --account parameter needed in sbatch? +USE_SLURM_ACCOUNT = os.environ.get("F7T_USE_SLURM_ACCOUNT", False) + # Machine used for external transfers EXT_TRANSFER_MACHINE_PUBLIC=os.environ.get("F7T_EXT_TRANSFER_MACHINE_PUBLIC", "").strip('\'"') @@ -88,9 +90,17 @@ # for use on signature of URL it must be in bytes (MB*1024*1024 = Bytes) STORAGE_MAX_FILE_SIZE *= 1024*1024 +UTILITIES_TIMEOUT = int(os.environ.get("F7T_UTILITIES_TIMEOUT", "5").strip('\'"')) + STORAGE_POLLING_INTERVAL = int(os.environ.get("F7T_STORAGE_POLLING_INTERVAL", "60").strip('\'"')) CERT_CIPHER_KEY = os.environ.get("F7T_CERT_CIPHER_KEY", "").strip('\'"').encode('utf-8') +### SSL parameters +USE_SSL = os.environ.get("F7T_USE_SSL", False) +SSL_CRT = os.environ.get("F7T_SSL_CRT", "") +SSL_KEY = os.environ.get("F7T_SSL_KEY", "") +# verify signed SSL certificates +SSL_SIGNED = os.environ.get("F7T_SSL_SIGNED", False) # aynchronous tasks: upload & download --> http://TASKS_URL # {task_id : AsyncTask} @@ -148,7 +158,7 @@ def os_to_fs(task_id): try: app.logger.info(upl_file["msg"]) action = upl_file["msg"]["action"] - + # certificate is encrypted with 
CERT_CIPHER_KEY key # here is decrypted cert = upl_file["msg"]["cert"] @@ -186,9 +196,9 @@ def os_to_fs(task_id): # if no error, then download is complete if result["error"] == 0: - + update_task(task_id, None, async_task.ST_DWN_END) - + # No need to delete the dictionary, it will be cleaned on next iteration # delete upload request @@ -197,7 +207,7 @@ def os_to_fs(task_id): # must be deleted after object is moved to storage # staging.delete_object(containername=username,prefix=task_id,objectname=objectname) # for big files delete_object consumes a long time and often gives a TimeOut error between system and staging area - # Therefore, using delete_object_after a few minutes (in this case 5 minutes) will trigger internal staging area + # Therefore, using delete_object_after a few minutes (in this case 5 minutes) will trigger internal staging area # mechanism to delete the file automatically and without a need of a connection staging.delete_object_after(containername=username,prefix=task_id,objectname=objectname, ttl = time.time()+600) @@ -221,17 +231,17 @@ def check_upload_files(): global staging while True: - + # Get updated task status from Tasks microservice DB backend (TaskPersistence) get_upload_unfinished_tasks() # Timestampo for logs timestamp = time.asctime( time.localtime(time.time()) ) - + app.logger.info(f"Check files in Object Storage {timestamp}") app.logger.info(f"Pendings uploads: {len(uploaded_files)}") - + # create STATIC auxiliary upload list in order to avoid "RuntimeError: dictionary changed size during iteration" # (this occurs since upload_files dictionary is shared between threads and since Python3 dict.items() trigger that error) upl_list= [(task_id, upload) for task_id,upload in uploaded_files.items()] @@ -239,11 +249,11 @@ def check_upload_files(): for task_id,upload in upl_list: #checks if file is ready or not for download to FileSystem try: - + task_status = async_task.status_codes[upload['status']] - + app.logger.info(f"Status of 
{task_id}: {task_status}") - + #if upload["status"] in [async_task.ST_URL_REC,async_task.ST_DWN_ERR] : if upload["status"] == async_task.ST_URL_REC: app.logger.info(f"Task {task_id} -> File ready to upload or already downloaded") @@ -254,7 +264,7 @@ def check_upload_files(): containername = upl["user"] prefix = task_id objectname = upl["source"] - + if not staging.is_object_created(containername,prefix,objectname): app.logger.info(f"{containername}/{prefix}/{objectname} isn't created in staging area, continue polling") continue @@ -285,10 +295,10 @@ def check_upload_files(): os_to_fs_task = threading.Thread(target=os_to_fs,args=(task_id,)) os_to_fs_task.start() except Exception as e: - + app.logger.error(type(e), e) continue - + time.sleep(STORAGE_POLLING_INTERVAL) @@ -373,7 +383,7 @@ def download_task(auth_header,system_name, system_addr,sourcePath,task_id): def download_request(): auth_header = request.headers[AUTH_HEADER_NAME] - + system_addr = EXT_TRANSFER_MACHINE_INTERNAL system_name = EXT_TRANSFER_MACHINE_PUBLIC sourcePath = request.form["sourcePath"] # path file in cluster @@ -388,7 +398,7 @@ def download_request(): if not check["result"]: return jsonify(description="sourcePath error"), 400, check["headers"] - + # obtain new task from Tasks microservice task_id = create_task(auth_header, service="storage") @@ -397,7 +407,7 @@ def download_request(): if task_id == -1: data = jsonify(error="Couldn't create task") return data, 400 - + # asynchronous task creation aTask = threading.Thread(target=download_task, args=(auth_header, system_name, system_addr, sourcePath, task_id)) @@ -435,10 +445,10 @@ def invalidate_request(): # search if task belongs to the user task_status = get_task_status(task_id, auth_header) - + if task_status == -1: return jsonify(error="Invalid X-Task-Id"), 400 - + containername = get_username(auth_header) prefix = task_id @@ -459,7 +469,7 @@ def invalidate_request(): - + # async task for upload large files # user: user in the posix file 
system @@ -525,11 +535,10 @@ def upload_task(auth_header,system_name, system_addr,targetPath,sourcePath,task_ download_url = staging.create_temp_url(container_name, object_prefix, fileName, STORAGE_TEMPURL_EXP_TIME) # create certificate for later download from OS to filesystem - app.logger.info("Creating certificate for later download") - options = f"-q -O {targetPath}/{fileName} -- '{download_url}'" + app.logger.info("Creating certificate for later download") + options = f"-s -G -o {targetPath}/{fileName} -- '{download_url}'" exp_time = STORAGE_TEMPURL_EXP_TIME - certs = create_certificate(auth_header, system_name, system_addr, "wget", options, exp_time) - # certs = create_certificates(auth_header,system,command="wget",options=urllib.parse.quote(options),exp_time=STORAGE_TEMPURL_EXP_TIME) + certs = create_certificate(auth_header, system_name, system_addr, "curl", options, exp_time) if not certs[0]: data = uploaded_files[task_id] @@ -546,7 +555,7 @@ def upload_task(auth_header,system_name, system_addr,targetPath,sourcePath,task_ # key_priv = file_to_str(fileName=certs[2]) temp_dir = certs[3] - # encrypt certificate with CERT_CIPHER_KEY key + # encrypt certificate with CERT_CIPHER_KEY key cipher = Fernet(CERT_CIPHER_KEY) # data to be encrypted should be encoded to bytes # in order to save it as json, the cert encrypted should be decoded to string @@ -554,14 +563,14 @@ def upload_task(auth_header,system_name, system_addr,targetPath,sourcePath,task_ resp["download_url"] = download_url - resp["action"] = f"wget {options}" + resp["action"] = f"curl {options}" resp["cert"] = [cert_pub_enc, temp_dir] data["msg"] = resp data["status"] = async_task.ST_URL_REC app.logger.info("Cert and url created correctly") - + update_task(task_id,auth_header,async_task.ST_URL_REC,data,is_json=True) return @@ -571,7 +580,7 @@ def upload_task(auth_header,system_name, system_addr,targetPath,sourcePath,task_ @app.route("/xfer-external/upload",methods=["POST"]) @check_auth_header def 
upload_request(): - + auth_header = request.headers[AUTH_HEADER_NAME] system_addr = EXT_TRANSFER_MACHINE_INTERNAL @@ -590,7 +599,7 @@ def upload_request(): data = jsonify(error="Target path not set in request") return data, 400 - + if sourcePath == None or sourcePath == "": data = jsonify(error="Source path not set in request") return data, 400 @@ -606,7 +615,7 @@ def upload_request(): if task_id == -1: return jsonify(error="Error creating task"), 400 - + # asynchronous task creation try: @@ -629,23 +638,6 @@ def upload_request(): return data, 400 - -# use wget to download file from download_url created with swift -def get_file_from_storage(auth_header,system_name, system_addr,path,download_url,fileName): - - app.logger.info(f"Trying downloading {download_url} from Object Storage to {system_name}") - - - # wget to be executed on cluster side: - action = f"wget -q -O {path}/{fileName} -- \"{download_url}\" " - - app.logger.info(action) - - retval = exec_remote_command(auth_header,system_name, system_addr,action) - - return retval - - ## Internal Transfer MicroServices: ## cp / rm / mv / rsync using Jobs microservice @@ -659,7 +651,8 @@ def get_file_from_storage(auth_header,system_name, system_addr,path,download_url # jobName = --job-name parameter to be used on sbatch command # jobTime = --time parameter to be used on sbatch command # stageOutJobId = value to set in --dependency:afterok parameter -def exec_internal_command(auth_header,command,sourcePath, targetPath, jobName, jobTime, stageOutJobId): +# account = value to set in --account parameter +def exec_internal_command(auth_header,command,sourcePath, targetPath, jobName, jobTime, stageOutJobId, account): action = "{command} {sourcePath} {targetPath}".\ @@ -681,6 +674,9 @@ def exec_internal_command(auth_header,command,sourcePath, targetPath, jobName, j if stageOutJobId != None: sbatch_file.write("#SBATCH --dependency=afterok:{stageOutJobId}\n".format(stageOutJobId=stageOutJobId)) + if account != None: + 
app.logger.info(account) + sbatch_file.write(f"#SBATCH --account={account}") sbatch_file.write("\n") sbatch_file.write("echo -e \"$SLURM_JOB_NAME started on $(date): {action}\"\n".format(action=action)) @@ -694,7 +690,7 @@ def exec_internal_command(auth_header,command,sourcePath, targetPath, jobName, j result = {"error": 1, "msg":ioe.message} return result - + # create xfer job resp = create_xfer_job(STORAGE_JOBS_MACHINE, auth_header, td + "/sbatch-job.sh") @@ -736,15 +732,20 @@ def internal_rm(): def internal_operation(request, command): auth_header = request.headers[AUTH_HEADER_NAME] - + + system_idx = SYSTEMS_PUBLIC.index(STORAGE_JOBS_MACHINE) + system_addr = SYS_INTERNALS[system_idx] + system_name = STORAGE_JOBS_MACHINE + try: targetPath = request.form["targetPath"] # path to save file in cluster if targetPath == "": - return jsonify(error="targetPath is empty"), 400 + return jsonify(error="targetPath is empty"), 400 except: app.logger.error("targetPath not specified") return jsonify(error="targetPath not specified"), 400 + # using actual_command to add options to check sanity of the command to be executed actual_command = "" if command in ['cp', 'mv', 'rsync']: @@ -755,6 +756,30 @@ def internal_operation(request, command): except: app.logger.error("sourcePath not specified") return jsonify(error="sourcePath not specified"), 400 + + # checks if file to copy, move or rsync (targetPath) is a valid path + # remove the last part of the path (after last "/" char) to check if the dir can be written by user + + _targetPath = targetPath.split("/")[:-1] + _targetPath = "/".join(_targetPath) + + app.logger.info(f"_targetPath={_targetPath}") + + + check_dir = is_valid_dir(_targetPath, auth_header, system_name, system_addr) + + if not check_dir["result"]: + return jsonify(description="targetPath error"), 400, check_dir["headers"] + + check_file = is_valid_file(sourcePath, auth_header, system_name, system_addr) + + if not check_file["result"]: + check_dir = 
is_valid_dir(sourcePath, auth_header, system_name, system_addr) + + if not check_dir["result"]: + return jsonify(description="sourcePath error"), 400, check_dir["headers"] + + if command == "cp": actual_command = "cp --force -dR --preserve=all -- " elif command == "mv": @@ -763,6 +788,15 @@ def internal_operation(request, command): actual_command = "rsync -av -- " elif command == "rm": # for 'rm' there's no source, set empty to call exec_internal_command(...) + # checks if file or dir to delete (targetPath) is a valid path or valid directory + check_file = is_valid_file(targetPath, auth_header, system_name, system_addr) + + if not check_file["result"]: + check_dir = is_valid_dir(targetPath, auth_header, system_name, system_addr) + + if not check_dir["result"]: + return jsonify(description="targetPath error"), 400, check_dir["headers"] + sourcePath = "" actual_command = "rm -rf -- " else: @@ -793,6 +827,25 @@ def internal_operation(request, command): system_idx = SYSTEMS_PUBLIC.index(STORAGE_JOBS_MACHINE) system_addr = SYS_INTERNALS[system_idx] + app.logger.info(f"USE_SLURM_ACCOUNT: {USE_SLURM_ACCOUNT}") + # get "account" parameter, if not found, it is obtained from "id" command + try: + account = request.form["account"] + except: + if USE_SLURM_ACCOUNT: + username = get_username(auth_header) + + id_command = f"timeout {UTILITIES_TIMEOUT} id -gn -- {username}" + resp = exec_remote_command(auth_header, STORAGE_JOBS_MACHINE, system_addr, id_command) + if resp["error"] != 0: + retval = check_command_error(resp["msg"], resp["error"], f"{command} job") + + return jsonify(description=f"Failed to submit {command} job", error=retval["description"]), retval["status_code"], retval["header"] + + account = resp["msg"] + else: + account = None + # check if machine is accessible by user: # exec test remote command resp = exec_remote_command(auth_header, STORAGE_JOBS_MACHINE, system_addr, "true") @@ -806,7 +859,7 @@ def internal_operation(request, command): header = 
{"X-Permission-Denied": "User does not have permissions to access machine or path"} return jsonify(description=f"Failed to submit {command} job"), 404, header - retval = exec_internal_command(auth_header, actual_command, sourcePath, targetPath, jobName, jobTime, stageOutJobId) + retval = exec_internal_command(auth_header, actual_command, sourcePath, targetPath, jobName, jobTime, stageOutJobId, account) # returns "error" key or "success" key try: @@ -833,7 +886,7 @@ def create_xfer_job(machine,auth_header,fileName): try: req = requests.post("{compute_url}/jobs/upload". format(compute_url=COMPUTE_URL), - files=files, headers={AUTH_HEADER_NAME: auth_header, "X-Machine-Name":machine}) + files=files, headers={AUTH_HEADER_NAME: auth_header, "X-Machine-Name":machine}, verify= (SSL_CRT if USE_SSL else False)) retval = json.loads(req.text) if not req.ok: @@ -847,18 +900,11 @@ def create_xfer_job(machine,auth_header,fileName): -# get status for status microservice -# only used by STATUS_IP otherwise forbidden - @app.route("/status",methods=["GET"]) def status(): app.logger.info("Test status of service") - - if request.remote_addr != STATUS_IP: - app.logger.warning("Invalid remote address: {addr}".format(addr=request.remote_addr)) - return jsonify(error="Invalid access"), 403 - + # TODO: check backend storage service to truthfully respond this request return jsonify(success="ack"), 200 @@ -916,17 +962,17 @@ def get_upload_unfinished_tasks(): # cleanup upload dictionary global uploaded_files uploaded_files = {} - - + + app.logger.info("Staging Area Used: {}".format(staging.url)) app.logger.info("ObjectStorage Technology: {}".format(staging.get_object_storage())) - + try: # query Tasks microservice for previous tasks. 
Allow 30 seconds to answer # only unfinished upload process status_code = [async_task.ST_URL_ASK, async_task.ST_URL_REC, async_task.ST_UPL_CFM, async_task.ST_DWN_BEG, async_task.ST_DWN_ERR] - retval=requests.get(f"{TASKS_URL}/taskslist", json={"service": "storage", "status_code":status_code}, timeout=30) + retval=requests.get(f"{TASKS_URL}/taskslist", json={"service": "storage", "status_code":status_code}, timeout=30, verify=(SSL_CRT if USE_SSL else False)) if not retval.ok: app.logger.error("Error getting tasks from Tasks microservice") @@ -947,7 +993,7 @@ def get_upload_unfinished_tasks(): n_tasks = 0 for key,task in queue_tasks.items(): - + task = json.loads(task) # iterating over queue_tasls @@ -993,7 +1039,7 @@ def init_storage(): create_staging() get_upload_unfinished_tasks() - + if __name__ == "__main__": @@ -1009,7 +1055,7 @@ def init_storage(): # get app log (Flask+werkzeug+python) logger = logging.getLogger() - + # set handler to logger logger.addHandler(logHandler) @@ -1021,4 +1067,7 @@ def init_storage(): upload_check.start() - app.run(debug=debug, host='0.0.0.0', use_reloader=False, port=STORAGE_PORT) + if USE_SSL: + app.run(debug=debug, host='0.0.0.0', use_reloader=False, port=STORAGE_PORT, ssl_context=(SSL_CRT, SSL_KEY)) + else: + app.run(debug=debug, host='0.0.0.0', use_reloader=False, port=STORAGE_PORT) diff --git a/src/storage/swiftOS.py b/src/storage/swiftOS.py index ee9e977f..1100911a 100644 --- a/src/storage/swiftOS.py +++ b/src/storage/swiftOS.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2019-2020, ETH Zurich. All rights reserved. +# Copyright (c) 2019-2021, ETH Zurich. All rights reserved. # # Please, refer to the LICENSE file in the root directory. 
# SPDX-License-Identifier: BSD-3-Clause @@ -224,30 +224,34 @@ def create_upload_form(self,sourcepath,containername,prefix,ttl,max_file_size): signature = hmac.new(secret, hmac_body, sha1).hexdigest() # added OBJECT_PREFIX as dir_[task_id] in order to become unique the upload instead of user/filename - command = "curl -i {swift_url}/{swift_api_version}/{swift_account}/{containername}/{prefix}/" \ - " -X POST " \ - "-F max_file_size={max_file_size} -F max_file_count={max_file_count} " \ - "-F expires={expires} -F signature={signature} " \ - "-F redirect={redirect} -F file=@{sourcepath} ".format( - swift_url=swift_url, swift_api_version=swift_version, swift_account=swift_account, - containername=containername, prefix=prefix, max_file_size=max_file_size, - max_file_count=max_file_count, - expires=expires, signature=signature, redirect=redirect, sourcepath=sourcepath) + command = f"curl -i {swift_url}/{swift_version}/{swift_account}/{containername}/{prefix}/" \ + f" -X POST " \ + f"-F max_file_size={max_file_size} -F max_file_count={max_file_count} " \ + f"-F expires={expires} -F signature={signature} " \ + f"-F redirect={redirect} -F file=@{sourcepath} " + + + retval = {} + + retval["parameters"] = { + "method": "POST", + "url": f"{swift_url}/{swift_version}/{swift_account}/{containername}/{prefix}/", + "data": { + "max_file_size": max_file_size, + "max_file_count": max_file_count, + "expires": expires, + "signature": signature, + "redirect": redirect, + }, + "files": sourcepath, + "json": {}, + "headers": {}, + "params": {} + + } - - - retval = dict() - - retval["method"] = "POST" retval["command"] = command - retval["url"] = "{swift_url}/{swift_api_version}/{swift_account}/{containername}/{prefix}/".format(swift_url=swift_url, swift_api_version=swift_version, swift_account=swift_account, - containername=containername, prefix=prefix) - retval["max_file_size"] = max_file_size - retval["max_file_count"] = max_file_count - retval["expires"] = expires - 
retval["signature"] = signature - retval["redirect"] = redirect - retval["sourcepath"] = sourcepath + return retval diff --git a/src/tasks/tasks.py b/src/tasks/tasks.py index 808a0e0e..8fa39591 100644 --- a/src/tasks/tasks.py +++ b/src/tasks/tasks.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2019-2020, ETH Zurich. All rights reserved. +# Copyright (c) 2019-2021, ETH Zurich. All rights reserved. # # Please, refer to the LICENSE file in the root directory. # SPDX-License-Identifier: BSD-3-Clause @@ -17,7 +17,6 @@ AUTH_HEADER_NAME = 'Authorization' -STATUS_IP = os.environ.get("F7T_STATUS_IP") STORAGE_IP = os.environ.get("F7T_STORAGE_IP") COMPUTE_IP = os.environ.get("F7T_COMPUTE_IP") KONG_URL = os.environ.get("F7T_KONG_URL") @@ -29,6 +28,11 @@ PERSIST_PORT = os.environ.get("F7T_PERSIST_PORT") PERSIST_PWD = os.environ.get("F7T_PERSIST_PWD") +### SSL parameters +USE_SSL = os.environ.get("F7T_USE_SSL", False) +SSL_CRT = os.environ.get("F7T_SSL_CRT", "") +SSL_KEY = os.environ.get("F7T_SSL_KEY", "") + # expire time in seconds, for squeue or sacct tasks: default: 24hours = 86400 secs COMPUTE_TASK_EXP_TIME = os.environ.get("F7T_COMPUTE_TASK_EXP_TIME", 86400) @@ -58,10 +62,10 @@ def init_queue(): # dictionary: [task_id] = {hash_id,status_code,user,data} task_list = persistence.get_all_tasks(r) - + # key = task_id ; values = {status_code,user,data} for rid, value in task_list.items(): - + # task_list has id with format task_id, ie: task_2 # therefore it must be splitted by "_" char: task_id = rid.split("_")[1] @@ -75,7 +79,7 @@ def init_queue(): t.set_status(status,data) tasks[t.hash_id] = t - + @app.route("/",methods=["GET"]) @check_auth_header def list_tasks(): @@ -89,7 +93,7 @@ def list_tasks(): for task_id,task in tasks.items(): if task.user == username: user_tasks[task_id] = task.get_status() - + data = jsonify(tasks=user_tasks) return data, 200 @@ -103,8 +107,9 @@ def create_task(): logging.info('debug: tasks: create_task: remote_address: ' + remote_addr) # checks if request 
comes from allowed microservices - if remote_addr not in [COMPUTE_IP, STORAGE_IP]: - return jsonify(description="Invalid request address"), 403 + if not debug and remote_addr not in [COMPUTE_IP, STORAGE_IP]: + msg = f"Invalid remote address: {remote_addr}" + return jsonify(error=msg), 403 # checks if request has service header try: @@ -152,7 +157,7 @@ def create_task(): app.logger.info("New task created: {hash_id}".format(hash_id=t.hash_id)) app.logger.info(t.get_status()) task_url = "{KONG_URL}/tasks/{hash_id}".format(KONG_URL=KONG_URL,hash_id=t.hash_id) - + data = jsonify(hash_id=t.hash_id, task_url=task_url) return data, 201 @@ -165,7 +170,7 @@ def create_task(): def get_task(id): auth_header = request.headers[AUTH_HEADER_NAME] - + # getting username from auth_header username = get_username(auth_header) @@ -194,8 +199,9 @@ def update_task(id): remote_addr = request.remote_addr # checks if request comes from allowed microservices - if remote_addr not in [COMPUTE_IP, STORAGE_IP]: - return jsonify(description="Invalid request address"), 403 + if not debug and remote_addr not in [COMPUTE_IP, STORAGE_IP]: + msg = f"Invalid remote address: {remote_addr}" + return jsonify(error=msg), 403 if request.is_json: @@ -203,14 +209,14 @@ def update_task(id): data = request.get_json(force=True) status=data["status"] msg=data["msg"] - + except Exception as e: app.logger.error(type(e)) else: try: - msg = request.form["msg"] + msg = request.form["msg"] except Exception as e: msg = None # app.logger.error(e.message) @@ -224,7 +230,7 @@ def update_task(id): # then check of header is NOT needed. 
*** # owner_needed is True if is not *** owner_needed = False - if status not in [async_task.ST_DWN_END , async_task.ST_DWN_ERR, async_task.ST_UPL_ERR, async_task.ST_UPL_END, + if status not in [async_task.ST_DWN_END , async_task.ST_DWN_ERR, async_task.ST_UPL_ERR, async_task.ST_UPL_END, async_task.ST_UPL_CFM, async_task.ST_DWN_BEG,async_task.ST_DWN_END] : @@ -262,7 +268,7 @@ def update_task(id): # if no msg on request, default status msg: if msg == None: msg = async_task.status_codes[status] - + # update task in memory tasks[hash_id].set_status(status=status, data=msg) @@ -292,13 +298,14 @@ def update_task(id): @check_auth_header def delete_task(id): auth_header = request.headers[AUTH_HEADER_NAME] - + # remote address request by Flask remote_addr = request.remote_addr # checks if request comes from allowed microservices - if remote_addr not in [COMPUTE_IP, STORAGE_IP]: - return jsonify(description="Invalid request address"), 403 + if not debug and remote_addr not in [COMPUTE_IP, STORAGE_IP]: + msg = f"Invalid remote address: {remote_addr}" + return jsonify(error=msg), 403 # getting username from auth_header username = get_username(auth_header) @@ -338,8 +345,9 @@ def expire_task(id): remote_addr = request.remote_addr # checks if request comes from allowed microservices - if remote_addr not in [COMPUTE_IP, STORAGE_IP]: - return jsonify(description="Invalid request address"), 403 + if not debug and remote_addr not in [COMPUTE_IP, STORAGE_IP]: + msg = f"Invalid remote address: {remote_addr}" + return jsonify(error=msg), 403 # checks if request has service header try: @@ -370,7 +378,7 @@ def expire_task(id): if service == "compute": exp_time = COMPUTE_TASK_EXP_TIME - + try: global r @@ -390,18 +398,10 @@ def expire_task(id): return data, 400 -# get status for status microservice -# only used by STATUS_URL otherwise forbidden - @app.route("/status",methods=["GET"]) def status(): app.logger.info("Test status of service") - - if request.remote_addr != STATUS_IP: - 
app.logger.warning("Invalid remote address: {addr}".format(addr=request.remote_addr)) - return jsonify(error="Invalid access"), 403 - return jsonify(success="ack"), 200 @@ -415,11 +415,12 @@ def tasklist(): app.logger.info("Getting service tasks") app.logger.info("STORAGE_IP is {storage_ip}".format(storage_ip=STORAGE_IP)) - # reject if not STORAGE_IP remote address - if request.remote_addr != STORAGE_IP: - app.logger.warning("Invalid remote address: {addr}".format(addr=request.remote_addr)) - return jsonify(error="Invalid access"), 403 - + # checks if request comes from allowed microservices + if not debug and request.remote_addr != STORAGE_IP: + msg = "Invalid remote address: {}".format(request.remote_addr) + app.logger.warning(msg) + return jsonify(error=msg), 403 + json = request.json if json == None: @@ -434,9 +435,9 @@ def tasklist(): if json["service"] not in ["storage", "compute"]: app.logger.error(f"Service parameter {json['service']} not valid") return jsonify(error=f"Service parameter {json['service']} not valid"), 401 - + _tasks = persistence.get_service_tasks(r, json["service"], json["status_code"]) - + except KeyError as e: app.logger.error(f"Key {e.args} in 'json' parameter is missing") return jsonify(error=f"{e.args} parameter missing"), 401 @@ -472,4 +473,7 @@ def tasklist(): init_queue() # set to debug = False, so stderr and stdout go to log file - app.run(debug=debug, host='0.0.0.0', use_reloader=False, port=TASKS_PORT) + if USE_SSL: + app.run(debug=debug, host='0.0.0.0', use_reloader=False, port=TASKS_PORT, ssl_context=(SSL_CRT, SSL_KEY)) + else: + app.run(debug=debug, host='0.0.0.0', use_reloader=False, port=TASKS_PORT) diff --git a/src/tests/automated_tests/conftest.py b/src/tests/automated_tests/conftest.py index 0aa19623..7f0e68af 100644 --- a/src/tests/automated_tests/conftest.py +++ b/src/tests/automated_tests/conftest.py @@ -1,3 +1,9 @@ +# +# Copyright (c) 2019-2021, ETH Zurich. All rights reserved. 
+# +# Please, refer to the LICENSE file in the root directory. +# SPDX-License-Identifier: BSD-3-Clause +# import pytest import os import jwt diff --git a/src/tests/automated_tests/demo.ini b/src/tests/automated_tests/demo.ini index 2b2dd1f5..4a870051 100644 --- a/src/tests/automated_tests/demo.ini +++ b/src/tests/automated_tests/demo.ini @@ -4,3 +4,5 @@ env_override_existing_values = 1 env_files = ../../../deploy/demo/common/common.env demo.env +markers = + reservations: tests of the reservation feature. Better not overlapped with the rest. \ No newline at end of file diff --git a/src/tests/automated_tests/integration/test_compute.py b/src/tests/automated_tests/integration/test_compute.py index 51e575c4..bec06bbf 100644 --- a/src/tests/automated_tests/integration/test_compute.py +++ b/src/tests/automated_tests/integration/test_compute.py @@ -1,3 +1,9 @@ +# +# Copyright (c) 2019-2021, ETH Zurich. All rights reserved. +# +# Please, refer to the LICENSE file in the root directory. +# SPDX-License-Identifier: BSD-3-Clause +# import pytest import requests import os @@ -15,20 +21,24 @@ JOBS_URL = COMPUTE_URL + "/jobs" SERVER_COMPUTE = os.environ.get("F7T_SYSTEMS_PUBLIC").split(";")[0] +### SSL parameters +USE_SSL = os.environ.get("F7T_USE_SSL", False) +SSL_CRT = os.environ.get("F7T_SSL_CRT", "") +SSL_PATH = "../../../deploy/test-build" # Helper function for job submittings def submit_job(machine, headers, file='testsbatch.sh'): files = {'file': ('upload.txt', open(file, 'rb'))} headers.update({"X-Machine-Name": machine}) - resp = requests.post(f"{JOBS_URL}/upload", headers=headers, files=files) + resp = requests.post(f"{JOBS_URL}/upload", headers=headers, files=files, verify= (f"{SSL_PATH}{SSL_CRT}" if USE_SSL else False)) print(resp.content) assert resp.status_code == 201 return resp def get_task(task_id, headers): url = "{}/{}".format(TASKS_URL, task_id) - resp = requests.get(url, headers=headers) + resp = requests.get(url, headers=headers, verify= 
(f"{SSL_PATH}{SSL_CRT}" if USE_SSL else False)) print(resp.content) assert resp.status_code == 200 return resp @@ -67,7 +77,7 @@ def test_submit_job(machine, headers): @pytest.mark.parametrize("machine", [SERVER_COMPUTE]) def test_list_jobs(machine, headers): headers.update({"X-Machine-Name": machine}) - resp = requests.get(JOBS_URL, headers=headers) + resp = requests.get(JOBS_URL, headers=headers, verify= (f"{SSL_PATH}{SSL_CRT}" if USE_SSL else False)) print(resp.content) assert resp.status_code == 200 @@ -80,7 +90,7 @@ def test_list_job(machine, headers): jobid = -1 url = "{}/{}".format(JOBS_URL, jobid) headers.update({"X-Machine-Name": machine}) - resp = requests.get(url, headers=headers) + resp = requests.get(url, headers=headers, verify= (f"{SSL_PATH}{SSL_CRT}" if USE_SSL else False)) print(resp.content) assert resp.status_code == 400 # task_id = resp.json()["task_id"] @@ -97,7 +107,7 @@ def test_cancel_job(machine, headers): # cancel job url = "{}/{}".format(JOBS_URL, job_id) headers.update({"X-Machine-Name": machine}) - resp = requests.delete(url, headers=headers) + resp = requests.delete(url, headers=headers, verify= (f"{SSL_PATH}{SSL_CRT}" if USE_SSL else False)) print(resp.content) assert resp.status_code == 200 @@ -117,7 +127,7 @@ def test_acct_job(machine, headers): url = "{}/acct".format(COMPUTE_URL) params = {"jobs": job_id} headers.update({"X-Machine-Name": machine}) - resp = requests.get(url, headers=headers,params=params) + resp = requests.get(url, headers=headers,params=params, verify= (f"{SSL_PATH}{SSL_CRT}" if USE_SSL else False)) print(resp.content) assert resp.status_code == 200 diff --git a/src/tests/automated_tests/integration/test_storage.py b/src/tests/automated_tests/integration/test_storage.py index c3941e11..ec5d80d6 100644 --- a/src/tests/automated_tests/integration/test_storage.py +++ b/src/tests/automated_tests/integration/test_storage.py @@ -1,3 +1,9 @@ +# +# Copyright (c) 2019-2021, ETH Zurich. All rights reserved. 
+# +# Please, refer to the LICENSE file in the root directory. +# SPDX-License-Identifier: BSD-3-Clause +# import pytest import requests import os @@ -16,14 +22,19 @@ UTILITIES_URL = os.environ.get("F7T_UTILITIES_URL") # same server used for utilities and external upload storage -SERVER_UTILITIES_STORAGE = os.environ.get("F7T_SYSTEMS_PUBLIC").split(";")[0] +SERVER_UTILITIES_STORAGE = os.environ.get("F7T_SYSTEMS_PUBLIC").split(";")[0] OBJECT_STORAGE = os.environ.get("F7T_OBJECT_STORAGE") +### SSL parameters +USE_SSL = os.environ.get("F7T_USE_SSL", False) +SSL_CRT = os.environ.get("F7T_SSL_CRT", "") +SSL_PATH = "../../../deploy/test-build" + def get_task(task_id, headers): url = "{}/{}".format(TASKS_URL, task_id) - resp = requests.get(url, headers=headers) + resp = requests.get(url, headers=headers, verify= (f"{SSL_PATH}{SSL_CRT}" if USE_SSL else False)) print(resp.content) assert resp.status_code == 200 return resp @@ -41,7 +52,7 @@ def test_post_upload_request(headers): # request upload form data = { "sourcePath": "testsbatch.sh", "targetPath": USER_HOME } - resp = requests.post(STORAGE_URL + "/xfer-external/upload", headers=headers, data=data) + resp = requests.post(STORAGE_URL + "/xfer-external/upload", headers=headers, data=data, verify= (f"{SSL_PATH}{SSL_CRT}" if USE_SSL else False)) assert resp.status_code == 201 task_id = resp.json()["task_id"] @@ -55,46 +66,43 @@ def test_post_upload_request(headers): # upload file to storage server msg = resp.json()["task"]["data"]["msg"] - url = msg["url"] - - - url = url.replace("minio_test_build", "127.0.0.1") + url = msg["parameters"]["url"] resp = None - + if (OBJECT_STORAGE == "s3v2"): - params = [('AWSAccessKeyId', msg["AWSAccessKeyId"]), ('Signature', msg["Signature"]), ('Expires', msg["Expires"])] - + params = [('AWSAccessKeyId', msg["parameters"]["params"]["AWSAccessKeyId"]), ('Signature', msg["parameters"]["params"]["Signature"]), ('Expires', msg["parameters"]["params"]["Expires"])] + # this way doesn't work # 
files = {'file': ("testsbatch.sh", open(data["sourcePath"], 'rb'))} # resp = requests.put(url=url, files=files, params) - + # this is the only way signature doesn't break! with open(data["sourcePath"], 'rb') as data: - resp= requests.put(url, data=data, params=params) + resp= requests.put(url, data=data, params=params, verify= (f"{SSL_PATH}{SSL_CRT}" if USE_SSL else False)) elif (OBJECT_STORAGE == "s3v4"): - post_data = [('key', msg["key"]), ('policy', msg["policy"]), ('x-amz-algorithm', msg["x-amz-algorithm"]) - , ('x-amz-credential', msg["x-amz-credential"]), ('x-amz-date', msg["x-amz-date"]), - ('x-amz-signature', msg["x-amz-signature"])] + post_data = [('key', msg["parameters"]["data"]["key"]), ('policy', msg["parameters"]["data"]["policy"]), ('x-amz-algorithm', msg["parameters"]["data"]["x-amz-algorithm"]) + , ('x-amz-credential', msg["parameters"]["data"]["x-amz-credential"]), ('x-amz-date', msg["parameters"]["data"]["x-amz-date"]), + ('x-amz-signature', msg["parameters"]["data"]["x-amz-signature"])] files = {'file': open(data["sourcePath"],'rb')} - resp = requests.post(url, data=post_data, files=files) + resp = requests.post(url, data=post_data, files=files, verify= (f"{SSL_PATH}{SSL_CRT}" if USE_SSL else False)) else: # swift post request params = [('max_file_size', msg["max_file_size"]), ('max_file_count', msg["max_file_count"]), ('expires', msg["expires"]), ('signature', msg["signature"]), ('redirect', msg["redirect"])] - + with open(data["sourcePath"], 'rb') as data: - resp= requests.put(url, data=data, params=params) - + resp= requests.put(url, data=data, params=params, verify= (f"{SSL_PATH}{SSL_CRT}" if USE_SSL else False)) + assert resp.status_code == 200 or resp.status_code == 204 #TODO: check 204 is right # download from OS to FS is automatic download_ok = False for i in range(20): - r = requests.get(TASKS_URL +"/"+task_id, headers=headers) + r = requests.get(TASKS_URL +"/"+task_id, headers=headers, verify= (f"{SSL_PATH}{SSL_CRT}" if USE_SSL else 
False)) assert r.status_code == 200 if r.json()["task"]["status"] == "114": # import async_tasks -> async_tasks.ST_DWN_END download_ok = True @@ -114,12 +122,12 @@ def test_internal_cp(machine, headers): # jobName, time, stageOutJobId data = {"sourcePath": USER_HOME + "/testsbatch.sh", "targetPath": USER_HOME + "/testsbatch2.sh"} url = "{}/xfer-internal/cp".format(STORAGE_URL) - resp = requests.post(url, headers=headers,data=data) + resp = requests.post(url, headers=headers,data=data, verify= (f"{SSL_PATH}{SSL_CRT}" if USE_SSL else False)) assert resp.status_code == 201 task_id = resp.json()["task_id"] check_task_status(task_id, headers) - + # wait to make sure job is finished time.sleep(5) @@ -127,7 +135,7 @@ def test_internal_cp(machine, headers): params = {"targetPath": USER_HOME + "/testsbatch.sh", "showhidden" : "true"} url = "{}/ls".format(UTILITIES_URL) headers.update({"X-Machine-Name": machine}) - resp = requests.get(url, headers=headers, params=params) + resp = requests.get(url, headers=headers, params=params, verify= (f"{SSL_PATH}{SSL_CRT}" if USE_SSL else False)) print(resp.json()) print(machine) assert resp.status_code == 200 diff --git a/src/tests/automated_tests/readme.txt b/src/tests/automated_tests/readme.txt index 7164b243..335314ac 100644 --- a/src/tests/automated_tests/readme.txt +++ b/src/tests/automated_tests/readme.txt @@ -1,12 +1,16 @@ -***** Install Pytest and dependencies ***** +***** IMPORTANT ***** -pip3 install -r requirements.txt +Instructions below are for the case where you want to run tests from a local +python environment. For a containerized environment, please refer to +ci/dev/README.md. +***** Install Pytest and dependencies ***** +pip3 install -r deploy/docker/tester/requirements.txt ***** Configure Test environment ***** -The environment variables for your test implementation must be configured +The environment variables for your test implementation must be configured in the pytest section of pytest.ini file. 
For example, for "test-build" deploy, the configuration is as follows: @@ -72,8 +76,8 @@ Run a specific test within a test file: ***** Tests Limitations ***** -In order to test implementations that are behind a gateway with authetication -you will need to disable token verification. This has to be done in your gateway configuration. +In order to test implementations that are behind a gateway with authetication +you will need to disable token verification. This has to be done in your gateway configuration. Also you must set to empty the REALM_RSA_PUBLIC_KEY environment variable in the common.env file of your deploy. Finally, you will need to specify the firecrest gateway address in FIRECREST_URL environment variable. @@ -87,7 +91,7 @@ You need to setup the following environment variables: FIRECREST_URL = http://myapigateway # enable login with SA -SA_LOGIN = True +SA_LOGIN = True # Openid service url SA_TOKEN_URI = http://myopenidservice/auth/realms/kcrealm/protocol/openid-connect/token @@ -101,6 +105,6 @@ The "demo" implementation has been configured to be tested using service account Open demo.env file to check the configuration values that have been set. 
As previously shown, run the tests on "demo" implementation by executing the following command: - pytest -c demo.ini + pytest -c demo.ini diff --git a/src/tests/automated_tests/requirements.txt b/src/tests/automated_tests/requirements.txt deleted file mode 100644 index 3b8e50d3..00000000 --- a/src/tests/automated_tests/requirements.txt +++ /dev/null @@ -1,5 +0,0 @@ -pytest -python-dotenv -pytest-dotenv -pyjwt -requests diff --git a/src/tests/automated_tests/test-build.ini b/src/tests/automated_tests/test-build.ini index 93e8ade9..ff44a861 100644 --- a/src/tests/automated_tests/test-build.ini +++ b/src/tests/automated_tests/test-build.ini @@ -5,3 +5,5 @@ env_files = ../../../deploy/test-build/environment/common.env ../../../deploy/test-build/environment/storage.env test-build.env +markers = + reservations: tests of the reservation feature. Better not overlapped with the rest. \ No newline at end of file diff --git a/src/tests/automated_tests/test_globals.py b/src/tests/automated_tests/test_globals.py index 1e2e6b58..4d7d5d82 100644 --- a/src/tests/automated_tests/test_globals.py +++ b/src/tests/automated_tests/test_globals.py @@ -1,3 +1,9 @@ +# +# Copyright (c) 2019-2021, ETH Zurich. All rights reserved. +# +# Please, refer to the LICENSE file in the root directory. +# SPDX-License-Identifier: BSD-3-Clause +# import os # name of user firing the tests: diff --git a/src/tests/automated_tests/testsbatch.sh b/src/tests/automated_tests/testsbatch.sh index 4647df17..927e3b36 100644 --- a/src/tests/automated_tests/testsbatch.sh +++ b/src/tests/automated_tests/testsbatch.sh @@ -1,4 +1,11 @@ #!/bin/bash +## +## Copyright (c) 2019-2021, ETH Zurich. All rights reserved. +## +## Please, refer to the LICENSE file in the root directory. 
+## SPDX-License-Identifier: BSD-3-Clause +## + #SBATCH --job-name=testsbatch #SBATCH --ntasks=1 #SBATCH --tasks-per-node=1 diff --git a/src/tests/automated_tests/unit/markers.py b/src/tests/automated_tests/unit/markers.py index 41f73f4e..fcb08042 100644 --- a/src/tests/automated_tests/unit/markers.py +++ b/src/tests/automated_tests/unit/markers.py @@ -1,3 +1,9 @@ +# +# Copyright (c) 2019-2021, ETH Zurich. All rights reserved. +# +# Please, refer to the LICENSE file in the root directory. +# SPDX-License-Identifier: BSD-3-Clause +# import os import pytest diff --git a/src/tests/automated_tests/unit/test_unit_certificator.py b/src/tests/automated_tests/unit/test_unit_certificator.py index fdae4abd..68edde72 100644 --- a/src/tests/automated_tests/unit/test_unit_certificator.py +++ b/src/tests/automated_tests/unit/test_unit_certificator.py @@ -1,3 +1,9 @@ +# +# Copyright (c) 2019-2021, ETH Zurich. All rights reserved. +# +# Please, refer to the LICENSE file in the root directory. +# SPDX-License-Identifier: BSD-3-Clause +# import pytest import requests import os @@ -13,6 +19,11 @@ SYSTEM_NAME = os.environ.get("F7T_SYSTEMS_PUBLIC").split(";")[0] SYSTEM_ADDR = os.environ.get("F7T_SYSTEMS_INTERNAL_UTILITIES").split(";")[0] +### SSL parameters +USE_SSL = os.environ.get("F7T_USE_SSL", False) +SSL_CRT = os.environ.get("F7T_SSL_CRT", "") +SSL_PATH = "../../../deploy/test-build" + OPA_DATA = [("not_existing_system", "not_existing_addr", 401), (SYSTEM_NAME, SYSTEM_ADDR, 200)] @@ -22,7 +33,7 @@ def test_receive(headers): # url = f"{CERTIFICATOR_URL}/?command=" + base64.urlsafe_b64encode("ls".encode()).decode() params = {"command": base64.urlsafe_b64encode("ls".encode()).decode(), "cluster": SYSTEM_NAME, "addr": SYSTEM_ADDR } - resp = requests.get(CERTIFICATOR_URL, headers=headers, params=params) + resp = requests.get(CERTIFICATOR_URL, headers=headers, params=params, verify= (f"{SSL_PATH}{SSL_CRT}" if USE_SSL else False)) print(resp.content) assert resp.status_code == 200 @@ 
-32,7 +43,7 @@ def test_opa(machine,addr,expected_response_code,headers): # url = f"{CERTIFICATOR_URL}/?command=" + base64.urlsafe_b64encode("ls".encode()).decode() params = {"command": base64.urlsafe_b64encode("ls".encode()).decode(), "cluster": machine, "addr": addr } - resp = requests.get(CERTIFICATOR_URL, headers=headers, params=params) + resp = requests.get(CERTIFICATOR_URL, headers=headers, params=params, verify= (f"{SSL_PATH}{SSL_CRT}" if USE_SSL else False)) print(resp.content) assert resp.status_code == expected_response_code @@ -41,7 +52,7 @@ def test_opa(machine,addr,expected_response_code,headers): @host_environment_test def test_status(headers): url = f"{CERTIFICATOR_URL}/status" - resp = requests.get(url, headers=headers) + resp = requests.get(url, headers=headers, verify= (f"{SSL_PATH}{SSL_CRT}" if USE_SSL else False)) print(resp.content) assert resp.status_code == 200 diff --git a/src/tests/automated_tests/unit/test_unit_compute.py b/src/tests/automated_tests/unit/test_unit_compute.py index fa4d4870..3746d11b 100644 --- a/src/tests/automated_tests/unit/test_unit_compute.py +++ b/src/tests/automated_tests/unit/test_unit_compute.py @@ -1,3 +1,9 @@ +# +# Copyright (c) 2019-2021, ETH Zurich. All rights reserved. +# +# Please, refer to the LICENSE file in the root directory. 
+# SPDX-License-Identifier: BSD-3-Clause +# import pytest import requests import os @@ -13,6 +19,11 @@ JOBS_URL = COMPUTE_URL + "/jobs" SERVER_COMPUTE = os.environ.get("F7T_SYSTEMS_PUBLIC").split(";")[0] +### SSL parameters +USE_SSL = os.environ.get("F7T_USE_SSL", False) +SSL_CRT = os.environ.get("F7T_SSL_CRT", "") +SSL_PATH = "../../../deploy/test-build" + # test data: (server name, expected response code) DATA = [ (SERVER_COMPUTE, 200) , ("someservernotavailable", 400)] @@ -23,7 +34,7 @@ def submit_job_upload(machine, headers): print(f"COMPUTE_URL {COMPUTE_URL}") files = {'file': ('upload.txt', open('testsbatch.sh', 'rb'))} headers.update({"X-Machine-Name": machine}) - resp = requests.post(f"{JOBS_URL}/upload", headers=headers, files=files) + resp = requests.post(f"{JOBS_URL}/upload", headers=headers, files=files, verify= (f"{SSL_PATH}{SSL_CRT}" if USE_SSL else False)) return resp @@ -48,7 +59,7 @@ def test_submit_job_upload(machine, expected_response_code, headers): def test_submit_job_path(machine, targetPath, expected_response_code, headers): data = {"targetPath" : targetPath} headers.update({"X-Machine-Name": machine}) - resp = requests.post(f"{JOBS_URL}/path", headers=headers, data=data) + resp = requests.post(f"{JOBS_URL}/path", headers=headers, data=data, verify= (f"{SSL_PATH}{SSL_CRT}" if USE_SSL else False)) print(resp.content) print(resp.headers) assert resp.status_code == expected_response_code @@ -59,7 +70,7 @@ def test_submit_job_path(machine, targetPath, expected_response_code, headers): def test_list_jobs(machine, expected_response_code, headers): url = "{}".format(JOBS_URL) headers.update({"X-Machine-Name": machine}) - resp = requests.get(url, headers=headers) + resp = requests.get(url, headers=headers, verify= (f"{SSL_PATH}{SSL_CRT}" if USE_SSL else False)) print(resp.content) assert resp.status_code == expected_response_code @@ -71,7 +82,7 @@ def test_list_job(machine, expected_response_code, headers): jobid = -1 url = "{}/{}".format(JOBS_URL, 
jobid) headers.update({"X-Machine-Name": machine}) - resp = requests.get(url, headers=headers) + resp = requests.get(url, headers=headers, verify= (f"{SSL_PATH}{SSL_CRT}" if USE_SSL else False)) print(resp.content) assert resp.status_code == expected_response_code @@ -83,7 +94,7 @@ def test_cancel_job(machine, expected_response_code, headers): jobid = 1 url = "{}/{}".format(JOBS_URL, jobid) headers.update({"X-Machine-Name": machine}) - resp = requests.delete(url, headers=headers) + resp = requests.delete(url, headers=headers, verify= (f"{SSL_PATH}{SSL_CRT}" if USE_SSL else False)) print(resp.content) assert resp.status_code == expected_response_code @@ -95,7 +106,7 @@ def test_acct(machine, expected_response_code, headers): url = "{}/acct".format(COMPUTE_URL) headers.update({"X-Machine-Name": machine}) params = {"jobs":jobid} - resp = requests.get(url, headers=headers, params=params) + resp = requests.get(url, headers=headers, params=params, verify= (f"{SSL_PATH}{SSL_CRT}" if USE_SSL else False)) print(resp.content) assert resp.status_code == expected_response_code @@ -104,7 +115,7 @@ def test_acct(machine, expected_response_code, headers): @host_environment_test def test_status(headers): url = "{}/status".format(COMPUTE_URL) - resp = requests.get(url, headers=headers) + resp = requests.get(url, headers=headers, verify= (f"{SSL_PATH}{SSL_CRT}" if USE_SSL else False)) print(resp.content) assert resp.status_code == 200 diff --git a/src/tests/automated_tests/unit/test_unit_reservations.py b/src/tests/automated_tests/unit/test_unit_reservations.py new file mode 100644 index 00000000..eec6145f --- /dev/null +++ b/src/tests/automated_tests/unit/test_unit_reservations.py @@ -0,0 +1,198 @@ +# +# Copyright (c) 2019-2021, ETH Zurich. All rights reserved. +# +# Please, refer to the LICENSE file in the root directory. 
# SPDX-License-Identifier: BSD-3-Clause
#
# Unit tests for the FirecREST reservations microservice.
#
import pytest
import requests
import os
import json
import datetime
import time
from conftest import headers  # header fixture

pytestmark = pytest.mark.reservations

# Requests Parameters
FIRECREST_URL = os.environ.get("FIRECREST_URL")
if FIRECREST_URL:
    RESERVATIONS_URL = os.environ.get("FIRECREST_URL") + "/reservations"
else:
    RESERVATIONS_URL = os.environ.get("F7T_RESERVATIONS_URL")
SYSTEM = os.environ.get("F7T_SYSTEMS_PUBLIC").split(";")[0]

# SSL parameters
# NOTE(review): os.environ.get returns a *string* whenever the variable is set,
# so e.g. F7T_USE_SSL="False" would still be truthy here — confirm the deploy
# only defines this variable when SSL is actually enabled.
USE_SSL = os.environ.get("F7T_USE_SSL", False)
SSL_CRT = os.environ.get("F7T_SSL_CRT", "")
SSL_PATH = "../../../deploy/test-build"
VERIFY = (f"{SSL_PATH}{SSL_CRT}" if USE_SSL else False)

# Time examples: reservation start/end times relative to "now".
# d3/d4 deliberately omit the seconds field to exercise format validation.
d1 = (datetime.datetime.now() + datetime.timedelta(hours=5)).strftime("%Y-%m-%dT%H:%M:%S")
d2 = (datetime.datetime.now() + datetime.timedelta(hours=6)).strftime("%Y-%m-%dT%H:%M:%S")
d3 = (datetime.datetime.now() + datetime.timedelta(hours=12)).strftime("%Y-%m-%dT%H:%M")  # wrong format
d4 = (datetime.datetime.now() + datetime.timedelta(hours=13)).strftime("%Y-%m-%dT%H:%M")  # wrong format
d5 = (datetime.datetime.now() + datetime.timedelta(hours=12)).strftime("%Y-%m-%dT%H:%M:%S")
d6 = (datetime.datetime.now() + datetime.timedelta(hours=13)).strftime("%Y-%m-%dT%H:%M:%S")


def test_list_reservations_empty(headers):
    """Listing reservations on a valid system with none created returns an empty list."""
    url = RESERVATIONS_URL
    headers["X-Machine-Name"] = SYSTEM
    check_no_reservations(url, headers)


def test_list_reservations_wrong(headers):
    """Listing reservations on an unknown system fails with a 400 error."""
    url = RESERVATIONS_URL
    headers["X-Machine-Name"] = "notavalidsystem"

    resp = requests.get(url, headers=headers, verify=VERIFY)
    check_response(resp, 400)
    assert resp.json()['error'] == 'Error listing reservation'


# You can find the valid options for accounts, node types, etc in the slurm config files of the cluster built for these tests.
# Each row: (status_code, reservation, account, numberOfNodes, nodeType, starttime, endtime, expected message fragment)
POST_DATA = [
    (400, None, "test", "1", "f7t", d1, d2, "\'reservation\' form data input missing"),
    (400, "", "test", "1", "f7t", d1, d2, "\'reservation\' parameter format is not valid"),
    (400, "validrsvname", None, "1", "f7t", d1, d2, "\'account\' form data input missing"),
    (400, "validrsvname", "", "1", "f7t", d1, d2, "\'account\' parameter format is not valid"),

    (400, "validrsvname", "test", "3", "f7t", d1, d2, "greater than 1 available"),
    (400, "validrsvname", "test", "1", "ntl", d1, d2, "only f7t feature type are supported"),
]
BASE_DATA = [
    (400, "validrsvname", "test", None, "f7t", d1, d2, "\'numberOfNodes\' form data input missing"),
    (400, "validrsvname", "test", "", "f7t", d1, d2, "\'numberOfNodes\' parameter is not valid"),
    (400, "validrsvname", "test", "-3", "f7t", d1, d2, "\'numberOfNodes\' parameter is not valid"),
    (400, "validrsvname", "test", "0", "f7t", d1, d2, "\'numberOfNodes\' parameter is not valid"),

    (400, "validrsvname", "test", "1", None, d1, d2, "\'nodeType\' form data input missing"),
    (400, "validrsvname", "test", "1", "", d1, d2, "\'nodeType\' parameter format is not valid"),

    (400, "validrsvname", "test", "1", "f7t", None, d2, "\'starttime\' form data input missing"),
    (400, "validrsvname", "test", "1", "f7t", "", d2, "\'starttime\' parameter format is not valid"),
    (400, "validrsvname", "test", "1", "f7t", "2day", d2, "\'starttime\' parameter format is not valid"),
    (400, "validrsvname", "test", "1", "f7t", d3, d4, "\'starttime\' parameter format is not valid"),
    (400, "validrsvname", "test", "1", "f7t", d1, None, "\'endtime\' form data input missing"),
    (400, "validrsvname", "test", "1", "f7t", d1, "", "\'endtime\' parameter format is not valid"),
    (400, "validrsvname", "test", "1", "f7t", d1, "2m", "\'endtime\' parameter format is not valid"),
    (400, "validrsvname", "test", "1", "f7t", d1, d4, "\'endtime\' parameter format is not valid"),
    (400, "validrsvname", "test", "1", "f7t", d2, d1, "\'endtime\' occurs before \'starttime\'"),
]


@pytest.mark.parametrize("status_code,reservation,account,numberOfNodes,nodeType,starttime,endtime,msg", POST_DATA + BASE_DATA)
def test_post_reservation_wrong(status_code, reservation, account, numberOfNodes, nodeType, starttime, endtime, msg, headers):
    """Creating a reservation with invalid form data fails with the expected message."""
    url = RESERVATIONS_URL
    headers["X-Machine-Name"] = SYSTEM
    data = {"reservation": reservation,
            "account": account,
            "numberOfNodes": numberOfNodes,
            "nodeType": nodeType,
            "starttime": starttime,
            "endtime": endtime}

    resp = requests.post(url, headers=headers, data=data, verify=VERIFY)
    check_response(resp, status_code, msg)


@pytest.mark.parametrize("status_code,reservation,account,numberOfNodes,nodeType,starttime,endtime,msg", BASE_DATA)
def test_put_reservation_wrong(status_code, reservation, account, numberOfNodes, nodeType, starttime, endtime, msg, headers):
    """Updating a reservation with invalid form data fails with the expected message."""
    url = f"{RESERVATIONS_URL}/{reservation}"
    headers["X-Machine-Name"] = SYSTEM
    data = {"numberOfNodes": numberOfNodes,
            "nodeType": nodeType,
            "starttime": starttime,
            "endtime": endtime}

    resp = requests.put(url, headers=headers, data=data, verify=VERIFY)
    check_response(resp, status_code, msg)


@pytest.mark.parametrize("status_code,reservation,msg", [
    (400, "wrongname", "You are not an owner of the wrongname reservation"),
    (400, "1_", "\'reservation\' parameter format is not valid"),
])
def test_delete_reservation_wrong(status_code, reservation, msg, headers):
    """Deleting a reservation that is not owned, or is badly named, fails with 400."""
    url = f"{RESERVATIONS_URL}/{reservation}"
    headers["X-Machine-Name"] = SYSTEM

    resp = requests.delete(url, headers=headers, verify=VERIFY)
    check_response(resp, status_code, msg)


def test_reservation_crud_conflicts(dummy_reservation, headers):
    """A second reservation overlapping the fixture's reserved nodes is rejected."""
    url = RESERVATIONS_URL
    headers["X-Machine-Name"] = SYSTEM

    rsv02 = dict(dummy_reservation)
    rsv02['reservation'] = "testrsvok02"

    resp = requests.post(url, headers=headers, data=rsv02, verify=VERIFY)
    check_response(resp, 400, "Requested nodes are busy")


def test_reservation_crud_ok(dummy_reservation, headers):
    """The reservation created by the fixture can be listed and then updated."""
    url = RESERVATIONS_URL
    headers["X-Machine-Name"] = SYSTEM

    # read reservation
    resp = requests.get(url, headers=headers, verify=VERIFY)
    check_response(resp, 200)
    obtained = resp.json().get('success', [])
    assert [x['reservationname'] for x in obtained] == ['testrsvok01']

    # update rsv1
    upd = {
        "numberOfNodes": "1",
        "nodeType": "f7t",
        "starttime": d5,
        "endtime": d6
    }
    resp = requests.put(f"{RESERVATIONS_URL}/testrsvok01", headers=headers, data=upd, verify=VERIFY)
    check_response(resp, 200)


@pytest.fixture
def dummy_reservation(headers):
    """Create reservation 'testrsvok01' for a test, then delete it on teardown.

    Fix: the fixture (and its uses) were previously misspelled 'dummy_resevation';
    renamed consistently — it is module-internal, so no external caller changes.
    """
    url = RESERVATIONS_URL
    headers["X-Machine-Name"] = SYSTEM

    rsv01 = {
        "reservation": "testrsvok01",
        "account": "test",
        "numberOfNodes": "1",
        "nodeType": "f7t",
        "starttime": d1,
        "endtime": d2,
    }

    check_no_reservations(url, headers)

    # create rsv1
    resp = requests.post(url, headers=headers, data=rsv01, verify=VERIFY)
    check_response(resp, 201)

    yield rsv01

    # delete rsv1
    resp = requests.delete(f"{RESERVATIONS_URL}/testrsvok01", headers=headers, verify=VERIFY)
    check_response(resp, 204)
    check_no_reservations(url, headers)


def check_response(response, expected_code, in_description=None):
    """Assert the status code and, optionally, a fragment of the 'description' field."""
    assert response.status_code == expected_code, "headers: {}, content: {}".format(response.headers, response.content)
    if in_description:
        assert in_description in response.json().get('description', "")


def check_no_reservations(url, headers):
    """Assert that listing reservations succeeds (200) with an empty 'success' list."""
    resp = requests.get(url, headers=headers, verify=VERIFY)
    check_response(resp, 200)
    obtained = resp.json()['success']
    assert obtained == []


if __name__ == '__main__':
    pytest.main()
b/src/tests/automated_tests/unit/test_unit_status.py @@ -1,3 +1,9 @@ +# +# Copyright (c) 2019-2021, ETH Zurich. All rights reserved. +# +# Please, refer to the LICENSE file in the root directory. +# SPDX-License-Identifier: BSD-3-Clause +# import pytest import requests import os @@ -11,42 +17,47 @@ SYSTEMS = os.environ.get("F7T_SYSTEMS_PUBLIC").split(";") +### SSL parameters +USE_SSL = os.environ.get("F7T_USE_SSL", False) +SSL_CRT = os.environ.get("F7T_SSL_CRT", "") +SSL_PATH = "../../../deploy/test-build" @pytest.mark.parametrize("system",SYSTEMS) def test_status_system(system, headers): url = "{}/systems/{}".format(STATUS_URL, system) - resp = requests.get(url, headers=headers) + resp = requests.get(url, headers=headers, verify= (f"{SSL_PATH}{SSL_CRT}" if USE_SSL else False)) print(resp.content) assert 'description' in resp.json() def test_status_systems(headers): url = "{}/systems".format(STATUS_URL) - resp = requests.get(url, headers=headers) + resp = requests.get(url, headers=headers, verify= (f"{SSL_PATH}{SSL_CRT}" if USE_SSL else False)) print(resp.content) assert 'description' in resp.json() -@pytest.mark.parametrize("service",["certificator", "utilities", "compute", "tasks", "storage"]) +@pytest.mark.parametrize("service",["certificator", "utilities", "compute", "tasks", "storage","reservations"]) def test_status_service(service, headers): url = "{}/services/{}".format(STATUS_URL, service) - resp = requests.get(url, headers=headers) + resp = requests.get(url, headers=headers, verify= (f"{SSL_PATH}{SSL_CRT}" if USE_SSL else False)) print(resp.content) assert 'description' in resp.json() def test_status_services(headers): url = "{}/services".format(STATUS_URL) - resp = requests.get(url, headers=headers) + resp = requests.get(url, headers=headers, verify= (f"{SSL_PATH}{SSL_CRT}" if USE_SSL else False)) print(resp.content) print(resp.json()) assert 'description' in resp.json() def test_parameters(headers): + print(STATUS_URL) url = 
"{}/parameters".format(STATUS_URL) - resp = requests.get(url, headers=headers) + resp = requests.get(url, headers=headers, verify= (f"{SSL_PATH}{SSL_CRT}" if USE_SSL else False)) print(resp.content) assert resp.status_code == 200 diff --git a/src/tests/automated_tests/unit/test_unit_storage.py b/src/tests/automated_tests/unit/test_unit_storage.py index 3f63f396..0998d03f 100644 --- a/src/tests/automated_tests/unit/test_unit_storage.py +++ b/src/tests/automated_tests/unit/test_unit_storage.py @@ -1,39 +1,51 @@ +# +# Copyright (c) 2019-2021, ETH Zurich. All rights reserved. +# +# Please, refer to the LICENSE file in the root directory. +# SPDX-License-Identifier: BSD-3-Clause +# import pytest import requests import os from markers import host_environment_test from test_globals import * +import time FIRECREST_URL = os.environ.get("FIRECREST_URL") if FIRECREST_URL: - STORAGE_URL = os.environ.get("FIRECREST_URL") + "/storage" + STORAGE_URL = os.environ.get("FIRECREST_URL") + "/storage" else: STORAGE_URL = os.environ.get("F7T_STORAGE_URL") +### SSL parameters +USE_SSL = os.environ.get("F7T_USE_SSL", False) +SSL_CRT = os.environ.get("F7T_SSL_CRT", "") +SSL_PATH = "../../../deploy/test-build" + # test upload request: ask for an upload task (must throw 200 OK) def test_post_upload_request(headers): data = { "sourcePath": "testsbatch.sh", "targetPath": USER_HOME } - resp = requests.post(STORAGE_URL + "/xfer-external/upload", headers=headers, data=data) + resp = requests.post(STORAGE_URL + "/xfer-external/upload", headers=headers, data=data, verify= (f"{SSL_PATH}{SSL_CRT}" if USE_SSL else False)) assert resp.status_code == 201 def test_download_file_not_exist(headers): data = { "sourcePath": "no-existing-file" } - resp = requests.post(STORAGE_URL + "/xfer-external/download", headers=headers, data=data) + resp = requests.post(STORAGE_URL + "/xfer-external/download", headers=headers, data=data, verify= (f"{SSL_PATH}{SSL_CRT}" if USE_SSL else False)) print(resp.json()) 
print(resp.headers) assert resp.status_code == 400 def test_download_file_not_allowed(headers): data = { "sourcePath": "/srv/f7t/test_sbatch_forbidden.sh" } - resp = requests.post(STORAGE_URL + "/xfer-external/download", headers=headers, data=data) + resp = requests.post(STORAGE_URL + "/xfer-external/download", headers=headers, data=data, verify= (f"{SSL_PATH}{SSL_CRT}" if USE_SSL else False)) print(resp.json()) print(resp.headers) assert resp.status_code == 400 def test_download_dir_not_allowed(headers): data = { "sourcePath": "/srv/f7t" } - resp = requests.post(STORAGE_URL + "/xfer-external/download", headers=headers, data=data) + resp = requests.post(STORAGE_URL + "/xfer-external/download", headers=headers, data=data, verify= (f"{SSL_PATH}{SSL_CRT}" if USE_SSL else False)) print(resp.json()) print(resp.headers) assert resp.status_code == 400 @@ -41,17 +53,17 @@ def test_download_dir_not_allowed(headers): def test_internal_cp(headers): # jobName, time, stageOutJobId - data = {"sourcePath": USER_HOME + "/testsbatch.sh", "targetPath": USER_HOME + "/testsbatch2.sh"} + data = {"sourcePath": "/srv/f7t/test_sbatch.sh", "targetPath": USER_HOME + "/testsbatch2.sh", "account": "test"} url = "{}/xfer-internal/cp".format(STORAGE_URL) - resp = requests.post(url, headers=headers,data=data) + resp = requests.post(url, headers=headers,data=data, verify= (f"{SSL_PATH}{SSL_CRT}" if USE_SSL else False)) assert resp.status_code == 201 -def test_internal_mv(headers): +def test_internal_mv(headers): # jobName, time, stageOutJobId - data = {"sourcePath": USER_HOME + "/testsbatch2.sh", "targetPath": USER_HOME + "/testsbatch3.sh"} + data = {"sourcePath": "/srv/f7t/test_sbatch_mv.sh", "targetPath": USER_HOME + "/testsbatch3.sh"} url = "{}/xfer-internal/mv".format(STORAGE_URL) - resp = requests.post(url, headers=headers,data=data) + resp = requests.post(url, headers=headers,data=data, verify= (f"{SSL_PATH}{SSL_CRT}" if USE_SSL else False)) assert resp.status_code == 201 @@ -59,23 +71,32 
@@ def test_internal_rsync(headers): # jobName, time, stageOutJobId data = {"sourcePath": USER_HOME + "/", "targetPath": USER_HOME + "/"} url = "{}/xfer-internal/rsync".format(STORAGE_URL) - resp = requests.post(url, headers=headers,data=data) + resp = requests.post(url, headers=headers,data=data, verify= (f"{SSL_PATH}{SSL_CRT}" if USE_SSL else False)) assert resp.status_code == 201 def test_internal_rm(headers): # jobName, time, stageOutJobId - data = {"targetPath": USER_HOME + "/testsbatch3.sh"} + data = {"targetPath": "/srv/f7t/test_sbatch_rm.sh"} + url = "{}/xfer-internal/rm".format(STORAGE_URL) - resp = requests.post(url, headers=headers,data=data) + resp = requests.post(url, headers=headers,data=data, verify= (f"{SSL_PATH}{SSL_CRT}" if USE_SSL else False)) assert resp.status_code == 201 +def test_internal_rm_err(headers): + # jobName, time, stageOutJobId + data = {"targetPath": "/srv/f7t/test_sbatch_forbidden.sh"} + + url = "{}/xfer-internal/rm".format(STORAGE_URL) + resp = requests.post(url, headers=headers,data=data, verify= (f"{SSL_PATH}{SSL_CRT}" if USE_SSL else False)) + assert resp.status_code == 400 + # Test storage microservice status @host_environment_test def test_status(): url = "{}/status".format(STORAGE_URL) - resp = requests.get(url) + resp = requests.get(url, verify= (f"{SSL_PATH}{SSL_CRT}" if USE_SSL else False)) assert resp.status_code == 200 diff --git a/src/tests/automated_tests/unit/test_unit_tasks.py b/src/tests/automated_tests/unit/test_unit_tasks.py index 6b0a80eb..1192184e 100644 --- a/src/tests/automated_tests/unit/test_unit_tasks.py +++ b/src/tests/automated_tests/unit/test_unit_tasks.py @@ -1,3 +1,9 @@ +# +# Copyright (c) 2019-2021, ETH Zurich. All rights reserved. +# +# Please, refer to the LICENSE file in the root directory. 
+# SPDX-License-Identifier: BSD-3-Clause +# import pytest import requests import json @@ -10,6 +16,11 @@ else: TASKS_URL = os.environ.get("F7T_TASKS_URL") +### SSL parameters +USE_SSL = os.environ.get("F7T_USE_SSL", False) +SSL_CRT = os.environ.get("F7T_SSL_CRT", "") +SSL_PATH = "../../../deploy/test-build" + INVALID_CODE1 = "9999" INVALID_CODE2 = "47777" @@ -47,7 +58,7 @@ # helper function to create a task def create_task(headers): url = "{}".format(TASKS_URL) - resp = requests.post(url, headers=headers) + resp = requests.post(url, headers=headers, verify= (f"{SSL_PATH}{SSL_CRT}" if USE_SSL else False) ) print(resp.content) print(url) return resp @@ -56,7 +67,7 @@ def create_task(headers): # Test list all tasks def test_list_tasks(headers): url = "{}/".format(TASKS_URL) - resp = requests.get(url, headers=headers) + resp = requests.get(url, headers=headers, verify= (f"{SSL_PATH}{SSL_CRT}" if USE_SSL else False)) print(json.dumps(resp.json(),indent=2)) print(url) assert resp.status_code == 200 @@ -75,7 +86,7 @@ def test_get_task(headers): resp = create_task(headers) hash_id = resp.json()["hash_id"] url = "{}/{}".format(TASKS_URL, hash_id) - resp = requests.get(url, headers=headers) + resp = requests.get(url, headers=headers, verify= (f"{SSL_PATH}{SSL_CRT}" if USE_SSL else False)) print(json.dumps(resp.json(),indent=2)) assert resp.status_code == 200 @@ -84,7 +95,7 @@ def test_get_task(headers): def test_get_task_not_exists(headers): hash_id = "IDONTEXIST" url = "{}/{}".format(TASKS_URL, hash_id) - resp = requests.get(url, headers=headers) + resp = requests.get(url, headers=headers, verify= (f"{SSL_PATH}{SSL_CRT}" if USE_SSL else False)) print(resp.content) assert resp.status_code == 404 @@ -100,7 +111,7 @@ def test_update_task_formdata(headers, status, msg, expected_response_code): url = "{}/{}".format(TASKS_URL, hash_id) #FORM data - resp = requests.put(url, headers=headers, data={'status': status, 'msg': msg}) + resp = requests.put(url, headers=headers, 
data={'status': status, 'msg': msg}, verify= (f"{SSL_PATH}{SSL_CRT}" if USE_SSL else False)) assert resp.status_code == expected_response_code @@ -116,7 +127,7 @@ def test_update_task_jsondata(headers, status, msg, expected_response_code): #JSON data json={"status": status, "msg": msg} - resp = requests.put(url, headers=headers, json=json) + resp = requests.put(url, headers=headers, json=json, verify= (f"{SSL_PATH}{SSL_CRT}" if USE_SSL else False)) assert resp.status_code == expected_response_code @@ -126,7 +137,7 @@ def test_delete_task_id_exists(headers): resp = create_task(headers) hash_id = resp.json()["hash_id"] url = "{}/{}".format(TASKS_URL, hash_id) - resp = requests.delete(url, headers=headers) + resp = requests.delete(url, headers=headers, verify= (f"{SSL_PATH}{SSL_CRT}" if USE_SSL else False)) assert resp.status_code == 204 @@ -135,7 +146,7 @@ def test_delete_task_id_exists(headers): def test_delete_task_id_not_exists(headers): hash_id = "IDONTEXIST" url = "{}/{}".format(TASKS_URL, hash_id) - resp = requests.delete(url, headers=headers) + resp = requests.delete(url, headers=headers, verify= (f"{SSL_PATH}{SSL_CRT}" if USE_SSL else False)) assert resp.status_code == 404 and "error" in resp.json() @@ -145,7 +156,7 @@ def test_expire_task(headers): resp = create_task(headers) hash_id = resp.json()["hash_id"] url = "{}/expire/{}".format(TASKS_URL, hash_id) - resp = requests.post(url, headers=headers) + resp = requests.post(url, headers=headers, verify= (f"{SSL_PATH}{SSL_CRT}" if USE_SSL else False)) assert resp.status_code == 200 and "success" in resp.json() @@ -154,14 +165,14 @@ def test_expire_task(headers): def test_expire_task_id_not_exists(headers): hash_id = "IDONTEXIST" url = "{}/expire/{}".format(TASKS_URL, hash_id) - resp = requests.post(url, headers=headers) + resp = requests.post(url, headers=headers, verify= (f"{SSL_PATH}{SSL_CRT}" if USE_SSL else False)) assert resp.status_code == 404 and "error" in resp.json() @host_environment_test def 
test_status(): url = "{}/status".format(TASKS_URL) - resp = requests.get(url) + resp = requests.get(url, verify= (f"{SSL_PATH}{SSL_CRT}" if USE_SSL else False)) assert resp.status_code == 200 @@ -169,7 +180,7 @@ def test_status(): def test_taskslist(): url = "{}/taskslist".format(TASKS_URL) json = {"service": "storage", "status_code":[]} - resp = requests.get(url, json=json) + resp = requests.get(url, json=json, verify= (f"{SSL_PATH}{SSL_CRT}" if USE_SSL else False)) assert resp.status_code == 200 diff --git a/src/tests/automated_tests/unit/test_unit_utilities.py b/src/tests/automated_tests/unit/test_unit_utilities.py index b869143e..6c22025f 100644 --- a/src/tests/automated_tests/unit/test_unit_utilities.py +++ b/src/tests/automated_tests/unit/test_unit_utilities.py @@ -1,3 +1,9 @@ +# +# Copyright (c) 2019-2021, ETH Zurich. All rights reserved. +# +# Please, refer to the LICENSE file in the root directory. +# SPDX-License-Identifier: BSD-3-Clause +# import pytest import requests import os @@ -14,10 +20,22 @@ SERVER_UTILITIES = os.environ.get("F7T_SYSTEMS_PUBLIC").split(";")[0] +### SSL parameters +USE_SSL = os.environ.get("F7T_USE_SSL", False) +SSL_CRT = os.environ.get("F7T_SSL_CRT", "") +SSL_PATH = "../../../deploy/test-build" -# test data for rename, chmod,chown, file, download,upload + +# test data for rename, chmod,chown, download,upload DATA = [ (SERVER_UTILITIES, 200) , ("someservernotavailable", 400)] +# test data for file +DATA_FILE = [ (SERVER_UTILITIES, 200, ".bashrc") , + ("someservernotavailable", 400, ".bashrc"), + (SERVER_UTILITIES, 400, "nofile") , + (SERVER_UTILITIES, 400, "/var/log/messages") , + ] + # test data for #mkdir, symlink DATA_201 = [ (SERVER_UTILITIES, 201) , ("someservernotavailable", 400)] @@ -53,7 +71,7 @@ def test_view(machine, targetPath, expected_response_code, headers): headers.update({ "X-Machine-Name": machine }) - resp = requests.get(url=url, headers=headers, params=params) + resp = requests.get(url=url, headers=headers, 
params=params, verify= (f"{SSL_PATH}{SSL_CRT}" if USE_SSL else False)) print(resp.json()) print(resp.headers) @@ -67,7 +85,7 @@ def test_checksum(machine, targetPath, expected_response_code, headers): headers.update({ "X-Machine-Name": machine }) - resp = requests.get(url=url, headers=headers, params=params) + resp = requests.get(url=url, headers=headers, params=params, verify= (f"{SSL_PATH}{SSL_CRT}" if USE_SSL else False)) print(resp.json()) print(resp.headers) @@ -83,19 +101,33 @@ def test_upload(machine, expected_response_code, headers): files = {'file': ('testsbatch.sh', open('testsbatch.sh', 'rb'))} url = "{}/upload".format(UTILITIES_URL) headers.update({"X-Machine-Name": machine}) - resp = requests.post(url, headers=headers, data=data, files=files) + print(machine) + resp = requests.post(url, headers=headers, data=data, files=files, verify= (f"{SSL_PATH}{SSL_CRT}" if USE_SSL else False)) print(resp.content) + print(resp.headers) assert resp.status_code == expected_response_code +# Test exec file command on remote system +@pytest.mark.parametrize("machine, expected_response_code,file_name", DATA_FILE) +def test_file_type(machine, expected_response_code, file_name, headers): + url = "{}/file".format(UTILITIES_URL) + params = {"targetPath": file_name} + headers.update({"X-Machine-Name": machine}) + resp = requests.get(url, headers=headers, params=params, verify= (f"{SSL_PATH}{SSL_CRT}" if USE_SSL else False)) + print(resp.content) + print(resp.headers) + assert resp.status_code == expected_response_code + # Test exec file command on remote system @pytest.mark.parametrize("machine, expected_response_code", DATA) -def test_file_type(machine, expected_response_code, headers): +def test_file_type_error(machine, expected_response_code, headers): url = "{}/file".format(UTILITIES_URL) params = {"targetPath": ".bashrc"} headers.update({"X-Machine-Name": machine}) - resp = requests.get(url, headers=headers, params=params) + resp = requests.get(url, headers=headers, 
params=params, verify= (f"{SSL_PATH}{SSL_CRT}" if USE_SSL else False)) print(resp.content) + print(resp.headers) assert resp.status_code == expected_response_code @@ -103,7 +135,7 @@ def test_file_type(machine, expected_response_code, headers): def exec_chmod(machine, headers, data): url = "{}/chmod".format(UTILITIES_URL) headers.update({"X-Machine-Name": machine}) - resp = requests.put(url, headers=headers, data=data) + resp = requests.put(url, headers=headers, data=data, verify= (f"{SSL_PATH}{SSL_CRT}" if USE_SSL else False)) return resp @@ -132,7 +164,7 @@ def test_chown(machine, expected_response_code, headers): data = {"targetPath": USER_HOME + "/testsbatch.sh", "owner" : CURRENT_USER , "group": CURRENT_USER} url = "{}/chown".format(UTILITIES_URL) headers.update({"X-Machine-Name": machine}) - resp = requests.put(url, headers=headers, data=data) + resp = requests.put(url, headers=headers, data=data, verify= (f"{SSL_PATH}{SSL_CRT}" if USE_SSL else False)) print(resp.content) assert resp.status_code == expected_response_code @@ -142,7 +174,7 @@ def test_list_directory(machine, targetPath, expected_response_code, headers): params = {"targetPath": targetPath, "showhidden" : "true"} url = "{}/ls".format(UTILITIES_URL) headers.update({"X-Machine-Name": machine}) - resp = requests.get(url, headers=headers, params=params) + resp = requests.get(url, headers=headers, params=params, verify= (f"{SSL_PATH}{SSL_CRT}" if USE_SSL else False)) print(json.dumps(resp.json(),indent=2)) print(resp.headers) assert resp.status_code == expected_response_code @@ -154,7 +186,7 @@ def test_make_directory(machine, expected_response_code, headers): data = {"targetPath": USER_HOME + "/samplefolder/samplesubfolder", "p" : "true"} url = "{}/mkdir".format(UTILITIES_URL) headers.update({"X-Machine-Name": machine}) - resp = requests.post(url, headers=headers, data=data) + resp = requests.post(url, headers=headers, data=data, verify= (f"{SSL_PATH}{SSL_CRT}" if USE_SSL else False)) 
print(resp.content) assert resp.status_code == expected_response_code @@ -165,7 +197,7 @@ def test_rename(machine, expected_response_code, headers): data = {"sourcePath": USER_HOME + "/samplefolder/", "targetPath" : USER_HOME + "/sampleFolder/"} url = "{}/rename".format(UTILITIES_URL) headers.update({"X-Machine-Name": machine}) - resp = requests.put(url, headers=headers, data=data) + resp = requests.put(url, headers=headers, data=data, verify= (f"{SSL_PATH}{SSL_CRT}" if USE_SSL else False)) print(resp.content) assert resp.status_code == expected_response_code @@ -177,7 +209,7 @@ def test_copy(machine, expected_response_code, headers): data = {"sourcePath": USER_HOME + "/sampleFolder", "targetPath" : USER_HOME + "/sampleFoldercopy"} url = "{}/copy".format(UTILITIES_URL) headers.update({"X-Machine-Name": machine}) - resp = requests.post(url, headers=headers, data=data) + resp = requests.post(url, headers=headers, data=data, verify= (f"{SSL_PATH}{SSL_CRT}" if USE_SSL else False)) print(resp.content) assert resp.status_code == expected_response_code @@ -188,7 +220,7 @@ def test_symlink(machine, expected_response_code, headers): data = {"targetPath": USER_HOME + "/testsbatch.sh", "linkPath" : USER_HOME + "/sampleFolder/testlink"} url = "{}/symlink".format(UTILITIES_URL) headers.update({"X-Machine-Name": machine}) - resp = requests.post(url, headers=headers, data=data) + resp = requests.post(url, headers=headers, data=data, verify= (f"{SSL_PATH}{SSL_CRT}" if USE_SSL else False)) print(resp.content) print(machine) assert resp.status_code == expected_response_code @@ -201,7 +233,7 @@ def test_rm(machine, expected_response_code, headers): data = {"targetPath": USER_HOME + "/sampleFolder/"} url = "{}/rm".format(UTILITIES_URL) headers.update({"X-Machine-Name": machine}) - resp = requests.delete(url, headers=headers, data=data) + resp = requests.delete(url, headers=headers, data=data, verify= (f"{SSL_PATH}{SSL_CRT}" if USE_SSL else False)) print(resp.content) assert 
resp.status_code == expected_response_code @@ -212,7 +244,7 @@ def test_download(machine, expected_response_code, headers): params = {"sourcePath": USER_HOME + "/testsbatch.sh"} url = "{}/download".format(UTILITIES_URL) headers.update({"X-Machine-Name": machine}) - resp = requests.get(url, headers=headers, params=params) + resp = requests.get(url, headers=headers, params=params, verify= (f"{SSL_PATH}{SSL_CRT}" if USE_SSL else False)) assert resp.status_code == expected_response_code @@ -220,7 +252,9 @@ def test_download(machine, expected_response_code, headers): @host_environment_test def test_status(): url = "{}/status".format(UTILITIES_URL) - resp = requests.get(url) + resp = requests.get(url, verify= (f"{SSL_PATH}{SSL_CRT}" if USE_SSL else False)) + print(resp.content) + print(resp.headers) assert resp.status_code == 200 diff --git a/src/tests/automated_tests/unit/testsbatch.sh b/src/tests/automated_tests/unit/testsbatch.sh index 4647df17..927e3b36 100644 --- a/src/tests/automated_tests/unit/testsbatch.sh +++ b/src/tests/automated_tests/unit/testsbatch.sh @@ -1,4 +1,11 @@ #!/bin/bash +## +## Copyright (c) 2019-2021, ETH Zurich. All rights reserved. +## +## Please, refer to the LICENSE file in the root directory. +## SPDX-License-Identifier: BSD-3-Clause +## + #SBATCH --job-name=testsbatch #SBATCH --ntasks=1 #SBATCH --tasks-per-node=1 diff --git a/src/tests/template_client/Dockerfile b/src/tests/template_client/Dockerfile index da444417..e57b2d54 100644 --- a/src/tests/template_client/Dockerfile +++ b/src/tests/template_client/Dockerfile @@ -1,3 +1,9 @@ +## +## Copyright (c) 2019-2021, ETH Zurich. All rights reserved. +## +## Please, refer to the LICENSE file in the root directory. 
+## SPDX-License-Identifier: BSD-3-Clause +## FROM python:3.7-alpine RUN pip install flask flask-WTF flask-bootstrap flask-oidc flask_sslify requests diff --git a/src/tests/template_client/config.py b/src/tests/template_client/config.py index 1301dec8..5834a7cd 100644 --- a/src/tests/template_client/config.py +++ b/src/tests/template_client/config.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2019-2020, ETH Zurich. All rights reserved. +# Copyright (c) 2019-2021, ETH Zurich. All rights reserved. # # Please, refer to the LICENSE file in the root directory. # SPDX-License-Identifier: BSD-3-Clause diff --git a/src/tests/template_client/firecrest_demo.py b/src/tests/template_client/firecrest_demo.py index b342c16f..5d987398 100644 --- a/src/tests/template_client/firecrest_demo.py +++ b/src/tests/template_client/firecrest_demo.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2019-2020, ETH Zurich. All rights reserved. +# Copyright (c) 2019-2021, ETH Zurich. All rights reserved. # # Please, refer to the LICENSE file in the root directory. 
# SPDX-License-Identifier: BSD-3-Clause diff --git a/src/tests/template_client/templates/api.html b/src/tests/template_client/templates/api.html index 8e3733e6..b9666ce6 100644 --- a/src/tests/template_client/templates/api.html +++ b/src/tests/template_client/templates/api.html @@ -1,3 +1,9 @@ + {% extends "demo_base.html" %} {% block title %}Test API{% endblock %} {% block content %} diff --git a/src/tests/template_client/templates/compute.html b/src/tests/template_client/templates/compute.html index 3531e776..3544f945 100644 --- a/src/tests/template_client/templates/compute.html +++ b/src/tests/template_client/templates/compute.html @@ -1,9 +1,9 @@ +--> {% extends "demo_base.html" %} {% block title %}Compute microservice{% endblock %} {% block content %} diff --git a/src/tests/template_client/templates/compute/acct.html b/src/tests/template_client/templates/compute/acct.html index 0615e3c4..79c9066b 100644 --- a/src/tests/template_client/templates/compute/acct.html +++ b/src/tests/template_client/templates/compute/acct.html @@ -1,9 +1,9 @@ +--> {% extends "demo_base.html" %} {% block title %}Accounting information{% endblock %} {% block content %} diff --git a/src/tests/template_client/templates/compute/canceljob.html b/src/tests/template_client/templates/compute/canceljob.html index 19779feb..ac436e3d 100644 --- a/src/tests/template_client/templates/compute/canceljob.html +++ b/src/tests/template_client/templates/compute/canceljob.html @@ -1,9 +1,9 @@ +--> {% extends "demo_base.html" %} {% block title %}Cancel compute job{% endblock %} {% block content %} diff --git a/src/tests/template_client/templates/compute/job.html b/src/tests/template_client/templates/compute/job.html index 671e9201..3a31f988 100644 --- a/src/tests/template_client/templates/compute/job.html +++ b/src/tests/template_client/templates/compute/job.html @@ -1,9 +1,9 @@ +--> {% extends "demo_base.html" %} {% block title %}List single job{% endblock %} {% block content %} diff --git 
a/src/tests/template_client/templates/compute/jobs.html b/src/tests/template_client/templates/compute/jobs.html index 3bb51d2f..0d4445a4 100644 --- a/src/tests/template_client/templates/compute/jobs.html +++ b/src/tests/template_client/templates/compute/jobs.html @@ -1,9 +1,9 @@ +--> {% extends "demo_base.html" %} {% block title %}List jobs{% endblock %} {% block content %} diff --git a/src/tests/template_client/templates/compute/submitjob.html b/src/tests/template_client/templates/compute/submitjob.html index 41a7741c..3c0f62f2 100644 --- a/src/tests/template_client/templates/compute/submitjob.html +++ b/src/tests/template_client/templates/compute/submitjob.html @@ -1,9 +1,9 @@ +--> {% extends "demo_base.html" %} {% block title %}Submit a compute job{% endblock %} {% block content %} diff --git a/src/tests/template_client/templates/demo_base.html b/src/tests/template_client/templates/demo_base.html index a159f107..bbf8e780 100644 --- a/src/tests/template_client/templates/demo_base.html +++ b/src/tests/template_client/templates/demo_base.html @@ -1,9 +1,9 @@ +--> {% extends "bootstrap/base.html" %} {% import "bootstrap/wtf.html" as wtf %} {# {% block head %} diff --git a/src/tests/template_client/templates/index.html b/src/tests/template_client/templates/index.html index 081dbe24..93466fed 100644 --- a/src/tests/template_client/templates/index.html +++ b/src/tests/template_client/templates/index.html @@ -1,9 +1,9 @@ +--> {% extends "demo_base.html" %} {% block title %}FirecREST Demo{% endblock %} {% block content %} diff --git a/src/tests/template_client/templates/status.html b/src/tests/template_client/templates/status.html index 7ffd125a..29ae12d7 100644 --- a/src/tests/template_client/templates/status.html +++ b/src/tests/template_client/templates/status.html @@ -1,9 +1,9 @@ +--> {% extends "demo_base.html" %} {% block title %}Status microservices{% endblock %} {% block content %} diff --git a/src/tests/template_client/templates/status/allservices.html 
b/src/tests/template_client/templates/status/allservices.html index 818fe7e7..32e3b609 100644 --- a/src/tests/template_client/templates/status/allservices.html +++ b/src/tests/template_client/templates/status/allservices.html @@ -1,9 +1,9 @@ +--> {% extends "demo_base.html" %} {% block title %}Status microservice{% endblock %} {% block content %} diff --git a/src/tests/template_client/templates/status/allsystems.html b/src/tests/template_client/templates/status/allsystems.html index 4f6eeca2..1693044b 100644 --- a/src/tests/template_client/templates/status/allsystems.html +++ b/src/tests/template_client/templates/status/allsystems.html @@ -1,9 +1,9 @@ +--> {% extends "demo_base.html" %} {% block title %}Status microservice{% endblock %} {% block content %} diff --git a/src/tests/template_client/templates/status/parameters.html b/src/tests/template_client/templates/status/parameters.html index f7eaf1cb..dc3a009b 100644 --- a/src/tests/template_client/templates/status/parameters.html +++ b/src/tests/template_client/templates/status/parameters.html @@ -1,9 +1,9 @@ +--> {% extends "demo_base.html" %} {% block title %}Status microservice{% endblock %} {% block content %} diff --git a/src/tests/template_client/templates/storage.html b/src/tests/template_client/templates/storage.html index 3ef244e6..d8bccfc8 100644 --- a/src/tests/template_client/templates/storage.html +++ b/src/tests/template_client/templates/storage.html @@ -1,9 +1,9 @@ +--> {% extends "demo_base.html" %} {% block title %}Storage microservice{% endblock %} {% block content %} diff --git a/src/tests/template_client/templates/storage/download.html b/src/tests/template_client/templates/storage/download.html index 85e1e201..cb830b90 100644 --- a/src/tests/template_client/templates/storage/download.html +++ b/src/tests/template_client/templates/storage/download.html @@ -1,9 +1,9 @@ +--> {% extends "demo_base.html" %} {% block title %}External File Download{% endblock %} {% block content %} diff --git 
a/src/tests/template_client/templates/storage/upload.html b/src/tests/template_client/templates/storage/upload.html index 3b51222e..55bad1f7 100644 --- a/src/tests/template_client/templates/storage/upload.html +++ b/src/tests/template_client/templates/storage/upload.html @@ -1,9 +1,9 @@ +--> {% extends "demo_base.html" %} {% block title %}External File Upload{% endblock %} {% block content %} diff --git a/src/tests/template_client/templates/storage/xfer-internal.html b/src/tests/template_client/templates/storage/xfer-internal.html index c3cff4bc..0249e8bf 100644 --- a/src/tests/template_client/templates/storage/xfer-internal.html +++ b/src/tests/template_client/templates/storage/xfer-internal.html @@ -1,9 +1,9 @@ +--> {% extends "demo_base.html" %} {% block title %}Create an Internal Transfer Job{% endblock %} {% block content %} diff --git a/src/tests/template_client/templates/tasks.html b/src/tests/template_client/templates/tasks.html index 67452949..d14d2e5f 100644 --- a/src/tests/template_client/templates/tasks.html +++ b/src/tests/template_client/templates/tasks.html @@ -1,9 +1,9 @@ +--> {% extends "demo_base.html" %} {% block title %}Tasks microservice{% endblock %} {% block content %} diff --git a/src/tests/template_client/templates/tasks/task.html b/src/tests/template_client/templates/tasks/task.html index 8b910a23..a87ebc4c 100644 --- a/src/tests/template_client/templates/tasks/task.html +++ b/src/tests/template_client/templates/tasks/task.html @@ -1,9 +1,9 @@ +--> {% extends "demo_base.html" %} {% block title %}FirecREST Tasks{% endblock %} {% block content %} diff --git a/src/tests/template_client/templates/utilities.html b/src/tests/template_client/templates/utilities.html index 8d292bf8..81d36d16 100644 --- a/src/tests/template_client/templates/utilities.html +++ b/src/tests/template_client/templates/utilities.html @@ -1,9 +1,9 @@ +--> {% extends "demo_base.html" %} {% block title %}Utilities microservice{% endblock %} {% block content %} diff --git 
a/src/tools/pyfirecrest b/src/tools/pyfirecrest new file mode 160000 index 00000000..f98fe3c4 --- /dev/null +++ b/src/tools/pyfirecrest @@ -0,0 +1 @@ +Subproject commit f98fe3c48c5ca8545c18c8d3d57f6850f566bc9e diff --git a/src/utilities/utilities.py b/src/utilities/utilities.py index 4065780a..7ea50c72 100644 --- a/src/utilities/utilities.py +++ b/src/utilities/utilities.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2019-2020, ETH Zurich. All rights reserved. +# Copyright (c) 2019-2021, ETH Zurich. All rights reserved. # # Please, refer to the LICENSE file in the root directory. # SPDX-License-Identifier: BSD-3-Clause @@ -19,7 +19,6 @@ CERTIFICATOR_URL = os.environ.get("F7T_CERTIFICATOR_URL") -STATUS_IP = os.environ.get("F7T_STATUS_IP") UTILITIES_PORT = os.environ.get("F7T_UTILITIES_PORT", 5000) @@ -37,6 +36,11 @@ #max file size for upload/download in MB MAX_FILE_SIZE=int(os.environ.get("F7T_UTILITIES_MAX_FILE_SIZE")) +### SSL parameters +USE_SSL = os.environ.get("F7T_USE_SSL", False) +SSL_CRT = os.environ.get("F7T_SSL_CRT", "") +SSL_KEY = os.environ.get("F7T_SSL_KEY", "") + app = Flask(__name__) # max content lenght for upload in bytes app.config['MAX_CONTENT_LENGTH'] = int(MAX_FILE_SIZE) * 1024 * 1024 @@ -49,7 +53,7 @@ @app.route("/file", methods=["GET"]) @check_auth_header def file_type(): - + auth_header = request.headers[AUTH_HEADER_NAME] try: @@ -108,7 +112,7 @@ def file_type(): def chmod(): auth_header = request.headers[AUTH_HEADER_NAME] - + try: system_name = request.headers["X-Machine-Name"] except KeyError as e: @@ -136,7 +140,7 @@ def chmod(): try: mode = request.form["mode"] if mode == "": - return jsonify(description="Error in chown operation",error="'mode' value is empty"), 400 + return jsonify(description="Error in chmod operation",error="'mode' value is empty"), 400 except BadRequestKeyError: return jsonify(description="Error in chmod operation", error="mode query string missing"), 400 @@ -171,7 +175,7 @@ def chmod(): @app.route("/chown",methods=["PUT"]) 
@check_auth_header def chown(): - + auth_header = request.headers[AUTH_HEADER_NAME] try: @@ -229,7 +233,7 @@ def chown(): except: return jsonify(description=ret_data["description"]), ret_data["status_code"], ret_data["header"] - + return jsonify(description="Operation completed", out=retval["msg"]), 200 @@ -242,7 +246,7 @@ def chown(): @app.route("/ls",methods=["GET"]) @check_auth_header def list_directory(): - + auth_header = request.headers[AUTH_HEADER_NAME] try: @@ -294,7 +298,7 @@ def list_directory(): jsonify(description=ret_data["description"], error=ret_data["error"]), ret_data["status_code"], ret_data["header"] except: return jsonify(description=ret_data["description"]), ret_data["status_code"], ret_data["header"] - + # file List is retorned as a string separated for a $ character fileList = [] if len(retval["msg"].split("$")) == 1: @@ -380,7 +384,7 @@ def list_directory(): def make_directory(): auth_header = request.headers[AUTH_HEADER_NAME] - + try: system_name = request.headers["X-Machine-Name"] except KeyError as e: @@ -400,7 +404,7 @@ def make_directory(): path = request.form["targetPath"] if path == "": return jsonify(description="Error creating directory",error="'targetPath' value is empty"), 400 - + except BadRequestKeyError: return jsonify(description="Error creating directory", error="'targetPath' query string missing"), 400 @@ -426,7 +430,7 @@ def make_directory(): jsonify(description=ret_data["description"], error=ret_data["error"]), ret_data["status_code"], ret_data["header"] except: return jsonify(description=ret_data["description"]), ret_data["status_code"], ret_data["header"] - + return jsonify(description="Directory created", output=""), 201 ## Returns the content from the specified path on the {machine} filesystem @@ -480,9 +484,9 @@ def view(): except: return jsonify(description=ret_data["description"]), ret_data["status_code"], ret_data["header"] - + file_size = int(retval["msg"]) # in bytes - max_file_size = MAX_FILE_SIZE*(1024*1024) + 
max_file_size = MAX_FILE_SIZE*(1024*1024) if file_size > max_file_size: @@ -511,11 +515,11 @@ def view(): content = retval["msg"].replace("$","\n") return jsonify(description="File content successfully returned", output=content), 200 - + ## checksum: Print or check SHA256 (256-bit) checksums ## params: -## - targetPath: Filesystem path (Str) *required## +## - targetPath: Filesystem path (Str) *required## ## - machinename: str *required @app.route("/checksum",methods=["GET"]) @@ -523,7 +527,7 @@ def view(): def checksum(): auth_header = request.headers[AUTH_HEADER_NAME] - + try: system_name = request.headers["X-Machine-Name"] except KeyError as e: @@ -543,7 +547,7 @@ def checksum(): path = request.args.get("targetPath") if path == "": return jsonify(description="Error obatining checksum",error="'targetPath' value is empty"), 400 - + except BadRequestKeyError: return jsonify(description="Error obatining checksum", error="'targetPath' query string missing"), 400 @@ -567,7 +571,7 @@ def checksum(): # on success: retval["msg"] = "checksum /path/to/file" output = retval["msg"].split()[0] - + return jsonify(description="Checksum successfully retrieved", output=output), 200 @@ -596,7 +600,7 @@ def copy(): ## common code for file operations: copy, rename (move) def common_operation(request, command, method): - + auth_header = request.headers[AUTH_HEADER_NAME] try: @@ -655,7 +659,7 @@ def common_operation(request, command, method): jsonify(description=ret_data["description"], error=ret_data["error"]), ret_data["status_code"], ret_data["header"] except: return jsonify(description=ret_data["description"]), ret_data["status_code"], ret_data["header"] - + return jsonify(description="Success to " + command + " file or directory.", output=""), success_code @@ -670,7 +674,7 @@ def common_operation(request, command, method): def rm(): auth_header = request.headers[AUTH_HEADER_NAME] - + try: system_name = request.headers["X-Machine-Name"] except KeyError as e: @@ -689,7 +693,7 @@ def 
rm(): try: path = request.form["targetPath"] if path == "": - return jsonify(description="Error on delete operation",error="'targetPath' value is empty"), 400 + return jsonify(description="Error on delete operation",error="'targetPath' value is empty"), 400 except BadRequestKeyError: return jsonify(description="Error on delete operation",error="'targetPath' query string missing"), 400 @@ -711,7 +715,7 @@ def rm(): jsonify(description=ret_data["description"], error=ret_data["error"]), ret_data["status_code"], ret_data["header"] except: return jsonify(description=ret_data["description"]), ret_data["status_code"], ret_data["header"] - + return jsonify(description="Success to delete file or directory.", output=""), 204 @@ -727,7 +731,7 @@ def rm(): def symlink(): auth_header = request.headers[AUTH_HEADER_NAME] - + try: system_name = request.headers["X-Machine-Name"] except KeyError as e: @@ -773,7 +777,7 @@ def symlink(): jsonify(description=ret_data["description"], error=ret_data["error"]), ret_data["status_code"], ret_data["header"] except: return jsonify(description=ret_data["description"]), ret_data["status_code"], ret_data["header"] - + return jsonify(description="Success create the symlink"), 201 @@ -921,16 +925,9 @@ def upload(): return jsonify(description="File upload successful"), 201 -# get status for status microservice -# only used by STATUS_IP otherwise forbidden @app.route("/status", methods=["GET"]) def status(): app.logger.info("Test status of service") - - if request.remote_addr != STATUS_IP: - app.logger.warning("Invalid remote address: {addr}".format(addr=request.remote_addr)) - return jsonify(error="Invalid access"), 403 - return jsonify(success="ack"), 200 @@ -952,4 +949,7 @@ def status(): # run app # debug = False, so output redirects to log files - app.run(debug=debug, host='0.0.0.0', port=UTILITIES_PORT) + if USE_SSL: + app.run(debug=debug, host='0.0.0.0', port=UTILITIES_PORT, ssl_context=(SSL_CRT, SSL_KEY)) + else: + app.run(debug=debug, 
host='0.0.0.0', port=UTILITIES_PORT)