diff --git a/.gitignore b/.gitignore index 57f1cb2..85e7c1d 100644 --- a/.gitignore +++ b/.gitignore @@ -1 +1 @@ -/.idea/ \ No newline at end of file +/.idea/ diff --git a/.idea/vcs.xml b/.idea/vcs.xml deleted file mode 100644 index 94a25f7..0000000 --- a/.idea/vcs.xml +++ /dev/null @@ -1,6 +0,0 @@ - - - - - - \ No newline at end of file diff --git a/.travis.yml b/.travis.yml index 7dc91b5..0904aef 100644 --- a/.travis.yml +++ b/.travis.yml @@ -5,7 +5,7 @@ services: before_install: - sudo apt-get clean && sudo apt-get update -y - - sudo apt-get install -o Dpkg::Options::="--force-confold" --force-yes -y docker-engine + - sudo apt-get install -o Dpkg::Options::="--force-confold" --force-yes -y docker-ce - docker --version - git clone https://github.com/sstephenson/bats.git diff --git a/Dockerfile b/Dockerfile index ef3d2fd..3bcfc89 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM alpine:3.3 +FROM alpine:3.5 MAINTAINER frekele @@ -8,10 +8,10 @@ RUN apk add --update --no-cache \ wget \ git \ python\ - py-pip \ + py2-pip \ docker \ - && curl -sL https://get.docker.com/builds/Linux/x86_64/docker-1.10.3 > /usr/bin/docker \ - && chmod +x /usr/bin/docker \ + #&& curl -sL https://get.docker.com/builds/Linux/x86_64/docker-1.12.6 > /usr/bin/docker \ + #&& chmod +x /usr/bin/docker \ && pip install --upgrade pip \ && pip install awscli @@ -22,12 +22,14 @@ RUN chmod -R +x /scripts ENV STORAGE_PROVIDER='' \ BACKUP_NAME='' \ DATA_PATH='/data/' \ + DATA_PATH_EXCLUDE='' \ GZIP_COMPRESSION='true' \ CLEAN_DATA_BEFORE_RESTORE='false' \ BACKUP_VERSION='' \ CRON_SCHEDULE='' \ AWS_ACCESS_KEY_ID='' \ AWS_SECRET_ACCESS_KEY='' \ + AWS_S3_BUCKET_CREATE='false' \ AWS_S3_BUCKET_NAME='' \ AWS_S3_PATH='/' \ AWS_DEFAULT_REGION='us-east-1' \ @@ -35,4 +37,4 @@ ENV STORAGE_PROVIDER='' \ ENTRYPOINT ["/scripts/run.sh"] -CMD [""] \ No newline at end of file +CMD [""] diff --git a/README.md b/README.md index 3d94d88..37e205f 100644 --- a/README.md +++ b/README.md @@ -2,24 +2,23 @@ #### 
Solution Open Source in Backup and Restore, for Docker Container in the Cloud Providers! -[![Helicopterizer Image][HelicopterizerImage]][website] +[![Helicopterizer Image][HelicopterizerImage]][website] -[![ImageLayers](https://badge.imagelayers.io/frekele/helicopterizer:latest.svg)](https://imagelayers.io/?images=frekele/helicopterizer:latest) +[![ImageLayers](https://badge.imagelayers.io/frekele/helicopterizer:master.svg)](https://imagelayers.io/?images=frekele/helicopterizer:master) [![Gitter](https://badges.gitter.im/frekele/helicopterizer.svg)](https://gitter.im/frekele/helicopterizer?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) [![Docker Pulls](https://img.shields.io/docker/pulls/frekele/helicopterizer.svg)](https://hub.docker.com/r/frekele/helicopterizer/) [![Docker Stars](https://img.shields.io/docker/stars/frekele/helicopterizer.svg)](https://hub.docker.com/r/frekele/helicopterizer/) [![Release](https://img.shields.io/github/release/frekele/helicopterizer.svg)](https://github.com/frekele/helicopterizer/releases/latest) + [![Circle CI](https://circleci.com/gh/frekele/helicopterizer/tree/master.svg?style=shield)](https://circleci.com/gh/frekele/helicopterizer/tree/master) [![Build Status](https://travis-ci.org/frekele/helicopterizer.svg?branch=master)](https://travis-ci.org/frekele/helicopterizer) [![GitHub issues](https://img.shields.io/github/issues/frekele/helicopterizer.svg)](https://github.com/frekele/helicopterizer/issues) -[![GitHub forks](https://img.shields.io/github/forks/frekele/helicopterizer.svg)](https://github.com/frekele/helicopterizer/network) +[![GitHub forks](https://img.shields.io/github/forks/frekele/helicopterizer.svg)](https://github.com/frekele/helicopterizer/network) [![GitHub stars](https://img.shields.io/github/stars/frekele/helicopterizer.svg)](https://github.com/frekele/helicopterizer/stargazers) [![GitHub 
license](https://img.shields.io/badge/license-MIT-blue.svg)](https://raw.githubusercontent.com/frekele/helicopterizer/master/LICENSE) - - ## Backup and Restore for Docker Container #### Solution Open Source in Backup and Restore, for Docker Container in the Cloud Providers! @@ -27,9 +26,30 @@ ### Usage: - ``` - docker run -d [Environment Variables] [-v|--volumes-from] frekele/helicopterizer [backup|restore] [--tarball|--sync] - ``` +``` +docker run -d [Environment Variables] [-v|--volumes-from] frekele/helicopterizer [backup|restore] [--tarball|--sync] +``` + + +### Use Stable Branch for (Production) +``` +docker run -d frekele/helicopterizer:stable +``` + + +### Master Branch for (Development) +``` +docker run -d frekele/helicopterizer:latest +# or +docker run -d frekele/helicopterizer +``` + + +### Specific Tag Version + +``` +docker run -d frekele/helicopterizer:v0.2.1 +``` #### Cloud Storage Provider Supported: @@ -51,10 +71,12 @@ | ------------------------------- | ------------------- | ---------------------- | --------- | --------------- | --------------------------------------------------------------- | | STORAGE_PROVIDER | null | backup, restore | yes | tarball, sync | Provider name (AWS, AZURE, GOOGLE ...) 
| | DATA_PATH | /data/ | backup, restore | no | tarball, sync | Data path : /data/(your files) | +| DATA_PATH_EXCLUDE | null | backup | no | tarball | Exclude file from data path (comma-separated) | | CRON_SCHEDULE | null | backup, restore | no | tarball, sync | Cron Job Scheduler, Eg: '*/15 * * * *' run every 15 minutes | -| BACKUP_NAME | null | backup, restore | no | tarball | Backup name using: $(BACKUP_NAME)-$(BACKUP_VERSION).tar.gz | +| BACKUP_PREFIX | null | backup, restore | no | tarball | Default name schema: $(BACKUP_PREFIX)$(BACKUP_VERSION).tar.gz | +| BACKUP_NAME | null | backup, restore | no | tarball | If defined the name schema is: $(BACKUP_NAME).tar.gz | | GZIP_COMPRESSION | true | backup, restore | no | tarball | Boolean to indicate the compression of the file .tar to .tar.gz | -| CLEAN_DATA_BEFORE_RESTORE | false | restore | no | tarball, sync | Clear the data directory before the restore | +| CLEAN_DATA_BEFORE_RESTORE | false | restore | no | tarball, sync | Clear the data directory before the restore | | BACKUP_VERSION | null | restore | yes | tarball | Backup version using: $(BACKUP_VERSION).tar.gz | @@ -65,6 +87,7 @@ | AWS_ACCESS_KEY_ID | null | backup, restore | yes | tarball, sync | AWS access key. Eg: AKRJPMI3QYCARJCRF4VF | | AWS_SECRET_ACCESS_KEY | null | backup, restore | yes | tarball, sync | AWS secret key. Eg: VCsrO7aVulGuiUdXbS31jtQA4iRTVgi4scftJAJr | | AWS_S3_BUCKET_NAME | null | backup, restore | yes | tarball, sync | S3 bucket name. Eg: s3://my-bucket-backup/ | +| AWS_S3_BUCKET_CREATE | false | backup | no | tarball, sync | Boolean to indicate if we create the bucket (if not exists) | | AWS_S3_PATH | / | backup, restore | no | tarball, sync | Relative path for bucket S3. Eg: (AWS_S3_BUCKET_NAME)/jenkins/ | | AWS_DEFAULT_REGION | us-east-1 | backup, restore | no | tarball, sync | Default region bucket. Eg: (sa-east-1) | | AWS_S3_OPTIONS | null | backup, restore | no | tarball, sync | AWS S3 options parameters. 
See in [AWS CLI S3] | @@ -91,30 +114,35 @@ us-gov-west-1 | US GovCloud West (Oregon) | ### Usage Examples: Run Backup with tarball: + ``` docker run --rm \ -e STORAGE_PROVIDER=AWS \ -e AWS_ACCESS_KEY_ID=XXXXXXXXXXXXX \ -e AWS_SECRET_ACCESS_KEY=XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX \ -e AWS_S3_BUCKET_NAME=s3://my-bucket-backup/ \ --v /home/jenkins-data:/data:ro \ +-v /some/dir/jenkins-data:/data:ro \ helicopterizer backup --tarball ``` + Run Backup with sync filesystem: + ``` docker run --rm \ -e STORAGE_PROVIDER=AWS \ -e AWS_ACCESS_KEY_ID=XXXXXXXXXXXXX \ -e AWS_SECRET_ACCESS_KEY=XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX \ -e AWS_S3_BUCKET_NAME=s3://my-bucket-backup/ \ --v /home/jenkins-data:/data:ro \ +-v /some/dir/jenkins-data:/data:ro \ helicopterizer backup --sync ``` *Use ':ro' to mount the volumes in read-only mode.* + Run Restore with tarball: + ``` docker run --rm \ -e STORAGE_PROVIDER=AWS \ @@ -122,11 +150,13 @@ docker run --rm \ -e AWS_SECRET_ACCESS_KEY=XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX \ -e AWS_S3_BUCKET_NAME=s3://my-bucket-backup/ \ -e BACKUP_VERSION='2016-05-04T01:34:20Z' \ --v /home/jenkins-data:/data:rw \ +-v /some/dir/jenkins-data:/data:rw \ helicopterizer restore --tarball ``` + Run Restore with sync filesystem: + ``` docker run --rm \ -e STORAGE_PROVIDER=AWS \ @@ -134,14 +164,15 @@ docker run --rm \ -e AWS_SECRET_ACCESS_KEY=XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX \ -e AWS_S3_BUCKET_NAME=s3://my-bucket-backup/ \ -e BACKUP_VERSION='2016-05-04T01:34:20Z' \ --v /home/jenkins-data:/data:rw \ +-v /some/dir/jenkins-data:/data:rw \ helicopterizer restore --sync ``` *Use ':rw' to mount the volumes in read-write mode.* - - + + Run [Backup|Restore] with environment file: + ``` touch ~/helicopterizer.conf ################################## @@ -163,11 +194,13 @@ AWS_S3_OPTIONS= . 
docker run --rm \ --env-file ~/helicopterizer.conf \ --v /home/jenkins-data:/data \ +-v /some/dir/jenkins-data:/data \ helicopterizer [backup|restore] [--tarball|--sync] ``` + Run [Backup|Restore] with data volume container: + ``` docker run --rm \ ........ @@ -176,6 +209,7 @@ docker run --rm \ helicopterizer [backup|restore] [--tarball|--sync] ``` + Run [Backup|Restore] with Cron Job Scheduler (System Timezone is UTC): ``` @@ -202,10 +236,13 @@ helicopterizer [backup|restore] [--tarball|--sync] - CRON_SCHEDULE='@midnight' - Run once a Day, the same as: '0 0 * * *' and @daily; - CRON_SCHEDULE='@hourly' - Run once a Hour, the same as: '0 * * * *'; ``` + More info to usage: [Cron Wiki]. + Run [Backup|Restore] with prefix name *$(BACKUP_NAME)-$(BACKUP_VERSION).tar.gz*: + ``` docker run --rm \ ........ @@ -213,7 +250,9 @@ docker run --rm \ helicopterizer [backup|restore] --tarball ``` + Run [Backup|Restore] without gzip compression: + ``` docker run --rm \ ........ @@ -221,9 +260,21 @@ docker run --rm \ helicopterizer [backup|restore] --tarball ``` + +Run [Backup|Restore] with bucket creation (if NoSuchBucket): + +``` +docker run --rm \ +........ +-e AWS_S3_BUCKET_CREATE=true \ +helicopterizer [backup|restore] --tarball +``` + + Run With clean the date before the restore: ***[Be careful here, you will lose all your data inside DATA_PATH directory].*** + ``` docker run --rm \ ........ @@ -231,16 +282,32 @@ docker run --rm \ helicopterizer restore [--tarball|--sync] ``` + Run [Backup|Restore] with other data path: + ``` docker run --rm \ ........ --e DATA_PATH=/other-data-directory/ \ --v /home/jenkins-data:/jenkins-data \ +-e DATA_PATH=/var/jenkins_home/ \ +-v /some/dir/jenkins-data:/var/jenkins_home \ helicopterizer [backup|restore] [--tarball|--sync] ``` + +Run [Backup] with other data path & exclude jenkins workspace: + +``` +docker run --rm \ +........ 
+-e DATA_PATH=/var/jenkins_home/ \ +-e DATA_PATH_EXCLUDE=workspace \ +-v /some/dir/jenkins-data:/var/jenkins_home \ +helicopterizer [backup|restore] [--tarball|--sync] +``` + + Run [Backup|Restore] with other AWS Region: + ``` docker run --rm \ ........ @@ -248,7 +315,9 @@ docker run --rm \ helicopterizer [backup|restore] [--tarball|--sync] ``` + Run [Backup|Restore] with subdirectories in AWS S3: + ``` docker run --rm \ ........ @@ -256,7 +325,9 @@ docker run --rm \ helicopterizer [backup|restore] [--tarball|--sync] ``` + Run [Backup|Restore] with Options [AWS CLI S3]: + ``` docker run --rm \ ........ @@ -273,24 +344,24 @@ There are two general approaches to handling persistent storage requirements wit See [Managing Data in Containers](https://docs.docker.com/userguide/dockervolumes/) for additional information. #### Data volume container - + *Use a data volume container*. Since data volumes are persistent until no containers use them, a container can created specifically for this purpose. -*Example with Jenkins:* - -``` +*Example with Jenkins:* + +``` docker run -d --name jenkins-data jenkinsci/jenkins:2.0 echo "data-only container for Jenkins" docker run -d -p 8080:8080 -p 50000:50000 --name jenkins --volumes-from jenkins-data jenkinsci/jenkins:2.0 ``` - - + + *Example with Nexus:* - + ``` docker run -d --name nexus-data sonatype/nexus3 echo "data-only container for Nexus" docker run -d -p 8081:8081 --name nexus --volumes-from nexus-data sonatype/nexus3 ``` - + #### Data volume *Mount a host directory as the volume*. This is not portable, as it relies on the directory existing with correct permissions on the host. 
@@ -299,15 +370,26 @@ However it can be useful in certain situations where this volume needs to be ass *Example with Jenkins:* ``` -mkdir /home/jenkins-data -docker run -d -p 8080:8080 -p 50000:50000 --name jenkins -v /home/jenkins-data:/jenkins-data jenkinsci/jenkins:2.0 +mkdir /some/dir/jenkins-data && chown -R 1000:1000 /some/dir/jenkins-data +docker run -d -p 8080:8080 -p 50000:50000 --name jenkins -v /some/dir/jenkins-data:/var/jenkins_home jenkinsci/jenkins + +# or + +docker volume create --name jenkins-data +docker run -d -p 8080:8080 -p 50000:50000 --name jenkins -v jenkins-data:/var/jenkins_home jenkinsci/jenkins ``` + *Example with Nexus:* ``` -mkdir /home/nexus-data && chown -R 200 /home/nexus-data -docker run -d -p 8081:8081 --name nexus -v /home/nexus-data:/nexus-data sonatype/nexus3 +mkdir /some/dir/nexus-data && chown -R 200 /some/dir/nexus-data +docker run -d -p 8081:8081 --name nexus -v /some/dir/nexus-data:/nexus-data sonatype/nexus3 + +# or + +docker volume create --name nexus-data +docker run -d -p 8081:8081 --name nexus -v nexus-data:/nexus-data sonatype/nexus3 ``` @@ -315,32 +397,32 @@ docker run -d -p 8081:8081 --name nexus -v /home/nexus-data:/nexus-data sonatype ### Building: Build with the usual - + docker build -t helicopterizer . - + Tests are written using [bats](https://github.com/sstephenson/bats) under the `tests` dir - + bats tests - - + + ### License: Helicopterizer is **licensed** under the **[MIT License]**. 
The terms of the license are as follows: The MIT License (MIT) - + Copyright (c) 2016 Leandro Kersting de Freitas - + Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - + The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE diff --git a/helicopterizer-demo.conf b/helicopterizer-demo.conf index ec9bca2..f3f892f 100644 --- a/helicopterizer-demo.conf +++ b/helicopterizer-demo.conf @@ -10,6 +10,7 @@ BACKUP_VERSION= CRON_SCHEDULE= AWS_ACCESS_KEY_ID=XXXXXXXXXXXXX AWS_SECRET_ACCESS_KEY=XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX +AWS_S3_BUCKET_CREATE=false AWS_S3_BUCKET_NAME=s3://my-bucket-backup/ AWS_S3_PATH=/ AWS_DEFAULT_REGION=us-east-1 diff --git a/helicopterizer.iml b/helicopterizer.iml index 80cc739..a3c52db 100644 --- a/helicopterizer.iml +++ b/helicopterizer.iml @@ -3,6 +3,7 @@ + - \ No newline at end of file + diff --git a/scripts/core/helper.sh b/scripts/core/helper.sh index 487fc8d..f5ac151 100644 --- a/scripts/core/helper.sh +++ b/scripts/core/helper.sh @@ -88,11 +88,13 @@ printEnvs(){ echo "BACKUP_NAME=$BACKUP_NAME" echo "BACKUP_VERSION=$BACKUP_VERSION" echo "DATA_PATH=$DATA_PATH" + echo "DATA_PATH_EXCLUDE=$DATA_PATH_EXCLUDE" echo "CRON_SCHEDULE=$CRON_SCHEDULE" echo "GZIP_COMPRESSION=$GZIP_COMPRESSION" echo "AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID" echo "AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY" 
echo "AWS_DEFAULT_REGION=$AWS_DEFAULT_REGION" + echo "AWS_S3_BUCKET_CREATE=$AWS_S3_BUCKET_CREATE" echo "AWS_S3_BUCKET_NAME=$AWS_S3_BUCKET_NAME" echo "AWS_S3_PATH=$AWS_S3_PATH" echo "AWS_S3_OPTIONS=$AWS_S3_OPTIONS" @@ -110,19 +112,21 @@ removeSlashUri(){ mountFileName(){ local backupVersion=$1; local gzipCompress=$2; - local backupName=$3; + local backupPrefix=$3; + local backupName=$4; local sufix="" local fileName="" + if [ "$gzipCompress" = "true" ]; then sufix=".tar.gz" else sufix=".tar" fi - if [ "$backupName" ]; then - fileName="$backupName-$backupVersion$sufix" + if [ -n "$backupName" ]; then + fileName="$backupName$sufix" else - fileName="$backupVersion$sufix" + fileName="$backupPrefix$backupVersion$sufix" fi echo "$fileName" } @@ -188,4 +192,15 @@ mountUriS3(){ echo "$s3Uri" } - +createS3Bucket(){ + # Check if create is required + if [ "$AWS_S3_BUCKET_CREATE" = "true" ]; then + local bucketS3Uri="$AWS_S3_BUCKET_NAME" + # Test if bucket doesn't exist (AWS_S3_BUCKET_NAME already carries the s3:// prefix) + if aws s3 ls "$AWS_S3_BUCKET_NAME" 2>&1 | grep -q 'NoSuchBucket'; then + # Create bucket + local s3BucketCreationResult=$(aws s3 mb "$bucketS3Uri") + echo "$s3BucketCreationResult" + fi + fi +} diff --git a/scripts/core/tarball/backup.sh b/scripts/core/tarball/backup.sh index 8106e65..7e08578 100644 --- a/scripts/core/tarball/backup.sh +++ b/scripts/core/tarball/backup.sh @@ -20,11 +20,20 @@ echo "Starting Tarball Backup to ${STORAGE_PROVIDER^^} ..." #Create tarball with gzip or not. compressTarball(){ echo "Starting compress: $DATA_PATH/ to /tmp/$1" + # Build tar arguments + TAR_CMD="" + ## Exclude options + for i in $(echo "$DATA_PATH_EXCLUDE," | tr "," "\n"); do + if [ -n "$i" ]; then + TAR_CMD="$TAR_CMD --exclude '$i'" + fi + done if [ "$GZIP_COMPRESSION" = "true" ]; then - tar -zcvf /tmp/$1 -C $DATA_PATH/ . + TAR_CMD="$TAR_CMD -zcvf /tmp/$1 -C $DATA_PATH/ ." else - tar -cvf /tmp/$1 -C $DATA_PATH/ . + TAR_CMD="$TAR_CMD -cvf /tmp/$1 -C $DATA_PATH/ ." 
fi + eval "tar $TAR_CMD" } @@ -33,7 +42,7 @@ BACKUP_VERSION=$(createBackupVersion) #Call to mount file name. -fileName=$(mountFileName $BACKUP_VERSION $GZIP_COMPRESSION $BACKUP_NAME) +fileName=$(mountFileName "$BACKUP_VERSION" $GZIP_COMPRESSION "$BACKUP_PREFIX" "$BACKUP_NAME") #Call tarball Compress. diff --git a/scripts/core/tarball/restore.sh b/scripts/core/tarball/restore.sh index f596112..708bae8 100644 --- a/scripts/core/tarball/restore.sh +++ b/scripts/core/tarball/restore.sh @@ -29,7 +29,7 @@ extractTarball(){ #Call to mount file name. -fileName=$(mountFileName $BACKUP_VERSION $GZIP_COMPRESSION $BACKUP_NAME) +fileName=$(mountFileName "$BACKUP_VERSION" $GZIP_COMPRESSION "$BACKUP_PREFIX" "$BACKUP_NAME") #Call to mount Restore file name for copy to /tmp. @@ -61,4 +61,3 @@ echo "downloadResult=$downloadResult" echo "timeBegin=$timeBegin" echo "timeEnd=$timeEnd" echo "timeDuration=$timeDuration second(s)" - diff --git a/scripts/provider/aws/sync/upload.sh b/scripts/provider/aws/sync/upload.sh index b7cbd9a..092a344 100644 --- a/scripts/provider/aws/sync/upload.sh +++ b/scripts/provider/aws/sync/upload.sh @@ -12,11 +12,13 @@ SyncUploadToS3(){ echo "$s3Result" } - uploadSync(){ #Call to mount uri S3. s3Uri=$(mountUriS3 "/" $AWS_S3_BUCKET_NAME $AWS_S3_PATH) + local createS3BucketResult=$(createS3Bucket) + echo "$createS3BucketResult" + echo "Starting Upload Sync from: $DATA_PATH/ to $s3Uri/" local s3Result=$(SyncUploadToS3) echo "$s3Result" diff --git a/scripts/provider/aws/tarball/upload.sh b/scripts/provider/aws/tarball/upload.sh index 3613052..ad96c22 100644 --- a/scripts/provider/aws/tarball/upload.sh +++ b/scripts/provider/aws/tarball/upload.sh @@ -15,6 +15,9 @@ uploadTarball(){ #Call to mount uri S3. s3Uri=$(mountUriS3 $1 $AWS_S3_BUCKET_NAME $AWS_S3_PATH) + local createS3BucketResult=$(createS3Bucket) + echo "$createS3BucketResult" + echo "Starting Upload Tarball from: /tmp/$1 to $s3Uri" local s3Result=$(uploadToS3 $1) echo "$s3Result"