diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 21ee2bb..b01a70d 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -53,4 +53,4 @@ jobs: version: latest args: release --clean --skip-validate env: - GITHUB_TOKEN: ${{ secrets.OLDSJ_GITHUB_TOKEN }} + GITHUB_TOKEN: ${{ secrets.RELEASE_TOKEN }} diff --git a/.goreleaser.yml b/.goreleaser.yml index 3ca81b2..b48c57c 100644 --- a/.goreleaser.yml +++ b/.goreleaser.yml @@ -45,7 +45,7 @@ signs: release: github: - owner: trailofbits + owner: crytic name: cloudexec brews: diff --git a/README.md b/README.md index c8bff3c..2d72d76 100644 --- a/README.md +++ b/README.md @@ -111,12 +111,6 @@ Confirm `cloudexec` has access to DigitalOcean. cloudexec check ``` -### Initialize your CloudExec environment - -```bash -cloudexec init -``` - ### Launch a new remote job Generate a cloudexec.toml configuration file in the current directory. @@ -195,3 +189,17 @@ cloudexec clean ``` Note that there is often a delay while deleting files from Digital Ocean Spaces buckets. + +## Optional: Create a CloudExec DigitalOcean image + +Building and uploading a dedicated DigitalOcean image for `cloudexec` will simplify your launch configuration and improve startup times. + +To do so, install `packer` with `brew install packer`. If you're using `nix` and `direnv`, it's added to your PATH via the flake's dev shell. + +To build and upload a docker image, run the following command. Make sure your DigitalOcean API key is either in your env vars or replace it with the actual token. + +`packer build -var do_api_token=$DIGITALOCEAN_API_KEY cloudexec.pkr.hcl` + +This will take care of everything and if you visit the [DigitalOcean snapshots page](https://cloud.digitalocean.com/images/snapshots/droplets), you'll see a snapshot called `cloudexec-20230920164605` or similar. 
`cloudexec` will search for snapshots starting with a `cloudexec-` prefix and it will use the one with the most recent timestamp string. + +Now, you can remove everything from the setup command in the example launch config or replace it to install additional tools. diff --git a/cmd/cloudexec/init.go b/cmd/cloudexec/init.go index d62c12d..fbaf5a3 100644 --- a/cmd/cloudexec/init.go +++ b/cmd/cloudexec/init.go @@ -7,25 +7,74 @@ import ( "github.com/crytic/cloudexec/pkg/s3" ) -func Init(username string, config config.Config) error { - bucketName := fmt.Sprintf("cloudexec-%s-trailofbits", username) - // Create a new bucket (or get an existing one) - err := s3.GetOrCreateBucket(config, username) +func Init(config config.Config, bucket string) error { + // Get a list of existing buckets + listBucketsOutput, err := s3.ListBuckets(config) if err != nil { - return fmt.Errorf("Failed to get bucket for %s: %w", username, err) + return fmt.Errorf("Failed to list buckets: %w", err) } - fmt.Printf("Using bucket: %v\n", bucketName) - // Create the state directory - err = s3.PutObject(config, bucketName, "state/", []byte{}) + // Return if the desired bucket already exists + bucketExists := false + for _, thisBucket := range listBucketsOutput { + if thisBucket == bucket { + bucketExists = true + } + } + + if !bucketExists { + // Create a new bucket + fmt.Printf("Creating new %s bucket...\n", bucket) + err = s3.CreateBucket(config, bucket) + if err != nil { + return err + } + } + + // Ensure versioning is enabled, necessary if bucket creation was interrupted + err = s3.SetVersioning(config, bucket) if err != nil { - return fmt.Errorf("Failed to create state directory in bucket %s: %w", bucketName, err) + return err } - // Create the initial state file - err = s3.PutObject(config, bucketName, "state/state.json", []byte("{}")) + // Initialize bucket state if not already present + err = initState(config, bucket) if err != nil { - return fmt.Errorf("Failed to create state file in bucket 
%s: %w", bucketName, err) + return fmt.Errorf("Failed to initialize state for bucket %s: %w", bucket, err) + } + + return nil +} + +func initState(config config.Config, bucket string) error { + // Check if the state directory already exists + stateDir := "state/" + stateDirExists, err := s3.ObjectExists(config, bucket, stateDir) + if err != nil { + return fmt.Errorf("Failed to check whether the state directory exists: %w", err) + } + // Create the state directory if it does not already exist + if !stateDirExists { + fmt.Printf("Creating new state directory at %s/%s\n", bucket, stateDir) + err = s3.PutObject(config, bucket, stateDir, []byte{}) + if err != nil { + return fmt.Errorf("Failed to create state directory at %s/%s: %w", bucket, stateDir, err) + } + } + + // Check if the state file already exists + statePath := "state/state.json" + statePathExists, err := s3.ObjectExists(config, bucket, statePath) + if err != nil { + return fmt.Errorf("Failed to check whether the state file exists: %w", err) + } + // Create the initial state file if it does not already exist + if !statePathExists { + fmt.Printf("Creating new state file at %s/%s\n", bucket, statePath) + err = s3.PutObject(config, bucket, statePath, []byte("{}")) + if err != nil { + return fmt.Errorf("Failed to create state file in bucket %s: %w", bucket, err) + } } return nil diff --git a/cmd/cloudexec/launch.go b/cmd/cloudexec/launch.go index 58d01f1..87f85e5 100644 --- a/cmd/cloudexec/launch.go +++ b/cmd/cloudexec/launch.go @@ -11,7 +11,6 @@ import ( "github.com/BurntSushi/toml" "github.com/crytic/cloudexec/pkg/config" do "github.com/crytic/cloudexec/pkg/digitalocean" - "github.com/crytic/cloudexec/pkg/s3" "github.com/crytic/cloudexec/pkg/ssh" "github.com/crytic/cloudexec/pkg/state" ) @@ -87,13 +86,7 @@ func LoadLaunchConfig(launchConfigPath string) (LaunchConfig, error) { func Launch(user *user.User, config config.Config, dropletSize string, dropletRegion string, lc LaunchConfig) error { username := 
user.Username - bucketName := fmt.Sprintf("cloudexec-%s-trailofbits", username) - - fmt.Printf("Getting or creating new bucket for %s...\n", username) - err := s3.GetOrCreateBucket(config, username) - if err != nil { - return fmt.Errorf("Failed to get bucket for %s: %w", username, err) - } + bucketName := fmt.Sprintf("cloudexec-%s", username) // get existing state from bucket fmt.Printf("Getting existing state from bucket %s...\n", bucketName) diff --git a/cmd/cloudexec/main.go b/cmd/cloudexec/main.go index 94f703c..c382e5d 100644 --- a/cmd/cloudexec/main.go +++ b/cmd/cloudexec/main.go @@ -31,7 +31,8 @@ func main() { os.Exit(1) } userName := user.Username - bucketName := fmt.Sprintf("cloudexec-%s-trailofbits", userName) + // TODO: sanitize username usage in bucketname + bucketName := fmt.Sprintf("cloudexec-%s", userName) // Attempt to load the configuration config, configErr := LoadConfig(configFilePath) @@ -63,23 +64,6 @@ func main() { return nil }, }, - { - Name: "init", - Usage: "Initialize the cloud environment", - Aliases: []string{"i"}, - Action: func(*cli.Context) error { - // Abort on configuration error - if configErr != nil { - return configErr - } - - err = Init(userName, config) - if err != nil { - return err - } - return nil - }, - }, { Name: "configure", Usage: "Configure credentials", @@ -148,6 +132,12 @@ func main() { dropletSize := c.String("size") dropletRegion := c.String("region") + // Initialize the s3 state + err = Init(config, bucketName) + if err != nil { + return err + } + fmt.Printf("Launching a %s droplet in the %s region\n", dropletSize, dropletRegion) err = Launch(user, config, dropletSize, dropletRegion, lc) if err != nil { @@ -172,6 +162,12 @@ func main() { return configErr } + // Initialize the s3 state + err = Init(config, bucketName) + if err != nil { + return err + } + existingState, err := state.GetState(config, bucketName) if err != nil { return err @@ -206,6 +202,12 @@ func main() { return configErr } + // Initialize the s3 
state + err = Init(config, bucketName) + if err != nil { + return err + } + instanceToJobs, err := state.GetJobIdsByInstance(config, bucketName) if err != nil { return err @@ -244,6 +246,12 @@ func main() { return configErr } + // Initialize the s3 state + err = Init(config, bucketName) + if err != nil { + return err + } + instanceToJobs, err := state.GetJobIdsByInstance(config, bucketName) if err != nil { return err @@ -287,6 +295,12 @@ func main() { return configErr } + // Initialize the s3 state + err = Init(config, bucketName) + if err != nil { + return err + } + existingState, err := state.GetState(config, bucketName) if err != nil { return err @@ -329,6 +343,12 @@ func main() { return configErr } + // Initialize the s3 state + err = Init(config, bucketName) + if err != nil { + return err + } + existingState, err := state.GetState(config, bucketName) if err != nil { return err @@ -392,6 +412,12 @@ func main() { return configErr } + // Initialize the s3 state + err = Init(config, bucketName) + if err != nil { + return err + } + // Retrieve existing state existingState, err := state.GetState(config, bucketName) if err != nil { @@ -414,6 +440,12 @@ func main() { return configErr } + // Initialize the s3 state + err = Init(config, bucketName) + if err != nil { + return err + } + jobID := c.Args().First() // Get the job ID from the arguments if jobID == "" { fmt.Println("Please provide a job ID to remove") @@ -449,6 +481,12 @@ func main() { return configErr } + // Initialize the s3 state + err = Init(config, bucketName) + if err != nil { + return err + } + // Retrieve existing state existingState, err := state.GetState(config, bucketName) if err != nil { @@ -474,6 +512,13 @@ func main() { if configErr != nil { return configErr } + + // Initialize the s3 state + err = Init(config, bucketName) + if err != nil { + return err + } + // First check if there's a running job existingState, err := state.GetState(config, bucketName) if err != nil { diff --git 
a/cmd/cloudexec/user_data.go b/cmd/cloudexec/user_data.go index 9839780..c8d058e 100644 --- a/cmd/cloudexec/user_data.go +++ b/cmd/cloudexec/user_data.go @@ -4,6 +4,7 @@ import ( "bytes" _ "embed" "fmt" + "strings" "text/template" "time" @@ -36,13 +37,14 @@ func GenerateUserData(config config.Config, lc LaunchConfig) (string, error) { timeoutStr := fmt.Sprintf("%d", int(timeout.Seconds())) // Set the values for the template + // double quotes are escaped so the command strings can be safely contained by double quotes in bash data := UserData{ SpacesAccessKey: config.DigitalOcean.SpacesAccessKey, SpacesSecretKey: config.DigitalOcean.SpacesSecretKey, SpacesRegion: config.DigitalOcean.SpacesRegion, DigitalOceanToken: config.DigitalOcean.ApiKey, - SetupCommands: lc.Commands.Setup, - RunCommand: lc.Commands.Run, + SetupCommands: strings.ReplaceAll(lc.Commands.Setup, `"`, `\"`), + RunCommand: strings.ReplaceAll(lc.Commands.Run, `"`, `\"`), Timeout: timeoutStr, } diff --git a/cmd/cloudexec/user_data.sh.tmpl b/cmd/cloudexec/user_data.sh.tmpl index 46291ff..c041733 100644 --- a/cmd/cloudexec/user_data.sh.tmpl +++ b/cmd/cloudexec/user_data.sh.tmpl @@ -4,6 +4,9 @@ set -e shopt -s inherit_errexit +######################################## +# Setup env vars and constants + # Import env vars from user data export DIGITALOCEAN_ACCESS_TOKEN={{.DigitalOceanToken}} export AWS_ACCESS_KEY_ID={{.SpacesAccessKey}} @@ -16,6 +19,93 @@ export TIMEOUT="{{.Timeout}}" stdout_log="/tmp/cloudexec-stdout.log" stderr_log="/tmp/cloudexec-stderr.log" +######################################## +# Required setup + +# Wait for unattended-upgr to finish install/upgrading stuff in the background +while fuser /var/lib/dpkg/lock >/dev/null 2>&1; do + echo "Waiting for unattended-upgr to finish..." + sleep 3 +done + +echo "Installing prereqs..." 
+export DEBIAN_FRONTEND=noninteractive +apt-get update +apt-get install -y jq s3cmd tmux python3-pip python3-venv unzip + +# set hostname +current_hostname="$(hostname)" +if [[ ${current_hostname} != "cloudexec" ]]; then + echo "Setting hostname..." + echo "cloudexec" >/etc/hostname + hostname -F /etc/hostname +fi + +if ! command -v doctl >/dev/null 2>&1; then + echo "Downloading doctl..." + curl -fsSL -o /tmp/doctl-1.92.0-linux-amd64.tar.gz https://github.com/digitalocean/doctl/releases/download/v1.92.0/doctl-1.92.0-linux-amd64.tar.gz + echo "Extracting doctl..." + tar -xzf /tmp/doctl-1.92.0-linux-amd64.tar.gz -C /tmp + echo "Installing doctl..." + mv /tmp/doctl /usr/local/bin + echo "Cleaning up..." + rm /tmp/doctl-1.92.0-linux-amd64.tar.gz +fi + +######################################## +# Confirm required env vars are present + +echo "Confirming this is a CloudExec droplet..." +TAGS=$(curl -s http://169.254.169.254/metadata/v1/tags) +echo "Droplet tags:" +echo "${TAGS}" +export JOB_ID="" +export USERNAME="" +export CLOUDEXEC=false +for tag in ${TAGS}; do + if [[ ${tag} == "Purpose:cloudexec" ]]; then + CLOUDEXEC=true + elif [[ ${tag} == "Owner:"* ]]; then + USERNAME=${tag#"Owner:"} + elif [[ ${tag} == "Job:"* ]]; then + JOB_ID=${tag#"Job:"} + fi +done + +export BUCKET_NAME="cloudexec-${USERNAME}" +echo "Using bucket ${BUCKET_NAME}" + +echo "Setting up DigitalOcean credentials..." +# ensure these are set in the environment +if [[ -z ${DIGITALOCEAN_ACCESS_TOKEN} ]]; then + echo "ERROR: DIGITALOCEAN_ACCESS_TOKEN is not set" + echo "CloudExec will not be able to destroy the droplet" + echo "on exit and you will incur charges." + exit 1 +fi + +echo "Setting up S3 credentials..." +# Spaces uses the AWS S3 API +for var in AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY AWS_DEFAULT_REGION; do + if [[ -z ${!var} ]]; then + echo "${var} is not set, exiting..." 
+ exit 1 + fi +done + +if [[ ${CLOUDEXEC} == false ]] || [[ ${USERNAME} == "" ]]; then + echo "Not a CloudExec droplet, exiting..." + # exit 1 +fi + +if [[ ${JOB_ID} == "" ]]; then + echo "No job ID, exiting..." + exit 1 +fi + +######################################## +# Define helper functions + fmtDate() { date -d "@$1" "+%Y-%m-%d %H:%M:%S" } @@ -98,49 +188,12 @@ update_state() { # Set the trap to call the cleanup function on signals or exit trap cleanup EXIT SIGHUP SIGINT SIGTERM -echo "Setting up DigitalOcean credentials..." -# ensure these are set in the environment -if [[ -z ${DIGITALOCEAN_ACCESS_TOKEN} ]]; then - echo "ERROR: DIGITALOCEAN_ACCESS_TOKEN is not set" - echo "CloudExec will not be able to destroy the droplet" - echo "on exit and you will incur charges." - exit 1 -fi +######################################## +# Job-specific setup -echo "Confirming this is a CloudExec droplet..." -TAGS=$(curl -s http://169.254.169.254/metadata/v1/tags) -echo "Droplet tags:" -echo "${TAGS}" -export JOB_ID="" -export USERNAME="" -export CLOUDEXEC=false -for tag in ${TAGS}; do - if [[ ${tag} == "Purpose:cloudexec" ]]; then - CLOUDEXEC=true - elif [[ ${tag} == "Owner:"* ]]; then - USERNAME=${tag#"Owner:"} - elif [[ ${tag} == "Job:"* ]]; then - JOB_ID=${tag#"Job:"} - fi -done -if [[ ${CLOUDEXEC} == false ]] || [[ ${USERNAME} == "" ]]; then - echo "Not a CloudExec droplet, exiting..." - # exit 1 -fi -if [[ ${JOB_ID} == "" ]]; then - echo "No job ID, exiting..." - exit 1 -fi -export BUCKET_NAME="cloudexec-${USERNAME}-trailofbits" - -echo "Setting up S3 credentials..." -# Spaces uses the AWS S3 API -for var in AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY AWS_DEFAULT_REGION; do - if [[ -z ${!var} ]]; then - echo "${var} is not set, exiting..." - exit 1 - fi -done +echo "Running setup..." +mkdir -p ~/output +eval "${SETUP_COMMANDS}" echo "Downloading inputs..." 
s3cmd get -r "s3://${BUCKET_NAME}/job-${JOB_ID}/input" ~/ @@ -152,16 +205,15 @@ fi source ~/venv/bin/activate -echo "Running setup..." -mkdir -p ~/output -eval "${SETUP_COMMANDS}" - # Update state to running update_state "running" # Create a temporary file to track the completion of the task exit_code_flag="/tmp/cloudexec-exit-code" +######################################## +# Execute Job + # Use Ctrl-C to detach from the tmux session echo "bind-key -n C-c detach" >~/.tmux.conf # Run the tmux command in the background @@ -182,6 +234,9 @@ end_time=$(("${start_time}" + TIMEOUT)) pretty_end_time="$(fmtDate "${end_time}")" echo "Workload is running, timer started at ${pretty_start_time}, we'll time out at ${pretty_end_time}" +######################################## +# Wait for job to finish + # Wait for the temporary file to be created while true; do if [[ -s ${exit_code_flag} ]]; then diff --git a/example/cloudexec.toml b/example/cloudexec.toml index e629c8d..0781404 100644 --- a/example/cloudexec.toml +++ b/example/cloudexec.toml @@ -4,11 +4,48 @@ timeout = "48h" [commands] setup = ''' -sudo apt-get update; sudo apt-get install -y unzip -curl -fsSL https://github.com/crytic/medusa/releases/download/v0.1.0/medusa-linux-x64.zip -o medusa.zip -unzip medusa.zip -chmod +x medusa -sudo mv medusa /usr/local/bin +if ! command -v slither >/dev/null 2>&1; then + echo "Installing solc and slither..." + python3 -m venv ~/venv + source ~/venv/bin/activate + pip3 install solc-select slither-analyzer crytic-compile + solc-select install 0.8.6 + solc-select use 0.8.6 +fi + +if ! command -v echidna >/dev/null 2>&1; then + echo "Downloading echidna..." + curl -fsSL -o /tmp/echidna.zip https://github.com/crytic/echidna/releases/download/v2.2.1/echidna-2.2.1-Linux.zip + echo "Extracting echidna..." + unzip /tmp/echidna.zip -d /tmp + tar -xzf /tmp/echidna.tar.gz -C /tmp + echo "Installing echidna..." + mv /tmp/echidna /usr/local/bin + rm /tmp/echidna.tar.gz +fi + +if ! 
command -v medusa >/dev/null 2>&1; then + echo "Downloading medusa..." + sudo apt-get update; sudo apt-get install -y unzip + curl -fsSL https://github.com/crytic/medusa/releases/download/v0.1.0/medusa-linux-x64.zip -o medusa.zip + unzip medusa.zip + chmod +x medusa + sudo mv medusa /usr/local/bin +fi + +if ! command -v docker >/dev/null 2>&1; then + echo "Installing docker and its dependencies..." + apt-get install -y apt-transport-https ca-certificates curl gnupg-agent software-properties-common + docker_key="$(curl -fsSL https://download.docker.com/linux/ubuntu/gpg)" + echo "${docker_key}" | apt-key add - + release="$(lsb_release -cs)" + add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu ${release} stable" + apt-get update -y + apt-get install -y docker-ce docker-ce-cli containerd.io + user="$(whoami)" + usermod -aG docker "${user}" + systemctl enable docker + fi ''' # This command is run after the setup script completes. diff --git a/packer/provision.sh b/packer/provision.sh index 2c24ed0..ba992b4 100644 --- a/packer/provision.sh +++ b/packer/provision.sh @@ -2,6 +2,9 @@ # shellcheck source=/dev/null set -e +######################################## +## Required Configuration and Dependencies + # set hostname echo "Setting hostname..." echo "cloudexec" >/etc/hostname @@ -21,6 +24,16 @@ mv /tmp/doctl /usr/local/bin echo "Cleaning up..." rm /tmp/doctl-1.92.0-linux-amd64.tar.gz +######################################## +## Common fuzz testing and analysis tools + +echo "Installing solc and slither..." +python3 -m venv ~/venv +source ~/venv/bin/activate +pip3 install solc-select slither-analyzer crytic-compile +solc-select install 0.8.6 +solc-select use 0.8.6 + echo "Downloading echidna..." curl -fsSL -o /tmp/echidna.zip https://github.com/crytic/echidna/releases/download/v2.2.1/echidna-2.2.1-Linux.zip echo "Extracting echidna..." @@ -30,12 +43,13 @@ echo "Installing echidna..." 
mv /tmp/echidna /usr/local/bin rm /tmp/echidna.tar.gz -echo "Installing solc and slither..." -python3 -m venv ~/venv -source ~/venv/bin/activate -pip3 install solc-select slither-analyzer crytic-compile -solc-select install 0.8.6 -solc-select use 0.8.6 +echo "Downloading medusa..." +sudo apt-get update +sudo apt-get install -y unzip +curl -fsSL https://github.com/crytic/medusa/releases/download/v0.1.0/medusa-linux-x64.zip -o medusa.zip +unzip medusa.zip +chmod +x medusa +sudo mv medusa /usr/local/bin echo "Installing docker and its dependencies..." apt-get install -y apt-transport-https ca-certificates curl gnupg-agent software-properties-common diff --git a/pkg/digitalocean/digitalocean.go b/pkg/digitalocean/digitalocean.go index ee9c227..fa987e0 100644 --- a/pkg/digitalocean/digitalocean.go +++ b/pkg/digitalocean/digitalocean.go @@ -302,7 +302,10 @@ func GetLatestSnapshot(config config.Config) (Snapshot, error) { } if latestSnapshot == nil { - return empty, fmt.Errorf("Failed to find cloudexec snapshot") + return Snapshot{ + ID: "ubuntu-22-04-x64", + Name: "default", + }, nil } return Snapshot{ diff --git a/pkg/s3/s3.go b/pkg/s3/s3.go index 6c7708c..a6137ee 100644 --- a/pkg/s3/s3.go +++ b/pkg/s3/s3.go @@ -33,14 +33,14 @@ var s3Client *s3.S3 // Note: not safe to use concurrently from multiple goroutines (yet) func initializeS3Client(config config.Config, init bool) (*s3.S3, error) { // Immediately return our cached client if available - if s3Client != nil { + if !init && s3Client != nil { return s3Client, nil } // Unpack required config values spacesAccessKey := config.DigitalOcean.SpacesAccessKey spacesSecretKey := config.DigitalOcean.SpacesSecretKey - endpointRegion := config.DigitalOcean.SpacesRegion + endpoint := fmt.Sprintf("https://%s.digitaloceanspaces.com", config.DigitalOcean.SpacesRegion) // Region must be "us-east-1" when creating new Spaces. Otherwise, use the region in your endpoint, such as "nyc3". 
var spacesRegion string @@ -53,7 +53,7 @@ func initializeS3Client(config config.Config, init bool) (*s3.S3, error) { // Configure the Spaces client spacesConfig := &aws.Config{ Credentials: credentials.NewStaticCredentials(spacesAccessKey, spacesSecretKey, ""), - Endpoint: aws.String(fmt.Sprintf("https://%s.digitaloceanspaces.com", endpointRegion)), + Endpoint: aws.String(endpoint), Region: aws.String(spacesRegion), S3ForcePathStyle: aws.Bool(false), } @@ -64,11 +64,12 @@ func initializeS3Client(config config.Config, init bool) (*s3.S3, error) { return nil, fmt.Errorf("Failed to create S3 client: %w", err) } - // Cache client for subsequent usage iff not the client used for initialization - if !init { - s3Client = s3.New(newSession) + if init { + return s3.New(newSession), nil } + // Cache client for subsequent usage iff not the client used for initialization + s3Client = s3.New(newSession) return s3Client, nil } @@ -95,51 +96,36 @@ func ListBuckets(config config.Config) ([]string, error) { return buckets, nil } -func GetOrCreateBucket(config config.Config, username string) error { - // TODO: sanitize username & centralize bucket name creation - bucket := fmt.Sprintf("cloudexec-%s-trailofbits", username) - - listBucketsOutput, err := ListBuckets(config) +func SetVersioning(config config.Config, bucket string) error { + // create a non-init client + s3Client, err := initializeS3Client(config, false) if err != nil { - return fmt.Errorf("Failed to list buckets: %w", err) + return err } - - // Check if the desired Space already exists - for _, thisBucket := range listBucketsOutput { - if thisBucket == bucket { - // create a non-init client - s3Client, err := initializeS3Client(config, false) - if err != nil { - return err - } - - // ensure versioning is enabled on the bucket - _, err = s3Client.PutBucketVersioning(&s3.PutBucketVersioningInput{ - Bucket: aws.String(bucket), - VersioningConfiguration: &s3.VersioningConfiguration{ - Status: aws.String("Enabled"), - }, - 
}) - if err != nil { - return fmt.Errorf("Failed to enable versioning on bucket '%s': %w", bucket, err) - } - return nil - } + // ensure versioning is enabled on the bucket + _, err = s3Client.PutBucketVersioning(&s3.PutBucketVersioningInput{ + Bucket: aws.String(bucket), + VersioningConfiguration: &s3.VersioningConfiguration{ + Status: aws.String("Enabled"), + }, + }) + if err != nil { + return fmt.Errorf("Failed to enable versioning on bucket '%s': %w", bucket, err) } + return nil +} +func CreateBucket(config config.Config, bucket string) error { // create an initialization client s3Client, err := initializeS3Client(config, true) if err != nil { return err } - // craft bucket creation request - createBucketInput := &s3.CreateBucketInput{ - Bucket: aws.String(bucket), - } - // execution bucket creation - _, err = s3Client.CreateBucket(createBucketInput) + _, err = s3Client.CreateBucket(&s3.CreateBucketInput{ + Bucket: aws.String(bucket), + }) if err != nil { return fmt.Errorf("Failed to create bucket '%s': %w", bucket, err) } @@ -152,21 +138,11 @@ func GetOrCreateBucket(config config.Config, username string) error { return fmt.Errorf("Failed to wait for bucket '%s': %w", bucket, err) } - // enable versioning on the bucket - _, err = s3Client.PutBucketVersioning(&s3.PutBucketVersioningInput{ - Bucket: aws.String(bucket), - VersioningConfiguration: &s3.VersioningConfiguration{ - Status: aws.String("Enabled"), - }, - }) - if err != nil { - return fmt.Errorf("Failed to enable versioning on bucket '%s': %w", bucket, err) - } - fmt.Printf("Created bucket '%s'...\n", bucket) return nil } +// Note: will overwrite existing object. 
func PutObject(config config.Config, bucket string, key string, value []byte) error { // create a client s3Client, err := initializeS3Client(config, false) @@ -176,10 +152,9 @@ func PutObject(config config.Config, bucket string, key string, value []byte) er // If zero-length value is given, create a directory instead of a file if len(value) == 0 { - // Create the state directory _, err = s3Client.PutObject(&s3.PutObjectInput{ Bucket: aws.String(bucket), - Key: aws.String("state/"), + Key: aws.String(key), }) if err != nil { return fmt.Errorf("Failed to create %s directory in bucket %s: %w", key, bucket, err) @@ -209,6 +184,17 @@ func PutObject(config config.Config, bucket string, key string, value []byte) er return nil } +func ObjectExists(config config.Config, bucket string, key string) (bool, error) { + // Get a list of objects that are prefixed by the target key + objects, err := ListObjects(config, bucket, key) + if err != nil { + return false, err + } + + // return true if we got a non-zero number of objects that match the prefix + return len(objects) != 0, nil +} + func GetObject(config config.Config, bucket string, key string) ([]byte, error) { // create a client s3Client, err := initializeS3Client(config, false) diff --git a/pkg/state/state.go b/pkg/state/state.go index e17af85..abdba19 100644 --- a/pkg/state/state.go +++ b/pkg/state/state.go @@ -205,10 +205,10 @@ func GetJobIdsByInstance(config config.Config, bucketName string) (map[int64][]i if err != nil { return nil, fmt.Errorf("Failed to get state: %w", err) } + instanceToJobIds := make(map[int64][]int64) if existingState.Jobs == nil { - return nil, fmt.Errorf("No jobs found in the existing state") + return instanceToJobIds, nil } - instanceToJobIds := make(map[int64][]int64) for _, job := range existingState.Jobs { instanceToJobIds[job.InstanceID] = append(instanceToJobIds[job.InstanceID], job.ID) }