diff --git a/benchmarks/README.md b/benchmarks/README.md
index 05a91b436..77c713818 100644
--- a/benchmarks/README.md
+++ b/benchmarks/README.md
@@ -132,4 +132,4 @@ terraform plan
terraform apply
```
-To further interact with the Locust inference benchmark, view the README.md file in `benchmark/tools/locust-load-inference`
\ No newline at end of file
+To further interact with the Locust inference benchmark, view the README.md file in `benchmark/tools/locust-load-inference`
diff --git a/benchmarks/benchmark/dataset/ShareGPT_v3_unflitered_cleaned_split/README.md b/benchmarks/benchmark/dataset/ShareGPT_v3_unflitered_cleaned_split/README.md
index 47311a0ce..82bcd443d 100644
--- a/benchmarks/benchmark/dataset/ShareGPT_v3_unflitered_cleaned_split/README.md
+++ b/benchmarks/benchmark/dataset/ShareGPT_v3_unflitered_cleaned_split/README.md
@@ -1,10 +1,13 @@
This directory contains the script for uploading a filtered and formatted file of prompts based on the "anon8231489123/ShareGPT_Vicuna_unfiltered" dataset to a given GCS path.
Example usage:
- python3 upload_sharegpt.py --gcs_path="gs://$BUCKET_NAME/ShareGPT_V3_unfiltered_cleaned_split_filtered_prompts.txt"
+```
+python3 upload_sharegpt.py --gcs_path="gs://$BUCKET_NAME/ShareGPT_V3_unfiltered_cleaned_split_filtered_prompts.txt"
+```
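+After the script finishes, you can confirm the prompts file is in your bucket with, for example:
+```
+gcloud storage ls gs://$BUCKET_NAME/ShareGPT_V3_unfiltered_cleaned_split_filtered_prompts.txt
+```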
pre-work:
-- upload_sharegpt.py assumes that the bucket already exists. If it does not exist, make sure that you create your bucket $BUCKET_NAME in your project prior to running the script. You can do that with the following command:
+- upload_sharegpt.py may require additional Python libraries; see below.
+- upload_sharegpt.py assumes that the bucket already exists. If you created your cluster via the terraform scripts in `./infra/stage-2`, the bucket was created for you (see `terraform.tfvars` in that directory for its name). If it does not exist, create the bucket $BUCKET_NAME in your project before running the script. You can do that with the following command:
```
gcloud storage buckets create gs://$BUCKET_NAME --location=BUCKET_LOCATION
```
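+If you are relying on the stage-2 terraform scripts having created the bucket, you can verify that it exists with, for example:
+```
+gcloud storage buckets describe gs://$BUCKET_NAME
+```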
@@ -20,7 +23,7 @@ Assumes in your environment you:
- have access to use google storage APIs via Application Default Credentials (ADC)
You may need to do the following:
-- run "pip install google-cloud-storage" to install storage client library dependencies
+- run "pip install wget google-cloud-storage" to install storage client library dependencies. (Optionally, you can run this within a venv, i.e. `python3 -m venv ./venv && source ./venv/bin/activate && pip install ...`)
- run "gcloud auth application-default login" to enable ADC
-For more information on running the google cloud storage API, see https://cloud.google.com/python/docs/reference/storage
\ No newline at end of file
+For more information on the Google Cloud Storage Python client library, see https://cloud.google.com/python/docs/reference/storage
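+Putting the pre-work steps together, a typical end-to-end run (optionally inside a venv) might look like this, with $BUCKET_NAME replaced by your bucket:
+```
+python3 -m venv ./venv && source ./venv/bin/activate
+pip install wget google-cloud-storage
+gcloud auth application-default login
+python3 upload_sharegpt.py --gcs_path="gs://$BUCKET_NAME/ShareGPT_V3_unfiltered_cleaned_split_filtered_prompts.txt"
+```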
diff --git a/benchmarks/benchmark/tools/locust-load-inference/README.md b/benchmarks/benchmark/tools/locust-load-inference/README.md
index 096f11e8e..043125d3e 100644
--- a/benchmarks/benchmark/tools/locust-load-inference/README.md
+++ b/benchmarks/benchmark/tools/locust-load-inference/README.md
@@ -58,7 +58,7 @@ The Locust workload requires storage.admin access to view the dataset in the giv
To give viewer permissions on the gcs bucket to the gcloud service account, run the following:
```
-gcloud storage buckets add-iam-policy-binding gs://$BUCKET/$DATASET_FILENAME
+gcloud storage buckets add-iam-policy-binding gs://$BUCKET \
--member=serviceAccount:$GOOGLE_SERVICE_ACCOUNT@$PROJECT_ID.iam.gserviceaccount.com --role=roles/storage.admin
```
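+To confirm the binding was applied, you can inspect the bucket's IAM policy with, for example:
+```
+gcloud storage buckets get-iam-policy gs://$BUCKET
+```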
@@ -237,4 +237,4 @@ To change the benchmark configuration, you will have to rerun terraform destroy
| [sax\_model](#input\_sax\_model) | Benchmark server configuration for sax model. Only required if framework is sax. | `string` | `""` | no |
| [tokenizer](#input\_tokenizer) | Benchmark server configuration for tokenizer. | `string` | `"tiiuae/falcon-7b"` | yes |
| [use\_beam\_search](#input\_use\_beam\_search) | Benchmark server configuration for use beam search. | `bool` | `false` | no |
-
\ No newline at end of file
+
diff --git a/benchmarks/benchmark/tools/locust-load-inference/sample-terraform.tfvars b/benchmarks/benchmark/tools/locust-load-inference/sample-terraform.tfvars
index a02ce945f..a91b3a67d 100644
--- a/benchmarks/benchmark/tools/locust-load-inference/sample-terraform.tfvars
+++ b/benchmarks/benchmark/tools/locust-load-inference/sample-terraform.tfvars
@@ -12,7 +12,7 @@ artifact_registry = "us-central1-docker.pkg.dev/$PROJECT_
inference_server_service = "tgi" # inference server service name
locust_runner_kubernetes_service_account = "sample-runner-ksa"
output_bucket = "benchmark-output"
-gcs_path = "gs://ai-on-gke-benchmark/ShareGPT_V3_unfiltered_cleaned_split_filtered_prompts.txt"
+gcs_path = "gs://${PROJECT_ID}-ai-gke-benchmark-fuse/ShareGPT_V3_unfiltered_cleaned_split_filtered_prompts.txt"
# Benchmark configuration for Locust Docker accessing inference server
inference_server_framework = "tgi"
@@ -21,4 +21,4 @@ tokenizer = "tiiuae/falcon-7b"
# Benchmark configuration for triggering single test via Locust Runner
test_duration = 60
test_users = 1
-test_rate = 5
\ No newline at end of file
+test_rate = 5
diff --git a/benchmarks/infra/stage-2/sample-terraform.tfvars b/benchmarks/infra/stage-2/sample-terraform.tfvars
index da97413ab..7900e9b9f 100644
--- a/benchmarks/infra/stage-2/sample-terraform.tfvars
+++ b/benchmarks/infra/stage-2/sample-terraform.tfvars
@@ -8,14 +8,14 @@ credentials_config = {
# terraform output -json | jq '."project_id".value'
project_id = "change-me"
-bucket_name = "ai-gke-benchmark-fuse"
+bucket_name = "${PROJECT_ID}-ai-gke-benchmark-fuse"
bucket_location = "US"
-output_bucket_name = "benchmark-output"
+output_bucket_name = "${PROJECT_ID}-benchmark-output"
output_bucket_location = "US"
google_service_account = "benchmark-sa"
kubernetes_service_account = "benchmark-ksa"
benchmark_runner_google_service_account = "sample-runner-sa"
-benchmark_runner_kubernetes_service_account = "sample-runner-ksa"
\ No newline at end of file
+benchmark_runner_kubernetes_service_account = "sample-runner-ksa"