From 7679ae5353c648baad95e370516b3f34872ed19e Mon Sep 17 00:00:00 2001 From: Artem Minyaylov Date: Wed, 6 Mar 2024 07:04:51 +0000 Subject: [PATCH] Upgrade ray version; shrink worker resource allocation --- applications/rag/README.md | 56 ++++++++----------- .../rag-kaggle-ray-sql-latest.ipynb | 5 +- .../kuberay-autopilot-values.yaml | 36 ++++++------ 3 files changed, 41 insertions(+), 56 deletions(-) diff --git a/applications/rag/README.md b/applications/rag/README.md index e68d1f327..8c904a76f 100644 --- a/applications/rag/README.md +++ b/applications/rag/README.md @@ -1,6 +1,6 @@ # RAG-on-GKE Application -**NOTE:** This solution is in beta/a work in progress - please expect friction while using it. +**NOTE:** This solution is in beta. Please expect friction while using it. This is a sample to deploy a RAG application on GKE. Retrieval Augmented Generation (RAG) is a popular approach for boosting the accuracy of LLM responses, particularly for domain specific or private data sets. The basic idea is to have a semantically searchable knowledge base (often using vector search), which is used to retrieve relevant snippets for a given prompt to provide additional context to the LLM. Augmenting the knowledge base with additional data is typically cheaper than fine tuning and is more scalable when incorporating current events and other rapidly changing data spaces. @@ -32,7 +32,7 @@ CLUSTER_REGION=us-central1 ``` 2. Use the following instructions to create a GKE cluster. We recommend using Autopilot for a simpler setup. -##### Autopilot +##### Autopilot (recommended) RAG requires the latest Autopilot features, available on GKE cluster version `1.29.1-gke.1575000`+ ``` @@ -46,7 +46,7 @@ gcloud container clusters create-auto ${CLUSTER_NAME:?} \ --cluster-version ${CLUSTER_VERSION:?} ``` -##### Standard (recommended) +##### Standard 1. 
To create a GKE Standard cluster using Terraform, follow the [instructions here](https://github.com/GoogleCloudPlatform/ai-on-gke/blob/main/infrastructure/README.md). Use the preconfigured node pools in `/infrastructure/platform.tfvars` as this solution requires T4s and L4s. @@ -105,6 +105,7 @@ gcloud container clusters get-credentials ${CLUSTER_NAME:?} --location ${CLUSTER ``` kubectl port-forward -n ${NAMESPACE:?} deployment/mistral-7b-instruct 8080:8080 ``` + * In a new terminal, try a few prompts: ``` export USER_PROMPT="How to deploy a container on K8s?" @@ -119,6 +120,7 @@ curl 127.0.0.1:8080/generate -X POST \ } EOF ``` + * At the end of the smoke test with the TGI server, stop port forwarding by using Ctrl-C on the original terminal. 5. Verify the frontend chat interface is setup: @@ -145,10 +147,10 @@ This step generates the vector embeddings for your input dataset. Currently, the 1. Create a CloudSQL user to access the database: `gcloud sql users create rag-user-notebook --password=${SQL_PASSWORD:?} --instance=pgvector-instance --host=%` 2. 
Go to the Jupyterhub service endpoint in a browser: - * IAP disable: `kubectl get services proxy-public -n $NAMESPACE --output jsonpath='{.status.loadBalancer.ingress[0].ip}'` - * IAP enabled: Read terraform output `jupyter_uri` or use commend: `kubectl get managedcertificates jupyter-managed-cert -n $NAMESPACE --output jsonpath='{.status.domainStatus[0].domain}'` - * Remeber login GCP to check if user has role `IAP-secured Web App User` - * Waiting for domain status to be `Active` + * IAP disabled: `kubectl get services proxy-public -n $NAMESPACE --output jsonpath='{.status.loadBalancer.ingress[0].ip}'` + * IAP enabled: Read terraform output `jupyter_uri` or use command: `kubectl get managedcertificates jupyter-managed-cert -n $NAMESPACE --output jsonpath='{.status.domainStatus[0].domain}'` + * Open Google Cloud Console IAM to verify that the user has role `IAP-secured Web App User` + * Wait for the domain status to be `Active` 3. Login with placeholder credentials [TBD: replace with instructions for IAP]: * username: user * password: use `terraform output jupyter_password` to fetch the password value @@ -167,40 +169,28 @@ This step generates the vector embeddings for your input dataset. Currently, the * `os.environ['KAGGLE_KEY']` 9. Run all the cells in the notebook. This will generate vector embeddings for the input dataset (`denizbilginn/google-maps-restaurant-reviews`) and store them in the `pgvector-instance` via a Ray job. - * Once submitted, Ray will take several minutes to create the runtime environment and optionally scale up Ray worker nodes. During this time, the job status will remain PENDING. - * When the job status is SUCCEEDED, the vector embeddings have been generated and we are ready to launch the frontend chat interface. + * If the Ray job has FAILED, re-run the cell. + * When the Ray job has SUCCEEDED, we are ready to launch the frontend chat interface. 
-### Launch the Frontend Chat Interface
+### Access the Frontend Chat Interface
 
-#### Accessing the Frontend with IAP Disabled
+#### With IAP Disabled
 1. Setup port forwarding for the frontend: `kubectl port-forward service/rag-frontend -n $NAMESPACE 8080:8080 &`
 2. Go to `localhost:8080` in a browser & start chatting! This will fetch context related to your prompt from the vector embeddings in the `pgvector-instance`, augment the original prompt with the context & query the inference model (`mistral-7b`) with the augmented prompt.
 
-#### Accessing the Frontend with IAP Enabled
-1. Verify IAP is Enabled
-
-   * Ensure that IAP is enabled on Google Cloud Platform (GCP) for your application. If you encounter any errors, try re-enabling IAP.
-
-2. Verify User Role
-
-   * Make sure you have the role `IAP-secured Web App User` assigned to your user account. This role is necessary to access the application through IAP.
-
-3. Verify Domain is Active
-   * Make sure the domain is active using commend:
-   `kubectl get managedcertificates frontend-managed-cert -n rag --output jsonpath='{.status.domainStatus[0].status}'`
-
-3. Retrieve the Domain
-
-   * Read terraform output `frontend_uri` or use the following command to find the domain created by IAP for accessing your service:
-   `kubectl get managedcertificates frontend-managed-cert -n $NAMESPACE --output jsonpath='{.status.domainStatus[0].domain}'`
-
-4. Access the Frontend
+#### With IAP Enabled
+1. Verify that IAP is enabled on Google Cloud Platform (GCP) for your application. If you encounter any errors, try re-enabling IAP.
+2. Verify that you have the role `IAP-secured Web App User` assigned to your user account. This role is necessary to access the application through IAP.
+3. Verify the domain is active using command:
+   `kubectl get managedcertificates frontend-managed-cert -n $NAMESPACE --output jsonpath='{.status.domainStatus[0].status}'`
+3. 
Read terraform output `frontend_uri` or use the following command to find the domain created by IAP for accessing your service: + `kubectl get managedcertificates frontend-managed-cert -n $NAMESPACE --output jsonpath='{.status.domainStatus[0].domain}'` +4. Open your browser and navigate to the domain you retrieved in the previous step to start chatting! - * Open your browser and navigate to the domain you retrieved in the previous step to start chatting! +#### Prompt Examples -#### Prompts Example -3. [TODO: Add some example prompts for the dataset]. +*TODO:* Add some example prompts for the dataset. ### Cleanup diff --git a/applications/rag/example_notebooks/rag-kaggle-ray-sql-latest.ipynb b/applications/rag/example_notebooks/rag-kaggle-ray-sql-latest.ipynb index b0a769af1..3570e7db3 100644 --- a/applications/rag/example_notebooks/rag-kaggle-ray-sql-latest.ipynb +++ b/applications/rag/example_notebooks/rag-kaggle-ray-sql-latest.ipynb @@ -252,7 +252,7 @@ "id": "7ba6c3ff-a25a-4f4d-b58e-68f7fe7d33df", "metadata": {}, "outputs": [], - "source": [ + "source": [ "job_id = client.submit_job(\n", " entrypoint=\"python test.py\",\n", " # Path to the local directory that contains the entrypoint file.\n", @@ -278,10 +278,9 @@ " status = client.get_job_status(job_id)\n", " if status != prev_status:\n", " print(\"Job status:\", status)\n", + " print(\"Job info:\", client.get_job_info(job_id).message)\n", " prev_status = status\n", " if status.is_terminal():\n", - " if status == 'FAILED':\n", - " print(\"Job info:\", client.get_job_info(job_id))\n", " break\n", " time.sleep(5)\n" ] diff --git a/modules/kuberay-cluster/kuberay-autopilot-values.yaml b/modules/kuberay-cluster/kuberay-autopilot-values.yaml index 11a20a90e..1f2e5e83c 100644 --- a/modules/kuberay-cluster/kuberay-autopilot-values.yaml +++ b/modules/kuberay-cluster/kuberay-autopilot-values.yaml @@ -1,4 +1,4 @@ -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 
(the "License"); # you may not use this file except in compliance with the License. @@ -22,7 +22,7 @@ image: # Replace this with your own image if needed. repository: rayproject/ray - tag: 2.6.1-py310-gpu + tag: 2.9.3-py310-gpu pullPolicy: IfNotPresent nameOverride: "kuberay" @@ -64,8 +64,6 @@ head: # containerEnv specifies environment variables for the Ray container, # Follows standard K8s container env schema. containerEnv: - # - name: EXAMPLE_ENV - # value: "1" - name: RAY_memory_monitor_refresh_ms value: "0" - name: RAY_GRAFANA_IFRAME_HOST @@ -90,18 +88,18 @@ head: # for further guidance. resources: limits: - cpu: "8" + cpu: "1" # To avoid out-of-memory issues, never allocate less than 2G memory for the Ray head. - memory: "20G" + memory: "8G" ephemeral-storage: 20Gi requests: - cpu: "8" - memory: "20G" + cpu: "1" + memory: "8G" ephemeral-storage: 20Gi annotations: gke-gcsfuse/volumes: "true" - gke-gcsfuse/cpu-limit: "2" - gke-gcsfuse/memory-limit: 20Gi + gke-gcsfuse/cpu-limit: "1" + gke-gcsfuse/memory-limit: 4Gi gke-gcsfuse/ephemeral-storage-limit: 20Gi nodeSelector: cloud.google.com/compute-class: "Performance" @@ -158,8 +156,6 @@ worker: disabled: true # The map's key is used as the groupName. -# For example, key:small-group in the map below -# will be used as the groupName additionalWorkerGroups: cpuGroup: # Disabled by default @@ -194,16 +190,16 @@ additionalWorkerGroups: resources: limits: cpu: 4 - memory: "20G" + memory: "16G" ephemeral-storage: 20Gi requests: cpu: 4 - memory: "20G" + memory: "16G" ephemeral-storage: 20Gi annotations: gke-gcsfuse/volumes: "true" gke-gcsfuse/cpu-limit: "2" - gke-gcsfuse/memory-limit: 20Gi + gke-gcsfuse/memory-limit: 8Gi gke-gcsfuse/ephemeral-storage-limit: 20Gi nodeSelector: cloud.google.com/compute-class: "Performance" @@ -287,19 +283,19 @@ additionalWorkerGroups: # for further guidance. 
resources: limits: - cpu: "8" + cpu: "4" nvidia.com/gpu: "2" - memory: "40G" + memory: "16G" ephemeral-storage: 20Gi requests: - cpu: "8" + cpu: "4" nvidia.com/gpu: "2" - memory: "40G" + memory: "16G" ephemeral-storage: 20Gi annotations: gke-gcsfuse/volumes: "true" gke-gcsfuse/cpu-limit: "2" - gke-gcsfuse/memory-limit: 20Gi + gke-gcsfuse/memory-limit: 8Gi gke-gcsfuse/ephemeral-storage-limit: 20Gi nodeSelector: cloud.google.com/compute-class: "Accelerator"