diff --git a/notebooks/vertex_genai/solutions/grounding_vertex_agent_builder.ipynb b/notebooks/vertex_genai/solutions/grounding_vertex_agent_builder.ipynb
index 1646f461..5a192c73 100644
--- a/notebooks/vertex_genai/solutions/grounding_vertex_agent_builder.ipynb
+++ b/notebooks/vertex_genai/solutions/grounding_vertex_agent_builder.ipynb
@@ -133,7 +133,7 @@
    "outputs": [],
    "source": [
     "parameters = {\n",
-    "    \"temperature\": 0.5,  # Temperature controls the degree of randomness in token selection.\n",
+    "    \"temperature\": 0.2,  # Temperature controls the degree of randomness in token selection.\n",
     "    \"max_output_tokens\": 256,  # Token limit determines the maximum amount of text output.\n",
     "    \"top_p\": 0.8,  # Tokens are selected from most probable to least until the sum of their probabilities equals the top_p value.\n",
     "    \"top_k\": 40,  # A top_k of 1 means the selected token is the most probable among all tokens.\n",
@@ -300,8 +300,8 @@
    },
    "outputs": [],
    "source": [
-    "PROMPT = \"What are managed datasets in Vertex AI?\"\n",
-    "PROMPT_FOLLOWUP = \"What types of data can I use\""
+    "PROMPT = \"How can I ground LLM responses in Vertex AI?\"\n",
+    "PROMPT_FOLLOWUP = \"What types of data can I use for grounding?\""
    ]
   },
   {
@@ -323,9 +323,11 @@
    "source": [
     "chat = chat_model.start_chat()\n",
     "\n",
+    "print(f\"PROMPT: {PROMPT}\")\n",
     "response = chat.send_message(PROMPT)\n",
     "print(response.text)\n",
     "\n",
+    "print(f\"PROMPT: {PROMPT_FOLLOWUP}\")\n",
     "response = chat.send_message(PROMPT_FOLLOWUP)\n",
     "print(response.text)"
    ]
@@ -354,6 +356,7 @@
     "    data_store_id=DATA_STORE_ID, location=DATA_STORE_REGION\n",
     ")\n",
     "\n",
+    "print(f\"PROMPT: {PROMPT}\")\n",
     "response = chat.send_message(\n",
     "    PROMPT,\n",
     "    grounding_source=grounding_source,\n",
@@ -361,6 +364,7 @@
     ")\n",
     "print(response.text)\n",
     "print(response.grounding_metadata)\n",
     "\n",
+    "print(f\"PROMPT: {PROMPT_FOLLOWUP}\")\n",
     "response = chat.send_message(\n",
     "    PROMPT_FOLLOWUP,\n",
     "    grounding_source=grounding_source,\n",
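Reviewer note: for context, here is a minimal, self-contained sketch of the flow the changed cells exercise, i.e. the same prompt sent once ungrounded and once grounded in a Vertex AI Search data store. The `chat-bison` model name, the `vertexai.preview.language_models` import path, the project/data-store placeholder values, and `location="global"` are assumptions not pinned down by this diff; only the prompt strings, the `grounding_source=` keyword, and `response.grounding_metadata` come from the notebook itself.

```python
# Sketch of the ungrounded-vs-grounded chat comparison, assuming the
# Vertex AI SDK preview language-models API. Placeholder values
# ("your-project-id", "your-data-store-id") must be replaced.
import vertexai
from vertexai.preview.language_models import ChatModel, GroundingSource

vertexai.init(project="your-project-id", location="us-central1")

parameters = {
    "temperature": 0.2,  # Lower temperature -> less random token selection.
    "max_output_tokens": 256,
    "top_p": 0.8,
    "top_k": 40,
}

chat_model = ChatModel.from_pretrained("chat-bison")
chat = chat_model.start_chat()

PROMPT = "How can I ground LLM responses in Vertex AI?"

# Ungrounded baseline: the model answers from its training data alone.
print(f"PROMPT: {PROMPT}")
print(chat.send_message(PROMPT, **parameters).text)

# Grounded call: responses draw on a Vertex AI Search data store, and
# grounding_metadata reports which indexed sources supported the answer.
grounding_source = GroundingSource.VertexAISearch(
    data_store_id="your-data-store-id", location="global"
)
response = chat.send_message(PROMPT, grounding_source=grounding_source)
print(response.text)
print(response.grounding_metadata)
```

The added `print(f"PROMPT: ...")` lines in the diff serve the same purpose as the comments above: in notebook output they make clear which question each ungrounded or grounded answer belongs to.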