From f4f3c8c559e0dac11fc8c1b60f0c7e689896e47b Mon Sep 17 00:00:00 2001
From: Sanjana Reddy
Date: Wed, 22 May 2024 20:24:37 +0000
Subject: [PATCH] changed chat followup prompt

---
 .../solutions/grounding_vertex_agent_builder.ipynb | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/notebooks/vertex_genai/solutions/grounding_vertex_agent_builder.ipynb b/notebooks/vertex_genai/solutions/grounding_vertex_agent_builder.ipynb
index 5a192c73..df0e9460 100644
--- a/notebooks/vertex_genai/solutions/grounding_vertex_agent_builder.ipynb
+++ b/notebooks/vertex_genai/solutions/grounding_vertex_agent_builder.ipynb
@@ -108,7 +108,6 @@
    },
    "outputs": [],
    "source": [
-    "import vertexai\n",
     "from IPython.display import Markdown, display\n",
     "from vertexai.language_models import (\n",
     "    ChatModel,\n",
@@ -134,7 +133,6 @@
    "source": [
     "parameters = {\n",
     "    \"temperature\": 0.2, # Temperature controls the degree of randomness in token selection.\n",
-    "    \"max_output_tokens\": 256, # Token limit determines the maximum amount of text output.\n",
     "    \"top_p\": 0.8, # Tokens are selected from most probable to least until the sum of their probabilities equals the top_p value.\n",
     "    \"top_k\": 40, # A top_k of 1 means the selected token is the most probable among all tokens.\n",
     "}\n",
@@ -301,7 +299,7 @@
    "outputs": [],
    "source": [
     "PROMPT = \"How can I ground LLM responses in Vertex AI?\"\n",
-    "PROMPT_FOLLOWUP = \"What types of data can I use for grounding?\""
+    "PROMPT_FOLLOWUP = \"Is grounding available in PaLM models?\""
    ]
   },
   {