diff --git a/notebooks/official/generative_ai/mistralai_intro.ipynb b/notebooks/official/generative_ai/mistralai_intro.ipynb
index 1207cc478..ee45ce050 100644
--- a/notebooks/official/generative_ai/mistralai_intro.ipynb
+++ b/notebooks/official/generative_ai/mistralai_intro.ipynb
@@ -74,16 +74,19 @@
         "\n",
         "### Available Mistral AI models\n",
         "\n",
+        "* ### Codestral (25.01)\n",
+        "A cutting-edge model specifically designed for code generation, including fill-in-the-middle and code completion.\n",
+        "\n",
         "* ### Mistral Large (24.11)\n",
-        "Mistral Large (24.11) is the latest version of the Mistral Large model now with improved reasoning and function calling capabilities.\n",
+        "Mistral Large (24.11) is the latest version of the Mistral Large model, now with improved reasoning and function calling capabilities.\n",
         "\n",
-        "* ### Mistral Large (2407)\n",
-        "Complex tasks that require large reasoning capabilities or are highly specialized (synthetic text Generation, code generation, RAG, or agents). [Blog Post](https://mistral.ai/news/mistral-large-2407/)\n",
+        "* ### Mistral Large (24.07)\n",
+        "Complex tasks that require large reasoning capabilities or are highly specialized (synthetic text generation, code generation, RAG, or agents). [Blog Post](https://mistral.ai/news/mistral-large-2407/)\n",
         "\n",
         "* ### Mistral Nemo\n",
         "Reasoning, world knowledge, and coding performance are state-of-the-art in its size category.\n",
         "\n",
-        "* ### Codestral\n",
-        "Coding specific tasks to enhance developers productivity with code completion and fill-in-the-middle capabilities.\n",
+        "* ### Codestral (24.05)\n",
+        "Coding-specific tasks to enhance developers' productivity with code completion and fill-in-the-middle capabilities.\n",
         "\n",
         "\n",
@@ -160,19 +163,22 @@
       },
       "outputs": [],
       "source": [
-        "MODEL = \"mistral-large-2411\"  # @param [\"mistral-large-2411\", \"mistral-large\", \"mistral-nemo\", \"codestral\"]\n",
+        "MODEL = \"codestral-2501\"  # @param [\"codestral-2501\", \"mistral-large-2411\", \"mistral-large\", \"mistral-nemo\", \"codestral\"]\n",
         "if MODEL == \"mistral-large-2411\":\n",
         "    available_regions = [\"europe-west4\", \"us-central1\"]\n",
         "    available_versions = [\"latest\"]\n",
         "elif MODEL == \"mistral-large\":\n",
         "    available_regions = [\"europe-west4\", \"us-central1\"]\n",
-        "    available_versions = [\"latest\", \"2407\"]\n",
+        "    available_versions = [\"2407\"]\n",
         "elif MODEL == \"mistral-nemo\":\n",
         "    available_regions = [\"europe-west4\", \"us-central1\"]\n",
-        "    available_versions = [\"latest\", \"2407\"]\n",
+        "    available_versions = [\"2407\"]\n",
         "elif MODEL == \"codestral\":\n",
         "    available_regions = [\"europe-west4\", \"us-central1\"]\n",
-        "    available_versions = [\"latest\", \"2405\"]"
+        "    available_versions = [\"2405\"]\n",
+        "elif MODEL == \"codestral-2501\":\n",
+        "    available_regions = [\"europe-west4\", \"us-central1\"]\n",
+        "    available_versions = [\"latest\"]"
       ]
     },
     {
@@ -460,8 +466,8 @@
       },
       "outputs": [],
       "source": [
-        "MODEL = \"codestral\"\n",
-        "SELECTED_MODEL_VERSION = \"\"\n",
+        "MODEL = \"codestral-2501\"  # use \"codestral\" for Codestral (24.05)\n",
+        "SELECTED_MODEL_VERSION = \"\"  # use \"@2405\" for Codestral (24.05)\n",
         "\n",
         "PAYLOAD = {\n",
         "    \"model\": MODEL,\n",
@@ -490,7 +496,8 @@
       },
       "outputs": [],
       "source": [
-        "MODEL = \"codestral\"\n",
+        "MODEL = \"codestral-2501\"  # use \"codestral\" for Codestral (24.05)\n",
+        "SELECTED_MODEL_VERSION = \"\"  # use \"@2405\" for Codestral (24.05)\n",
         "\n",
         "# Get the access token\n",
         "process = subprocess.Popen(\n",
@@ -506,7 +513,7 @@
         "}\n",
         "\n",
         "# Replace with your actual values\n",
-        "url = f\"{ENDPOINT}/v1/projects/{PROJECT_ID}/locations/{LOCATION}/publishers/mistralai/models/{MODEL}:rawPredict\"\n",
+        "url = f\"{ENDPOINT}/v1/projects/{PROJECT_ID}/locations/{LOCATION}/publishers/mistralai/models/{MODEL}{SELECTED_MODEL_VERSION}:rawPredict\"\n",
         "data = {\n",
         "    \"model\": MODEL,\n",
         "    \"prompt\": \"def f(\",\n",
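
Reviewer note: a minimal, self-contained sketch of the request path these hunks converge on, to make the version-pinning change easy to verify end to end. The version suffix (`@2405`, or empty for the default version) is appended to the model ID in the `:rawPredict` URL, exactly as in the updated cell. `PROJECT_ID` and `LOCATION` are placeholders, and the `suffix` and `max_tokens` payload fields are illustrative assumptions, since the diff truncates the notebook's actual payload after `"prompt"`.

```python
# Sketch only: mirrors the notebook's gcloud-based auth and the new
# {MODEL}{SELECTED_MODEL_VERSION}:rawPredict URL introduced in this diff.
import subprocess

import requests

PROJECT_ID = "your-project-id"  # placeholder
LOCATION = "us-central1"  # or "europe-west4"
ENDPOINT = f"https://{LOCATION}-aiplatform.googleapis.com"

MODEL = "codestral-2501"  # use "codestral" for Codestral (24.05)
SELECTED_MODEL_VERSION = ""  # use "@2405" for Codestral (24.05)

# Get an access token the same way the notebook does.
token = subprocess.run(
    ["gcloud", "auth", "print-access-token"],
    capture_output=True,
    text=True,
    check=True,
).stdout.strip()

# The version suffix rides on the model ID in the URL, not on the payload.
url = (
    f"{ENDPOINT}/v1/projects/{PROJECT_ID}/locations/{LOCATION}"
    f"/publishers/mistralai/models/{MODEL}{SELECTED_MODEL_VERSION}:rawPredict"
)
data = {
    "model": MODEL,
    "prompt": "def f(",  # fill-in-the-middle prefix, as in the diff
    "suffix": "    return a + b",  # assumed FIM suffix field
    "max_tokens": 64,  # assumed completion cap
}

response = requests.post(
    url,
    headers={
        "Authorization": f"Bearer {token}",
        "Content-Type": "application/json",
    },
    json=data,
)
print(response.json())
```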