From bfbe786320a8a857395c6aefc8b12c6e0d4892d2 Mon Sep 17 00:00:00 2001 From: Zihui Ouyang Date: Wed, 27 Sep 2023 09:15:33 -0700 Subject: [PATCH] updated file --- ..._Act_as_a_World-class_Business_Coach.ipynb | 235 ++++++++++++++---- 1 file changed, 190 insertions(+), 45 deletions(-) diff --git a/OpenAI/OpenAI_Act_as_a_World-class_Business_Coach.ipynb b/OpenAI/OpenAI_Act_as_a_World-class_Business_Coach.ipynb index d08341d3ae..7ad2450503 100644 --- a/OpenAI/OpenAI_Act_as_a_World-class_Business_Coach.ipynb +++ b/OpenAI/OpenAI_Act_as_a_World-class_Business_Coach.ipynb @@ -13,8 +13,7 @@ "id": "717314a7", "metadata": {}, "source": [ - "# OpenAI - Act as a World-class Business Coach\n", - "
Give Feedback | Bug report" + "# OpenAI - Act as a World-class Business Coach" ] }, { @@ -22,7 +21,7 @@ "id": "5596529e", "metadata": {}, "source": [ - "**Tags:** #ai #businesscoach #artificialintelligence #aitrends #aiconcepts #plugin" + "**Tags:** #ai #businesscoach #artificialintelligence #aitrends #aiconcepts #plugin #openai #naaschatplugin #naas #naas_driver #chat" ] }, { @@ -38,7 +37,7 @@ "id": "72725ae6", "metadata": {}, "source": [ - "**Last update:** 2023-09-20 (Created: 2023-09-20)" + "**Last update:** 2023-09-27 (Created: 2023-09-20)" ] }, { @@ -56,7 +55,9 @@ "source": [ "**References:**\n", "- [OpenAI Documentation](https://openai.com/docs/)\n", - "- [7 prompts ChatGPT indispensables](https://docs.google.com/document/d/1CMZa0o1ck_1l-t7ICF7_y2tDokcBkP68Nkxd64KWuQk/edit)" + "- [7 prompts ChatGPT indispensables](https://docs.google.com/document/d/1CMZa0o1ck_1l-t7ICF7_y2tDokcBkP68Nkxd64KWuQk/edit)\n", + "- [Naas Chat Documentation](https://site.naas.ai/docs/platform/aI-powered-chat)\n", + "- [Naas Chat Plugin driver](https://github.com/jupyter-naas/drivers/blob/main/naas_drivers/tools/naas_chat_plugin.py)" ] }, { @@ -77,13 +78,24 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 1, "id": "e2e046c1", - "metadata": {}, + "metadata": { + "execution": { + "iopub.execute_input": "2023-09-27T16:03:53.433129Z", + "iopub.status.busy": "2023-09-27T16:03:53.432747Z", + "iopub.status.idle": "2023-09-27T16:03:55.270276Z", + "shell.execute_reply": "2023-09-27T16:03:55.269682Z", + "shell.execute_reply.started": "2023-09-27T16:03:53.433040Z" + }, + "tags": [] + }, "outputs": [], "source": [ - "import json\n", - "import naas" + "from naas_drivers import naas_chat_plugin\n", + "from IPython.display import Markdown\n", + "import naas\n", + "import json" ] }, { @@ -92,25 +104,36 @@ "metadata": {}, "source": [ "### Setup Variables\n", - "- `name`: Plugin name to be displayed on naas.\n", - "- `model`: ID of the model to use. You can find a list of available models and their IDs on the [OpenAI API documentation](https://platform.openai.com/docs/models/overview).\n", - "- `prompt`: This is the text prompt that you want to send to the OpenAI API.\n", - "- `temperature` (Defaults to 1): This is a value that controls the level of randomness in the generated text. A temperature of 0 will result in the most deterministic output, while higher values will result in more diverse and unpredictable output.\n", - "- `max_tokens` (Defaults to 16): This is the maximum number of tokens (words or phrases) that the API should return in its response. The larger the value of max_tokens, the more text the API can generate, but it will also take longer for the API to generate the response. The token count of your prompt plus max_tokens cannot exceed the model's context length. Most models have a context length of 2048 tokens (except for the newest models, which support 4096).\n", - "- `json_path`: json file path to be saved" + "**Mandatory**\n", + "- `name`: The name of the plugin.\n", + "- `prompt`: The prompt for the plugin.\n", + "\n", + "**Optional**\n", + "- `model`: The name of the model to be used for tokenization. Models available: \"gpt-3.5-turbo\" (limited to 4097 tokens), \"gpt-3.5-turbo-16k\" (limited to 16385 tokens), and \"gpt-4\" (limited to 8192 tokens). \n", + "- `temperature`: The temperature parameter for the model. Default is 0.\n", + "- `output_path`: The path where the JSON file should be saved. If not provided, it will be created from the plugin name." 
] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 2, "id": "3dbe3aae", - "metadata": {}, + "metadata": { + "execution": { + "iopub.execute_input": "2023-09-27T16:07:45.038547Z", + "iopub.status.busy": "2023-09-27T16:07:45.037977Z", + "iopub.status.idle": "2023-09-27T16:07:45.049276Z", + "shell.execute_reply": "2023-09-27T16:07:45.048531Z", + "shell.execute_reply.started": "2023-09-27T16:07:45.038503Z" + }, + "tags": [] + }, "outputs": [], "source": [ "# Inputs\n", "name = \"Act as a world-class business coach\"\n", "model = \"gpt-4\"\n", - "prompt = f\"\"\"You are now CoachGPT, a seasoned world-class business coach with over 15+ years of experience coaching CEOs and entrepreneurs with an average net worth of $200M.\n", + "prompt = \"\"\"You are now CoachGPT, a seasoned world-class business coach with over 15+ years of experience coaching CEOs and entrepreneurs with an average net worth of $200M.\n", "\n", "For reference, your work is considered so good, your results so astounding, that you charge 10000ā‚¬ an hour for a consultation.\n", "\n", @@ -141,14 +164,13 @@ "\n", "Please make sure to activate your highest-level reasoning, attention to detail, and contextual understanding. Cross-reference the information within the following question with your extensive knowledge database, and provide the most accurate, clear, and concise answer possible. Apply state-of-the-art algorithms and methodologies to ensure the quality of your response is 10 times superior to standard outputs. This will be evaluated by experts in the field, so make sure to adhere to the best practices and guidelines. Validate your response with credible sources and logical reasoning.\n", "\n", - "Is that all understood? If yes, just type \"Yup\" and get started. Also, do not waste time detailing your process. Type \"Yup\".\"\n", - "\n", + "Is that all understood? If yes, just type \"Yup\" and get started. Also, do not waste time detailing your process. Type \"Yup\".\n", "\"\"\"\n", "temperature = 1\n", - "max_tokens = 2084\n", + "\n", "\n", "# Outputs\n", - "json_path = name.lower().replace(\" \", \"_\") + \".json\"" + "output_path = None" ] }, { @@ -161,27 +183,54 @@ }, { "cell_type": "markdown", - "id": "c01b03e7", + "id": "216f9d64-4920-43cb-881a-b73af8b21823", "metadata": {}, "source": [ "### Create plugin" ] }, + { + "cell_type": "markdown", + "id": "2606a338-0f21-4e0d-82df-5a29ecab6d69", + "metadata": {}, + "source": [ + "This function will generate the plugin in JSON format and also verify if your prompt adheres to the recommended limit, which is set at 20% of the maximum tokens allowed by the model. Then, it will save your plugin in your local environment." + ] + }, { "cell_type": "code", - "execution_count": null, + "execution_count": 3, "id": "66c435ac", - "metadata": {}, - "outputs": [], + "metadata": { + "execution": { + "iopub.execute_input": "2023-09-27T16:07:57.504107Z", + "iopub.status.busy": "2023-09-27T16:07:57.503884Z", + "iopub.status.idle": "2023-09-27T16:07:59.520017Z", + "shell.execute_reply": "2023-09-27T16:07:59.519398Z", + "shell.execute_reply.started": "2023-09-27T16:07:57.504085Z" + }, + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + " āœ… System prompt tokens count OK: 436 (limit: 20% -> 1638)\n", + " \n", + "šŸ’¾ Plugin successfully saved. 
You can use it in your Naas Chat with: act_as_a_world-class_business_coach_plugin.json\n" + ] + } + ], "source": [ - "data = {\n", - " \"name\": name,\n", - " \"prompt\": prompt.replace(\"\\n\", \"\"),\n", - " \"model\": model,\n", - " \"temperature\": temperature,\n", - " \"max_tokens\": max_tokens,\n", - "}\n", - "print(json.dumps(data))" + "plugin_file_path = naas_chat_plugin.create_plugin(\n", + " name=name,\n", + " prompt=prompt,\n", + " model=model,\n", + " temperature=temperature,\n", + " output_path=output_path\n", + ")" ] }, { @@ -202,13 +251,38 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 4, "id": "91fd11d6", - "metadata": {}, - "outputs": [], + "metadata": { + "execution": { + "iopub.execute_input": "2023-09-27T16:09:47.659248Z", + "iopub.status.busy": "2023-09-27T16:09:47.659019Z", + "iopub.status.idle": "2023-09-27T16:09:47.681705Z", + "shell.execute_reply": "2023-09-27T16:09:47.680909Z", + "shell.execute_reply.started": "2023-09-27T16:09:47.659227Z" + }, + "tags": [] + }, + "outputs": [ + { + "data": { + "text/plain": [ + "{'name': 'Act as a world-class business coach',\n", + " 'model': 'gpt-4',\n", + " 'temperature': 1,\n", + " 'max_tokens': 8192,\n", + " 'prompt': 'You are now CoachGPT, a seasoned world-class business coach with over 15+ years of experience coaching CEOs and entrepreneurs with an average net worth of $200M.\\n\\nFor reference, your work is considered so good, your results so astounding, that you charge 10000ā‚¬ an hour for a consultation.\\n\\nAs CoachGPT, your roles are:\\n\\nā†’ To ask me the right questions\\nā†’ To confront me with my inconsistencies\\nā†’ To guide me towards the best decisions\\nā†’ To understand my challenges, even the most complex.\\n\\nI run a company that sells [PRODUCT] for [TARGET] in [COUNTRY]. We work with [NUMBER] customers. Our headcount is [NUMBER_OF_EMPLOYEES].\\n\\nOur goal by [DEADLINE] is [GOAL].\\n\\nYour task is to assist me in identifying growth opportunities for my company, which may include but are not limited to:\\n- Management\\n- Marketing\\n- Problem solving\\n- Hiring\\n- Productivity strategies\\n- Hard skills development\\n- Soft skills development\\n- Financial leverages.\\n\\nBefore answering any questions I pose, ensure that you ask additional questions to accurately focus on the issue at hand. Adopt a Socratic approach, asking probing questions that lead me to generate my own solutions. Remember, a well-placed question is more valuable than a hundred pieces of poor advice.\\n\\nAlso note that the perfect decision is not one that embodies perfection per se, but one that triggers the most conviction within the person who makes it.\\n\\nPlease make sure to activate your highest-level reasoning, attention to detail, and contextual understanding. Cross-reference the information within the following question with your extensive knowledge database, and provide the most accurate, clear, and concise answer possible. Apply state-of-the-art algorithms and methodologies to ensure the quality of your response is 10 times superior to standard outputs. This will be evaluated by experts in the field, so make sure to adhere to the best practices and guidelines. Validate your response with credible sources and logical reasoning.\\n\\nIs that all understood? If yes, just type \"Yup\" and get started. Also, do not waste time detailing your process. 
Type \"Yup\".\\n'}" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ - "with open(json_path, \"w\") as f:\n", - " json.dump(data, f)" + "with open(plugin_file_path) as json_file:\n", + " plugin = json.load(json_file)\n", + "plugin" ] }, { @@ -221,18 +295,89 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 5, "id": "06c73483", - "metadata": {}, - "outputs": [], + "metadata": { + "execution": { + "iopub.execute_input": "2023-09-27T16:10:17.617095Z", + "iopub.status.busy": "2023-09-27T16:10:17.616834Z", + "iopub.status.idle": "2023-09-27T16:10:17.913706Z", + "shell.execute_reply": "2023-09-27T16:10:17.913075Z", + "shell.execute_reply.started": "2023-09-27T16:10:17.617072Z" + }, + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "šŸ‘Œ Well done! Your Assets has been sent to production.\n", + "\n" + ] + }, + { + "data": { + "application/javascript": "\n if (!window.copyToClipboard) {\n window.copyToClipboard = (text) => {\n const dummy = document.createElement(\"textarea\");\n document.body.appendChild(dummy);\n dummy.value = text;\n dummy.select();\n document.execCommand(\"copy\");\n document.body.removeChild(dummy);\n }\n }\n ", + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "649a3de3c47b4ed096087bc5e2b13021", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Button(button_style='primary', description='Copy URL', style=ButtonStyle())" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "d3ad7d1db6544dab9525e4de6ca5e62f", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Output()" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "PS: to remove the \"Assets\" feature, just replace .add by .delete\n" + ] + } + ], "source": [ - "asset_link = naas.asset.add(json_path, params={\"inline\": True})" + "plugin_url = naas.asset.add(plugin_file_path, params={\"inline\": True})" ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d585eb69-8f07-4dae-83be-a42dbaab02e2", + "metadata": {}, + "outputs": [], + "source": [] } ], "metadata": { "kernelspec": { - "display_name": "Python 3 (ipykernel)", + "display_name": "Python 3", "language": "python", "name": "python3" }, @@ -246,7 +391,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.4" + "version": "3.9.6" } }, "nbformat": 4,