diff --git a/bioimageio_chatbot/chatbot.py b/bioimageio_chatbot/chatbot.py
index c854c99..cfd865c 100644
--- a/bioimageio_chatbot/chatbot.py
+++ b/bioimageio_chatbot/chatbot.py
@@ -6,7 +6,7 @@
import secrets
import aiofiles
from functools import partial
-from imjoy_rpc.hypha import login, connect_to_server
+from hypha_rpc import login, connect_to_server
from pydantic import BaseModel, Field
from schema_agents import Role, Message
@@ -487,9 +487,7 @@ async def ping(context=None):
}
)
- server_info = await server.get_connection_info()
-
- await serve_actions(server, server_info.public_base_url, builtin_extensions)
+ await serve_actions(server, server.config.public_base_url, builtin_extensions)
server_url = server.config["public_base_url"]
service_id = hypha_service_info["id"]
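For reference, here is a minimal sketch of the connection pattern this hunk switches to. It is based only on the calls visible in this patch (`connect_to_server` from `hypha_rpc`, and `server.config` carrying `public_base_url`); the server URL is illustrative and login/token handling is omitted.

```python
# Minimal sketch (assumptions noted above): read the public base URL from
# server.config instead of the removed get_connection_info() call.
import asyncio
from hypha_rpc import connect_to_server

async def main():
    server = await connect_to_server({"server_url": "https://chat.bioimage.io"})
    # Both access styles appear in this patch, so config behaves like a
    # dot-accessible dict.
    print(server.config.public_base_url)
    print(server.config["public_base_url"])

asyncio.run(main())
```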
diff --git a/bioimageio_chatbot/gpts_action.py b/bioimageio_chatbot/gpts_action.py
index a5b7503..7b20f9a 100644
--- a/bioimageio_chatbot/gpts_action.py
+++ b/bioimageio_chatbot/gpts_action.py
@@ -2,7 +2,7 @@
from openai import AsyncOpenAI
from bioimageio_chatbot.chatbot_extensions import extension_to_tools
from schema_agents.utils.schema_conversion import get_service_openapi_schema
-from imjoy_rpc.hypha import login, connect_to_server
+from hypha_rpc import login, connect_to_server
client = AsyncOpenAI()
diff --git a/bioimageio_chatbot/static/index.html b/bioimageio_chatbot/static/index.html
index bfa9dd6..b35d4a6 100644
--- a/bioimageio_chatbot/static/index.html
+++ b/bioimageio_chatbot/static/index.html
@@ -528,7 +528,7 @@
Welcome to BioImage.IO Chatbot
-
+
@@ -741,7 +741,7 @@ Welcome to BioImage.IO Chatbot
}
// this function is used to remove the __rpc_object__ attribute from the object
- // this is important to make sure the function is correctly encoded and send to imjoy_rpc
+                // this is important to make sure the function is correctly encoded and sent to hypha_rpc
function _removeRpcObj(obj) {
if (obj && obj.__rpc_object__) {
delete obj.__rpc_object__
@@ -1042,7 +1042,7 @@ Welcome to BioImage.IO Chatbot
Assist users in bioimage analysis within a Pyodide environment.
You will generate and run scripts in the code interpreter to help users analyze their data.
The code interpreter is a Jupyter notebook-like environment; it supports top-level await operations and the asyncio event loop is already running, so you can call "await func()" directly without wrapping it in an async function or using asyncio.run.
-The environment has access to remote servers, so you can fetch remote data by using python modules "requests" or "imjoy_rpc.hypha" for connect to the Hypha/BioEngine server.
+The environment has access to remote servers, so you can fetch remote data using the Python modules "requests" or "hypha_rpc" to connect to the Hypha/BioEngine server.
User data will be mounted to the \`/mnt\` directory. After mounting, use "os.listdir('/mnt')" to explore the available files and ask the user what they want to do with the data before taking other actions.
The code interpreter can produce outputs such as stdout or stderr, matplotlib plots, and image/audio displays, which are rendered in the user interface. For key results (e.g. result images), display them in the final response to the user.
Global variables, functions and results will be maintained across multiple code interpreter executions, so save intermediate results into global variables that can be reused later.
@@ -1401,7 +1401,7 @@ Welcome to BioImage.IO Chatbot
"server_url": serverUrl,
"token": token,
})
- const svc = await server.getService(service_id || "public/workspace-manager:bioimageio-chatbot")
+ const svc = await server.getService(service_id || "public/bioimageio-chatbot")
await svc.ping()
if(svc.version) $('#chatbot-version-badge').text(`bioimageio-chatbot ${svc.version}`);
const storeSvc = await server.registerService({
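As a sanity check for the renamed service id above, here is a hypothetical Python-side counterpart to the JavaScript `getService`/`ping` calls; it assumes `hypha_rpc`'s `get_service` mirrors the JS API shown in this hunk and omits token handling.

```python
# Hypothetical check that "public/bioimageio-chatbot" (the id used in the JS
# above) resolves and responds to ping.
import asyncio
from hypha_rpc import connect_to_server

async def check_chatbot_service():
    server = await connect_to_server({"server_url": "https://chat.bioimage.io"})
    svc = await server.get_service("public/bioimageio-chatbot")
    await svc.ping()  # raises if the service is unreachable
    print("bioimageio-chatbot service is reachable")

asyncio.run(check_chatbot_service())
```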
diff --git a/docs/bioimage-chatbot-extension-tutorial.ipynb b/docs/bioimage-chatbot-extension-tutorial.ipynb
index d797bee..e8d0d36 100644
--- a/docs/bioimage-chatbot-extension-tutorial.ipynb
+++ b/docs/bioimage-chatbot-extension-tutorial.ipynb
@@ -1,512 +1,512 @@
{
- "cells": [
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "## BioImage.IO Chatbot Extensions"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Documentation: https://github.com/bioimage-io/bioimageio-chatbot/blob/main/docs/development.md"
- ]
- },
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## BioImage.IO Chatbot Extensions"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Documentation: https://github.com/bioimage-io/bioimageio-chatbot/blob/main/docs/development.md"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Using the chatbot in an ImJoy plugin\n",
+ "\n",
+    "Below you will find an example of how to use the chatbot in an ImJoy plugin. Using `api.createWindow`, we can display the chatbot inline as an ImJoy plugin and interact with its API."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 25,
+ "metadata": {
+ "trusted": true
+ },
+ "outputs": [
{
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "### Using the chatbot in an ImJoy plugin\n",
- "\n",
- "Below you will find an example of how to use the chatbot in an ImJoy plugin. Using `api.createWindow`, we can display the chatbot inline as an imjoy plugin and interact with its api."
+ "data": {
+ "application/javascript": "window.connectPlugin && window.connectPlugin(\"b78046eb-f9bd-432c-a1ea-5776c21ba3be\")",
+ "text/plain": [
+ ""
]
+ },
+ "metadata": {},
+ "output_type": "display_data"
},
{
- "cell_type": "code",
- "execution_count": 25,
- "metadata": {
- "trusted": true
- },
- "outputs": [
- {
- "data": {
- "application/javascript": "window.connectPlugin && window.connectPlugin(\"b78046eb-f9bd-432c-a1ea-5776c21ba3be\")",
- "text/plain": [
- ""
- ]
- },
- "metadata": {},
- "output_type": "display_data"
- },
- {
- "data": {
- "text/html": [
- ""
- ],
- "text/plain": [
- ""
- ]
- },
- "metadata": {},
- "output_type": "display_data"
- },
- {
- "data": {
- "text/plain": [
- "<_GatheringFuture pending>"
- ]
- },
- "execution_count": 25,
- "metadata": {},
- "output_type": "execute_result"
- }
+ "data": {
+ "text/html": [
+ ""
],
- "source": [
- "from imjoy import api\n",
- "\n",
- "async def setup():\n",
- " chatbot = await api.createWindow(\n",
- " src=\"https://bioimage.io/chat\",\n",
- " name=\"BioImage.IO Chatbot\",\n",
- " )\n",
- " \n",
- " await api.showMessage(str(await chatbot.getAllExtensions()))\n",
- "\n",
- "api.export({\"setup\": setup})"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "## Create a chatbot extension\n",
- "\n",
- "A chatbot extension object is a dictionary with the following keys:\n",
- " - `id`: a unique identifier for the extension\n",
- " - `name`: the name of the extension\n",
- " - `description`: a short description of the extension\n",
- " - `type`: it must be `bioimageio-chatbot-extension`\n",
- " - `tools`: a dictionary with functions of tools, it represents the set of functions your extension offers, each accepting configuration parameters as input. These functions should carry out specific tasks and return their results in a dictionary.\n",
- " - `get_schema`: a function returns the schema for the tools, it returns a JSON schema for each tool function, specifying the structure and types of the expected parameters. This schema is crucial for instructing the chatbot to generate the correct input paramters and validate the inputs and ensuring they adhere to the expected format. Importantly, the chatbot uses the title and description for each field to understand what expected for the tool will generating a function call to run the tool (also see the detailed instructions below).\n",
- "\n",
- "\n",
- " The following is an example of creating a chatbot extension:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "from pydantic import BaseModel, Field\n",
- "from skimage import io, filters, data\n",
- "\n",
- "\n",
- "class ApplyFilterInput(BaseModel):\n",
- " \"\"\"Apply a gaussian filter to an example image\"\"\"\n",
- " sigma: float = Field(..., description=\"Standard deviation for Gaussian kernel\")\n",
- "\n",
- "async def apply_filter(kwargs):\n",
- " config = ApplyFilterInput(**kwargs)\n",
- "\n",
- " # Load the image\n",
- " image = data.cell()\n",
- "\n",
- " # Apply the filter\n",
- " filtered_image = filters.gaussian(image, sigma=config.sigma)\n",
- "\n",
- " # Display the image\n",
- " viewer = await api.showDialog(src=\"https://kitware.github.io/itk-vtk-viewer/app/\")\n",
- " await viewer.setImage(filtered_image)\n",
- " \n",
- " return \"Filter applied successfully\"\n",
- "\n",
- "def get_schema():\n",
- " return {\n",
- " \"apply_filter\": ApplyFilterInput.schema(),\n",
- " }\n",
- "\n",
- "# Define a chatbot extension\n",
- "image_processing_extension = {\n",
- " \"_rintf\": True,\n",
- " \"id\": \"image-processing\",\n",
- " \"type\": \"bioimageio-chatbot-extension\",\n",
- " \"name\": \"Image Processing\",\n",
- " \"description\": \"Apply gaussian filter to an example image\",\n",
- " \"get_schema\": get_schema,\n",
- " \"tools\": {\n",
- " \"apply_filter\": apply_filter,\n",
- " }\n",
- "}"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "## Option 1: Register a chatbot extension as an ImJoy plugin\n",
- "\n",
- "The following will show the chatbot window, then you can type for example `run the gaussian filter example` to invoke the tool you defined in the extension."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "from imjoy_rpc import api\n",
- "\n",
- "async def setup():\n",
- " chatbot = await api.createWindow(src=\"https://bioimage.io/chat\", name=\"BioImage.IO Chatbot\")\n",
- " await chatbot.registerExtension(image_processing_extension)\n",
- "\n",
- "api.export({\"setup\": setup})"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "## Option 2: Serving extension remotely using hypha\n",
- "\n",
- "Besides running the extension in the browser using ImJoy, you can also run the extension remotely using [hypha](https://ha.amun.ai). This allows the extension to run in a native Python environment which have easier access to hardware devices (e.g. the actual microscope) and more computational resources.\n",
- "\n",
- "Similar to the ImJoy plugin, you need to register the extension as a hypha service."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "from imjoy_rpc.hypha import connect_to_server, login\n",
- "\n",
- "server_url = \"https://chat.bioimage.io\"\n",
- "token = await login({\"server_url\": server_url})\n",
- "server = await connect_to_server({\"server_url\": server_url, \"token\": token})\n",
- "# Below, we set the visibility to public\n",
- "image_processing_extension['config'] = {\"visibility\": \"public\"}\n",
- "svc = await server.register_service(image_processing_extension)\n",
- "print(f\"Extension service registered with id: {svc.id}, you can visit the service at: https://bioimage.io/chat?server={server_url}&extension={svc.id}&assistant=Skyler\")"
+ "text/plain": [
+ ""
]
+ },
+ "metadata": {},
+ "output_type": "display_data"
},
{
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "To use the chatbot, you can now click the link above, or use the following code to run the chatbot in the browser."
+ "data": {
+ "text/plain": [
+ "<_GatheringFuture pending>"
]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "## Display the chatbot as embedded iframe\n",
- "from IPython.display import display, IFrame\n",
- "display(IFrame(src=\"https://bioimage.io/chat\", width=\"100%\", height=\"600px\"))"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "## Additional Examples\n",
- "### Example 1: Controlling a microscope using the chatbot"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "from pydantic import BaseModel, Field\n",
- " \n",
- "class MoveStageInput(BaseModel):\n",
- " \"\"\"Move the microscope stage\"\"\"\n",
- " x: float = Field(..., description=\"x offset\")\n",
- " y: float = Field(..., description=\"y offset\")\n",
- "\n",
- "class SnapImageInput(BaseModel):\n",
- " \"\"\"Move the microscope stage\"\"\"\n",
- " exposure: float = Field(..., description=\"exposure time\")\n",
- "\n",
- "async def move_stage(kwargs):\n",
- " config = MoveStageInput(**kwargs)\n",
- " print(config.x, config.y)\n",
- "\n",
- " return \"success\"\n",
- "\n",
- "async def snap_image(kwargs):\n",
- " config = SnapImageInput(**kwargs)\n",
- " print(config.exposure)\n",
- " await api.showDialog(src=\"https://bioimage.io\")\n",
- " return \"success\"\n",
- "\n",
- "def get_schema():\n",
- " return {\n",
- " \"move_stage\": MoveStageInput.schema(),\n",
- " \"snap_image\": SnapImageInput.schema()\n",
- " }\n",
- "\n",
- "# Define an chatbot extension\n",
- "microscope_control_extension = {\n",
- " \"_rintf\": True,\n",
- " \"id\": \"microscope-control\",\n",
- " \"type\": \"bioimageio-chatbot-extension\",\n",
- " \"name\": \"Microscope Control\",\n",
- " \"description\": \"Contorl the microscope....\",\n",
- " \"get_schema\": get_schema,\n",
- " \"tools\": {\n",
- " \"move_stage\": move_stage,\n",
- " \"snap_image\": snap_image,\n",
- " }\n",
- "}\n",
- "\n",
- "from imjoy_rpc.hypha import connect_to_server, login\n",
- "\n",
- "server_url = \"https://chat.bioimage.io\"\n",
- "token = await login({\"server_url\": server_url})\n",
- "server = await connect_to_server({\"server_url\": server_url, \"token\": token})\n",
- "# Below, we set the visibility to public\n",
- "microscope_control_extension['config'] = {\"visibility\": \"public\"}\n",
- "svc = await server.register_service(microscope_control_extension)\n",
- "print(f\"Extension service registered with id: {svc.id}, you can visit the service at: https://bioimage.io/chat?server={server_url}&extension={svc.id}&assistant=Skyler\")"
- ]
- },
+ },
+ "execution_count": 25,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "from imjoy import api\n",
+ "\n",
+ "async def setup():\n",
+ " chatbot = await api.createWindow(\n",
+ " src=\"https://bioimage.io/chat\",\n",
+ " name=\"BioImage.IO Chatbot\",\n",
+ " )\n",
+ " \n",
+ " await api.showMessage(str(await chatbot.getAllExtensions()))\n",
+ "\n",
+ "api.export({\"setup\": setup})"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Create a chatbot extension\n",
+ "\n",
+ "A chatbot extension object is a dictionary with the following keys:\n",
+ " - `id`: a unique identifier for the extension\n",
+ " - `name`: the name of the extension\n",
+ " - `description`: a short description of the extension\n",
+ " - `type`: it must be `bioimageio-chatbot-extension`\n",
+    " - `tools`: a dictionary of tool functions; it represents the set of functions your extension offers, each accepting configuration parameters as input. These functions should carry out specific tasks and return their results in a dictionary.\n",
+    " - `get_schema`: a function that returns the schema for the tools; it returns a JSON schema for each tool function, specifying the structure and types of the expected parameters. This schema is crucial for instructing the chatbot to generate the correct input parameters, validate the inputs, and ensure they adhere to the expected format. Importantly, the chatbot uses the title and description of each field to understand what is expected for the tool when generating a function call to run it (also see the detailed instructions below).\n",
+ "\n",
+ "\n",
+ " The following is an example of creating a chatbot extension:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from pydantic import BaseModel, Field\n",
+ "from skimage import io, filters, data\n",
+ "\n",
+ "\n",
+ "class ApplyFilterInput(BaseModel):\n",
+ " \"\"\"Apply a gaussian filter to an example image\"\"\"\n",
+ " sigma: float = Field(..., description=\"Standard deviation for Gaussian kernel\")\n",
+ "\n",
+ "async def apply_filter(kwargs):\n",
+ " config = ApplyFilterInput(**kwargs)\n",
+ "\n",
+ " # Load the image\n",
+ " image = data.cell()\n",
+ "\n",
+ " # Apply the filter\n",
+ " filtered_image = filters.gaussian(image, sigma=config.sigma)\n",
+ "\n",
+ " # Display the image\n",
+ " viewer = await api.showDialog(src=\"https://kitware.github.io/itk-vtk-viewer/app/\")\n",
+ " await viewer.setImage(filtered_image)\n",
+ " \n",
+ " return \"Filter applied successfully\"\n",
+ "\n",
+ "def get_schema():\n",
+ " return {\n",
+ " \"apply_filter\": ApplyFilterInput.schema(),\n",
+ " }\n",
+ "\n",
+ "# Define a chatbot extension\n",
+ "image_processing_extension = {\n",
+ " \"_rintf\": True,\n",
+ " \"id\": \"image-processing\",\n",
+ " \"type\": \"bioimageio-chatbot-extension\",\n",
+ " \"name\": \"Image Processing\",\n",
+ " \"description\": \"Apply gaussian filter to an example image\",\n",
+ " \"get_schema\": get_schema,\n",
+ " \"tools\": {\n",
+ " \"apply_filter\": apply_filter,\n",
+ " }\n",
+ "}"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Option 1: Register a chatbot extension as an ImJoy plugin\n",
+ "\n",
+    "The following will show the chatbot window; you can then type, for example, `run the gaussian filter example` to invoke the tool you defined in the extension."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from imjoy_rpc import api\n",
+ "\n",
+ "async def setup():\n",
+ " chatbot = await api.createWindow(src=\"https://bioimage.io/chat\", name=\"BioImage.IO Chatbot\")\n",
+ " await chatbot.registerExtension(image_processing_extension)\n",
+ "\n",
+ "api.export({\"setup\": setup})"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Option 2: Serving extension remotely using hypha\n",
+ "\n",
+    "Besides running the extension in the browser using ImJoy, you can also run the extension remotely using [hypha](https://ha.amun.ai). This allows the extension to run in a native Python environment which has easier access to hardware devices (e.g. the actual microscope) and more computational resources.\n",
+ "\n",
+ "Similar to the ImJoy plugin, you need to register the extension as a hypha service."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from hypha_rpc import connect_to_server, login\n",
+ "\n",
+ "server_url = \"https://chat.bioimage.io\"\n",
+ "token = await login({\"server_url\": server_url})\n",
+ "server = await connect_to_server({\"server_url\": server_url, \"token\": token})\n",
+ "# Below, we set the visibility to public\n",
+ "image_processing_extension['config'] = {\"visibility\": \"public\"}\n",
+ "svc = await server.register_service(image_processing_extension)\n",
+ "print(f\"Extension service registered with id: {svc.id}, you can visit the service at: https://bioimage.io/chat?server={server_url}&extension={svc.id}&assistant=Skyler\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "To use the chatbot, you can now click the link above, or use the following code to run the chatbot in the browser."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "## Display the chatbot as embedded iframe\n",
+ "from IPython.display import display, IFrame\n",
+ "display(IFrame(src=\"https://bioimage.io/chat\", width=\"100%\", height=\"600px\"))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Additional Examples\n",
+ "### Example 1: Controlling a microscope using the chatbot"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from pydantic import BaseModel, Field\n",
+ " \n",
+ "class MoveStageInput(BaseModel):\n",
+ " \"\"\"Move the microscope stage\"\"\"\n",
+ " x: float = Field(..., description=\"x offset\")\n",
+ " y: float = Field(..., description=\"y offset\")\n",
+ "\n",
+ "class SnapImageInput(BaseModel):\n",
+    "    \"\"\"Snap an image from the microscope\"\"\"\n",
+ " exposure: float = Field(..., description=\"exposure time\")\n",
+ "\n",
+ "async def move_stage(kwargs):\n",
+ " config = MoveStageInput(**kwargs)\n",
+ " print(config.x, config.y)\n",
+ "\n",
+ " return \"success\"\n",
+ "\n",
+ "async def snap_image(kwargs):\n",
+ " config = SnapImageInput(**kwargs)\n",
+ " print(config.exposure)\n",
+ " await api.showDialog(src=\"https://bioimage.io\")\n",
+ " return \"success\"\n",
+ "\n",
+ "def get_schema():\n",
+ " return {\n",
+ " \"move_stage\": MoveStageInput.schema(),\n",
+ " \"snap_image\": SnapImageInput.schema()\n",
+ " }\n",
+ "\n",
+    "# Define a chatbot extension\n",
+ "microscope_control_extension = {\n",
+ " \"_rintf\": True,\n",
+ " \"id\": \"microscope-control\",\n",
+ " \"type\": \"bioimageio-chatbot-extension\",\n",
+ " \"name\": \"Microscope Control\",\n",
+    "    \"description\": \"Control the microscope.\",\n",
+ " \"get_schema\": get_schema,\n",
+ " \"tools\": {\n",
+ " \"move_stage\": move_stage,\n",
+ " \"snap_image\": snap_image,\n",
+ " }\n",
+ "}\n",
+ "\n",
+ "from hypha_rpc import connect_to_server, login\n",
+ "\n",
+ "server_url = \"https://chat.bioimage.io\"\n",
+ "token = await login({\"server_url\": server_url})\n",
+ "server = await connect_to_server({\"server_url\": server_url, \"token\": token})\n",
+ "# Below, we set the visibility to public\n",
+ "microscope_control_extension['config'] = {\"visibility\": \"public\"}\n",
+ "svc = await server.register_service(microscope_control_extension)\n",
+ "print(f\"Extension service registered with id: {svc.id}, you can visit the service at: https://bioimage.io/chat?server={server_url}&extension={svc.id}&assistant=Skyler\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Example 2: Query the models in the model zoo via code generation"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 24,
+ "metadata": {
+ "trusted": true
+ },
+ "outputs": [
{
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "### Example 2: Query the models in the model zoo via code generation"
+ "data": {
+ "application/javascript": "window.connectPlugin && window.connectPlugin(\"b78046eb-f9bd-432c-a1ea-5776c21ba3be\")",
+ "text/plain": [
+ ""
]
+ },
+ "metadata": {},
+ "output_type": "display_data"
},
{
- "cell_type": "code",
- "execution_count": 24,
- "metadata": {
- "trusted": true
- },
- "outputs": [
- {
- "data": {
- "application/javascript": "window.connectPlugin && window.connectPlugin(\"b78046eb-f9bd-432c-a1ea-5776c21ba3be\")",
- "text/plain": [
- ""
- ]
- },
- "metadata": {},
- "output_type": "display_data"
- },
- {
- "data": {
- "text/html": [
- ""
- ],
- "text/plain": [
- ""
- ]
- },
- "metadata": {},
- "output_type": "display_data"
- },
- {
- "data": {
- "text/plain": [
- "<_GatheringFuture pending>"
- ]
- },
- "execution_count": 24,
- "metadata": {},
- "output_type": "execute_result"
- }
+ "data": {
+ "text/html": [
+ ""
],
- "source": [
- "from imjoy_rpc import api\n",
- "import sys\n",
- "import io\n",
- "from imjoy import api\n",
- "from js import fetch\n",
- "from pydantic import BaseModel, Field\n",
- "from typing import Optional\n",
- "from typing import List, Optional, Dict, Any\n",
- "\n",
- "class ResourceType(str):\n",
- " MODEL = \"model\"\n",
- " DATASET = \"dataset\"\n",
- " APPLICATION = \"application\"\n",
- "\n",
- "def normalize_text(text: str) -> str:\n",
- " return text.replace('_', ' ').lower()\n",
- "\n",
- "def matches_keywords(text: str, keywords: List[str]) -> bool:\n",
- " normalized_text = normalize_text(text)\n",
- " return any(keyword in normalized_text for keyword in keywords)\n",
- "\n",
- "def search_item(item: Dict[str, Any], keywords: List[str]) -> bool:\n",
- " search_fields = [item.get('id', ''), item.get('nickname', ''), item.get('name', ''),\n",
- " item.get('nickname_icon', ''), item.get('license', ''), item.get('description', '')\n",
- " ] + [tag for tag in item.get('tags', [])]\n",
- " search_fields += [author['name'] for author in item.get('authors', [])]\n",
- " return any(matches_keywords(field, keywords) for field in search_fields)\n",
- "\n",
- "def search(keywords, type, top_k, resource_items: List[Dict[str, Any]]) -> List[Dict[str, Any]]:\n",
- " keywords = [normalize_text(keyword) for keyword in keywords]\n",
- " filtered_items = []\n",
- " for item in resource_items:\n",
- " if type and item.get('type') != type:\n",
- " continue\n",
- " if search_item(item, keywords):\n",
- " filtered_items.append(item)\n",
- " if len(filtered_items) == top_k:\n",
- " break\n",
- " return filtered_items\n",
- "\n",
- "async def load_model_info():\n",
- " response = await fetch(\"https://bioimage-io.github.io/collection-bioimage-io/collection.json\")\n",
- " model_info = await response.json()\n",
- " model_info = model_info.to_py()\n",
- " resource_items = model_info['collection']\n",
- " return resource_items\n",
- "\n",
- "def execute_code(script, context=None):\n",
- " if context is None:\n",
- " context = {}\n",
- "\n",
- " # Redirect stdout and stderr to capture their output\n",
- " original_stdout = sys.stdout\n",
- " original_stderr = sys.stderr\n",
- " sys.stdout = io.StringIO()\n",
- " sys.stderr = io.StringIO()\n",
- "\n",
- " try:\n",
- " # Create a copy of the context to avoid modifying the original\n",
- " local_vars = context.copy()\n",
- "\n",
- " # Execute the provided Python script with access to context variables\n",
- " exec(script, local_vars)\n",
- "\n",
- " # Capture the output from stdout and stderr\n",
- " stdout_output = sys.stdout.getvalue()\n",
- " stderr_output = sys.stderr.getvalue()\n",
- "\n",
- " return {\n",
- " \"stdout\": stdout_output,\n",
- " \"stderr\": stderr_output,\n",
- " # \"context\": local_vars # Include context variables in the result\n",
- " }\n",
- " except Exception as e:\n",
- " return {\n",
- " \"stdout\": \"\",\n",
- " \"stderr\": str(e),\n",
- " # \"context\": context # Include context variables in the result even if an error occurs\n",
- " }\n",
- " finally:\n",
- " # Restore the original stdout and stderr\n",
- " sys.stdout = original_stdout\n",
- " sys.stderr = original_stderr\n",
- "\n",
- "async def register_chatbot_extension(register):\n",
- " resource_items = await load_model_info()\n",
- " types = set()\n",
- " tags = set()\n",
- " for resource in resource_items:\n",
- " types.add(resource['type'])\n",
- " tags.update(resource['tags'])\n",
- " types = list(types)\n",
- " tags = list(tags)[:5]\n",
- " resource_item_stats = f\"\"\"- keys: {list(resource_items[0].keys())}\\n- resource types: {types}\\n- Exampletags: {tags}\\n\"\"\" #Here is an example: {resource_items[0]}\n",
- "\n",
- " class ModelZooInfoScript(BaseModel):\n",
- " script: str = Field(..., description=\"\"\"Executable python script (Python runtime: Pyodide) for querying information\"\"\")\n",
- " \n",
- " ModelZooInfoScript.__doc__ = (\n",
- " \"Search the BioImage Model Zoo for statistical information by executing Python3 scripts on the resource items.\"\n",
- " \"For exampling counting models, applications, and datasets filtered by tags in the BioImage Model Zoo (bioimage.io). \"\n",
- " \"The generated scripts will be executed browser pyodide environment, the script can access data through the 'resources' local variable, containing zoo resources as dictionaries. \"\n",
- " \"Handle any missing fields in zoo items, and ensure outputs are directed to stdout. \"\n",
- " \"Filter resources by the 'type' key without making remote server requests. 'resources' variable details:\\\\n\"\n",
- " ) + resource_item_stats\n",
- "\n",
- "\n",
- " class ModelZooSearchInput(BaseModel):\n",
- " \"\"\"Search the BioImage Model Zoo (bioimage.io) resource items such as models, applications, datasets, etc. in the model zoo and return detailed information. The link format to the models etc. is: https://bioimage.io/#/?id=[ResourceID]\"\"\"\n",
- " keywords: List[str] = Field(..., description=\"List of keywords to search for in the model zoo.\")\n",
- " top_k: int = Field(3, description=\"The maximum number of search results to return. Default is 3. Please be aware each item may contain a large amount of data.\")\n",
- " type: Optional[ResourceType] = Field(None, description=\"The type of resource to search for. Options include 'model', 'dataset', 'application'.\")\n",
- "\n",
- "\n",
- " def get_schema():\n",
- " return {\n",
- " \"run_script\": ModelZooInfoScript.schema(),\n",
- " \"search\": ModelZooSearchInput.schema()\n",
- " }\n",
- "\n",
- " async def execute_script(kwargs):\n",
- " info_script = ModelZooInfoScript.parse_obj(kwargs)\n",
- " result = execute_code(info_script.script, {\"resources\": resource_items})\n",
- " return result\n",
- "\n",
- " async def execute_search(kwargs):\n",
- " config = ModelZooSearchInput.parse_obj(kwargs)\n",
- " result = search(config.keywords, config.type, config.top_k, resource_items)\n",
- " return result\n",
- "\n",
- " await register({\n",
- " \"_rintf\": True,\n",
- " \"id\": \"bioimage_model_zoo\",\n",
- " \"type\": \"bioimageio-chatbot-extension\",\n",
- " \"name\": \"BioImage Model Zoo\",\n",
- " \"description\": \"Getting information about models, applications, datasets, etc. in the BioImage Model Zoo. It takes a list of keywords or a python script to query the resources in the BioImage Model Zoo.\",\n",
- " \"get_schema\": get_schema,\n",
- " \"tools\": {\n",
- " \"run_script\": execute_script,\n",
- " \"search\": execute_search,\n",
- " }\n",
- " })\n",
- "\n",
- "\n",
- "async def setup():\n",
- " chatbot = await api.createWindow(src=\"https://bioimage.io/chat\")\n",
- " await register_chatbot_extension(chatbot.registerExtension)\n",
- "\n",
- "api.export({\"setup\": setup})"
+ "text/plain": [
+ ""
]
+ },
+ "metadata": {},
+ "output_type": "display_data"
},
{
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": []
- }
- ],
- "metadata": {
- "kernelspec": {
- "display_name": "Python (Pyodide)",
- "language": "python",
- "name": "python"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "python",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.8"
+ "data": {
+ "text/plain": [
+ "<_GatheringFuture pending>"
+ ]
+ },
+ "execution_count": 24,
+ "metadata": {},
+ "output_type": "execute_result"
}
+ ],
+ "source": [
+ "from imjoy_rpc import api\n",
+ "import sys\n",
+ "import io\n",
+ "from imjoy import api\n",
+ "from js import fetch\n",
+ "from pydantic import BaseModel, Field\n",
+ "from typing import Optional\n",
+ "from typing import List, Optional, Dict, Any\n",
+ "\n",
+ "class ResourceType(str):\n",
+ " MODEL = \"model\"\n",
+ " DATASET = \"dataset\"\n",
+ " APPLICATION = \"application\"\n",
+ "\n",
+ "def normalize_text(text: str) -> str:\n",
+ " return text.replace('_', ' ').lower()\n",
+ "\n",
+ "def matches_keywords(text: str, keywords: List[str]) -> bool:\n",
+ " normalized_text = normalize_text(text)\n",
+ " return any(keyword in normalized_text for keyword in keywords)\n",
+ "\n",
+ "def search_item(item: Dict[str, Any], keywords: List[str]) -> bool:\n",
+ " search_fields = [item.get('id', ''), item.get('nickname', ''), item.get('name', ''),\n",
+ " item.get('nickname_icon', ''), item.get('license', ''), item.get('description', '')\n",
+ " ] + [tag for tag in item.get('tags', [])]\n",
+ " search_fields += [author['name'] for author in item.get('authors', [])]\n",
+ " return any(matches_keywords(field, keywords) for field in search_fields)\n",
+ "\n",
+ "def search(keywords, type, top_k, resource_items: List[Dict[str, Any]]) -> List[Dict[str, Any]]:\n",
+ " keywords = [normalize_text(keyword) for keyword in keywords]\n",
+ " filtered_items = []\n",
+ " for item in resource_items:\n",
+ " if type and item.get('type') != type:\n",
+ " continue\n",
+ " if search_item(item, keywords):\n",
+ " filtered_items.append(item)\n",
+ " if len(filtered_items) == top_k:\n",
+ " break\n",
+ " return filtered_items\n",
+ "\n",
+ "async def load_model_info():\n",
+ " response = await fetch(\"https://bioimage-io.github.io/collection-bioimage-io/collection.json\")\n",
+ " model_info = await response.json()\n",
+ " model_info = model_info.to_py()\n",
+ " resource_items = model_info['collection']\n",
+ " return resource_items\n",
+ "\n",
+ "def execute_code(script, context=None):\n",
+ " if context is None:\n",
+ " context = {}\n",
+ "\n",
+ " # Redirect stdout and stderr to capture their output\n",
+ " original_stdout = sys.stdout\n",
+ " original_stderr = sys.stderr\n",
+ " sys.stdout = io.StringIO()\n",
+ " sys.stderr = io.StringIO()\n",
+ "\n",
+ " try:\n",
+ " # Create a copy of the context to avoid modifying the original\n",
+ " local_vars = context.copy()\n",
+ "\n",
+ " # Execute the provided Python script with access to context variables\n",
+ " exec(script, local_vars)\n",
+ "\n",
+ " # Capture the output from stdout and stderr\n",
+ " stdout_output = sys.stdout.getvalue()\n",
+ " stderr_output = sys.stderr.getvalue()\n",
+ "\n",
+ " return {\n",
+ " \"stdout\": stdout_output,\n",
+ " \"stderr\": stderr_output,\n",
+ " # \"context\": local_vars # Include context variables in the result\n",
+ " }\n",
+ " except Exception as e:\n",
+ " return {\n",
+ " \"stdout\": \"\",\n",
+ " \"stderr\": str(e),\n",
+ " # \"context\": context # Include context variables in the result even if an error occurs\n",
+ " }\n",
+ " finally:\n",
+ " # Restore the original stdout and stderr\n",
+ " sys.stdout = original_stdout\n",
+ " sys.stderr = original_stderr\n",
+ "\n",
+ "async def register_chatbot_extension(register):\n",
+ " resource_items = await load_model_info()\n",
+ " types = set()\n",
+ " tags = set()\n",
+ " for resource in resource_items:\n",
+ " types.add(resource['type'])\n",
+ " tags.update(resource['tags'])\n",
+ " types = list(types)\n",
+ " tags = list(tags)[:5]\n",
+    "    resource_item_stats = f\"\"\"- keys: {list(resource_items[0].keys())}\\n- resource types: {types}\\n- Example tags: {tags}\\n\"\"\"  # Here is an example: {resource_items[0]}\n",
+ "\n",
+ " class ModelZooInfoScript(BaseModel):\n",
+ " script: str = Field(..., description=\"\"\"Executable python script (Python runtime: Pyodide) for querying information\"\"\")\n",
+ " \n",
+ " ModelZooInfoScript.__doc__ = (\n",
+    "        \"Search the BioImage Model Zoo for statistical information by executing Python3 scripts on the resource items. \"\n",
+    "        \"For example, counting models, applications, and datasets filtered by tags in the BioImage Model Zoo (bioimage.io). \"\n",
+    "        \"The generated scripts will be executed in a browser Pyodide environment; the script can access data through the 'resources' local variable, containing zoo resources as dictionaries. \"\n",
+ " \"Handle any missing fields in zoo items, and ensure outputs are directed to stdout. \"\n",
+ " \"Filter resources by the 'type' key without making remote server requests. 'resources' variable details:\\\\n\"\n",
+ " ) + resource_item_stats\n",
+ "\n",
+ "\n",
+ " class ModelZooSearchInput(BaseModel):\n",
+ " \"\"\"Search the BioImage Model Zoo (bioimage.io) resource items such as models, applications, datasets, etc. in the model zoo and return detailed information. The link format to the models etc. is: https://bioimage.io/#/?id=[ResourceID]\"\"\"\n",
+ " keywords: List[str] = Field(..., description=\"List of keywords to search for in the model zoo.\")\n",
+ " top_k: int = Field(3, description=\"The maximum number of search results to return. Default is 3. Please be aware each item may contain a large amount of data.\")\n",
+ " type: Optional[ResourceType] = Field(None, description=\"The type of resource to search for. Options include 'model', 'dataset', 'application'.\")\n",
+ "\n",
+ "\n",
+ " def get_schema():\n",
+ " return {\n",
+ " \"run_script\": ModelZooInfoScript.schema(),\n",
+ " \"search\": ModelZooSearchInput.schema()\n",
+ " }\n",
+ "\n",
+ " async def execute_script(kwargs):\n",
+ " info_script = ModelZooInfoScript.parse_obj(kwargs)\n",
+ " result = execute_code(info_script.script, {\"resources\": resource_items})\n",
+ " return result\n",
+ "\n",
+ " async def execute_search(kwargs):\n",
+ " config = ModelZooSearchInput.parse_obj(kwargs)\n",
+ " result = search(config.keywords, config.type, config.top_k, resource_items)\n",
+ " return result\n",
+ "\n",
+ " await register({\n",
+ " \"_rintf\": True,\n",
+ " \"id\": \"bioimage_model_zoo\",\n",
+ " \"type\": \"bioimageio-chatbot-extension\",\n",
+ " \"name\": \"BioImage Model Zoo\",\n",
+ " \"description\": \"Getting information about models, applications, datasets, etc. in the BioImage Model Zoo. It takes a list of keywords or a python script to query the resources in the BioImage Model Zoo.\",\n",
+ " \"get_schema\": get_schema,\n",
+ " \"tools\": {\n",
+ " \"run_script\": execute_script,\n",
+ " \"search\": execute_search,\n",
+ " }\n",
+ " })\n",
+ "\n",
+ "\n",
+ "async def setup():\n",
+ " chatbot = await api.createWindow(src=\"https://bioimage.io/chat\")\n",
+ " await register_chatbot_extension(chatbot.registerExtension)\n",
+ "\n",
+ "api.export({\"setup\": setup})"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python (Pyodide)",
+ "language": "python",
+ "name": "python"
},
- "nbformat": 4,
- "nbformat_minor": 4
+ "language_info": {
+ "codemirror_mode": {
+ "name": "python",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.8"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
}
diff --git a/docs/development.md b/docs/development.md
index 8d90ff8..91367e3 100644
--- a/docs/development.md
+++ b/docs/development.md
@@ -159,7 +159,7 @@ console.log(`Extension service registered with id: ${svc.id}, you can visit the
### Serve Chatbot Extension with Hypha in Python
```python
-from imjoy_rpc.hypha import connect_to_server, login
+from hypha_rpc import connect_to_server, login
server_url = "https://chat.bioimage.io"
token = await login({"server_url": server_url})
@@ -179,7 +179,7 @@ To make it public, you need to set the visibility of the chatbot extension servi
See the example below:
```python
-from imjoy_rpc.hypha import connect_to_server, login
+from hypha_rpc import connect_to_server, login
server_url = "https://chat.bioimage.io"
token = await login({"server_url": server_url})
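To tie the two development.md hunks together, here is a condensed sketch of the full registration flow; the tool, its id, and the schema are placeholders, while the call names (`login`, `connect_to_server`, `register_service`, `get_schema`, `tools`, `config.visibility`) follow the notebook example earlier in this diff.

```python
# Condensed, illustrative registration flow (placeholder tool and ids).
import asyncio
from pydantic import BaseModel, Field
from hypha_rpc import connect_to_server, login

class HelloInput(BaseModel):
    """Say hello to someone."""
    name: str = Field(..., description="Name to greet")

async def say_hello(kwargs):
    config = HelloInput(**kwargs)
    return {"message": f"Hello, {config.name}!"}

def get_schema():
    return {"say_hello": HelloInput.schema()}

async def main():
    server_url = "https://chat.bioimage.io"
    token = await login({"server_url": server_url})
    server = await connect_to_server({"server_url": server_url, "token": token})
    svc = await server.register_service({
        "id": "hello-extension",  # placeholder id
        "type": "bioimageio-chatbot-extension",
        "name": "Hello Extension",
        "description": "Minimal placeholder tool for illustration",
        "config": {"visibility": "public"},
        "get_schema": get_schema,
        "tools": {"say_hello": say_hello},
    })
    print(f"Extension service registered with id: {svc.id}")

asyncio.run(main())
```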
diff --git a/docs/figure-2-use-cases.md b/docs/figure-2-use-cases.md
index f6d1dce..3922f3e 100644
--- a/docs/figure-2-use-cases.md
+++ b/docs/figure-2-use-cases.md
@@ -47,7 +47,7 @@ Follow the steps below to develop a new extension for microscope stage control a
- **Pre-requisites**: You will need a microscope and the squid control software
- **Create microscope extension**: Following the example in the above [chatbot extension example notebook](https://imjoy-notebook.netlify.app/lab/index.html?load=https://raw.githubusercontent.com/bioimage-io/bioimageio-chatbot/main/docs/bioimage-chatbot-extension-tutorial.ipynb&open=1), create a hypha service extension for controlling the microscope:
- 1. **Setup the Developer Environment**: Open a Jupyter Notebook. Install and import the `imjoy_rpc` and `pydantic` packages.
+ 1. **Setup the Developer Environment**: Open a Jupyter Notebook. Install and import the `imjoy_rpc`, `hypha_rpc` and `pydantic` packages.
2. **Define Input Schemas**: Create classes for `MoveStageInput` and `SnapImageInput` to structure the user input. (Note: To help the chatbot understand the "center", you will need to tell the chatbot about the boundaries of the stage via the docstring of the `MoveStageInput` class)
3. **Implement Control Functions**: Write asynchronous functions `move_stage` and `snap_image`.
4. **Setup Extension Interface**: Develop the extension interface and define a schema getter function.
diff --git a/pyproject.toml b/pyproject.toml
index 2be0bf9..8a94222 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -3,12 +3,12 @@ requires = ["setuptools", "wheel"]
[project]
name = "bioimageio-chatbot"
-version = "0.2.11"
+version = "0.2.12"
readme = "README.md"
description = "Your Personal Assistant in Computational BioImaging."
dependencies = [
- "schema-agents>=0.1.58",
- "imjoy-rpc>=0.5.48.post2",
+ "schema-agents>=0.1.59",
+ "hypha-rpc>=0.20.38",
"requests",
"pypdf",
"pillow",
diff --git a/requirements.txt b/requirements.txt
index cd5d0d7..035c818 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,10 +1,10 @@
-schema-agents>=0.1.58
-imjoy-rpc>=0.5.48.post2
+schema-agents>=0.1.59
+hypha-rpc==0.20.38
requests
pypdf
pillow
matplotlib
-hypha==0.15.53
+hypha==0.20.38.post19
tqdm
aiofiles
serpapi