Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

Simplify markdown table generation in magic command - %ai list #1251

Draft
wants to merge 1 commit into
base: main
Choose a base branch
from
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
73 changes: 43 additions & 30 deletions packages/jupyter-ai-magics/jupyter_ai_magics/magics.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@
from langchain.chains import LLMChain
from langchain.schema import HumanMessage
from langchain_core.messages import AIMessage
from py_markdown_table.markdown_table import markdown_table

from ._version import __version__
from .parsers import (
Expand Down Expand Up @@ -210,13 +211,13 @@ def _ai_inline_list_models_for_provider(self, provider_id, Provider):

# Is the required environment variable set?
def _ai_env_status_for_provider_markdown(self, provider_id):
na_message = "Not applicable. | " + NA_MESSAGE
na_message = "Not applicable."

if (
provider_id not in self.providers
or self.providers[provider_id].auth_strategy == None
):
return na_message # No emoji
return na_message, NA_MESSAGE

not_set_title = ENV_NOT_SET
set_title = ENV_SET
Expand All @@ -236,15 +237,14 @@ def _ai_env_status_for_provider_markdown(self, provider_id):
not_set_title = MULTIENV_NOT_SET
set_title = MULTIENV_SET
else: # No environment variables
return na_message
return na_message, NA_MESSAGE # Not applicable, with a "?" emoji

output = f"{env_var_display} | "
if env_status_ok:
output += f'<abbr title="{set_title}">✅</abbr>'
status_emoji = f'<abbr title="{set_title}">✅</abbr>'
else:
output += f'<abbr title="{not_set_title}">❌</abbr>'
status_emoji = f'<abbr title="{not_set_title}">❌</abbr>'

return output
return env_var_display, status_emoji

def _ai_env_status_for_provider_text(self, provider_id):
# only handle providers with "env" or "multienv" auth strategy
Expand Down Expand Up @@ -351,42 +351,55 @@ def handle_update(self, args: UpdateArgs):
return TextOrMarkdown(output, output)

def _ai_list_command_markdown(self, single_provider=None):
output = (
"| Provider | Environment variable | Set? | Models |\n"
+ "|----------|----------------------|------|--------|\n"
)
provider_info_list = []

if single_provider is not None and single_provider not in self.providers:
return f"There is no model provider with ID `{single_provider}`."

for provider_id, Provider in self.providers.items():
if single_provider is not None and provider_id != single_provider:
continue

output += (
f"| `{provider_id}` | "
+ self._ai_env_status_for_provider_markdown(provider_id)
+ " | "
+ self._ai_inline_list_models_for_provider(provider_id, Provider)
+ " |\n"
env_var_display, status_emoji = self._ai_env_status_for_provider_markdown(
provider_id
)
provider_data = {
"Provider": f"`{provider_id}`",
"Environment variable": env_var_display,
"Set?": status_emoji,
"Models": self._ai_inline_list_models_for_provider(
provider_id, Provider
),
}
provider_info_list.append(provider_data)

# Also list aliases.
if single_provider is None and len(self.custom_model_registry) > 0:
output += (
"\nAliases and custom commands:\n\n"
+ "| Name | Target |\n"
+ "|------|--------|\n"
)
alias_list = []
for key, value in self.custom_model_registry.items():
output += f"| `{key}` | "
if isinstance(value, str):
output += f"`{value}`"
else:
output += "*custom chain*"
target = f"`{value}`" if isinstance(value, str) else "*custom chain*"
alias_list.append({"Name": key, "Target": target})

# Generate the markdown table for providers
providers_info_markdown_table = (
markdown_table(provider_info_list)
.set_params(quote=False, row_sep="markdown")
.get_markdown()
)

output += " |\n"
# Generate markdown table for aliases
alias_markdown_table_header = "\n\n Aliases and custom commands:\n"
alias_markdown_table = (
markdown_table(alias_list)
.set_params(quote=False, row_sep="markdown")
.get_markdown()
)

return output
# Return the combined markdown tables
return (
providers_info_markdown_table
+ alias_markdown_table_header
+ alias_markdown_table
)

def _ai_list_command_text(self, single_provider=None):
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Can we return the same string in both the Markdown & text views? The text view is only seen when running ipython from the command line. You can see the different output by running this in your terminal:

% ipython
Python 3.11.11 | packaged by conda-forge | (main, Dec  5 2024, 14:21:42) [Clang 18.1.8 ]
Type 'copyright', 'credits' or 'license' for more information
IPython 8.30.0 -- An enhanced Interactive Python. Type '?' for help.

In [1]: %reload_ext jupyter_ai_magics
In [2]: %ai list

Right now, when you run %ai list from the command line, it is calling _ai_list_command_text(), which produces something different. It seems odd that we have two different implementations for the same command. Can you explore showing the Markdown tables when %ai list is called from IPython directly?

To get started, the handle_list() method should be changed to:

    def handle_list(self, args: ListArgs):
        markdown = self._ai_list_command_markdown(args.provider_id)
        return TextOrMarkdown(
            markdown,
            markdown,
        )

output = ""
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -124,8 +124,8 @@ class BedrockCustomProvider(BaseProvider, ChatBedrock):
),
]
help = (
"- For Cross-Region Inference use the appropriate `Inference profile ID` (Model ID with a region prefix, e.g., `us.meta.llama3-2-11b-instruct-v1:0`). See the [inference profiles documentation](https://docs.aws.amazon.com/bedrock/latest/userguide/inference-profiles-support.html). \n"
"- For custom/provisioned models, specify the model ARN (Amazon Resource Name) as the model ID. For more information, see the [Amazon Bedrock model IDs documentation](https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html).\n\n"
"<ul><li> For Cross-Region Inference use the appropriate `Inference profile ID` (Model ID with a region prefix, e.g., `us.meta.llama3-2-11b-instruct-v1:0`). See the [inference profiles documentation](https://docs.aws.amazon.com/bedrock/latest/userguide/inference-profiles-support.html).</li></ul>"
"<ul><li> For custom/provisioned models, specify the model ARN (Amazon Resource Name) as the model ID. For more information, see the [Amazon Bedrock model IDs documentation](https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html).</li></ul>"
Comment on lines +127 to +128
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Can you explain the motivation behind this change? This doesn't seem related to the other changes.

"The model provider must also be specified below. This is the provider of your foundation model *in lowercase*, e.g., `amazon`, `anthropic`, `cohere`, `meta`, or `mistral`."
)
registry = True
Expand Down
1 change: 1 addition & 0 deletions packages/jupyter-ai-magics/pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,7 @@ dependencies = [
"typing_extensions>=4.5.0",
"click~=8.0",
"jsonpath-ng>=1.5.3,<2",
"py_markdown_table>=1.3.0"
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

We should have a version ceiling in each of our dependencies. Without a version ceiling, users may accidentally install py_markdown_table==2.* if a v2 version of that package were to be released.

Since upgrading a major version generally introduces breaking changes, we should prevent this by adding a version ceiling:

Suggested change
"py_markdown_table>=1.3.0"
"py_markdown_table>=1.3.0,<2.0.0"

]

[project.optional-dependencies]
Expand Down
Loading