diff --git a/README.md b/README.md
index f2ad7c1e..492a2676 100644
--- a/README.md
+++ b/README.md
@@ -41,6 +41,7 @@ please [reach out to us](mailto:mwk@anyscale.com).
 
 ## Getting Help and Filing Bugs / Feature Requests
+
 We are eager to help you get started with Aviary. You can get help on:
 
 - Via Slack -- fill in [this form](https://docs.google.com/forms/d/e/1FAIpQLSfAcoiLCHOguOm8e7Jnn-JJdZaCxPGjgVCvFijHB5PLaQLeig/viewform) to sign up.
 
@@ -70,7 +71,7 @@ Open Source cloud deployment.
 
 You will need `ray` and `aviary` to be installed on your laptop.
 
-```
+```shell
 pip install -U "ray>=2.4.0"
 pip install "aviary @ git+https://github.com/ray-project/aviary.git"
 ```
@@ -80,7 +81,7 @@ The default Aviary installation only includes the Aviary CLI and SDK.
 To install the Aviary UI, use the following command.
 This will enable you to run the Aviary frontend on your laptop.
 
-```
+```shell
 pip install "aviary[frontend] @ git+https://github.com/ray-project/aviary.git"
 ```
 
@@ -101,7 +102,7 @@ You may need to specify your AWS private key in the `deploy/ray/aviary-cluster.y
 
 See [Ray on Cloud VMs](https://docs.ray.io/en/latest/cluster/vms/index.html) page in Ray documentation for more details.
 
-```
+```shell
 git clone https://github.com/ray-project/aviary.git
 cd aviary
 
@@ -111,7 +112,7 @@ ray up deploy/ray/aviary-cluster.yaml
 ```
 
 ### Connect to your Cluster
 
-```
+```shell
 # Connect to the Head node of your Ray Cluster (This will take several minutes to autoscale)
 ray attach deploy/ray/aviary-cluster.yaml
 
@@ -127,7 +128,7 @@ or define your own model YAML file and run that instead.
 
 From the head node, run the following commands.
 
-```
+```shell
 export AVIARY_URL="http://localhost:8000"
 
 # List the available models
@@ -136,7 +137,8 @@
 amazon/LightGPT
 
 # Query the model
 aviary query --model amazon/LightGPT --prompt "How do I make fried rice?"
-
+```
+```text
 amazon/LightGPT: To make fried rice, start by heating up some oil in a large pan over medium-high heat.
 Once the oil is hot, add your desired amount of vegetables and/or meat to the
@@ -146,13 +148,28 @@ everything together and cook for another few minutes until all the ingredients a
 cooked through. Serve with your favorite sides and enjoy!
 ```
 
+You can also use `aviary query` with certain LangChain-compatible APIs.
+Currently, we support the following APIs:
+* openai (`langchain.llms.OpenAIChat`)
+
+```shell
+# langchain is an optional dependency
+pip install langchain
+
+export OPENAI_API_KEY=...
+
+# Query an Aviary model and an OpenAI model
+# [PROVIDER]://[MODEL_NAME]
+aviary query --model amazon/LightGPT --model openai://gpt-3.5-turbo --prompt "How do I make fried rice?"
+```
+
 # Aviary Reference
 
 ## Installing Aviary
 
 To install Aviary and its dependencies, run the following command:
 
-```
+```shell
 pip install "aviary @ git+https://github.com/ray-project/aviary.git"
 ```
 
@@ -162,7 +179,7 @@ Aviary consists of a backend and a frontend, both of which come with additional
 dependencies. To install the dependencies for both frontend and backend
 for local development, run the following commands:
 
-```
+```shell
 pip install "aviary[frontend,backend] @ git+https://github.com/ray-project/aviary.git"
 ```
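The README changes above document the new `[PROVIDER]://[MODEL_NAME]` convention for `aviary query`. The same routing is exposed through the Python SDK, as the `aviary/api/sdk.py` changes below implement. Here is a minimal sketch of the SDK equivalent, assuming a reachable Aviary backend, `AVIARY_URL` and `OPENAI_API_KEY` set, and `langchain` installed:

```python
# Sketch only: assumes AVIARY_URL points at a running Aviary backend,
# OPENAI_API_KEY is set, and langchain has been pip-installed.
from aviary.api import sdk

# Aviary-served model: no "://" in the name, so it goes to the Aviary backend.
aviary_result = sdk.query("amazon/LightGPT", "How do I make fried rice?")

# LangChain-backed model, addressed as [PROVIDER]://[MODEL_NAME].
openai_result = sdk.query("openai://gpt-3.5-turbo", "How do I make fried rice?")

print(aviary_result["generated_text"])
print(openai_result["generated_text"])
```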
diff --git a/aviary/api/sdk.py b/aviary/api/sdk.py
index 018ef55d..40f9bf0e 100644
--- a/aviary/api/sdk.py
+++ b/aviary/api/sdk.py
@@ -1,5 +1,13 @@
 from typing import Any, Dict, List, Union
 
+try:
+    from langchain.llms import OpenAIChat
+
+    LANGCHAIN_INSTALLED = True
+    LANGCHAIN_SUPPORTED_PROVIDERS = {"openai": OpenAIChat}
+except ImportError:
+    LANGCHAIN_INSTALLED = False
+
 from aviary.api.env import assert_has_backend
 
 __all__ = ["models", "metadata", "query", "batch_query", "run"]
@@ -13,6 +21,41 @@ def models() -> List[str]:
     return backend.models()
 
 
+def _is_aviary_model(model: str) -> bool:
+    """
+    Determine if this is an Aviary model. Aviary
+    models do not have a '://' in them.
+    """
+    return "://" not in model
+
+
+def _supports_batching(model: str) -> bool:
+    provider, _ = model.split("://", 1)
+    return provider != "openai"
+
+
+def _get_langchain_model(model: str):
+    if not LANGCHAIN_INSTALLED:
+        raise ValueError(
+            f"Unsupported model {model}. If you want to use a langchain-"
+            "compatible model, install langchain (pip install langchain)."
+        )
+
+    provider, model_name = model.split("://", 1)
+    if provider not in LANGCHAIN_SUPPORTED_PROVIDERS:
+        raise ValueError(
+            f"Unknown model provider for {model}. Supported providers are: "
+            f"{', '.join(LANGCHAIN_SUPPORTED_PROVIDERS.keys())}"
+        )
+    return LANGCHAIN_SUPPORTED_PROVIDERS[provider](model_name=model_name)
+
+
+def _convert_to_aviary_format(model: str, llm_result):
+    # llm_result.generations holds one list of generations per prompt; emit
+    # one {"generated_text": ...} dict per prompt, matching Aviary's format.
+    return [{"generated_text": gens[0].text} for gens in llm_result.generations]
+
+
 def metadata(model_id: str) -> Dict[str, Dict[str, Any]]:
     """Get model metadata"""
     from aviary.common.backend import get_aviary_backend
@@ -25,8 +68,11 @@ def query(model: str, prompt: str) -> Dict[str, Union[str, float, int]]:
     """Query Aviary"""
     from aviary.common.backend import get_aviary_backend
 
-    backend = get_aviary_backend()
-    return backend.completions(prompt, model)
+    if _is_aviary_model(model):
+        backend = get_aviary_backend()
+        return backend.completions(prompt, model)
+    llm = _get_langchain_model(model)
+    return {"generated_text": llm.predict(prompt)}
 
 
 def batch_query(
@@ -35,12 +81,28 @@ def batch_query(
     """Batch Query Aviary"""
     from aviary.common.backend import get_aviary_backend
 
-    backend = get_aviary_backend()
-    return backend.batch_completions(prompts, model)
+    if _is_aviary_model(model):
+        backend = get_aviary_backend()
+        return backend.batch_completions(prompts, model)
+    else:
+        llm = _get_langchain_model(model)
+        if _supports_batching(model):
+            result = llm.generate(prompts)
+            converted = _convert_to_aviary_format(model, result)
+        else:
+            result = [{"generated_text": llm.predict(prompt)} for prompt in prompts]
+            converted = result
+        return converted
 
 
 def run(*model: str) -> None:
-    """Run Aviary on the local ray cluster"""
+    """Run Aviary on the local ray cluster
+
+    NOTE: This only works if you are running this command
+    on the Ray or Anyscale cluster directly. It does not
+    work from a machine that only has the URL and token
+    for a model.
+    """
     assert_has_backend()
 
     from aviary.backend.server.run import run
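To see what `_convert_to_aviary_format` consumes and produces, here is an illustrative sketch that builds a fake `LLMResult` by hand, shaped the way `llm.generate(prompts)` would return one. The `langchain.schema` types match LangChain's API at the time of this diff; the sample texts are made up:

```python
# Illustrative only: fabricate the LLMResult shape that llm.generate(prompts)
# returns, to show the conversion into Aviary's list-of-dicts format.
from langchain.schema import Generation, LLMResult

from aviary.api.sdk import _convert_to_aviary_format

fake_result = LLMResult(
    generations=[
        # One inner list per prompt, each holding that prompt's generations.
        [Generation(text="Start with day-old rice and a hot wok.")],
        [Generation(text="Soak the rice noodles before stir-frying.")],
    ]
)

converted = _convert_to_aviary_format("openai://gpt-3.5-turbo", fake_result)
# converted == [
#     {"generated_text": "Start with day-old rice and a hot wok."},
#     {"generated_text": "Soak the rice noodles before stir-frying."},
# ]
```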
diff --git a/setup.py b/setup.py
index 6694504f..077fab87 100644
--- a/setup.py
+++ b/setup.py
@@ -67,7 +67,7 @@
         ],
         "docs": [
             "mkdocs-material",
-        ]
+        ],
     },
     dependency_links=["https://download.pytorch.org/whl/cu118"],
     python_requires=">=3.8",
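Finally, a hypothetical batch call through the SDK, assuming the same setup as the earlier example and a `(model, prompts)` argument order for `batch_query`. Note how `_supports_batching` sends `openai://` models through a per-prompt `llm.predict` loop instead of a single `llm.generate` call:

```python
# Sketch, under the same assumptions as the earlier example.
from aviary.api import sdk

prompts = ["How do I make fried rice?", "How do I make pad thai?"]

# Aviary models go to backend.batch_completions in a single request.
aviary_results = sdk.batch_query("amazon/LightGPT", prompts)

# openai is marked non-batching by _supports_batching, so the SDK falls
# back to one llm.predict call per prompt and wraps each result itself.
openai_results = sdk.batch_query("openai://gpt-3.5-turbo", prompts)

for result in aviary_results + openai_results:
    print(result["generated_text"])
```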