From d15b76617e6315f8dd238720071b1f470b1a3f3f Mon Sep 17 00:00:00 2001
From: Future-Outlier
Date: Wed, 17 Jan 2024 20:46:18 +0800
Subject: [PATCH] Agent Development Guide

Signed-off-by: Future-Outlier
---
 examples/development_lifecycle/README.md      |  1 +
 .../agent_development.py                      | 63 +++++++++++++++++++
 2 files changed, 64 insertions(+)
 create mode 100644 examples/development_lifecycle/development_lifecycle/agent_development.py

diff --git a/examples/development_lifecycle/README.md b/examples/development_lifecycle/README.md
index e3d8a6bed..eb067b222 100644
--- a/examples/development_lifecycle/README.md
+++ b/examples/development_lifecycle/README.md
@@ -5,6 +5,7 @@ You will gain an understanding of concepts like caching, the Flyte remote API, A
 
 ```{auto-examples-toc}
 agent_service
+agent_development
 private_images
 task_cache
 task_cache_serialize
diff --git a/examples/development_lifecycle/development_lifecycle/agent_development.py b/examples/development_lifecycle/development_lifecycle/agent_development.py
new file mode 100644
index 000000000..a48a5f91c
--- /dev/null
+++ b/examples/development_lifecycle/development_lifecycle/agent_development.py
@@ -0,0 +1,63 @@
+# %% [markdown]
+# # How to Develop an Agent Plugin Service
+#
+# ```{eval-rst}
+# .. tags:: Extensibility, Contribute, Intermediate
+# ```
+#
+# ## Why use a plugin service instead of writing the service within a task?
+# Flyte can call external web APIs in two ways: by writing the service within a task or by utilizing FlytePlugins.
+#
+# ### Writing the service within a task
+# Suppose you have a workflow that requires the integration of a web API, like ChatGPT.
+#
+# You might have Python code similar to the one below:
+#
+# ```python
+# @task()
+# def t1(input: str) -> str:
+#     completion = openai.ChatCompletion.create(
+#         model="gpt-3.5-turbo",
+#         messages=[
+#             {"role": "user", "content": input}
+#         ]
+#     )
+#     return completion.choices[0].message
+#
+# @workflow
+# def wf() -> str:
+#     return t1(input="Your Input Message!")
+# ```
+#
+# Here is how the task's lifecycle unfolds:
+# 1. FlytePropeller initiates a pod to execute the task.
+#
+# 2. The task, running within the pod, calls the ChatGPT API.
+#
+# 3. After the task is completed, FlytePropeller terminates the pod.
+#
+# This process can be resource-intensive and time-consuming, as initiating and terminating pods for each task execution consumes additional resources and takes a significant amount of time.
+#
+# ### Utilizing FlytePlugins
+# Let's analyze the example above and compare it to the code provided [here](https://docs.flyte.org/projects/cookbook/en/latest/auto_examples/bigquery_plugin/bigquery.html).
+#
+# ```python
+# bigquery_task_no_io = BigQueryTask(
+#     name="sql.bigquery.no_io",
+#     inputs={},
+#     query_template="SELECT 1",
+#     task_config=BigQueryConfig(ProjectID="flyte"),
+# )
+#
+#
+# @workflow
+# def no_io_wf():
+#     return bigquery_task_no_io()
+# ```
+#
+#
+# In this example, the lifecycle of the BigQuery task progresses as follows:
+#
+# 1. FlytePlugins invokes the BigQuery API, as seen [here](https://github.com/flyteorg/flyte/tree/master/flyteplugins/go/tasks/plugins/webapi/bigquery).
+#
+# This approach is notably quicker and more resource-efficient.