From ff6f150c3e31c12119c264beb6c8112e8e9c85c4 Mon Sep 17 00:00:00 2001
From: colegottdank
Date: Tue, 13 Aug 2024 16:46:34 -0400
Subject: [PATCH 1/2] add upstash to docs

---
 docs/other-integrations/upstash.mdx | 68 +++++++++++++++++++++++++++++
 1 file changed, 68 insertions(+)
 create mode 100644 docs/other-integrations/upstash.mdx

diff --git a/docs/other-integrations/upstash.mdx b/docs/other-integrations/upstash.mdx
new file mode 100644
index 0000000000..580e372a3e
--- /dev/null
+++ b/docs/other-integrations/upstash.mdx
@@ -0,0 +1,68 @@
+---
+title: "Upstash QStash LLM Integration"
+sidebarTitle: "Upstash QStash"
+description: "Leverage Upstash QStash with Helicone to make asynchronous LLM calls, perfect for serverless environments. Enhance your LLM operations with extended timeouts, retries, and batching while maintaining full observability."
+---
+
+## Introduction
+
+Upstash QStash is a serverless message queue and task scheduler that seamlessly integrates with Helicone, allowing you to call any LLM asynchronously. This integration is particularly powerful for serverless environments, enabling you to overcome timeout limitations and efficiently manage LLM operations.
+
+## Key Features
+
+- **Asynchronous LLM Calls**: Make non-blocking LLM requests, ideal for serverless architectures.
+- **Support for Any LLM**: Use with any LLM provider while maintaining Helicone observability.
+- **Serverless-Friendly**: Perfect for environments with execution time limits.
+- **Extended Timeouts**: Up to 2 hours for long-running LLM tasks.
+- **Automatic Retries**: Handles rate limit resets efficiently.
+- **Batching Support**: Optimize large-scale LLM operations.
+
+## Integration Steps
+
+<Steps>
+  <Step title="Create an account + Generate an API key">
+    Log into [Helicone](https://www.helicone.ai) or create an account. Once you have an account, you
+    can generate an [API key](https://helicone.ai/developer).
+
+    <Note>
+      Make sure to generate a [write only API key](helicone-headers/helicone-auth).
+    </Note>
+  </Step>
+
+  <Step title="Set up Upstash QStash">
+    Create an [Upstash account](https://upstash.com/) and obtain your QStash token.
+  </Step>
+  <Step title="Send an LLM request through QStash">
+    Use the QStash client with your Helicone API key:
+
+    ```javascript
+    import { Client, custom } from "@upstash/qstash";
+
+    const client = new Client({ token: "<QSTASH_TOKEN>" });
+
+    await client.publishJSON({
+      api: {
+        name: "llm",
+        provider: custom({
+          token: "YOUR_LLM_PROVIDER_TOKEN",
+          baseUrl: "https://api.your-llm-provider.com",
+        }),
+        analytics: { name: "helicone", token: "YOUR_HELICONE_API_KEY" },
+      },
+      body: {
+        model: "your-chosen-model",
+        messages: [{ role: "user", content: "Your message here" }],
+      },
+      callback: "https://your-callback-url.com",
+    });
+    ```
+  </Step>
+
+  <Step title="View your logs">
+    🎉 You're all set! View your logs at [Helicone](https://www.helicone.ai).
+  </Step>
+</Steps>
+
+By combining Upstash QStash LLM with Helicone, you can build robust, serverless-compatible AI applications while maintaining comprehensive observability into your LLM operations.
+
+For more details on QStash LLM features and usage, visit the [Upstash QStash LLM documentation](https://upstash.com/docs/qstash/features/llm).
From 4e9a3fc6c656c63f51f686e8477846d252ed849b Mon Sep 17 00:00:00 2001
From: colegottdank
Date: Tue, 13 Aug 2024 16:47:56 -0400
Subject: [PATCH 2/2] oops

---
 docs/mint.json | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/docs/mint.json b/docs/mint.json
index 819c528988..8ed70b1e6c 100644
--- a/docs/mint.json
+++ b/docs/mint.json
@@ -177,7 +177,8 @@
         "other-integrations/meta-gpt",
         "other-integrations/open-devin",
         "other-integrations/embedchain",
-        "other-integrations/dify"
+        "other-integrations/dify",
+        "other-integrations/upstash"
       ]
     },
     {
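The doc added in patch 1 publishes the request with a `callback` URL but leaves the receiving side to the reader. As a minimal sketch of what that endpoint might look like — assuming a Next.js App Router route behind the hypothetical `https://your-callback-url.com`, and assuming QStash's callback payload carries the upstream LLM response base64-encoded in its `body` field (verify field names against the QStash callback docs) — it could be handled roughly like this:

```javascript
// app/api/llm-callback/route.js — hypothetical handler for the callback URL above.
// Assumption: QStash POSTs a JSON payload whose `body` field is the base64-encoded
// response returned by the LLM provider.
export async function POST(request) {
  const payload = await request.json();

  // Decode the base64-encoded LLM response delivered by QStash.
  const decoded = Buffer.from(payload.body, "base64").toString("utf-8");
  const completion = JSON.parse(decoded);

  // For a chat-completions-style response, the generated text typically lives here.
  console.log(completion.choices?.[0]?.message?.content);

  // Return 200 so QStash considers the callback delivered and does not retry it.
  return new Response("OK", { status: 200 });
}
```

QStash also signs its callback requests, so in a real deployment the handler would likely verify that signature (see the Upstash signature-verification docs) before trusting the payload; the Helicone log for the original request is unaffected either way, since observability is handled by the `analytics` block in the publish call.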