From 3cbeec50766cec8cbf8ac1e4b674cd27c97b7582 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Thu, 31 Oct 2024 16:18:59 +0000 Subject: [PATCH] deploy: 5d26adf8fecdbc2ea5e4b233620fdf206bd36446 --- 404.html | 4 ++-- assets/js/2cdbd7a9.cfbbb7ca.js | 1 - assets/js/2cdbd7a9.ec2983b2.js | 1 + assets/js/{8636fc49.6d88f35b.js => 8636fc49.38bbea3d.js} | 2 +- assets/js/{935f2afb.7b3804d9.js => 935f2afb.353156f9.js} | 2 +- assets/js/985a6965.3ed0935e.js | 1 - assets/js/985a6965.c3a8b5f7.js | 1 + assets/js/b29c0d53.3f13f96e.js | 1 + assets/js/b29c0d53.9d1957cf.js | 1 - assets/js/dcf666ca.7afee14b.js | 1 - assets/js/dcf666ca.feec4d42.js | 1 + assets/js/f988011b.38401c8d.js | 1 - assets/js/f988011b.3b1a5cf9.js | 1 + ...{runtime~main.5349ca5f.js => runtime~main.1993ec7f.js} | 2 +- blog/2023/04/27/index/index.html | 4 ++-- .../28/parsing-llm-input-with-llm-chain-0-8-2/index.html | 4 ++-- blog/archive/index.html | 4 ++-- blog/index.html | 4 ++-- blog/introducing-llm-chain-v060/index.html | 4 ++-- blog/introducing-llm-chain-v080/index.html | 4 ++-- blog/introducing-llm-chain/index.html | 4 ++-- blog/tags/chatgpt/index.html | 4 ++-- blog/tags/index.html | 4 ++-- blog/tags/introduction/index.html | 4 ++-- blog/tags/large-language-models/index.html | 4 ++-- blog/tags/llm-chain/index.html | 4 ++-- blog/tags/prompt-system/index.html | 4 ++-- blog/tags/rust/index.html | 4 ++-- blog/tags/templating/index.html | 4 ++-- blog/tags/tera/index.html | 4 ++-- blog/tags/update/index.html | 4 ++-- blog/using-chatgpt-in-rust/index.html | 4 ++-- docs/category/tutorial/index.html | 6 +++--- docs/chains/conversational/index.html | 4 ++-- docs/chains/map-reduce-chains/index.html | 6 +++--- docs/chains/sequential-chains/index.html | 4 ++-- docs/chains/what-are-chains/index.html | 4 ++-- docs/dev-setup/index.html | 4 ++-- .../building-a-multi-step-chain/index.html | 8 ++++---- .../generating-your-first-llm-output/index.html | 8 ++++---- docs/getting-started-tutorial/index/index.html | 4 ++-- .../setting-up-a-project/index.html | 8 ++++---- .../summarizing-text-with-map-reduce/index.html | 8 ++++---- .../using-prompt-templates-and-parameters/index.html | 8 ++++---- docs/introduction/index.html | 4 ++-- docs/llama-tutorial/index.html | 4 ++-- index.html | 4 ++-- llmcasual/index.html | 4 ++-- 48 files changed, 90 insertions(+), 90 deletions(-) delete mode 100644 assets/js/2cdbd7a9.cfbbb7ca.js create mode 100644 assets/js/2cdbd7a9.ec2983b2.js rename assets/js/{8636fc49.6d88f35b.js => 8636fc49.38bbea3d.js} (83%) rename assets/js/{935f2afb.7b3804d9.js => 935f2afb.353156f9.js} (62%) delete mode 100644 assets/js/985a6965.3ed0935e.js create mode 100644 assets/js/985a6965.c3a8b5f7.js create mode 100644 assets/js/b29c0d53.3f13f96e.js delete mode 100644 assets/js/b29c0d53.9d1957cf.js delete mode 100644 assets/js/dcf666ca.7afee14b.js create mode 100644 assets/js/dcf666ca.feec4d42.js delete mode 100644 assets/js/f988011b.38401c8d.js create mode 100644 assets/js/f988011b.3b1a5cf9.js rename assets/js/{runtime~main.5349ca5f.js => runtime~main.1993ec7f.js} (93%) diff --git a/404.html b/404.html index a511e048..d05188f6 100644 --- a/404.html +++ b/404.html @@ -13,13 +13,13 @@ - +
Skip to main content

Page Not Found

We could not find what you were looking for.

Please contact the owner of the site that linked you to the original URL and let them know their link is broken.

- + \ No newline at end of file diff --git a/assets/js/2cdbd7a9.cfbbb7ca.js b/assets/js/2cdbd7a9.cfbbb7ca.js deleted file mode 100644 index 13ce557f..00000000 --- a/assets/js/2cdbd7a9.cfbbb7ca.js +++ /dev/null @@ -1 +0,0 @@ -"use strict";(self.webpackChunkwebsite=self.webpackChunkwebsite||[]).push([[2174],{3905:(e,t,r)=>{r.d(t,{Zo:()=>m,kt:()=>h});var n=r(7294);function a(e,t,r){return t in e?Object.defineProperty(e,t,{value:r,enumerable:!0,configurable:!0,writable:!0}):e[t]=r,e}function i(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,n)}return r}function o(e){for(var t=1;t=0||(a[r]=e[r]);return a}(e,t);if(Object.getOwnPropertySymbols){var i=Object.getOwnPropertySymbols(e);for(n=0;n=0||Object.prototype.propertyIsEnumerable.call(e,r)&&(a[r]=e[r])}return a}var s=n.createContext({}),p=function(e){var t=n.useContext(s),r=t;return e&&(r="function"==typeof e?e(t):o(o({},t),e)),r},m=function(e){var t=p(e.components);return n.createElement(s.Provider,{value:t},e.children)},u="mdxType",c={inlineCode:"code",wrapper:function(e){var t=e.children;return n.createElement(n.Fragment,{},t)}},d=n.forwardRef((function(e,t){var r=e.components,a=e.mdxType,i=e.originalType,s=e.parentName,m=l(e,["components","mdxType","originalType","parentName"]),u=p(r),d=a,h=u["".concat(s,".").concat(d)]||u[d]||c[d]||i;return r?n.createElement(h,o(o({ref:t},m),{},{components:r})):n.createElement(h,o({ref:t},m))}));function h(e,t){var r=arguments,a=t&&t.mdxType;if("string"==typeof e||a){var i=r.length,o=new Array(i);o[0]=d;var l={};for(var s in t)hasOwnProperty.call(t,s)&&(l[s]=t[s]);l.originalType=e,l[u]="string"==typeof e?e:a,o[1]=l;for(var p=2;p{r.r(t),r.d(t,{assets:()=>s,contentTitle:()=>o,default:()=>c,frontMatter:()=>i,metadata:()=>l,toc:()=>p});var n=r(7462),a=(r(7294),r(3905));const i={},o="Summarizing Text with Map-Reduce in LLM-Chain",l={unversionedId:"getting-started-tutorial/summarizing-text-with-map-reduce",id:"getting-started-tutorial/summarizing-text-with-map-reduce",title:"Summarizing Text with Map-Reduce in LLM-Chain",description:"Having problems? Don't worry reach out on discord and we will help you out.",source:"@site/docs/getting-started-tutorial/05-summarizing-text-with-map-reduce.md",sourceDirName:"getting-started-tutorial",slug:"/getting-started-tutorial/summarizing-text-with-map-reduce",permalink:"/docs/getting-started-tutorial/summarizing-text-with-map-reduce",draft:!1,editUrl:"https://github.com/sobelio/llm-chain/tree/main/docs/docs/getting-started-tutorial/05-summarizing-text-with-map-reduce.md",tags:[],version:"current",sidebarPosition:5,frontMatter:{},sidebar:"sidebar",previous:{title:"Creating Your First Sequential Chain",permalink:"/docs/getting-started-tutorial/building-a-multi-step-chain"},next:{title:"Development Setup",permalink:"/docs/dev-setup"}},s={},p=[],m={toc:p},u="wrapper";function c(e){let{components:t,...r}=e;return(0,a.kt)(u,(0,n.Z)({},m,r,{components:t,mdxType:"MDXLayout"}),(0,a.kt)("h1",{id:"summarizing-text-with-map-reduce-in-llm-chain"},"Summarizing Text with Map-Reduce in LLM-Chain"),(0,a.kt)("admonition",{type:"tip"},(0,a.kt)("p",{parentName:"admonition"},"Having problems? Don't worry reach out on ",(0,a.kt)("a",{parentName:"p",href:"https://discord.gg/kewN9Gtjt2"},"discord")," and we will help you out.")),(0,a.kt)("p",null,"Map-reduce is a powerful technique for processing and aggregating data in parallel. 
In this tutorial, we'll explore how to use map-reduce in ",(0,a.kt)("inlineCode",{parentName:"p"},"llm-chain")," to summarize text effectively. We'll cover implementing a basic map-reduce for text summarization."),(0,a.kt)("p",null,'To start create a file named in "article_to_summarize.md" take the content of a wikipedia article and paste it in there.'),(0,a.kt)("p",null,"Here's a Rust program that demonstrates how to create a map-reduce chain for summarizing text:"),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre",className:"language-rust"},'use llm_chain::chains::map_reduce::Chain;\nuse llm_chain::step::Step;\nuse llm_chain::{executor, parameters, prompt, Parameters};\n\n#[tokio::main(flavor = "current_thread")]\nasync fn main() -> Result<(), Box> {\n // Create a new ChatGPT executor with the default settings\n let exec = executor!()?;\n\n // Create the "map" step to summarize an article into bullet points\n let map_prompt = Step::for_prompt_template(prompt!(\n "You are a bot for summarizing wikipedia articles, you are terse and focus on accuracy",\n "Summarize this article into bullet points:\\n{{text}}"\n ));\n\n // Create the "reduce" step to combine multiple summaries into one\n let reduce_prompt = Step::for_prompt_template(prompt!(\n "You are a diligent bot that summarizes text",\n "Please combine the articles below into one summary as bullet points:\\n{{text}}"\n ));\n\n // Create a map-reduce chain with the map and reduce steps\n let chain = Chain::new(map_prompt, reduce_prompt);\n\n // Load the content of the article to be summarized\n let article = include_str!("article_to_summarize.md");\n\n // Create a vector with the Parameters object containing the text of the article\n let docs = vec![parameters!(article)];\n\n // Run the chain with the provided documents and an empty Parameters object for the "reduce" step\n let res = chain.run(docs, Parameters::new(), &exec).await.unwrap();\n\n // Print the result to the console\n println!("{}", res.to_immediate().await?.as_content());\n Ok(())\n}\n')),(0,a.kt)("p",null,"Let's break down the code and understand the different parts:"),(0,a.kt)("ol",null,(0,a.kt)("li",{parentName:"ol"},"Define the map and reduce prompts as Step objects:\na. The map_prompt summarizes a given article into bullet points.\nb. The reduce_prompt combines multiple summaries into a single summary as bullet points."),(0,a.kt)("li",{parentName:"ol"},"Create a new map-reduce Chain by providing the map_prompt and reduce_prompt."),(0,a.kt)("li",{parentName:"ol"},"Load the article to be summarized and create a Parameters object with the text."),(0,a.kt)("li",{parentName:"ol"},"Execute the map-reduce Chain with the provided Parameters and store the result in res."),(0,a.kt)("li",{parentName:"ol"},"Print the LLM response to the console.")),(0,a.kt)("p",null,"This should be able to summarize any wikipedia article you might find. Play around with the prompt templates to make it best fit your usecase."),(0,a.kt)("hr",null),(0,a.kt)("p",null,"That's it folks, thanks for following along for the tutorial. You are now ready to use ",(0,a.kt)("inlineCode",{parentName:"p"},"llm-chain")," for something useful. 
Don't forget to stop by ",(0,a.kt)("a",{parentName:"p",href:"https://discord.gg/kewN9Gtjt2"},"discord")," and share what you are making."))}c.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/2cdbd7a9.ec2983b2.js b/assets/js/2cdbd7a9.ec2983b2.js new file mode 100644 index 00000000..20a5ffcc --- /dev/null +++ b/assets/js/2cdbd7a9.ec2983b2.js @@ -0,0 +1 @@ +"use strict";(self.webpackChunkwebsite=self.webpackChunkwebsite||[]).push([[2174],{3905:(e,t,r)=>{r.d(t,{Zo:()=>m,kt:()=>h});var n=r(7294);function a(e,t,r){return t in e?Object.defineProperty(e,t,{value:r,enumerable:!0,configurable:!0,writable:!0}):e[t]=r,e}function i(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,n)}return r}function o(e){for(var t=1;t=0||(a[r]=e[r]);return a}(e,t);if(Object.getOwnPropertySymbols){var i=Object.getOwnPropertySymbols(e);for(n=0;n=0||Object.prototype.propertyIsEnumerable.call(e,r)&&(a[r]=e[r])}return a}var s=n.createContext({}),p=function(e){var t=n.useContext(s),r=t;return e&&(r="function"==typeof e?e(t):o(o({},t),e)),r},m=function(e){var t=p(e.components);return n.createElement(s.Provider,{value:t},e.children)},u="mdxType",c={inlineCode:"code",wrapper:function(e){var t=e.children;return n.createElement(n.Fragment,{},t)}},d=n.forwardRef((function(e,t){var r=e.components,a=e.mdxType,i=e.originalType,s=e.parentName,m=l(e,["components","mdxType","originalType","parentName"]),u=p(r),d=a,h=u["".concat(s,".").concat(d)]||u[d]||c[d]||i;return r?n.createElement(h,o(o({ref:t},m),{},{components:r})):n.createElement(h,o({ref:t},m))}));function h(e,t){var r=arguments,a=t&&t.mdxType;if("string"==typeof e||a){var i=r.length,o=new Array(i);o[0]=d;var l={};for(var s in t)hasOwnProperty.call(t,s)&&(l[s]=t[s]);l.originalType=e,l[u]="string"==typeof e?e:a,o[1]=l;for(var p=2;p{r.r(t),r.d(t,{assets:()=>s,contentTitle:()=>o,default:()=>c,frontMatter:()=>i,metadata:()=>l,toc:()=>p});var n=r(7462),a=(r(7294),r(3905));const i={},o="Summarizing Text with Map-Reduce in LLM-Chain",l={unversionedId:"getting-started-tutorial/summarizing-text-with-map-reduce",id:"getting-started-tutorial/summarizing-text-with-map-reduce",title:"Summarizing Text with Map-Reduce in LLM-Chain",description:"Having problems? Don't worry, reach out on discord and we will help you out.",source:"@site/docs/getting-started-tutorial/05-summarizing-text-with-map-reduce.md",sourceDirName:"getting-started-tutorial",slug:"/getting-started-tutorial/summarizing-text-with-map-reduce",permalink:"/docs/getting-started-tutorial/summarizing-text-with-map-reduce",draft:!1,editUrl:"https://github.com/sobelio/llm-chain/tree/main/docs/docs/getting-started-tutorial/05-summarizing-text-with-map-reduce.md",tags:[],version:"current",sidebarPosition:5,frontMatter:{},sidebar:"sidebar",previous:{title:"Creating Your First Sequential Chain",permalink:"/docs/getting-started-tutorial/building-a-multi-step-chain"},next:{title:"Development Setup",permalink:"/docs/dev-setup"}},s={},p=[],m={toc:p},u="wrapper";function c(e){let{components:t,...r}=e;return(0,a.kt)(u,(0,n.Z)({},m,r,{components:t,mdxType:"MDXLayout"}),(0,a.kt)("h1",{id:"summarizing-text-with-map-reduce-in-llm-chain"},"Summarizing Text with Map-Reduce in LLM-Chain"),(0,a.kt)("admonition",{type:"tip"},(0,a.kt)("p",{parentName:"admonition"},"Having problems? 
Don't worry, reach out on ",(0,a.kt)("a",{parentName:"p",href:"https://discord.gg/kewN9Gtjt2"},"discord")," and we will help you out.")),(0,a.kt)("p",null,"Map-reduce is a powerful technique for processing and aggregating data in parallel. In this tutorial, we'll explore how to use map-reduce in ",(0,a.kt)("inlineCode",{parentName:"p"},"llm-chain")," to summarize text effectively. We'll cover implementing a basic map-reduce for text summarization."),(0,a.kt)("p",null,'To start, create a file named "article_to_summarize.md", take the content of a wikipedia article and paste it in there.'),(0,a.kt)("p",null,"Here's a Rust program that demonstrates how to create a map-reduce chain for summarizing text:"),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre",className:"language-rust"},'use llm_chain::chains::map_reduce::Chain;\nuse llm_chain::step::Step;\nuse llm_chain::{executor, parameters, prompt, Parameters};\n\n#[tokio::main(flavor = "current_thread")]\nasync fn main() -> Result<(), Box> {\n // Create a new ChatGPT executor with the default settings\n let exec = executor!()?;\n\n // Create the "map" step to summarize an article into bullet points\n let map_prompt = Step::for_prompt_template(prompt!(\n "You are a bot for summarizing wikipedia articles, you are terse and focus on accuracy",\n "Summarize this article into bullet points:\\n{{text}}"\n ));\n\n // Create the "reduce" step to combine multiple summaries into one\n let reduce_prompt = Step::for_prompt_template(prompt!(\n "You are a diligent bot that summarizes text",\n "Please combine the articles below into one summary as bullet points:\\n{{text}}"\n ));\n\n // Create a map-reduce chain with the map and reduce steps\n let chain = Chain::new(map_prompt, reduce_prompt);\n\n // Load the content of the article to be summarized\n let article = include_str!("article_to_summarize.md");\n\n // Create a vector with the Parameters object containing the text of the article\n let docs = vec![parameters!(article)];\n\n // Run the chain with the provided documents and an empty Parameters object for the "reduce" step\n let res = chain.run(docs, Parameters::new(), &exec).await.unwrap();\n\n // Print the result to the console\n println!("{}", res.to_immediate().await?.as_content());\n Ok(())\n}\n')),(0,a.kt)("p",null,"Let's break down the code and understand the different parts:"),(0,a.kt)("ol",null,(0,a.kt)("li",{parentName:"ol"},"Define the map and reduce prompts as Step objects:\na. The map_prompt summarizes a given article into bullet points.\nb. The reduce_prompt combines multiple summaries into a single summary as bullet points."),(0,a.kt)("li",{parentName:"ol"},"Create a new map-reduce Chain by providing the map_prompt and reduce_prompt."),(0,a.kt)("li",{parentName:"ol"},"Load the article to be summarized and create a Parameters object with the text."),(0,a.kt)("li",{parentName:"ol"},"Execute the map-reduce Chain with the provided Parameters and store the result in res."),(0,a.kt)("li",{parentName:"ol"},"Print the LLM response to the console.")),(0,a.kt)("p",null,"This should be able to summarize any wikipedia article you might find. Play around with the prompt templates to make it best fit your usecase."),(0,a.kt)("hr",null),(0,a.kt)("p",null,"That's it folks, thanks for following along for the tutorial. You are now ready to use ",(0,a.kt)("inlineCode",{parentName:"p"},"llm-chain")," for something useful. 
Don't forget to stop by ",(0,a.kt)("a",{parentName:"p",href:"https://discord.gg/kewN9Gtjt2"},"discord")," and share what you are making."))}c.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/8636fc49.6d88f35b.js b/assets/js/8636fc49.38bbea3d.js similarity index 83% rename from assets/js/8636fc49.6d88f35b.js rename to assets/js/8636fc49.38bbea3d.js index b577071e..44f20da4 100644 --- a/assets/js/8636fc49.6d88f35b.js +++ b/assets/js/8636fc49.38bbea3d.js @@ -1 +1 @@ -"use strict";(self.webpackChunkwebsite=self.webpackChunkwebsite||[]).push([[4008],{3905:(e,t,n)=>{n.d(t,{Zo:()=>l,kt:()=>d});var a=n(7294);function r(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}function i(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);t&&(a=a.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,a)}return n}function s(e){for(var t=1;t=0||(r[n]=e[n]);return r}(e,t);if(Object.getOwnPropertySymbols){var i=Object.getOwnPropertySymbols(e);for(a=0;a=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(r[n]=e[n])}return r}var c=a.createContext({}),p=function(e){var t=a.useContext(c),n=t;return e&&(n="function"==typeof e?e(t):s(s({},t),e)),n},l=function(e){var t=p(e.components);return a.createElement(c.Provider,{value:t},e.children)},u="mdxType",m={inlineCode:"code",wrapper:function(e){var t=e.children;return a.createElement(a.Fragment,{},t)}},h=a.forwardRef((function(e,t){var n=e.components,r=e.mdxType,i=e.originalType,c=e.parentName,l=o(e,["components","mdxType","originalType","parentName"]),u=p(n),h=r,d=u["".concat(c,".").concat(h)]||u[h]||m[h]||i;return n?a.createElement(d,s(s({ref:t},l),{},{components:n})):a.createElement(d,s({ref:t},l))}));function d(e,t){var n=arguments,r=t&&t.mdxType;if("string"==typeof e||r){var i=n.length,s=new Array(i);s[0]=h;var o={};for(var c in t)hasOwnProperty.call(t,c)&&(o[c]=t[c]);o.originalType=e,o[u]="string"==typeof e?e:r,s[1]=o;for(var p=2;p{n.r(t),n.d(t,{assets:()=>c,contentTitle:()=>s,default:()=>m,frontMatter:()=>i,metadata:()=>o,toc:()=>p});var a=n(7462),r=(n(7294),n(3905));const i={},s="Map-Reduce Chains",o={unversionedId:"chains/map-reduce-chains",id:"chains/map-reduce-chains",title:"Map-Reduce Chains",description:'Map-Reduce chains are a powerful way to process large amounts of text using large language models (LLMs). They consist of two main steps: a "map" step, which processes each text chunk independently, and a "reduce" step, which combines the results of the map step into a single output. 
This approach enables the efficient processing of large documents that exceed the LLM\'s context window size.',source:"@site/docs/chains/02-map-reduce-chains.md",sourceDirName:"chains",slug:"/chains/map-reduce-chains",permalink:"/docs/chains/map-reduce-chains",draft:!1,editUrl:"https://github.com/sobelio/llm-chain/tree/main/docs/docs/chains/02-map-reduce-chains.md",tags:[],version:"current",sidebarPosition:2,frontMatter:{},sidebar:"sidebar",previous:{title:"sequential-chains",permalink:"/docs/chains/sequential-chains"},next:{title:"Conversational Chains",permalink:"/docs/chains/conversational"}},c={},p=[],l={toc:p},u="wrapper";function m(e){let{components:t,...n}=e;return(0,r.kt)(u,(0,a.Z)({},l,n,{components:t,mdxType:"MDXLayout"}),(0,r.kt)("h1",{id:"map-reduce-chains"},"Map-Reduce Chains"),(0,r.kt)("p",null,'Map-Reduce chains are a powerful way to process large amounts of text using large language models (LLMs). They consist of two main steps: a "map" step, which processes each text chunk independently, and a "reduce" step, which combines the results of the map step into a single output. This approach enables the efficient processing of large documents that exceed the LLM\'s context window size.'),(0,r.kt)("p",null,"In this guide, we'll explain how to create and execute a map-reduce chain using an example. The example demonstrates how to summarize a Wikipedia article into bullet points using a two-step process:"),(0,r.kt)("ol",null,(0,r.kt)("li",{parentName:"ol"},'The "map" step summarizes each chunk of the article into bullet points.'),(0,r.kt)("li",{parentName:"ol"},'The "reduce" step combines all bullet point summaries into a single summary.')),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-rust"},'use llm_chain::chains::map_reduce::Chain;\nuse llm_chain::step::Step;\nuse llm_chain::{executor, parameters, prompt, Parameters};\n\n#[tokio::main(flavor = "current_thread")]\nasync fn main() -> Result<(), Box> {\n // Create a new ChatGPT executor with the default settings\n let exec = executor!()?;\n\n // Create the "map" step to summarize an article into bullet points\n let map_prompt = Step::for_prompt_template(prompt!(\n "You are a bot for summarizing wikipedia articles, you are terse and focus on accuracy",\n "Summarize this article into bullet points:\\n{{text}}"\n ));\n\n // Create the "reduce" step to combine multiple summaries into one\n let reduce_prompt = Step::for_prompt_template(prompt!(\n "You are a diligent bot that summarizes text",\n "Please combine the articles below into one summary as bullet points:\\n{{text}}"\n ));\n\n // Create a map-reduce chain with the map and reduce steps\n let chain = Chain::new(map_prompt, reduce_prompt);\n\n // Load the content of the article to be summarized\n let article = include_str!("article_to_summarize.md");\n\n // Create a vector with the Parameters object containing the text of the article\n let docs = vec![parameters!(article)];\n\n // Run the chain with the provided documents and an empty Parameters object for the "reduce" step\n let res = chain.run(docs, Parameters::new(), &exec).await.unwrap();\n\n // Print the result to the console\n println!("{}", res);\n Ok(())\n}\n')),(0,r.kt)("p",null,"n this example, we start by importing the necessary modules and defining the main function. We then create a new ChatGPT executor using the executor!() macro."),(0,r.kt)("p",null,'Next, we create the "map" and "reduce" steps using Step::for_prompt_template(). 
The "map" step is responsible for summarizing each article chunk, while the "reduce" step combines the summaries into a single output.'),(0,r.kt)("p",null,'After defining the steps, we create a new Chain object by passing in the "map" and "reduce" steps. We then load the content of the article to be summarized and create a Parameters object containing the text.'),(0,r.kt)("p",null,'Finally, we execute the map-reduce chain using the chain.run() method, passing in the documents, an empty Parameters object for the "reduce" step, and the executor. The result is printed to the console.'),(0,r.kt)("p",null,"Map-Reduce chains offer an effective way to handle large documents or multiple documents using LLMs. By breaking the text into manageable chunks and combining the results, you can create efficient pipelines for text processing tasks such as summarization, translation, and analysis."))}m.isMDXComponent=!0}}]); \ No newline at end of file +"use strict";(self.webpackChunkwebsite=self.webpackChunkwebsite||[]).push([[4008],{3905:(e,t,n)=>{n.d(t,{Zo:()=>l,kt:()=>d});var a=n(7294);function r(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}function i(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);t&&(a=a.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,a)}return n}function s(e){for(var t=1;t=0||(r[n]=e[n]);return r}(e,t);if(Object.getOwnPropertySymbols){var i=Object.getOwnPropertySymbols(e);for(a=0;a=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(r[n]=e[n])}return r}var c=a.createContext({}),p=function(e){var t=a.useContext(c),n=t;return e&&(n="function"==typeof e?e(t):s(s({},t),e)),n},l=function(e){var t=p(e.components);return a.createElement(c.Provider,{value:t},e.children)},u="mdxType",m={inlineCode:"code",wrapper:function(e){var t=e.children;return a.createElement(a.Fragment,{},t)}},h=a.forwardRef((function(e,t){var n=e.components,r=e.mdxType,i=e.originalType,c=e.parentName,l=o(e,["components","mdxType","originalType","parentName"]),u=p(n),h=r,d=u["".concat(c,".").concat(h)]||u[h]||m[h]||i;return n?a.createElement(d,s(s({ref:t},l),{},{components:n})):a.createElement(d,s({ref:t},l))}));function d(e,t){var n=arguments,r=t&&t.mdxType;if("string"==typeof e||r){var i=n.length,s=new Array(i);s[0]=h;var o={};for(var c in t)hasOwnProperty.call(t,c)&&(o[c]=t[c]);o.originalType=e,o[u]="string"==typeof e?e:r,s[1]=o;for(var p=2;p{n.r(t),n.d(t,{assets:()=>c,contentTitle:()=>s,default:()=>m,frontMatter:()=>i,metadata:()=>o,toc:()=>p});var a=n(7462),r=(n(7294),n(3905));const i={},s="Map-Reduce Chains",o={unversionedId:"chains/map-reduce-chains",id:"chains/map-reduce-chains",title:"Map-Reduce Chains",description:'Map-Reduce chains are a powerful way to process large amounts of text using large language models (LLMs). They consist of two main steps: a "map" step, which processes each text chunk independently, and a "reduce" step, which combines the results of the map step into a single output. 
This approach enables the efficient processing of large documents that exceed the LLM\'s context window size.',source:"@site/docs/chains/02-map-reduce-chains.md",sourceDirName:"chains",slug:"/chains/map-reduce-chains",permalink:"/docs/chains/map-reduce-chains",draft:!1,editUrl:"https://github.com/sobelio/llm-chain/tree/main/docs/docs/chains/02-map-reduce-chains.md",tags:[],version:"current",sidebarPosition:2,frontMatter:{},sidebar:"sidebar",previous:{title:"sequential-chains",permalink:"/docs/chains/sequential-chains"},next:{title:"Conversational Chains",permalink:"/docs/chains/conversational"}},c={},p=[],l={toc:p},u="wrapper";function m(e){let{components:t,...n}=e;return(0,r.kt)(u,(0,a.Z)({},l,n,{components:t,mdxType:"MDXLayout"}),(0,r.kt)("h1",{id:"map-reduce-chains"},"Map-Reduce Chains"),(0,r.kt)("p",null,'Map-Reduce chains are a powerful way to process large amounts of text using large language models (LLMs). They consist of two main steps: a "map" step, which processes each text chunk independently, and a "reduce" step, which combines the results of the map step into a single output. This approach enables the efficient processing of large documents that exceed the LLM\'s context window size.'),(0,r.kt)("p",null,"In this guide, we'll explain how to create and execute a map-reduce chain using an example. The example demonstrates how to summarize a Wikipedia article into bullet points using a two-step process:"),(0,r.kt)("ol",null,(0,r.kt)("li",{parentName:"ol"},'The "map" step summarizes each chunk of the article into bullet points.'),(0,r.kt)("li",{parentName:"ol"},'The "reduce" step combines all bullet point summaries into a single summary.')),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-rust"},'use llm_chain::chains::map_reduce::Chain;\nuse llm_chain::step::Step;\nuse llm_chain::{executor, parameters, prompt, Parameters};\n\n#[tokio::main(flavor = "current_thread")]\nasync fn main() -> Result<(), Box> {\n // Create a new ChatGPT executor with the default settings\n let exec = executor!()?;\n\n // Create the "map" step to summarize an article into bullet points\n let map_prompt = Step::for_prompt_template(prompt!(\n "You are a bot for summarizing wikipedia articles, you are terse and focus on accuracy",\n "Summarize this article into bullet points:\\n{{text}}"\n ));\n\n // Create the "reduce" step to combine multiple summaries into one\n let reduce_prompt = Step::for_prompt_template(prompt!(\n "You are a diligent bot that summarizes text",\n "Please combine the articles below into one summary as bullet points:\\n{{text}}"\n ));\n\n // Create a map-reduce chain with the map and reduce steps\n let chain = Chain::new(map_prompt, reduce_prompt);\n\n // Load the content of the article to be summarized\n let article = include_str!("article_to_summarize.md");\n\n // Create a vector with the Parameters object containing the text of the article\n let docs = vec![parameters!(article)];\n\n // Run the chain with the provided documents and an empty Parameters object for the "reduce" step\n let res = chain.run(docs, Parameters::new(), &exec).await.unwrap();\n\n // Print the result to the console\n println!("{}", res);\n Ok(())\n}\n')),(0,r.kt)("p",null,"In this example, we start by importing the necessary modules and defining the main function. We then create a new ChatGPT executor using the executor!() macro."),(0,r.kt)("p",null,'Next, we create the "map" and "reduce" steps using Step::for_prompt_template(). 
The "map" step is responsible for summarizing each article chunk, while the "reduce" step combines the summaries into a single output.'),(0,r.kt)("p",null,'After defining the steps, we create a new Chain object by passing in the "map" and "reduce" steps. We then load the content of the article to be summarized and create a Parameters object containing the text.'),(0,r.kt)("p",null,'Finally, we execute the map-reduce chain using the chain.run() method, passing in the documents, an empty Parameters object for the "reduce" step, and the executor. The result is printed to the console.'),(0,r.kt)("p",null,"Map-Reduce chains offer an effective way to handle large documents or multiple documents using LLMs. By breaking the text into manageable chunks and combining the results, you can create efficient pipelines for text processing tasks such as summarization, translation, and analysis."))}m.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/935f2afb.7b3804d9.js b/assets/js/935f2afb.353156f9.js similarity index 62% rename from assets/js/935f2afb.7b3804d9.js rename to assets/js/935f2afb.353156f9.js index b76b13e2..9a1d2c96 100644 --- a/assets/js/935f2afb.7b3804d9.js +++ b/assets/js/935f2afb.353156f9.js @@ -1 +1 @@ -"use strict";(self.webpackChunkwebsite=self.webpackChunkwebsite||[]).push([[53],{1109:t=>{t.exports=JSON.parse('{"pluginId":"default","version":"current","label":"Next","banner":null,"badge":false,"noIndex":false,"className":"docs-version-current","isLast":true,"docsSidebars":{"sidebar":[{"type":"link","label":"Introduction","href":"/docs/introduction","docId":"introduction"},{"type":"category","label":"Tutorial","collapsible":true,"collapsed":false,"customProps":{"description":"A tutorial for getting started with llm-chain"},"className":"red","items":[{"type":"link","label":"Getting started","href":"/docs/getting-started-tutorial/index","docId":"getting-started-tutorial/index"},{"type":"link","label":"Setting up a project with llm-chain","href":"/docs/getting-started-tutorial/setting-up-a-project","docId":"getting-started-tutorial/setting-up-a-project"},{"type":"link","label":"Generating Your First LLM Output","href":"/docs/getting-started-tutorial/generating-your-first-llm-output","docId":"getting-started-tutorial/generating-your-first-llm-output"},{"type":"link","label":"Using Prompt Templates and Parameters","href":"/docs/getting-started-tutorial/using-prompt-templates-and-parameters","docId":"getting-started-tutorial/using-prompt-templates-and-parameters"},{"type":"link","label":"Creating Your First Sequential Chain","href":"/docs/getting-started-tutorial/building-a-multi-step-chain","docId":"getting-started-tutorial/building-a-multi-step-chain"},{"type":"link","label":"Summarizing Text with Map-Reduce in LLM-Chain","href":"/docs/getting-started-tutorial/summarizing-text-with-map-reduce","docId":"getting-started-tutorial/summarizing-text-with-map-reduce"}],"href":"/docs/category/tutorial"},{"type":"link","label":"Development Setup","href":"/docs/dev-setup","docId":"dev-setup"},{"type":"category","label":"chains","collapsible":true,"collapsed":true,"items":[{"type":"link","label":"What are LLM chains and why are they useful?","href":"/docs/chains/what-are-chains","docId":"chains/what-are-chains"},{"type":"link","label":"sequential-chains","href":"/docs/chains/sequential-chains","docId":"chains/sequential-chains"},{"type":"link","label":"Map-Reduce 
Chains","href":"/docs/chains/map-reduce-chains","docId":"chains/map-reduce-chains"},{"type":"link","label":"Conversational Chains","href":"/docs/chains/conversational","docId":"chains/conversational"}]},{"type":"link","label":"Tutorial: Getting Started using the LLAMA driver","href":"/docs/llama-tutorial","docId":"llama-tutorial"}]},"docs":{"chains/conversational":{"id":"chains/conversational","title":"Conversational Chains","description":"Conversational chains enable you to have an ongoing conversation with a large language model (LLM). They keep track of the conversation history and manage the context, ensuring that the LLM\'s responses remain relevant and coherent throughout the conversation. Conversational chains are particularly useful for chatbot applications, multi-step interactions, and any scenario where context is essential.","sidebar":"sidebar"},"chains/map-reduce-chains":{"id":"chains/map-reduce-chains","title":"Map-Reduce Chains","description":"Map-Reduce chains are a powerful way to process large amounts of text using large language models (LLMs). They consist of two main steps: a \\"map\\" step, which processes each text chunk independently, and a \\"reduce\\" step, which combines the results of the map step into a single output. This approach enables the efficient processing of large documents that exceed the LLM\'s context window size.","sidebar":"sidebar"},"chains/sequential-chains":{"id":"chains/sequential-chains","title":"sequential-chains","description":"Sequential Chains","sidebar":"sidebar"},"chains/what-are-chains":{"id":"chains/what-are-chains","title":"What are LLM chains and why are they useful?","description":"Chains are a concept in the world of language models designed to model common patterns for applying large language models (LLMs) to a sequence of tasks. Although the term \\"chain\\" might suggest that it strictly involves chaining together LLM steps, the name has stuck, and it is now used more broadly.","sidebar":"sidebar"},"dev-setup":{"id":"dev-setup","title":"Development Setup","description":"First of all, thank you for considering contributing to our project! \ud83c\udf89 We are delighted to have you here and truly appreciate your interest in making our project even better. Your contributions and ideas are highly valued.","sidebar":"sidebar"},"getting-started-tutorial/building-a-multi-step-chain":{"id":"getting-started-tutorial/building-a-multi-step-chain","title":"Creating Your First Sequential Chain","description":"Having problems? Don\'t worry reach out on discord and we will help you out.","sidebar":"sidebar"},"getting-started-tutorial/generating-your-first-llm-output":{"id":"getting-started-tutorial/generating-your-first-llm-output","title":"Generating Your First LLM Output","description":"Having problems? Don\'t worry reach out on discord and we will help you out.","sidebar":"sidebar"},"getting-started-tutorial/index":{"id":"getting-started-tutorial/index","title":"Getting started","description":"Welcome to the Getting Started tutorial for llm-chain! This series of articles will guide you through the process of installing, setting up, and using the llm-chain library to make cool applications for LLMs. As you progress through the tutorials, you\'ll learn about generating text, using prompt templates, creating sequential chains, and summarizing text with map-reduce. We hope these tutorials provide you with a solid foundation to build upon and inspire you to create unique and innovative solutions using llm-chain. 
Let\'s get started!","sidebar":"sidebar"},"getting-started-tutorial/setting-up-a-project":{"id":"getting-started-tutorial/setting-up-a-project","title":"Setting up a project with llm-chain","description":"Having problems? Don\'t worry reach out on discord and we will help you out.","sidebar":"sidebar"},"getting-started-tutorial/summarizing-text-with-map-reduce":{"id":"getting-started-tutorial/summarizing-text-with-map-reduce","title":"Summarizing Text with Map-Reduce in LLM-Chain","description":"Having problems? Don\'t worry reach out on discord and we will help you out.","sidebar":"sidebar"},"getting-started-tutorial/using-prompt-templates-and-parameters":{"id":"getting-started-tutorial/using-prompt-templates-and-parameters","title":"Using Prompt Templates and Parameters","description":"Having problems? Don\'t worry reach out on discord and we will help you out.","sidebar":"sidebar"},"introduction":{"id":"introduction","title":"Introduction","description":"LLM-chain is a collection of Rust crates designed to help you work with Large Language Models (LLMs) more effectively. Our primary focus is on providing robust support for prompt templates and chaining together prompts in multi-step chains, enabling complex tasks that LLMs can\'t handle in a single step. This includes, but is not limited to, summarizing lengthy texts or performing advanced data processing tasks.","sidebar":"sidebar"},"llama-tutorial":{"id":"llama-tutorial","title":"Tutorial: Getting Started using the LLAMA driver","description":"In this tutorial, you will learn how to set up an llm-project using the LLAMA drive. If you wish to use the other drivers you can skip this part of the tutorial.","sidebar":"sidebar"}}}')}}]); \ No newline at end of file +"use strict";(self.webpackChunkwebsite=self.webpackChunkwebsite||[]).push([[53],{1109:t=>{t.exports=JSON.parse('{"pluginId":"default","version":"current","label":"Next","banner":null,"badge":false,"noIndex":false,"className":"docs-version-current","isLast":true,"docsSidebars":{"sidebar":[{"type":"link","label":"Introduction","href":"/docs/introduction","docId":"introduction"},{"type":"category","label":"Tutorial","collapsible":true,"collapsed":false,"customProps":{"description":"A tutorial for getting started with llm-chain"},"className":"red","items":[{"type":"link","label":"Getting started","href":"/docs/getting-started-tutorial/index","docId":"getting-started-tutorial/index"},{"type":"link","label":"Setting up a project with llm-chain","href":"/docs/getting-started-tutorial/setting-up-a-project","docId":"getting-started-tutorial/setting-up-a-project"},{"type":"link","label":"Generating Your First LLM Output","href":"/docs/getting-started-tutorial/generating-your-first-llm-output","docId":"getting-started-tutorial/generating-your-first-llm-output"},{"type":"link","label":"Using Prompt Templates and Parameters","href":"/docs/getting-started-tutorial/using-prompt-templates-and-parameters","docId":"getting-started-tutorial/using-prompt-templates-and-parameters"},{"type":"link","label":"Creating Your First Sequential Chain","href":"/docs/getting-started-tutorial/building-a-multi-step-chain","docId":"getting-started-tutorial/building-a-multi-step-chain"},{"type":"link","label":"Summarizing Text with Map-Reduce in LLM-Chain","href":"/docs/getting-started-tutorial/summarizing-text-with-map-reduce","docId":"getting-started-tutorial/summarizing-text-with-map-reduce"}],"href":"/docs/category/tutorial"},{"type":"link","label":"Development 
Setup","href":"/docs/dev-setup","docId":"dev-setup"},{"type":"category","label":"chains","collapsible":true,"collapsed":true,"items":[{"type":"link","label":"What are LLM chains and why are they useful?","href":"/docs/chains/what-are-chains","docId":"chains/what-are-chains"},{"type":"link","label":"sequential-chains","href":"/docs/chains/sequential-chains","docId":"chains/sequential-chains"},{"type":"link","label":"Map-Reduce Chains","href":"/docs/chains/map-reduce-chains","docId":"chains/map-reduce-chains"},{"type":"link","label":"Conversational Chains","href":"/docs/chains/conversational","docId":"chains/conversational"}]},{"type":"link","label":"Tutorial: Getting Started using the LLAMA driver","href":"/docs/llama-tutorial","docId":"llama-tutorial"}]},"docs":{"chains/conversational":{"id":"chains/conversational","title":"Conversational Chains","description":"Conversational chains enable you to have an ongoing conversation with a large language model (LLM). They keep track of the conversation history and manage the context, ensuring that the LLM\'s responses remain relevant and coherent throughout the conversation. Conversational chains are particularly useful for chatbot applications, multi-step interactions, and any scenario where context is essential.","sidebar":"sidebar"},"chains/map-reduce-chains":{"id":"chains/map-reduce-chains","title":"Map-Reduce Chains","description":"Map-Reduce chains are a powerful way to process large amounts of text using large language models (LLMs). They consist of two main steps: a \\"map\\" step, which processes each text chunk independently, and a \\"reduce\\" step, which combines the results of the map step into a single output. This approach enables the efficient processing of large documents that exceed the LLM\'s context window size.","sidebar":"sidebar"},"chains/sequential-chains":{"id":"chains/sequential-chains","title":"sequential-chains","description":"Sequential Chains","sidebar":"sidebar"},"chains/what-are-chains":{"id":"chains/what-are-chains","title":"What are LLM chains and why are they useful?","description":"Chains are a concept in the world of language models designed to model common patterns for applying large language models (LLMs) to a sequence of tasks. Although the term \\"chain\\" might suggest that it strictly involves chaining together LLM steps, the name has stuck, and it is now used more broadly.","sidebar":"sidebar"},"dev-setup":{"id":"dev-setup","title":"Development Setup","description":"First of all, thank you for considering contributing to our project! \ud83c\udf89 We are delighted to have you here and truly appreciate your interest in making our project even better. Your contributions and ideas are highly valued.","sidebar":"sidebar"},"getting-started-tutorial/building-a-multi-step-chain":{"id":"getting-started-tutorial/building-a-multi-step-chain","title":"Creating Your First Sequential Chain","description":"Having problems? Don\'t worry, reach out on discord and we will help you out.","sidebar":"sidebar"},"getting-started-tutorial/generating-your-first-llm-output":{"id":"getting-started-tutorial/generating-your-first-llm-output","title":"Generating Your First LLM Output","description":"Having problems? Don\'t worry, reach out on discord and we will help you out.","sidebar":"sidebar"},"getting-started-tutorial/index":{"id":"getting-started-tutorial/index","title":"Getting started","description":"Welcome to the Getting Started tutorial for llm-chain! 
This series of articles will guide you through the process of installing, setting up, and using the llm-chain library to make cool applications for LLMs. As you progress through the tutorials, you\'ll learn about generating text, using prompt templates, creating sequential chains, and summarizing text with map-reduce. We hope these tutorials provide you with a solid foundation to build upon and inspire you to create unique and innovative solutions using llm-chain. Let\'s get started!","sidebar":"sidebar"},"getting-started-tutorial/setting-up-a-project":{"id":"getting-started-tutorial/setting-up-a-project","title":"Setting up a project with llm-chain","description":"Having problems? Don\'t worry, reach out on discord and we will help you out.","sidebar":"sidebar"},"getting-started-tutorial/summarizing-text-with-map-reduce":{"id":"getting-started-tutorial/summarizing-text-with-map-reduce","title":"Summarizing Text with Map-Reduce in LLM-Chain","description":"Having problems? Don\'t worry, reach out on discord and we will help you out.","sidebar":"sidebar"},"getting-started-tutorial/using-prompt-templates-and-parameters":{"id":"getting-started-tutorial/using-prompt-templates-and-parameters","title":"Using Prompt Templates and Parameters","description":"Having problems? Don\'t worry, reach out on discord and we will help you out.","sidebar":"sidebar"},"introduction":{"id":"introduction","title":"Introduction","description":"LLM-chain is a collection of Rust crates designed to help you work with Large Language Models (LLMs) more effectively. Our primary focus is on providing robust support for prompt templates and chaining together prompts in multi-step chains, enabling complex tasks that LLMs can\'t handle in a single step. This includes, but is not limited to, summarizing lengthy texts or performing advanced data processing tasks.","sidebar":"sidebar"},"llama-tutorial":{"id":"llama-tutorial","title":"Tutorial: Getting Started using the LLAMA driver","description":"In this tutorial, you will learn how to set up an llm-project using the LLAMA drive. 
If you wish to use the other drivers you can skip this part of the tutorial.","sidebar":"sidebar"}}}')}}]); \ No newline at end of file diff --git a/assets/js/985a6965.3ed0935e.js b/assets/js/985a6965.3ed0935e.js deleted file mode 100644 index aed5b8c0..00000000 --- a/assets/js/985a6965.3ed0935e.js +++ /dev/null @@ -1 +0,0 @@ -"use strict";(self.webpackChunkwebsite=self.webpackChunkwebsite||[]).push([[4086],{3905:(e,t,a)=>{a.d(t,{Zo:()=>c,kt:()=>d});var n=a(7294);function r(e,t,a){return t in e?Object.defineProperty(e,t,{value:a,enumerable:!0,configurable:!0,writable:!0}):e[t]=a,e}function i(e,t){var a=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),a.push.apply(a,n)}return a}function o(e){for(var t=1;t=0||(r[a]=e[a]);return r}(e,t);if(Object.getOwnPropertySymbols){var i=Object.getOwnPropertySymbols(e);for(n=0;n=0||Object.prototype.propertyIsEnumerable.call(e,a)&&(r[a]=e[a])}return r}var s=n.createContext({}),p=function(e){var t=n.useContext(s),a=t;return e&&(a="function"==typeof e?e(t):o(o({},t),e)),a},c=function(e){var t=p(e.components);return n.createElement(s.Provider,{value:t},e.children)},u="mdxType",m={inlineCode:"code",wrapper:function(e){var t=e.children;return n.createElement(n.Fragment,{},t)}},h=n.forwardRef((function(e,t){var a=e.components,r=e.mdxType,i=e.originalType,s=e.parentName,c=l(e,["components","mdxType","originalType","parentName"]),u=p(a),h=r,d=u["".concat(s,".").concat(h)]||u[h]||m[h]||i;return a?n.createElement(d,o(o({ref:t},c),{},{components:a})):n.createElement(d,o({ref:t},c))}));function d(e,t){var a=arguments,r=t&&t.mdxType;if("string"==typeof e||r){var i=a.length,o=new Array(i);o[0]=h;var l={};for(var s in t)hasOwnProperty.call(t,s)&&(l[s]=t[s]);l.originalType=e,l[u]="string"==typeof e?e:r,o[1]=l;for(var p=2;p{a.r(t),a.d(t,{assets:()=>s,contentTitle:()=>o,default:()=>m,frontMatter:()=>i,metadata:()=>l,toc:()=>p});var n=a(7462),r=(a(7294),a(3905));const i={},o="Creating Your First Sequential Chain",l={unversionedId:"getting-started-tutorial/building-a-multi-step-chain",id:"getting-started-tutorial/building-a-multi-step-chain",title:"Creating Your First Sequential Chain",description:"Having problems? Don't worry reach out on discord and we will help you out.",source:"@site/docs/getting-started-tutorial/04-building-a-multi-step-chain.md",sourceDirName:"getting-started-tutorial",slug:"/getting-started-tutorial/building-a-multi-step-chain",permalink:"/docs/getting-started-tutorial/building-a-multi-step-chain",draft:!1,editUrl:"https://github.com/sobelio/llm-chain/tree/main/docs/docs/getting-started-tutorial/04-building-a-multi-step-chain.md",tags:[],version:"current",sidebarPosition:4,frontMatter:{},sidebar:"sidebar",previous:{title:"Using Prompt Templates and Parameters",permalink:"/docs/getting-started-tutorial/using-prompt-templates-and-parameters"},next:{title:"Summarizing Text with Map-Reduce in LLM-Chain",permalink:"/docs/getting-started-tutorial/summarizing-text-with-map-reduce"}},s={},p=[{value:"Best Practices and Tips",id:"best-practices-and-tips",level:2}],c={toc:p},u="wrapper";function m(e){let{components:t,...a}=e;return(0,r.kt)(u,(0,n.Z)({},c,a,{components:t,mdxType:"MDXLayout"}),(0,r.kt)("h1",{id:"creating-your-first-sequential-chain"},"Creating Your First Sequential Chain"),(0,r.kt)("admonition",{type:"tip"},(0,r.kt)("p",{parentName:"admonition"},"Having problems? 
Don't worry reach out on ",(0,r.kt)("a",{parentName:"p",href:"https://discord.gg/kewN9Gtjt2"},"discord")," and we will help you out.")),(0,r.kt)("p",null,"Sequential chains in LLM-Chain allow you to execute a series of steps, with the output of each step feeding into the next one. This tutorial will guide you through creating a sequential chain, extending it with more steps, and provide some best practices and tips."),(0,r.kt)("p",null,"Here's a Rust program that demonstrates how to create a sequential chain:"),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-rust"},'use llm_chain::parameters;\nuse llm_chain::step::Step;\nuse llm_chain::traits::Executor as ExecutorTrait;\nuse llm_chain::{chains::sequential::Chain, prompt};\nuse llm_chain_openai::chatgpt::Executor;\n\n#[tokio::main(flavor = "current_thread")]\nasync fn main() -> Result<(), Box> {\n // Create a new ChatGPT executor with the default settings\n let exec = Executor::new()?;\n\n // Create a chain of steps with two prompts\n let chain: Chain = Chain::new(vec![\n // First step: make a personalized birthday email\n Step::for_prompt_template(\n prompt!("You are a bot for making personalized greetings", "Make personalized birthday e-mail to the whole company for {{name}} who has their birthday on {{date}}. Include their name")\n ),\n\n // Second step: summarize the email into a tweet. Importantly, the text parameter becomes the result of the previous prompt.\n Step::for_prompt_template(\n prompt!( "You are an assistant for managing social media accounts for a company", "Summarize this email into a tweet to be sent by the company, use emoji if you can. \\n--\\n{{text}}")\n )\n ]);\n\n // Run the chain with the provided parameters\n let res = chain\n .run(\n // Create a Parameters object with key-value pairs for the placeholders\n parameters!("name" => "Emil", "date" => "February 30th 2023"),\n &exec,\n )\n .await\n .unwrap();\n\n // Print the result to the console\n println!("{}", res.to_immediate().await?.as_content());\n Ok(())\n}\n')),(0,r.kt)("ol",null,(0,r.kt)("li",{parentName:"ol"},(0,r.kt)("p",{parentName:"li"},"We start by importing the necessary modules from the ",(0,r.kt)("inlineCode",{parentName:"p"},"llm_chain")," and ",(0,r.kt)("inlineCode",{parentName:"p"},"llm_chain_openai")," libraries.")),(0,r.kt)("li",{parentName:"ol"},(0,r.kt)("p",{parentName:"li"},"The main async function is defined, using Tokio as the runtime.")),(0,r.kt)("li",{parentName:"ol"},(0,r.kt)("p",{parentName:"li"},"We create a new ",(0,r.kt)("inlineCode",{parentName:"p"},"Executor")," with the default settings.")),(0,r.kt)("li",{parentName:"ol"},(0,r.kt)("p",{parentName:"li"},"We create a ",(0,r.kt)("inlineCode",{parentName:"p"},"Chain")," that contains two steps, each with a different prompt:"),(0,r.kt)("ul",{parentName:"li"},(0,r.kt)("li",{parentName:"ul"},"The first step has a prompt to make a personalized birthday email for a company."),(0,r.kt)("li",{parentName:"ul"},"The second step has a prompt to summarize the email into a tweet.")),(0,r.kt)("p",{parentName:"li"},"Both prompts use placeholders (e.g., ",(0,r.kt)("inlineCode",{parentName:"p"},"{{name}}"),", ",(0,r.kt)("inlineCode",{parentName:"p"},"{{date}}"),", and ",(0,r.kt)("inlineCode",{parentName:"p"},"{{text}}"),") that will be replaced with specific values later. 
Importantly the value of ",(0,r.kt)("inlineCode",{parentName:"p"},"{{text}}")," will replaced by result of the first step in the chain.")),(0,r.kt)("li",{parentName:"ol"},(0,r.kt)("p",{parentName:"li"},"We run the ",(0,r.kt)("inlineCode",{parentName:"p"},"Chain")," with the provided parameters:"),(0,r.kt)("ul",{parentName:"li"},(0,r.kt)("li",{parentName:"ul"},"We create a ",(0,r.kt)("inlineCode",{parentName:"li"},"Parameters")," object with key-value pairs for the placeholders: ",(0,r.kt)("inlineCode",{parentName:"li"},'("name", "Emil")')," and ",(0,r.kt)("inlineCode",{parentName:"li"},'("date", "February 30th 2023")'),"."),(0,r.kt)("li",{parentName:"ul"},"We pass the ",(0,r.kt)("inlineCode",{parentName:"li"},"Parameters")," object and the ",(0,r.kt)("inlineCode",{parentName:"li"},"Executor")," to the ",(0,r.kt)("inlineCode",{parentName:"li"},"run()")," method."))),(0,r.kt)("li",{parentName:"ol"},(0,r.kt)("p",{parentName:"li"},"We unwrap the result and print it to the console."))),(0,r.kt)("h2",{id:"best-practices-and-tips"},"Best Practices and Tips"),(0,r.kt)("p",null,"When working with sequential chains, consider the following tips and best practices:"),(0,r.kt)("ol",null,(0,r.kt)("li",{parentName:"ol"},"Use descriptive and clear instructions for the system role to help guide the LLM."),(0,r.kt)("li",{parentName:"ol"},"Keep the chain as short and simple as possible. Longer chains are harder to manage and debug."),(0,r.kt)("li",{parentName:"ol"},"Test each step independently before in")),(0,r.kt)("p",null,"For the next tutorial we will switch our focus from sequential to map-reduce chains. Map reduce chains are more complicated than sequential chains but allow us to do things that sequential chains can't. Stay tuned!"))}m.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/985a6965.c3a8b5f7.js b/assets/js/985a6965.c3a8b5f7.js new file mode 100644 index 00000000..41512b94 --- /dev/null +++ b/assets/js/985a6965.c3a8b5f7.js @@ -0,0 +1 @@ +"use strict";(self.webpackChunkwebsite=self.webpackChunkwebsite||[]).push([[4086],{3905:(e,t,a)=>{a.d(t,{Zo:()=>c,kt:()=>d});var n=a(7294);function r(e,t,a){return t in e?Object.defineProperty(e,t,{value:a,enumerable:!0,configurable:!0,writable:!0}):e[t]=a,e}function i(e,t){var a=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),a.push.apply(a,n)}return a}function o(e){for(var t=1;t=0||(r[a]=e[a]);return r}(e,t);if(Object.getOwnPropertySymbols){var i=Object.getOwnPropertySymbols(e);for(n=0;n=0||Object.prototype.propertyIsEnumerable.call(e,a)&&(r[a]=e[a])}return r}var s=n.createContext({}),p=function(e){var t=n.useContext(s),a=t;return e&&(a="function"==typeof e?e(t):o(o({},t),e)),a},c=function(e){var t=p(e.components);return n.createElement(s.Provider,{value:t},e.children)},u="mdxType",m={inlineCode:"code",wrapper:function(e){var t=e.children;return n.createElement(n.Fragment,{},t)}},h=n.forwardRef((function(e,t){var a=e.components,r=e.mdxType,i=e.originalType,s=e.parentName,c=l(e,["components","mdxType","originalType","parentName"]),u=p(a),h=r,d=u["".concat(s,".").concat(h)]||u[h]||m[h]||i;return a?n.createElement(d,o(o({ref:t},c),{},{components:a})):n.createElement(d,o({ref:t},c))}));function d(e,t){var a=arguments,r=t&&t.mdxType;if("string"==typeof e||r){var i=a.length,o=new Array(i);o[0]=h;var l={};for(var s in t)hasOwnProperty.call(t,s)&&(l[s]=t[s]);l.originalType=e,l[u]="string"==typeof e?e:r,o[1]=l;for(var 
p=2;p{a.r(t),a.d(t,{assets:()=>s,contentTitle:()=>o,default:()=>m,frontMatter:()=>i,metadata:()=>l,toc:()=>p});var n=a(7462),r=(a(7294),a(3905));const i={},o="Creating Your First Sequential Chain",l={unversionedId:"getting-started-tutorial/building-a-multi-step-chain",id:"getting-started-tutorial/building-a-multi-step-chain",title:"Creating Your First Sequential Chain",description:"Having problems? Don't worry, reach out on discord and we will help you out.",source:"@site/docs/getting-started-tutorial/04-building-a-multi-step-chain.md",sourceDirName:"getting-started-tutorial",slug:"/getting-started-tutorial/building-a-multi-step-chain",permalink:"/docs/getting-started-tutorial/building-a-multi-step-chain",draft:!1,editUrl:"https://github.com/sobelio/llm-chain/tree/main/docs/docs/getting-started-tutorial/04-building-a-multi-step-chain.md",tags:[],version:"current",sidebarPosition:4,frontMatter:{},sidebar:"sidebar",previous:{title:"Using Prompt Templates and Parameters",permalink:"/docs/getting-started-tutorial/using-prompt-templates-and-parameters"},next:{title:"Summarizing Text with Map-Reduce in LLM-Chain",permalink:"/docs/getting-started-tutorial/summarizing-text-with-map-reduce"}},s={},p=[{value:"Best Practices and Tips",id:"best-practices-and-tips",level:2}],c={toc:p},u="wrapper";function m(e){let{components:t,...a}=e;return(0,r.kt)(u,(0,n.Z)({},c,a,{components:t,mdxType:"MDXLayout"}),(0,r.kt)("h1",{id:"creating-your-first-sequential-chain"},"Creating Your First Sequential Chain"),(0,r.kt)("admonition",{type:"tip"},(0,r.kt)("p",{parentName:"admonition"},"Having problems? Don't worry, reach out on ",(0,r.kt)("a",{parentName:"p",href:"https://discord.gg/kewN9Gtjt2"},"discord")," and we will help you out.")),(0,r.kt)("p",null,"Sequential chains in LLM-Chain allow you to execute a series of steps, with the output of each step feeding into the next one. This tutorial will guide you through creating a sequential chain, extending it with more steps, and provide some best practices and tips."),(0,r.kt)("p",null,"Here's a Rust program that demonstrates how to create a sequential chain:"),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-rust"},'use llm_chain::parameters;\nuse llm_chain::step::Step;\nuse llm_chain::traits::Executor as ExecutorTrait;\nuse llm_chain::{chains::sequential::Chain, prompt};\nuse llm_chain_openai::chatgpt::Executor;\n\n#[tokio::main(flavor = "current_thread")]\nasync fn main() -> Result<(), Box> {\n // Create a new ChatGPT executor with the default settings\n let exec = Executor::new()?;\n\n // Create a chain of steps with two prompts\n let chain: Chain = Chain::new(vec![\n // First step: make a personalized birthday email\n Step::for_prompt_template(\n prompt!("You are a bot for making personalized greetings", "Make personalized birthday e-mail to the whole company for {{name}} who has their birthday on {{date}}. Include their name")\n ),\n\n // Second step: summarize the email into a tweet. Importantly, the text parameter becomes the result of the previous prompt.\n Step::for_prompt_template(\n prompt!( "You are an assistant for managing social media accounts for a company", "Summarize this email into a tweet to be sent by the company, use emoji if you can. 
\\n--\\n{{text}}")\n )\n ]);\n\n // Run the chain with the provided parameters\n let res = chain\n .run(\n // Create a Parameters object with key-value pairs for the placeholders\n parameters!("name" => "Emil", "date" => "February 30th 2023"),\n &exec,\n )\n .await\n .unwrap();\n\n // Print the result to the console\n println!("{}", res.to_immediate().await?.as_content());\n Ok(())\n}\n')),(0,r.kt)("ol",null,(0,r.kt)("li",{parentName:"ol"},(0,r.kt)("p",{parentName:"li"},"We start by importing the necessary modules from the ",(0,r.kt)("inlineCode",{parentName:"p"},"llm_chain")," and ",(0,r.kt)("inlineCode",{parentName:"p"},"llm_chain_openai")," libraries.")),(0,r.kt)("li",{parentName:"ol"},(0,r.kt)("p",{parentName:"li"},"The main async function is defined, using Tokio as the runtime.")),(0,r.kt)("li",{parentName:"ol"},(0,r.kt)("p",{parentName:"li"},"We create a new ",(0,r.kt)("inlineCode",{parentName:"p"},"Executor")," with the default settings.")),(0,r.kt)("li",{parentName:"ol"},(0,r.kt)("p",{parentName:"li"},"We create a ",(0,r.kt)("inlineCode",{parentName:"p"},"Chain")," that contains two steps, each with a different prompt:"),(0,r.kt)("ul",{parentName:"li"},(0,r.kt)("li",{parentName:"ul"},"The first step has a prompt to make a personalized birthday email for a company."),(0,r.kt)("li",{parentName:"ul"},"The second step has a prompt to summarize the email into a tweet.")),(0,r.kt)("p",{parentName:"li"},"Both prompts use placeholders (e.g., ",(0,r.kt)("inlineCode",{parentName:"p"},"{{name}}"),", ",(0,r.kt)("inlineCode",{parentName:"p"},"{{date}}"),", and ",(0,r.kt)("inlineCode",{parentName:"p"},"{{text}}"),") that will be replaced with specific values later. Importantly, the value of ",(0,r.kt)("inlineCode",{parentName:"p"},"{{text}}")," will be replaced by result of the first step in the chain.")),(0,r.kt)("li",{parentName:"ol"},(0,r.kt)("p",{parentName:"li"},"We run the ",(0,r.kt)("inlineCode",{parentName:"p"},"Chain")," with the provided parameters:"),(0,r.kt)("ul",{parentName:"li"},(0,r.kt)("li",{parentName:"ul"},"We create a ",(0,r.kt)("inlineCode",{parentName:"li"},"Parameters")," object with key-value pairs for the placeholders: ",(0,r.kt)("inlineCode",{parentName:"li"},'("name", "Emil")')," and ",(0,r.kt)("inlineCode",{parentName:"li"},'("date", "February 30th 2023")'),"."),(0,r.kt)("li",{parentName:"ul"},"We pass the ",(0,r.kt)("inlineCode",{parentName:"li"},"Parameters")," object and the ",(0,r.kt)("inlineCode",{parentName:"li"},"Executor")," to the ",(0,r.kt)("inlineCode",{parentName:"li"},"run()")," method."))),(0,r.kt)("li",{parentName:"ol"},(0,r.kt)("p",{parentName:"li"},"We unwrap the result and print it to the console."))),(0,r.kt)("h2",{id:"best-practices-and-tips"},"Best Practices and Tips"),(0,r.kt)("p",null,"When working with sequential chains, consider the following tips and best practices:"),(0,r.kt)("ol",null,(0,r.kt)("li",{parentName:"ol"},"Use descriptive and clear instructions for the system role to help guide the LLM."),(0,r.kt)("li",{parentName:"ol"},"Keep the chain as short and simple as possible. Longer chains are harder to manage and debug."),(0,r.kt)("li",{parentName:"ol"},"Test each step independently before testing the entire sequence.")),(0,r.kt)("p",null,"For the next tutorial we will switch our focus from sequential to map-reduce chains. Map reduce chains are more complicated than sequential chains but allow us to do things that sequential chains can't. 
Stay tuned!"))}m.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/b29c0d53.3f13f96e.js b/assets/js/b29c0d53.3f13f96e.js new file mode 100644 index 00000000..05ba7355 --- /dev/null +++ b/assets/js/b29c0d53.3f13f96e.js @@ -0,0 +1 @@ +"use strict";(self.webpackChunkwebsite=self.webpackChunkwebsite||[]).push([[7325],{3905:(e,t,n)=>{n.d(t,{Zo:()=>p,kt:()=>h});var r=n(7294);function a(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}function i(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function o(e){for(var t=1;t=0||(a[n]=e[n]);return a}(e,t);if(Object.getOwnPropertySymbols){var i=Object.getOwnPropertySymbols(e);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(a[n]=e[n])}return a}var s=r.createContext({}),u=function(e){var t=r.useContext(s),n=t;return e&&(n="function"==typeof e?e(t):o(o({},t),e)),n},p=function(e){var t=u(e.components);return r.createElement(s.Provider,{value:t},e.children)},c="mdxType",d={inlineCode:"code",wrapper:function(e){var t=e.children;return r.createElement(r.Fragment,{},t)}},g=r.forwardRef((function(e,t){var n=e.components,a=e.mdxType,i=e.originalType,s=e.parentName,p=l(e,["components","mdxType","originalType","parentName"]),c=u(n),g=a,h=c["".concat(s,".").concat(g)]||c[g]||d[g]||i;return n?r.createElement(h,o(o({ref:t},p),{},{components:n})):r.createElement(h,o({ref:t},p))}));function h(e,t){var n=arguments,a=t&&t.mdxType;if("string"==typeof e||a){var i=n.length,o=new Array(i);o[0]=g;var l={};for(var s in t)hasOwnProperty.call(t,s)&&(l[s]=t[s]);l.originalType=e,l[c]="string"==typeof e?e:a,o[1]=l;for(var u=2;u{n.r(t),n.d(t,{assets:()=>s,contentTitle:()=>o,default:()=>d,frontMatter:()=>i,metadata:()=>l,toc:()=>u});var r=n(7462),a=(n(7294),n(3905));const i={},o="Setting up a project with llm-chain",l={unversionedId:"getting-started-tutorial/setting-up-a-project",id:"getting-started-tutorial/setting-up-a-project",title:"Setting up a project with llm-chain",description:"Having problems? Don't worry, reach out on discord and we will help you out.",source:"@site/docs/getting-started-tutorial/01-setting-up-a-project.md",sourceDirName:"getting-started-tutorial",slug:"/getting-started-tutorial/setting-up-a-project",permalink:"/docs/getting-started-tutorial/setting-up-a-project",draft:!1,editUrl:"https://github.com/sobelio/llm-chain/tree/main/docs/docs/getting-started-tutorial/01-setting-up-a-project.md",tags:[],version:"current",sidebarPosition:1,frontMatter:{},sidebar:"sidebar",previous:{title:"Getting started",permalink:"/docs/getting-started-tutorial/index"},next:{title:"Generating Your First LLM Output",permalink:"/docs/getting-started-tutorial/generating-your-first-llm-output"}},s={},u=[{value:"Installing Rust",id:"installing-rust",level:2},{value:"Creating a New Rust Project",id:"creating-a-new-rust-project",level:2},{value:"Installing LLM-Chain",id:"installing-llm-chain",level:2},{value:"Choosing a Driver: LLAMA vs OpenAI",id:"choosing-a-driver-llama-vs-openai",level:2}],p={toc:u},c="wrapper";function d(e){let{components:t,...n}=e;return(0,a.kt)(c,(0,r.Z)({},p,n,{components:t,mdxType:"MDXLayout"}),(0,a.kt)("h1",{id:"setting-up-a-project-with-llm-chain"},"Setting up a project with llm-chain"),(0,a.kt)("admonition",{type:"tip"},(0,a.kt)("p",{parentName:"admonition"},"Having problems? 
Don't worry, reach out on ",(0,a.kt)("a",{parentName:"p",href:"https://discord.gg/kewN9Gtjt2"},"discord")," and we will help you out.")),(0,a.kt)("p",null,"Welcome to llm-chain, a Rust library designed to simplify working with large language models (LLMs) and help you create powerful applications. In this tutorial, we'll walk you through installing Rust, setting up a new project, and getting started with LLM-Chain."),(0,a.kt)("h2",{id:"installing-rust"},"Installing Rust"),(0,a.kt)("p",null,"To begin, you'll need to install Rust on your machine. We recommend using ",(0,a.kt)("a",{parentName:"p",href:"https://rustup.rs/"},"rustup")," , the official Rust toolchain manager, to ensure you have the latest version and can manage your installations easily."),(0,a.kt)("p",null,"You need Rust 1.65.0 or higher. If you see errors about unstable feature or dependencies requiring newer Rust version, please update your Rust version."),(0,a.kt)("ol",null,(0,a.kt)("li",{parentName:"ol"},"Follow the instructions on the ",(0,a.kt)("a",{parentName:"li",href:"https://rustup.rs/"},"rustup website")," to install Rust.")),(0,a.kt)("h2",{id:"creating-a-new-rust-project"},"Creating a New Rust Project"),(0,a.kt)("p",null,"Now that you have Rust installed, it's time to create a new Rust project. Run the following command to set up a new binary project:"),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre",className:"language-bash"},"\ncargo new --bin my-llm-project\n")),(0,a.kt)("p",null,"This command will create a new directory called ",(0,a.kt)("inlineCode",{parentName:"p"},"my-llm-project")," with the necessary files and directories for a Rust project."),(0,a.kt)("h2",{id:"installing-llm-chain"},"Installing LLM-Chain"),(0,a.kt)("p",null,"With your Rust project set up, it's time to add LLM-Chain as a dependency. To do this, run the following command:"),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre",className:"language-bash"},"\ncd my-llm-project\ncargo add llm-chain\n")),(0,a.kt)("p",null,"This will add LLM-Chain to your project's ",(0,a.kt)("inlineCode",{parentName:"p"},"Cargo.toml")," file."),(0,a.kt)("h2",{id:"choosing-a-driver-llama-vs-openai"},"Choosing a Driver: LLAMA vs OpenAI"),(0,a.kt)("p",null,"LLM-Chain supports multiple drivers for working with different LLMs. You can choose between the LLAMA driver (which runs a LLaMA LLM on your computer) and the OpenAI driver (which connects to the OpenAI API). For ease of use and getting started quickly, we'll be using the OpenAI driver in this tutorial. 
To install it run"),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre",className:"language-bash"},"cargo add llm-chain-openai\n")),(0,a.kt)("p",null,"In the next tutorial, we'll cover generating your first LLM output using the OpenAI driver."))}d.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/b29c0d53.9d1957cf.js b/assets/js/b29c0d53.9d1957cf.js deleted file mode 100644 index 0e9183ca..00000000 --- a/assets/js/b29c0d53.9d1957cf.js +++ /dev/null @@ -1 +0,0 @@ -"use strict";(self.webpackChunkwebsite=self.webpackChunkwebsite||[]).push([[7325],{3905:(e,t,n)=>{n.d(t,{Zo:()=>p,kt:()=>h});var r=n(7294);function a(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}function i(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function o(e){for(var t=1;t=0||(a[n]=e[n]);return a}(e,t);if(Object.getOwnPropertySymbols){var i=Object.getOwnPropertySymbols(e);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(a[n]=e[n])}return a}var s=r.createContext({}),u=function(e){var t=r.useContext(s),n=t;return e&&(n="function"==typeof e?e(t):o(o({},t),e)),n},p=function(e){var t=u(e.components);return r.createElement(s.Provider,{value:t},e.children)},c="mdxType",d={inlineCode:"code",wrapper:function(e){var t=e.children;return r.createElement(r.Fragment,{},t)}},g=r.forwardRef((function(e,t){var n=e.components,a=e.mdxType,i=e.originalType,s=e.parentName,p=l(e,["components","mdxType","originalType","parentName"]),c=u(n),g=a,h=c["".concat(s,".").concat(g)]||c[g]||d[g]||i;return n?r.createElement(h,o(o({ref:t},p),{},{components:n})):r.createElement(h,o({ref:t},p))}));function h(e,t){var n=arguments,a=t&&t.mdxType;if("string"==typeof e||a){var i=n.length,o=new Array(i);o[0]=g;var l={};for(var s in t)hasOwnProperty.call(t,s)&&(l[s]=t[s]);l.originalType=e,l[c]="string"==typeof e?e:a,o[1]=l;for(var u=2;u{n.r(t),n.d(t,{assets:()=>s,contentTitle:()=>o,default:()=>d,frontMatter:()=>i,metadata:()=>l,toc:()=>u});var r=n(7462),a=(n(7294),n(3905));const i={},o="Setting up a project with llm-chain",l={unversionedId:"getting-started-tutorial/setting-up-a-project",id:"getting-started-tutorial/setting-up-a-project",title:"Setting up a project with llm-chain",description:"Having problems? 
Don't worry reach out on discord and we will help you out.",source:"@site/docs/getting-started-tutorial/01-setting-up-a-project.md",sourceDirName:"getting-started-tutorial",slug:"/getting-started-tutorial/setting-up-a-project",permalink:"/docs/getting-started-tutorial/setting-up-a-project",draft:!1,editUrl:"https://github.com/sobelio/llm-chain/tree/main/docs/docs/getting-started-tutorial/01-setting-up-a-project.md",tags:[],version:"current",sidebarPosition:1,frontMatter:{},sidebar:"sidebar",previous:{title:"Getting started",permalink:"/docs/getting-started-tutorial/index"},next:{title:"Generating Your First LLM Output",permalink:"/docs/getting-started-tutorial/generating-your-first-llm-output"}},s={},u=[{value:"Installing Rust",id:"installing-rust",level:2},{value:"Creating a New Rust Project",id:"creating-a-new-rust-project",level:2},{value:"Installing LLM-Chain",id:"installing-llm-chain",level:2},{value:"Choosing a Driver: LLAMA vs OpenAI",id:"choosing-a-driver-llama-vs-openai",level:2}],p={toc:u},c="wrapper";function d(e){let{components:t,...n}=e;return(0,a.kt)(c,(0,r.Z)({},p,n,{components:t,mdxType:"MDXLayout"}),(0,a.kt)("h1",{id:"setting-up-a-project-with-llm-chain"},"Setting up a project with llm-chain"),(0,a.kt)("admonition",{type:"tip"},(0,a.kt)("p",{parentName:"admonition"},"Having problems? Don't worry reach out on ",(0,a.kt)("a",{parentName:"p",href:"https://discord.gg/kewN9Gtjt2"},"discord")," and we will help you out.")),(0,a.kt)("p",null,"Welcome to llm-chain, a Rust library designed to simplify working with large language models (LLMs) and help you create powerful applications. In this tutorial, we'll walk you through installing Rust, setting up a new project, and getting started with LLM-Chain."),(0,a.kt)("h2",{id:"installing-rust"},"Installing Rust"),(0,a.kt)("p",null,"To begin, you'll need to install Rust on your machine. We recommend using ",(0,a.kt)("a",{parentName:"p",href:"https://rustup.rs/"},"rustup")," , the official Rust toolchain manager, to ensure you have the latest version and can manage your installations easily."),(0,a.kt)("p",null,"You need Rust 1.65.0 or higher. If you see errors about unstable feature or dependencies requiring newer Rust version, please update your Rust version."),(0,a.kt)("ol",null,(0,a.kt)("li",{parentName:"ol"},"Follow the instructions on the ",(0,a.kt)("a",{parentName:"li",href:"https://rustup.rs/"},"rustup website")," to install Rust.")),(0,a.kt)("h2",{id:"creating-a-new-rust-project"},"Creating a New Rust Project"),(0,a.kt)("p",null,"Now that you have Rust installed, it's time to create a new Rust project. Run the following command to set up a new binary project:"),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre",className:"language-bash"},"\ncargo new --bin my-llm-project\n")),(0,a.kt)("p",null,"This command will create a new directory called ",(0,a.kt)("inlineCode",{parentName:"p"},"my-llm-project")," with the necessary files and directories for a Rust project."),(0,a.kt)("h2",{id:"installing-llm-chain"},"Installing LLM-Chain"),(0,a.kt)("p",null,"With your Rust project set up, it's time to add LLM-Chain as a dependency. 
To do this, run the following command:"),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre",className:"language-bash"},"\ncd my-llm-project\ncargo add llm-chain\n")),(0,a.kt)("p",null,"This will add LLM-Chain to your project's ",(0,a.kt)("inlineCode",{parentName:"p"},"Cargo.toml")," file."),(0,a.kt)("h2",{id:"choosing-a-driver-llama-vs-openai"},"Choosing a Driver: LLAMA vs OpenAI"),(0,a.kt)("p",null,"LLM-Chain supports multiple drivers for working with different LLMs. You can choose between the LLAMA driver (which runs a LLaMA LLM on your computer) and the OpenAI driver (which connects to the OpenAI API). For ease of use and getting started quickly, we'll be using the OpenAI driver in this tutorial. To install it run"),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre",className:"language-bash"},"cargo add llm-chain-openai\n")),(0,a.kt)("p",null,"In the next tutorial, we'll cover generating your first LLM output using the OpenAI driver."))}d.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/dcf666ca.7afee14b.js b/assets/js/dcf666ca.7afee14b.js deleted file mode 100644 index 15f2a6c2..00000000 --- a/assets/js/dcf666ca.7afee14b.js +++ /dev/null @@ -1 +0,0 @@ -"use strict";(self.webpackChunkwebsite=self.webpackChunkwebsite||[]).push([[2978],{3905:(e,t,n)=>{n.d(t,{Zo:()=>p,kt:()=>g});var r=n(7294);function a(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}function o(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function i(e){for(var t=1;t=0||(a[n]=e[n]);return a}(e,t);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(a[n]=e[n])}return a}var l=r.createContext({}),u=function(e){var t=r.useContext(l),n=t;return e&&(n="function"==typeof e?e(t):i(i({},t),e)),n},p=function(e){var t=u(e.components);return r.createElement(l.Provider,{value:t},e.children)},c="mdxType",m={inlineCode:"code",wrapper:function(e){var t=e.children;return r.createElement(r.Fragment,{},t)}},d=r.forwardRef((function(e,t){var n=e.components,a=e.mdxType,o=e.originalType,l=e.parentName,p=s(e,["components","mdxType","originalType","parentName"]),c=u(n),d=a,g=c["".concat(l,".").concat(d)]||c[d]||m[d]||o;return n?r.createElement(g,i(i({ref:t},p),{},{components:n})):r.createElement(g,i({ref:t},p))}));function g(e,t){var n=arguments,a=t&&t.mdxType;if("string"==typeof e||a){var o=n.length,i=new Array(o);i[0]=d;var s={};for(var l in t)hasOwnProperty.call(t,l)&&(s[l]=t[l]);s.originalType=e,s[c]="string"==typeof e?e:a,i[1]=s;for(var u=2;u{n.r(t),n.d(t,{assets:()=>l,contentTitle:()=>i,default:()=>m,frontMatter:()=>o,metadata:()=>s,toc:()=>u});var r=n(7462),a=(n(7294),n(3905));const o={},i="Generating Your First LLM Output",s={unversionedId:"getting-started-tutorial/generating-your-first-llm-output",id:"getting-started-tutorial/generating-your-first-llm-output",title:"Generating Your First LLM Output",description:"Having problems? 
Don't worry reach out on discord and we will help you out.",source:"@site/docs/getting-started-tutorial/02-generating-your-first-llm-output.md",sourceDirName:"getting-started-tutorial",slug:"/getting-started-tutorial/generating-your-first-llm-output",permalink:"/docs/getting-started-tutorial/generating-your-first-llm-output",draft:!1,editUrl:"https://github.com/sobelio/llm-chain/tree/main/docs/docs/getting-started-tutorial/02-generating-your-first-llm-output.md",tags:[],version:"current",sidebarPosition:2,frontMatter:{},sidebar:"sidebar",previous:{title:"Setting up a project with llm-chain",permalink:"/docs/getting-started-tutorial/setting-up-a-project"},next:{title:"Using Prompt Templates and Parameters",permalink:"/docs/getting-started-tutorial/using-prompt-templates-and-parameters"}},l={},u=[{value:"Understanding LLM Response",id:"understanding-llm-response",level:2},{value:"Error Handling and Common Issues",id:"error-handling-and-common-issues",level:2}],p={toc:u},c="wrapper";function m(e){let{components:t,...n}=e;return(0,a.kt)(c,(0,r.Z)({},p,n,{components:t,mdxType:"MDXLayout"}),(0,a.kt)("h1",{id:"generating-your-first-llm-output"},"Generating Your First LLM Output"),(0,a.kt)("admonition",{type:"tip"},(0,a.kt)("p",{parentName:"admonition"},"Having problems? Don't worry reach out on ",(0,a.kt)("a",{parentName:"p",href:"https://discord.gg/kewN9Gtjt2"},"discord")," and we will help you out.")),(0,a.kt)("p",null,"First, we need to install ",(0,a.kt)("inlineCode",{parentName:"p"},"tokio")," in our project. Since this is a tutorial we will install the full ",(0,a.kt)("inlineCode",{parentName:"p"},"tokio")," package crate, in production, of course we should be more selective with what features we install."),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre",className:"language-bash"},"cargo add tokio --features full\n")),(0,a.kt)("p",null,"First, let's start by writing a simple Rust program that generates an LLM output using LLM-Chain and the OpenAI driver:"),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre",className:"language-rust"},'use llm_chain::{executor, parameters, prompt};\n\n// Declare an async main function\n#[tokio::main(flavor = "current_thread")]\nasync fn main() -> Result<(), Box> {\n // Create a new ChatGPT executor\n let exec = executor!()?;\n // Create our prompt...\n let res = prompt!(\n "You are a robot assistant for making personalized greetings",\n "Make a personalized greeting for Joe"\n )\n .run(¶meters!(), &exec) // ...and run it\n .await?;\n println!("{}", res);\n Ok(())\n}\n')),(0,a.kt)("h2",{id:"understanding-llm-response"},"Understanding LLM Response"),(0,a.kt)("p",null,"When you run the program, you'll receive an LLM response. The response contains the generated text and other metadata."),(0,a.kt)("h2",{id:"error-handling-and-common-issues"},"Error Handling and Common Issues"),(0,a.kt)("p",null,"One common issue you might encounter is forgetting to set the OpenAI API key. Make sure you have set the API key in your ",(0,a.kt)("inlineCode",{parentName:"p"},"OPENAI_API_KEY")," environment variable."),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre",className:"language-bash"},'export OPENAI_API_KEY="YOUR_OPEN_AI_KEY" # TIP: It stars with sk-\n')),(0,a.kt)("p",null,"If you don't want to set enviroment variable or want to multiple api-keys. Then you can use a different macro like this. 
"),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre",className:"language-rust"},'use llm_chain::{executor, options, parameters, prompt};\nuse tokio;\n\n// Declare an async main function\n#[tokio::main(flavor = "current_thread")]\nasync fn main() -> Result<(), Box> {\n // Create a new ChatGPT executor\n let options = options! {\n ApiKey: "sk-proj-..."\n };\n\n let exec = executor!(chatgpt, options);\n match exec {\n Ok(exec) => {\n \n let res = prompt!(\n "You are a robot assistant for making personalized greetings",\n "Make a personalized greeting for Joe"\n )\n .run(¶meters!(), &exec) // ...and run it\n .await?;\n println!("{}", res);\n }\n Err(err) => panic!("Unable to create executor: {}", err),\n }\n // Create our step containing our prompt template\n\n Ok(())\n}\n\n')),(0,a.kt)("p",null,"In the next tutorial, we'll cover adding parameters to customize the LLM prompt to create more complicated interactions."))}m.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/dcf666ca.feec4d42.js b/assets/js/dcf666ca.feec4d42.js new file mode 100644 index 00000000..f3cc7206 --- /dev/null +++ b/assets/js/dcf666ca.feec4d42.js @@ -0,0 +1 @@ +"use strict";(self.webpackChunkwebsite=self.webpackChunkwebsite||[]).push([[2978],{3905:(e,t,n)=>{n.d(t,{Zo:()=>p,kt:()=>g});var r=n(7294);function a(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}function o(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function i(e){for(var t=1;t=0||(a[n]=e[n]);return a}(e,t);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(a[n]=e[n])}return a}var l=r.createContext({}),u=function(e){var t=r.useContext(l),n=t;return e&&(n="function"==typeof e?e(t):i(i({},t),e)),n},p=function(e){var t=u(e.components);return r.createElement(l.Provider,{value:t},e.children)},c="mdxType",m={inlineCode:"code",wrapper:function(e){var t=e.children;return r.createElement(r.Fragment,{},t)}},d=r.forwardRef((function(e,t){var n=e.components,a=e.mdxType,o=e.originalType,l=e.parentName,p=s(e,["components","mdxType","originalType","parentName"]),c=u(n),d=a,g=c["".concat(l,".").concat(d)]||c[d]||m[d]||o;return n?r.createElement(g,i(i({ref:t},p),{},{components:n})):r.createElement(g,i({ref:t},p))}));function g(e,t){var n=arguments,a=t&&t.mdxType;if("string"==typeof e||a){var o=n.length,i=new Array(o);i[0]=d;var s={};for(var l in t)hasOwnProperty.call(t,l)&&(s[l]=t[l]);s.originalType=e,s[c]="string"==typeof e?e:a,i[1]=s;for(var u=2;u{n.r(t),n.d(t,{assets:()=>l,contentTitle:()=>i,default:()=>m,frontMatter:()=>o,metadata:()=>s,toc:()=>u});var r=n(7462),a=(n(7294),n(3905));const o={},i="Generating Your First LLM Output",s={unversionedId:"getting-started-tutorial/generating-your-first-llm-output",id:"getting-started-tutorial/generating-your-first-llm-output",title:"Generating Your First LLM Output",description:"Having problems? 
Don't worry, reach out on discord and we will help you out.",source:"@site/docs/getting-started-tutorial/02-generating-your-first-llm-output.md",sourceDirName:"getting-started-tutorial",slug:"/getting-started-tutorial/generating-your-first-llm-output",permalink:"/docs/getting-started-tutorial/generating-your-first-llm-output",draft:!1,editUrl:"https://github.com/sobelio/llm-chain/tree/main/docs/docs/getting-started-tutorial/02-generating-your-first-llm-output.md",tags:[],version:"current",sidebarPosition:2,frontMatter:{},sidebar:"sidebar",previous:{title:"Setting up a project with llm-chain",permalink:"/docs/getting-started-tutorial/setting-up-a-project"},next:{title:"Using Prompt Templates and Parameters",permalink:"/docs/getting-started-tutorial/using-prompt-templates-and-parameters"}},l={},u=[{value:"Understanding LLM Response",id:"understanding-llm-response",level:2},{value:"Error Handling and Common Issues",id:"error-handling-and-common-issues",level:2}],p={toc:u},c="wrapper";function m(e){let{components:t,...n}=e;return(0,a.kt)(c,(0,r.Z)({},p,n,{components:t,mdxType:"MDXLayout"}),(0,a.kt)("h1",{id:"generating-your-first-llm-output"},"Generating Your First LLM Output"),(0,a.kt)("admonition",{type:"tip"},(0,a.kt)("p",{parentName:"admonition"},"Having problems? Don't worry, reach out on ",(0,a.kt)("a",{parentName:"p",href:"https://discord.gg/kewN9Gtjt2"},"discord")," and we will help you out.")),(0,a.kt)("p",null,"First, we need to install ",(0,a.kt)("inlineCode",{parentName:"p"},"tokio")," in our project. Since this is a tutorial we will install the full ",(0,a.kt)("inlineCode",{parentName:"p"},"tokio")," package crate, in production, of course we should be more selective with what features we install."),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre",className:"language-bash"},"cargo add tokio --features full\n")),(0,a.kt)("p",null,"First, let's start by writing a simple Rust program that generates an LLM output using LLM-Chain and the OpenAI driver:"),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre",className:"language-rust"},'use llm_chain::{executor, parameters, prompt};\n\n// Declare an async main function\n#[tokio::main(flavor = "current_thread")]\nasync fn main() -> Result<(), Box> {\n // Create a new ChatGPT executor\n let exec = executor!()?;\n // Create our prompt...\n let res = prompt!(\n "You are a robot assistant for making personalized greetings",\n "Make a personalized greeting for Joe"\n )\n .run(¶meters!(), &exec) // ...and run it\n .await?;\n println!("{}", res);\n Ok(())\n}\n')),(0,a.kt)("h2",{id:"understanding-llm-response"},"Understanding LLM Response"),(0,a.kt)("p",null,"When you run the program, you'll receive an LLM response. The response contains the generated text and other metadata."),(0,a.kt)("h2",{id:"error-handling-and-common-issues"},"Error Handling and Common Issues"),(0,a.kt)("p",null,"One common issue you might encounter is forgetting to set the OpenAI API key. Make sure you have set the API key in your ",(0,a.kt)("inlineCode",{parentName:"p"},"OPENAI_API_KEY")," environment variable."),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre",className:"language-bash"},'export OPENAI_API_KEY="YOUR_OPEN_AI_KEY" # TIP: It stars with sk-\n')),(0,a.kt)("p",null,"If you don't want to set enviroment variable or want to multiple api-keys. Then you can use a different macro like this. 
"),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre",className:"language-rust"},'use llm_chain::{executor, options, parameters, prompt};\nuse tokio;\n\n// Declare an async main function\n#[tokio::main(flavor = "current_thread")]\nasync fn main() -> Result<(), Box> {\n // Create a new ChatGPT executor\n let options = options! {\n ApiKey: "sk-proj-..."\n };\n\n let exec = executor!(chatgpt, options);\n match exec {\n Ok(exec) => {\n \n let res = prompt!(\n "You are a robot assistant for making personalized greetings",\n "Make a personalized greeting for Joe"\n )\n .run(¶meters!(), &exec) // ...and run it\n .await?;\n println!("{}", res);\n }\n Err(err) => panic!("Unable to create executor: {}", err),\n }\n // Create our step containing our prompt template\n\n Ok(())\n}\n\n')),(0,a.kt)("p",null,"In the next tutorial, we'll cover adding parameters to customize the LLM prompt to create more complicated interactions."))}m.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/f988011b.38401c8d.js b/assets/js/f988011b.38401c8d.js deleted file mode 100644 index 07f3e000..00000000 --- a/assets/js/f988011b.38401c8d.js +++ /dev/null @@ -1 +0,0 @@ -"use strict";(self.webpackChunkwebsite=self.webpackChunkwebsite||[]).push([[7393],{3905:(e,t,r)=>{r.d(t,{Zo:()=>m,kt:()=>g});var n=r(7294);function a(e,t,r){return t in e?Object.defineProperty(e,t,{value:r,enumerable:!0,configurable:!0,writable:!0}):e[t]=r,e}function o(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,n)}return r}function i(e){for(var t=1;t=0||(a[r]=e[r]);return a}(e,t);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);for(n=0;n=0||Object.prototype.propertyIsEnumerable.call(e,r)&&(a[r]=e[r])}return a}var p=n.createContext({}),s=function(e){var t=n.useContext(p),r=t;return e&&(r="function"==typeof e?e(t):i(i({},t),e)),r},m=function(e){var t=s(e.components);return n.createElement(p.Provider,{value:t},e.children)},u="mdxType",c={inlineCode:"code",wrapper:function(e){var t=e.children;return n.createElement(n.Fragment,{},t)}},d=n.forwardRef((function(e,t){var r=e.components,a=e.mdxType,o=e.originalType,p=e.parentName,m=l(e,["components","mdxType","originalType","parentName"]),u=s(r),d=a,g=u["".concat(p,".").concat(d)]||u[d]||c[d]||o;return r?n.createElement(g,i(i({ref:t},m),{},{components:r})):n.createElement(g,i({ref:t},m))}));function g(e,t){var r=arguments,a=t&&t.mdxType;if("string"==typeof e||a){var o=r.length,i=new Array(o);i[0]=d;var l={};for(var p in t)hasOwnProperty.call(t,p)&&(l[p]=t[p]);l.originalType=e,l[u]="string"==typeof e?e:a,i[1]=l;for(var s=2;s{r.r(t),r.d(t,{assets:()=>p,contentTitle:()=>i,default:()=>c,frontMatter:()=>o,metadata:()=>l,toc:()=>s});var n=r(7462),a=(r(7294),r(3905));const o={},i="Using Prompt Templates and Parameters",l={unversionedId:"getting-started-tutorial/using-prompt-templates-and-parameters",id:"getting-started-tutorial/using-prompt-templates-and-parameters",title:"Using Prompt Templates and Parameters",description:"Having problems? 
Don't worry reach out on discord and we will help you out.",source:"@site/docs/getting-started-tutorial/03-using-prompt-templates-and-parameters.md",sourceDirName:"getting-started-tutorial",slug:"/getting-started-tutorial/using-prompt-templates-and-parameters",permalink:"/docs/getting-started-tutorial/using-prompt-templates-and-parameters",draft:!1,editUrl:"https://github.com/sobelio/llm-chain/tree/main/docs/docs/getting-started-tutorial/03-using-prompt-templates-and-parameters.md",tags:[],version:"current",sidebarPosition:3,frontMatter:{},sidebar:"sidebar",previous:{title:"Generating Your First LLM Output",permalink:"/docs/getting-started-tutorial/generating-your-first-llm-output"},next:{title:"Creating Your First Sequential Chain",permalink:"/docs/getting-started-tutorial/building-a-multi-step-chain"}},p={},s=[],m={toc:s},u="wrapper";function c(e){let{components:t,...r}=e;return(0,a.kt)(u,(0,n.Z)({},m,r,{components:t,mdxType:"MDXLayout"}),(0,a.kt)("h1",{id:"using-prompt-templates-and-parameters"},"Using Prompt Templates and Parameters"),(0,a.kt)("admonition",{type:"tip"},(0,a.kt)("p",{parentName:"admonition"},"Having problems? Don't worry reach out on ",(0,a.kt)("a",{parentName:"p",href:"https://discord.gg/kewN9Gtjt2"},"discord")," and we will help you out.")),(0,a.kt)("p",null,"In this part of the tutorial series, we'll explore how to use prompt templates and parameters with llm-chain. Prompt templates allow you to create dynamic prompts, and parameters are the text strings you put into your templates."),(0,a.kt)("p",null,"Here's a simple Rust program demonstrating how to use prompt templates and parameters:"),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre",className:"language-rust"},'use llm_chain::{executor, parameters, prompt, step::Step};\n\n#[tokio::main(flavor = "current_thread")]\nasync fn main() -> Result<(), Box> {\n // Create a new ChatGPT executor\n let exec = executor!()?;\n // Create our step containing our prompt template\n let step = Step::for_prompt_template(prompt!(\n "You are a bot for making personalized greetings",\n "Make a personalized greeting tweet for {{text}}" // Text is the default parameter name, but you can use whatever you want\n ));\n\n // A greeting for emil!\n let res = step.run(¶meters!("Emil"), &exec).await?;\n println!("{}", res);\n\n // A greeting for you\n let res = step.run(¶meters!("Your Name Here"), &exec).await?;\n\n println!("{}", res.to_immediate().await?.as_content());\n\n Ok(())\n}\n\n')),(0,a.kt)("p",null,"Let's break down the different parts of the code:"),(0,a.kt)("ol",null,(0,a.kt)("li",{parentName:"ol"},"We start with importing the necessary libraries, including the traits and structs required for our program."),(0,a.kt)("li",{parentName:"ol"},"The main async function is defined, using Tokio as the runtime."),(0,a.kt)("li",{parentName:"ol"},"We create a new ",(0,a.kt)("inlineCode",{parentName:"li"},"Executor")," with the default settings."),(0,a.kt)("li",{parentName:"ol"},"A ",(0,a.kt)("inlineCode",{parentName:"li"},"Step")," is created containing our prompt template with a placeholder (",(0,a.kt)("inlineCode",{parentName:"li"},"{{text}}"),") that will be replaced with a specific value later."),(0,a.kt)("li",{parentName:"ol"},"We create a ",(0,a.kt)("inlineCode",{parentName:"li"},"Parameters"),' object with the value "Emil" to replace the placeholder in the prompt template.'),(0,a.kt)("li",{parentName:"ol"},"We execute the ",(0,a.kt)("inlineCode",{parentName:"li"},"Step")," with the provided 
",(0,a.kt)("inlineCode",{parentName:"li"},"parameters")," and store the result in ",(0,a.kt)("inlineCode",{parentName:"li"},"res"),", then print the response to the console."),(0,a.kt)("li",{parentName:"ol"},"We create another ",(0,a.kt)("inlineCode",{parentName:"li"},"Parameters"),' object, this time with the value "Your Name Here" to replace the placeholder.'),(0,a.kt)("li",{parentName:"ol"},"We execute the ",(0,a.kt)("inlineCode",{parentName:"li"},"Step")," again with the new ",(0,a.kt)("inlineCode",{parentName:"li"},"parameters"),", store the result in ",(0,a.kt)("inlineCode",{parentName:"li"},"res"),", and print the response to the console.")),(0,a.kt)("p",null,"In the next tutorial, we will combine multiple LLM invocations to solve more complicated problems."))}c.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/f988011b.3b1a5cf9.js b/assets/js/f988011b.3b1a5cf9.js new file mode 100644 index 00000000..4a4f95b1 --- /dev/null +++ b/assets/js/f988011b.3b1a5cf9.js @@ -0,0 +1 @@ +"use strict";(self.webpackChunkwebsite=self.webpackChunkwebsite||[]).push([[7393],{3905:(e,t,r)=>{r.d(t,{Zo:()=>m,kt:()=>g});var n=r(7294);function a(e,t,r){return t in e?Object.defineProperty(e,t,{value:r,enumerable:!0,configurable:!0,writable:!0}):e[t]=r,e}function o(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,n)}return r}function i(e){for(var t=1;t=0||(a[r]=e[r]);return a}(e,t);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);for(n=0;n=0||Object.prototype.propertyIsEnumerable.call(e,r)&&(a[r]=e[r])}return a}var p=n.createContext({}),s=function(e){var t=n.useContext(p),r=t;return e&&(r="function"==typeof e?e(t):i(i({},t),e)),r},m=function(e){var t=s(e.components);return n.createElement(p.Provider,{value:t},e.children)},u="mdxType",c={inlineCode:"code",wrapper:function(e){var t=e.children;return n.createElement(n.Fragment,{},t)}},d=n.forwardRef((function(e,t){var r=e.components,a=e.mdxType,o=e.originalType,p=e.parentName,m=l(e,["components","mdxType","originalType","parentName"]),u=s(r),d=a,g=u["".concat(p,".").concat(d)]||u[d]||c[d]||o;return r?n.createElement(g,i(i({ref:t},m),{},{components:r})):n.createElement(g,i({ref:t},m))}));function g(e,t){var r=arguments,a=t&&t.mdxType;if("string"==typeof e||a){var o=r.length,i=new Array(o);i[0]=d;var l={};for(var p in t)hasOwnProperty.call(t,p)&&(l[p]=t[p]);l.originalType=e,l[u]="string"==typeof e?e:a,i[1]=l;for(var s=2;s{r.r(t),r.d(t,{assets:()=>p,contentTitle:()=>i,default:()=>c,frontMatter:()=>o,metadata:()=>l,toc:()=>s});var n=r(7462),a=(r(7294),r(3905));const o={},i="Using Prompt Templates and Parameters",l={unversionedId:"getting-started-tutorial/using-prompt-templates-and-parameters",id:"getting-started-tutorial/using-prompt-templates-and-parameters",title:"Using Prompt Templates and Parameters",description:"Having problems? 
Don't worry, reach out on discord and we will help you out.",source:"@site/docs/getting-started-tutorial/03-using-prompt-templates-and-parameters.md",sourceDirName:"getting-started-tutorial",slug:"/getting-started-tutorial/using-prompt-templates-and-parameters",permalink:"/docs/getting-started-tutorial/using-prompt-templates-and-parameters",draft:!1,editUrl:"https://github.com/sobelio/llm-chain/tree/main/docs/docs/getting-started-tutorial/03-using-prompt-templates-and-parameters.md",tags:[],version:"current",sidebarPosition:3,frontMatter:{},sidebar:"sidebar",previous:{title:"Generating Your First LLM Output",permalink:"/docs/getting-started-tutorial/generating-your-first-llm-output"},next:{title:"Creating Your First Sequential Chain",permalink:"/docs/getting-started-tutorial/building-a-multi-step-chain"}},p={},s=[],m={toc:s},u="wrapper";function c(e){let{components:t,...r}=e;return(0,a.kt)(u,(0,n.Z)({},m,r,{components:t,mdxType:"MDXLayout"}),(0,a.kt)("h1",{id:"using-prompt-templates-and-parameters"},"Using Prompt Templates and Parameters"),(0,a.kt)("admonition",{type:"tip"},(0,a.kt)("p",{parentName:"admonition"},"Having problems? Don't worry, reach out on ",(0,a.kt)("a",{parentName:"p",href:"https://discord.gg/kewN9Gtjt2"},"discord")," and we will help you out.")),(0,a.kt)("p",null,"In this part of the tutorial series, we'll explore how to use prompt templates and parameters with llm-chain. Prompt templates allow you to create dynamic prompts, and parameters are the text strings you put into your templates."),(0,a.kt)("p",null,"Here's a simple Rust program demonstrating how to use prompt templates and parameters:"),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre",className:"language-rust"},'use llm_chain::{executor, parameters, prompt, step::Step};\n\n#[tokio::main(flavor = "current_thread")]\nasync fn main() -> Result<(), Box> {\n // Create a new ChatGPT executor\n let exec = executor!()?;\n // Create our step containing our prompt template\n let step = Step::for_prompt_template(prompt!(\n "You are a bot for making personalized greetings",\n "Make a personalized greeting tweet for {{text}}" // Text is the default parameter name, but you can use whatever you want\n ));\n\n // A greeting for emil!\n let res = step.run(¶meters!("Emil"), &exec).await?;\n println!("{}", res);\n\n // A greeting for you\n let res = step.run(¶meters!("Your Name Here"), &exec).await?;\n\n println!("{}", res.to_immediate().await?.as_content());\n\n Ok(())\n}\n\n')),(0,a.kt)("p",null,"Let's break down the different parts of the code:"),(0,a.kt)("ol",null,(0,a.kt)("li",{parentName:"ol"},"We start with importing the necessary libraries, including the traits and structs required for our program."),(0,a.kt)("li",{parentName:"ol"},"The main async function is defined, using Tokio as the runtime."),(0,a.kt)("li",{parentName:"ol"},"We create a new ",(0,a.kt)("inlineCode",{parentName:"li"},"Executor")," with the default settings."),(0,a.kt)("li",{parentName:"ol"},"A ",(0,a.kt)("inlineCode",{parentName:"li"},"Step")," is created containing our prompt template with a placeholder (",(0,a.kt)("inlineCode",{parentName:"li"},"{{text}}"),") that will be replaced with a specific value later."),(0,a.kt)("li",{parentName:"ol"},"We create a ",(0,a.kt)("inlineCode",{parentName:"li"},"Parameters"),' object with the value "Emil" to replace the placeholder in the prompt template.'),(0,a.kt)("li",{parentName:"ol"},"We execute the ",(0,a.kt)("inlineCode",{parentName:"li"},"Step")," with the provided 
",(0,a.kt)("inlineCode",{parentName:"li"},"parameters")," and store the result in ",(0,a.kt)("inlineCode",{parentName:"li"},"res"),", then print the response to the console."),(0,a.kt)("li",{parentName:"ol"},"We create another ",(0,a.kt)("inlineCode",{parentName:"li"},"Parameters"),' object, this time with the value "Your Name Here" to replace the placeholder.'),(0,a.kt)("li",{parentName:"ol"},"We execute the ",(0,a.kt)("inlineCode",{parentName:"li"},"Step")," again with the new ",(0,a.kt)("inlineCode",{parentName:"li"},"parameters"),", store the result in ",(0,a.kt)("inlineCode",{parentName:"li"},"res"),", and print the response to the console.")),(0,a.kt)("p",null,"In the next tutorial, we will combine multiple LLM invocations to solve more complicated problems."))}c.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/runtime~main.5349ca5f.js b/assets/js/runtime~main.1993ec7f.js similarity index 93% rename from assets/js/runtime~main.5349ca5f.js rename to assets/js/runtime~main.1993ec7f.js index 37b16a3a..04ca3b2d 100644 --- a/assets/js/runtime~main.5349ca5f.js +++ b/assets/js/runtime~main.1993ec7f.js @@ -1 +1 @@ -(()=>{"use strict";var e,a,f,c,t,d={},r={};function b(e){var a=r[e];if(void 0!==a)return a.exports;var f=r[e]={exports:{}};return d[e].call(f.exports,f,f.exports,b),f.exports}b.m=d,e=[],b.O=(a,f,c,t)=>{if(!f){var d=1/0;for(i=0;i=t)&&Object.keys(b.O).every((e=>b.O[e](f[o])))?f.splice(o--,1):(r=!1,t0&&e[i-1][2]>t;i--)e[i]=e[i-1];e[i]=[f,c,t]},b.n=e=>{var a=e&&e.__esModule?()=>e.default:()=>e;return b.d(a,{a:a}),a},f=Object.getPrototypeOf?e=>Object.getPrototypeOf(e):e=>e.__proto__,b.t=function(e,c){if(1&c&&(e=this(e)),8&c)return e;if("object"==typeof e&&e){if(4&c&&e.__esModule)return e;if(16&c&&"function"==typeof e.then)return e}var t=Object.create(null);b.r(t);var d={};a=a||[null,f({}),f([]),f(f)];for(var r=2&c&&e;"object"==typeof r&&!~a.indexOf(r);r=f(r))Object.getOwnPropertyNames(r).forEach((a=>d[a]=()=>e[a]));return d.default=()=>e,b.d(t,d),t},b.d=(e,a)=>{for(var f in 
a)b.o(a,f)&&!b.o(e,f)&&Object.defineProperty(e,f,{enumerable:!0,get:a[f]})},b.f={},b.e=e=>Promise.all(Object.keys(b.f).reduce(((a,f)=>(b.f[f](e,a),a)),[])),b.u=e=>"assets/js/"+({53:"935f2afb",56:"d1826513",228:"131879e8",236:"cda6b6e0",373:"d2688b93",533:"b2b675dd",731:"2adc0ba4",743:"20417f73",1429:"e6ef848d",1477:"b2f554cd",1713:"a7023ddc",1842:"ceeecb8f",1936:"2ed736ae",2174:"2cdbd7a9",2271:"a9b2bd2b",2500:"a52ad74c",2535:"814f3328",2573:"a1a5cf01",2911:"21eccb5d",2978:"dcf666ca",2979:"5ad2500b",3085:"1f391b9e",3089:"a6aa9e1f",3229:"6a7f0741",3356:"937c4ed1",3555:"4433ace5",3608:"9e4087bc",3648:"96479e6d",4008:"8636fc49",4013:"01a85c17",4086:"985a6965",4195:"c4f5d8e4",4404:"27f9e276",4463:"de7f460e",5150:"d474978b",5243:"4b56158e",5518:"29211130",5979:"6ecc6e3c",5997:"ef2c1e5e",6103:"ccc49370",7183:"ef9cf4fc",7325:"b29c0d53",7348:"ff9d60af",7393:"f988011b",7690:"2d92dfb9",7703:"1ca119e2",7918:"17896441",8044:"9623d7f4",8175:"b2fcff35",8211:"a9670040",8406:"0e9320b7",8610:"6875c492",8645:"206fe748",8659:"ac03af4d",8681:"9f325798",8827:"a8a015ed",8865:"acb76528",9239:"1fdd9f7e",9514:"1be78505",9671:"0e384e19",9812:"2587c287",9817:"14eb3368",9930:"ea1eb7c3"}[e]||e)+"."+{53:"7b3804d9",56:"6b6ddc98",210:"4d2f5804",228:"d37e57d9",236:"650534bb",373:"91af870d",533:"1eedfa46",731:"3bf8e410",743:"374abc14",1429:"567bfe9a",1477:"30b9f53d",1713:"dd0e3ce3",1842:"3cb8db9f",1936:"0f6a313d",2174:"cfbbb7ca",2271:"01ebe8d7",2500:"8bdf31d2",2529:"da2bcb01",2535:"9bb42f95",2573:"99b6d096",2911:"4999ed8d",2978:"7afee14b",2979:"f6b72d11",3085:"f8464388",3089:"1e1af270",3229:"1569e564",3356:"323d16bc",3555:"be72d402",3608:"9a815895",3648:"d8cb3c58",4008:"6d88f35b",4013:"5653d10a",4086:"3ed0935e",4195:"1e1115eb",4404:"b9ef66ce",4463:"c8bebeba",4832:"5ebe7110",4972:"9374abde",5150:"67da1221",5243:"862be05b",5518:"513574a7",5979:"c92970ab",5997:"1fb52aa2",6103:"5cfe080a",7183:"2144bd08",7325:"9d1957cf",7348:"5ab893a3",7393:"38401c8d",7690:"bb62d938",7703:"50aa175d",7918:"bacd5894",8044:"db58661a",8175:"f6fbc7ef",8211:"3910772a",8406:"42a009f9",8610:"da158881",8645:"88f9a29e",8659:"e1bd1535",8681:"c63198de",8827:"870cf212",8865:"62a11fc1",9239:"1ef96d30",9514:"82b3557a",9671:"d5b5018e",9812:"5092e06d",9817:"3bb53ce2",9930:"6e0a1f43"}[e]+".js",b.miniCssF=e=>{},b.g=function(){if("object"==typeof globalThis)return globalThis;try{return this||new Function("return this")()}catch(e){if("object"==typeof window)return window}}(),b.o=(e,a)=>Object.prototype.hasOwnProperty.call(e,a),c={},t="website:",b.l=(e,a,f,d)=>{if(c[e])c[e].push(a);else{var r,o;if(void 0!==f)for(var n=document.getElementsByTagName("script"),i=0;i{r.onerror=r.onload=null,clearTimeout(s);var t=c[e];if(delete c[e],r.parentNode&&r.parentNode.removeChild(r),t&&t.forEach((e=>e(f))),a)return a(f)},s=setTimeout(l.bind(null,void 0,{type:"timeout",target:r}),12e4);r.onerror=l.bind(null,r.onerror),r.onload=l.bind(null,r.onload),o&&document.head.appendChild(r)}},b.r=e=>{"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},b.p="/",b.gca=function(e){return 
e={17896441:"7918",29211130:"5518","935f2afb":"53",d1826513:"56","131879e8":"228",cda6b6e0:"236",d2688b93:"373",b2b675dd:"533","2adc0ba4":"731","20417f73":"743",e6ef848d:"1429",b2f554cd:"1477",a7023ddc:"1713",ceeecb8f:"1842","2ed736ae":"1936","2cdbd7a9":"2174",a9b2bd2b:"2271",a52ad74c:"2500","814f3328":"2535",a1a5cf01:"2573","21eccb5d":"2911",dcf666ca:"2978","5ad2500b":"2979","1f391b9e":"3085",a6aa9e1f:"3089","6a7f0741":"3229","937c4ed1":"3356","4433ace5":"3555","9e4087bc":"3608","96479e6d":"3648","8636fc49":"4008","01a85c17":"4013","985a6965":"4086",c4f5d8e4:"4195","27f9e276":"4404",de7f460e:"4463",d474978b:"5150","4b56158e":"5243","6ecc6e3c":"5979",ef2c1e5e:"5997",ccc49370:"6103",ef9cf4fc:"7183",b29c0d53:"7325",ff9d60af:"7348",f988011b:"7393","2d92dfb9":"7690","1ca119e2":"7703","9623d7f4":"8044",b2fcff35:"8175",a9670040:"8211","0e9320b7":"8406","6875c492":"8610","206fe748":"8645",ac03af4d:"8659","9f325798":"8681",a8a015ed:"8827",acb76528:"8865","1fdd9f7e":"9239","1be78505":"9514","0e384e19":"9671","2587c287":"9812","14eb3368":"9817",ea1eb7c3:"9930"}[e]||e,b.p+b.u(e)},(()=>{var e={1303:0,532:0};b.f.j=(a,f)=>{var c=b.o(e,a)?e[a]:void 0;if(0!==c)if(c)f.push(c[2]);else if(/^(1303|532)$/.test(a))e[a]=0;else{var t=new Promise(((f,t)=>c=e[a]=[f,t]));f.push(c[2]=t);var d=b.p+b.u(a),r=new Error;b.l(d,(f=>{if(b.o(e,a)&&(0!==(c=e[a])&&(e[a]=void 0),c)){var t=f&&("load"===f.type?"missing":f.type),d=f&&f.target&&f.target.src;r.message="Loading chunk "+a+" failed.\n("+t+": "+d+")",r.name="ChunkLoadError",r.type=t,r.request=d,c[1](r)}}),"chunk-"+a,a)}},b.O.j=a=>0===e[a];var a=(a,f)=>{var c,t,d=f[0],r=f[1],o=f[2],n=0;if(d.some((a=>0!==e[a]))){for(c in r)b.o(r,c)&&(b.m[c]=r[c]);if(o)var i=o(b)}for(a&&a(f);n{"use strict";var e,a,f,c,t,d={},r={};function b(e){var a=r[e];if(void 0!==a)return a.exports;var f=r[e]={exports:{}};return d[e].call(f.exports,f,f.exports,b),f.exports}b.m=d,e=[],b.O=(a,f,c,t)=>{if(!f){var d=1/0;for(i=0;i=t)&&Object.keys(b.O).every((e=>b.O[e](f[o])))?f.splice(o--,1):(r=!1,t0&&e[i-1][2]>t;i--)e[i]=e[i-1];e[i]=[f,c,t]},b.n=e=>{var a=e&&e.__esModule?()=>e.default:()=>e;return b.d(a,{a:a}),a},f=Object.getPrototypeOf?e=>Object.getPrototypeOf(e):e=>e.__proto__,b.t=function(e,c){if(1&c&&(e=this(e)),8&c)return e;if("object"==typeof e&&e){if(4&c&&e.__esModule)return e;if(16&c&&"function"==typeof e.then)return e}var t=Object.create(null);b.r(t);var d={};a=a||[null,f({}),f([]),f(f)];for(var r=2&c&&e;"object"==typeof r&&!~a.indexOf(r);r=f(r))Object.getOwnPropertyNames(r).forEach((a=>d[a]=()=>e[a]));return d.default=()=>e,b.d(t,d),t},b.d=(e,a)=>{for(var f in 
a)b.o(a,f)&&!b.o(e,f)&&Object.defineProperty(e,f,{enumerable:!0,get:a[f]})},b.f={},b.e=e=>Promise.all(Object.keys(b.f).reduce(((a,f)=>(b.f[f](e,a),a)),[])),b.u=e=>"assets/js/"+({53:"935f2afb",56:"d1826513",228:"131879e8",236:"cda6b6e0",373:"d2688b93",533:"b2b675dd",731:"2adc0ba4",743:"20417f73",1429:"e6ef848d",1477:"b2f554cd",1713:"a7023ddc",1842:"ceeecb8f",1936:"2ed736ae",2174:"2cdbd7a9",2271:"a9b2bd2b",2500:"a52ad74c",2535:"814f3328",2573:"a1a5cf01",2911:"21eccb5d",2978:"dcf666ca",2979:"5ad2500b",3085:"1f391b9e",3089:"a6aa9e1f",3229:"6a7f0741",3356:"937c4ed1",3555:"4433ace5",3608:"9e4087bc",3648:"96479e6d",4008:"8636fc49",4013:"01a85c17",4086:"985a6965",4195:"c4f5d8e4",4404:"27f9e276",4463:"de7f460e",5150:"d474978b",5243:"4b56158e",5518:"29211130",5979:"6ecc6e3c",5997:"ef2c1e5e",6103:"ccc49370",7183:"ef9cf4fc",7325:"b29c0d53",7348:"ff9d60af",7393:"f988011b",7690:"2d92dfb9",7703:"1ca119e2",7918:"17896441",8044:"9623d7f4",8175:"b2fcff35",8211:"a9670040",8406:"0e9320b7",8610:"6875c492",8645:"206fe748",8659:"ac03af4d",8681:"9f325798",8827:"a8a015ed",8865:"acb76528",9239:"1fdd9f7e",9514:"1be78505",9671:"0e384e19",9812:"2587c287",9817:"14eb3368",9930:"ea1eb7c3"}[e]||e)+"."+{53:"353156f9",56:"6b6ddc98",210:"4d2f5804",228:"d37e57d9",236:"650534bb",373:"91af870d",533:"1eedfa46",731:"3bf8e410",743:"374abc14",1429:"567bfe9a",1477:"30b9f53d",1713:"dd0e3ce3",1842:"3cb8db9f",1936:"0f6a313d",2174:"ec2983b2",2271:"01ebe8d7",2500:"8bdf31d2",2529:"da2bcb01",2535:"9bb42f95",2573:"99b6d096",2911:"4999ed8d",2978:"feec4d42",2979:"f6b72d11",3085:"f8464388",3089:"1e1af270",3229:"1569e564",3356:"323d16bc",3555:"be72d402",3608:"9a815895",3648:"d8cb3c58",4008:"38bbea3d",4013:"5653d10a",4086:"c3a8b5f7",4195:"1e1115eb",4404:"b9ef66ce",4463:"c8bebeba",4832:"5ebe7110",4972:"9374abde",5150:"67da1221",5243:"862be05b",5518:"513574a7",5979:"c92970ab",5997:"1fb52aa2",6103:"5cfe080a",7183:"2144bd08",7325:"3f13f96e",7348:"5ab893a3",7393:"3b1a5cf9",7690:"bb62d938",7703:"50aa175d",7918:"bacd5894",8044:"db58661a",8175:"f6fbc7ef",8211:"3910772a",8406:"42a009f9",8610:"da158881",8645:"88f9a29e",8659:"e1bd1535",8681:"c63198de",8827:"870cf212",8865:"62a11fc1",9239:"1ef96d30",9514:"82b3557a",9671:"d5b5018e",9812:"5092e06d",9817:"3bb53ce2",9930:"6e0a1f43"}[e]+".js",b.miniCssF=e=>{},b.g=function(){if("object"==typeof globalThis)return globalThis;try{return this||new Function("return this")()}catch(e){if("object"==typeof window)return window}}(),b.o=(e,a)=>Object.prototype.hasOwnProperty.call(e,a),c={},t="website:",b.l=(e,a,f,d)=>{if(c[e])c[e].push(a);else{var r,o;if(void 0!==f)for(var n=document.getElementsByTagName("script"),i=0;i{r.onerror=r.onload=null,clearTimeout(s);var t=c[e];if(delete c[e],r.parentNode&&r.parentNode.removeChild(r),t&&t.forEach((e=>e(f))),a)return a(f)},s=setTimeout(l.bind(null,void 0,{type:"timeout",target:r}),12e4);r.onerror=l.bind(null,r.onerror),r.onload=l.bind(null,r.onload),o&&document.head.appendChild(r)}},b.r=e=>{"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},b.p="/",b.gca=function(e){return 
e={17896441:"7918",29211130:"5518","935f2afb":"53",d1826513:"56","131879e8":"228",cda6b6e0:"236",d2688b93:"373",b2b675dd:"533","2adc0ba4":"731","20417f73":"743",e6ef848d:"1429",b2f554cd:"1477",a7023ddc:"1713",ceeecb8f:"1842","2ed736ae":"1936","2cdbd7a9":"2174",a9b2bd2b:"2271",a52ad74c:"2500","814f3328":"2535",a1a5cf01:"2573","21eccb5d":"2911",dcf666ca:"2978","5ad2500b":"2979","1f391b9e":"3085",a6aa9e1f:"3089","6a7f0741":"3229","937c4ed1":"3356","4433ace5":"3555","9e4087bc":"3608","96479e6d":"3648","8636fc49":"4008","01a85c17":"4013","985a6965":"4086",c4f5d8e4:"4195","27f9e276":"4404",de7f460e:"4463",d474978b:"5150","4b56158e":"5243","6ecc6e3c":"5979",ef2c1e5e:"5997",ccc49370:"6103",ef9cf4fc:"7183",b29c0d53:"7325",ff9d60af:"7348",f988011b:"7393","2d92dfb9":"7690","1ca119e2":"7703","9623d7f4":"8044",b2fcff35:"8175",a9670040:"8211","0e9320b7":"8406","6875c492":"8610","206fe748":"8645",ac03af4d:"8659","9f325798":"8681",a8a015ed:"8827",acb76528:"8865","1fdd9f7e":"9239","1be78505":"9514","0e384e19":"9671","2587c287":"9812","14eb3368":"9817",ea1eb7c3:"9930"}[e]||e,b.p+b.u(e)},(()=>{var e={1303:0,532:0};b.f.j=(a,f)=>{var c=b.o(e,a)?e[a]:void 0;if(0!==c)if(c)f.push(c[2]);else if(/^(1303|532)$/.test(a))e[a]=0;else{var t=new Promise(((f,t)=>c=e[a]=[f,t]));f.push(c[2]=t);var d=b.p+b.u(a),r=new Error;b.l(d,(f=>{if(b.o(e,a)&&(0!==(c=e[a])&&(e[a]=void 0),c)){var t=f&&("load"===f.type?"missing":f.type),d=f&&f.target&&f.target.src;r.message="Loading chunk "+a+" failed.\n("+t+": "+d+")",r.name="ChunkLoadError",r.type=t,r.request=d,c[1](r)}}),"chunk-"+a,a)}},b.O.j=a=>0===e[a];var a=(a,f)=>{var c,t,d=f[0],r=f[1],o=f[2],n=0;if(d.some((a=>0!==e[a]))){for(c in r)b.o(r,c)&&(b.m[c]=r[c]);if(o)var i=o(b)}for(a&&a(f);n - +

Introducing v0.8.1: Enhanced Prompt Macro and New Conversational Chain Type

· 3 min read

We are excited to announce the release of version 0.8.1, which brings two major improvements to our Large Language Model (LLM) library: an enhanced prompt! macro and a new Conversational chain type. These updates make it even easier for developers to create rich and interactive applications powered by LLMs.

Enhanced Prompt Macro with Prefixes

The prompt! macro has been updated to support prefixes, making it more expressive and convenient to use. With this new feature, you can now create chat prompts by simply prefixing them with user:, assistant:, or system:. Here's an example of how to use the new syntax:


let user_prompt = prompt!(user: "Hello, Mr Bot, help me figure out what to do next");
let system_prompt = prompt!(system: "You are a clever assistant that helps people figure out what to do next");

By using these prefixes, you can create more complex and interactive prompts for various use cases, such as building chatbots, automating tasks, or generating text.
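
If you want to try the new syntax end to end, here is a minimal, hypothetical sketch in the spirit of the getting-started tutorial. It assumes the OpenAI driver is set up as in the tutorial (llm-chain-openai added, tokio installed, and OPENAI_API_KEY set), and that a prefixed prompt can be run directly with run(), just like the unprefixed prompts shown there:

use llm_chain::{executor, parameters, prompt};

#[tokio::main(flavor = "current_thread")]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Assumes the OpenAI driver is installed and OPENAI_API_KEY is set.
    let exec = executor!()?;

    // A chat prompt built with the new user: prefix, run like any other prompt.
    let res = prompt!(user: "Hello, Mr Bot, help me figure out what to do next")
        .run(&parameters!(), &exec)
        .await?;

    println!("{}", res);
    Ok(())
}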

New Conversational Chain Type

We're also introducing the Conversational chain type, which enables you to have ongoing conversations with LLMs. Conversational chains manage the conversation history and context, ensuring that the LLM's responses remain relevant and coherent throughout the interaction. This new chain type is particularly useful for chatbot applications, multi-step interactions, and any scenario where context is essential.

Here's a quick example of a Conversational chain:

use llm_chain::{
    chains::conversation::Chain, executor, output::Output, parameters, prompt, step::Step,
};
use tokio;

#[tokio::main(flavor = "current_thread")]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Create a new ChatGPT executor.
    let exec = executor!()?;

    // Create a new conversational Chain, seeded with a system prompt.
    let mut chain = Chain::new(
        prompt!(system: "You are a robot assistant for making personalized greetings."),
    )?;

    // Define the conversation steps.
    let step1 = Step::for_prompt_template(prompt!(user: "Make a personalized greeting for Joe."));
    let step2 =
        Step::for_prompt_template(prompt!(user: "Now, create a personalized greeting for Jane."));
    let step3 = Step::for_prompt_template(
        prompt!(user: "Finally, create a personalized greeting for Alice."),
    );

    let step4 = Step::for_prompt_template(prompt!(user: "Remind me who we just greeted."));

    // Execute the conversation steps in order; the chain carries the context forward.
    let res1 = chain.send_message(step1, &parameters!(), &exec).await?;
    println!("Step 1: {}", res1.primary_textual_output().await.unwrap());

    let res2 = chain.send_message(step2, &parameters!(), &exec).await?;
    println!("Step 2: {}", res2.primary_textual_output().await.unwrap());

    let res3 = chain.send_message(step3, &parameters!(), &exec).await?;
    println!("Step 3: {}", res3.primary_textual_output().await.unwrap());

    let res4 = chain.send_message(step4, &parameters!(), &exec).await?;
    println!("Step 4: {}", res4.primary_textual_output().await.unwrap());

    Ok(())
}

With the Conversational chain, you can now easily send multiple messages and manage the conversation context without having to worry about manual context management.

Upgrade Today

We encourage you to upgrade to version 0.8.1 and take advantage of these new features. The enhanced prompt! macro and the new Conversational chain type will make your LLM-powered applications even more interactive and engaging.

As always, we appreciate your feedback and suggestions. Feel free to reach out to our team for any questions or concerns. Happy coding!

diff --git a/blog/2023/04/28/parsing-llm-input-with-llm-chain-0-8-2/index.html b/blog/2023/04/28/parsing-llm-input-with-llm-chain-0-8-2/index.html

Announcement: LLM Chain Update 0.8.2

· One min read

We are excited to announce the release of LLM Chain version 0.8.2! This update introduces some important improvements to our library, making it even more powerful and easy to use.

What's new in 0.8.2

New extract_labeled_text function

We've added a new function called extract_labeled_text in the parsing module. This function is designed to help you parse the labeled text that LLMs (Large Language Models) often generate. LLMs typically produce text like this:

- *foo*: bar
- hello: world
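Here's a minimal sketch of calling it. The exact signature is an assumption on our part (we assume it takes the text and yields label/value pairs), so check the API documentation for the details:

use llm_chain::parsing::extract_labeled_text;

fn main() {
    let text = "- *foo*: bar\n- hello: world";
    // Assumed return shape: a list of (label, value) pairs.
    for (label, value) in extract_labeled_text(text) {
        println!("{} => {}", label, value);
    }
}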

Improved find_yaml function

In this update, we have also improved the find_yaml function. It now returns the results in the order they appear in the document, making it more consistent and easier to work with.
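As a rough illustration, if a model's answer embeds two YAML snippets one after the other, like so:

name: first-task
priority: high

name: second-task
priority: low

find_yaml now yields the first-task block before the second-task block, matching the order in which they appear in the text.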

Get started with 0.8.2

To start using LLM Chain version 0.8.2, update your dependency in your Cargo.toml file:

llm-chain = "0.8.2"

We hope you enjoy these new features and improvements! As always, if you have any questions or feedback, please feel free to reach out to our team.

diff --git a/blog/archive/index.html b/blog/archive/index.html
diff --git a/blog/index.html b/blog/index.html

· 2 min read
will rudenmalm

In this blog post, we'll explore how to use ChatGPT in Rust with the help of the llm-chain library. We will walk through a simple example that demonstrates how to generate responses using OpenAI's ChatGPT model.

Getting Started

First, let's start by installing the necessary packages using cargo add. You will need the llm-chain and llm-chain-openai libraries:

cargo add llm-chain llm-chain-openai

Now, let's dive into the code:


use llm_chain::{traits::StepExt, Parameters};
use llm_chain_openai::chatgpt::{Executor, Model, Role, Step};

#[tokio::main(flavor = "current_thread")]
async fn main() {
    // Create a default executor for the OpenAI chat API.
    let exec = Executor::new_default();
    // Define a single step holding a system message and a user question, then turn it into a chain.
    let chain = Step::new(
        Model::ChatGPT3_5Turbo,
        [
            (Role::System, "You are a helpful assistant"),
            (Role::User, "Tell me about the Rust programming language"),
        ],
    )
    .to_chain();
    // Run the chain against the executor and print the model's response.
    let res = chain.run(Parameters::new(), &exec).await.unwrap();
    println!("{:?}", res);
}

In the code snippet above, we begin by importing the necessary modules and functions from the llm-chain and llm-chain-openai libraries. We then define a simple main function that uses the Executor and Step structs to create a conversational chain.

The Model::ChatGPT3_5Turbo model is used as the language model in this example. We also define two messages in the conversation: a system message that sets up the assistant's role and a user message that asks about the Rust programming language.

Finally, we execute the conversation chain using the run method and print the generated response.

Wrapping Up

As you can see, using ChatGPT in Rust with llm-chain is a straightforward and efficient process. The library makes it easy to build and manage conversational agents in Rust, allowing developers to focus on creating more powerful and interactive applications.

To continue learning about ChatGPT in Rust and how to make the most of the llm-chain library, try our tutorial.

diff --git a/blog/introducing-llm-chain-v060/index.html b/blog/introducing-llm-chain-v060/index.html

Introducing LLM-chain v0.6.0: Powerful Templating and Improved Prompt System

· 2 min read
will rudenmalm

We are thrilled to announce the release of llm-chain v0.6.0, which introduces significant enhancements to our library. This update focuses on making llm-chain more robust and versatile, allowing developers to build even more advanced applications with ease.

Major updates

1. The switch to the tera template language

One of the most significant changes in this release is the introduction of the tera template language. This powerful and flexible templating system enables developers to create dynamic and complex templates for their projects. The tera language allows for more advanced control structures and filters, making it a substantial upgrade from the previous templating system.
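For a flavor of the syntax, here is a small tera-style template embedded in a Rust raw string; {{ ... }} interpolates parameters, | applies filters, and {% ... %} adds control flow. The parameter names are made up for illustration, and no particular llm-chain API call is shown here:

// Plain tera syntax; the template is just text until it is rendered with parameters.
let template_text = r#"Summarize the text below in {{ style | upper }} style.
{% if bullet_points %}Use bullet points.{% endif %}

{{ text }}"#;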

2. Improved prompt system

Another notable update is the revamped prompt system. With llm-chain v0.6.0, the prompt system now supports both Chat and completion-style models. This improvement means developers no longer need to worry about whether they are using a completion or chat model when crafting prompts. This unified approach simplifies the development process and makes it easier to work with various types of language models.

3. Updated LLaMA.cpp

The latest version of LLaMA.cpp has been integrated into this release, ensuring better performance and stability for your projects.

Other improvements

1. Safer error handling

In addition to the major updates, llm-chain v0.6.0 also brings improvements to error handling. Templates now return Result rather than panicking on errors, making it more convenient to handle any issues that may arise during development. Similarly, Executors also return Result instead of panicking on errors, providing a more consistent and safer API.
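In practice this means you can lean on ordinary Result handling instead of guarding against panics. A rough sketch (the method name format and the error type are stand-ins, not the exact API):

// Hedged sketch: `template.format(&parameters)` stands in for whatever formatting
// call you use; the point is that failures now come back as Err values.
match template.format(&parameters) {
    Ok(prompt) => println!("{}", prompt),
    Err(err) => eprintln!("template error: {}", err),
}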

Time to move on from the old templating system

With the introduction of the tera template language, we strongly recommend moving away from the old templating system. This update provides a solid foundation for building even more advanced applications using the llm-chain library.

We hope you're as excited about these enhancements as we are! As always, we appreciate your feedback and support. If you have any questions or need help, please don't hesitate to reach out on Discord!

Happy coding! 🚀

diff --git a/blog/introducing-llm-chain-v080/index.html b/blog/introducing-llm-chain-v080/index.html

Introducing LLM-chain v0.8.0 - Expanding the prompt system

· 2 min read
will rudenmalm

We're excited to announce the release of llm-chain v0.8.0, a significant update to our LLM library. This release introduces a host of improvements and new features, including a completely revamped Prompt system and more streamlined handling of Parameters. Let's dive into the details!

Revamped Prompt System

Our new Prompt system has been redesigned from the ground up to provide greater flexibility and efficiency in working with language models. In llm-chain v0.8.0, we've introduced new structs and enums to better represent chat messages and their roles, such as ChatMessage, ChatMessageCollection, and ChatRole. The Data enum has also been introduced to represent either a collection of chat messages or a single text, making it easier to work with different types of data.
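Conceptually, Data boils down to something like the following sketch (illustrative only, not the crate's exact definitions):

// Stand-in type for the real chat-transcript collection.
struct ChatMessageCollection;

// A prompt is either a whole chat transcript or a single piece of plain text.
enum Data {
    Chat(ChatMessageCollection),
    Text(String),
}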

Furthermore, we've created a more powerful PromptTemplate system that allows you to format prompts with a set of parameters. This enables you to dynamically generate prompts for your language models without the need for cumbersome string manipulation.

Executors No Longer Handle Parameters

With the release of llm-chain v0.8.0, we've shifted the responsibility of handling Parameters from the executors to the main llm-chain crate. This change simplifies the process of working with executors, allowing developers to focus more on the core functionality of their language models.

What's Next?

This release marks a significant step forward in llm-chain's evolution. However, we're not stopping here! We'll continue to refine and expand its capabilities, making it even more powerful and user-friendly.

We encourage you to check out llm-chain v0.8.0 and experience the benefits of the improved Prompt system and streamlined handling of Parameters. As always, we appreciate your feedback and contributions to help make llm-chain the best language model library out there.

Upgrade to llm-chain v0.8.0 today and take your language models to the next level!

diff --git a/blog/introducing-llm-chain/index.html b/blog/introducing-llm-chain/index.html

Unleashing the Power of Large Language Models with LLM-chain

· 2 min read
will rudenmalm

We're excited to announce the release of LLM-chain, a Rust library designed to help developers work with Large Language Models (LLMs) more effectively. Our primary focus is on providing robust support for prompt templates and chaining together prompts in multi-step chains, enabling complex tasks that LLMs can't handle in a single step. This includes, but is not limited to, summarizing lengthy texts or performing advanced data processing tasks.

Features of LLM-chain

LLM-chain comes with a variety of features that make it easier to work with LLMs, including:

  • Prompt templates: Create reusable and easily customizable prompt templates for consistent and structured interactions with LLMs.
  • Chains: Build powerful chains of prompts that allow you to execute more complex tasks, step by step, leveraging the full potential of LLMs.
  • ChatGPT support: Currently supports ChatGPT models, with plans to add support for more LLMs in the future, such as LLaMa and Stanford's Alpaca models.
  • Tools: Enhance your AI agents' capabilities by giving them access to various tools, such as running Bash commands, executing Python scripts, or performing web searches, enabling more complex and powerful interactions.
  • Extensibility: Designed with extensibility in mind, making it easy to integrate additional LLMs as the ecosystem grows and new models are developed.
  • Community-driven: We welcome and encourage contributions from the community to help improve and expand the capabilities of LLM-chain.

Connect with Us

If you have any questions, suggestions, or feedback, feel free to join our Discord community. We're always excited to hear from our users and learn about your experiences with LLM-chain.

Getting Started with LLM-chain

Check out our GitHub repository or the documentation to get started.

diff --git a/blog/tags/chatgpt/index.html b/blog/tags/chatgpt/index.html

One post tagged with "chatgpt"

View All Tags

diff --git a/blog/tags/index.html b/blog/tags/index.html
diff --git a/blog/tags/introduction/index.html b/blog/tags/introduction/index.html

2 posts tagged with "introduction"

View All Tags

diff --git a/blog/tags/large-language-models/index.html b/blog/tags/large-language-models/index.html

3 posts tagged with "large language models"

View All Tags

diff --git a/blog/tags/llm-chain/index.html b/blog/tags/llm-chain/index.html

4 posts tagged with "llm-chain"

View All Tags

diff --git a/blog/tags/prompt-system/index.html b/blog/tags/prompt-system/index.html

2 posts tagged with "prompt system"

View All Tags

diff --git a/blog/tags/rust/index.html b/blog/tags/rust/index.html

4 posts tagged with "rust"

View All Tags

diff --git a/blog/tags/templating/index.html b/blog/tags/templating/index.html

2 posts tagged with "templating"

View All Tags

· 2 min read
will rudenmalm

We're excited to announce the release of llm-chain v0.8.0, a significant update to our LLM library. This release introduces a host of improvements and new features, including a completely revamped Prompt system and more streamlined handling of Parameters. Let's dive into the details!

Revamped Prompt System

Our new Prompt system has been redesigned from the ground up to provide greater flexibility and efficiency in working with language models. In llm-chain v0.8.0, we've introduced new structs and enums to better represent chat messages and their roles, such as ChatMessage, ChatMessageCollection, and ChatRole. The Data enum has also been introduced to represent either a collection of chat messages or a single text, making it easier to work with different types of data.

Furthermore, we've created a more powerful PromptTemplate system that allows you to format prompts with a set of parameters. This enables you to dynamically generate prompts for your language models without the need for cumbersome string manipulation.

Executors No Longer Handle Parameters

With the release of llm-chain v0.8.0, we've shifted the responsibility of handling Parameters from the executors to the main llm-chain crate. This change simplifies the process of working with executors, allowing developers to focus more on the core functionality of their language models.

What's Next?

This release marks a significant step forward in the evolution of llm-chain. However, we're not stopping here! We'll continue to refine and expand the capabilities of llm-chain, making it even more powerful and user-friendly.

We encourage you to check out llm-chain v0.8.0 and experience the benefits of the improved Prompt system and streamlined handling of Parameters. As always, we appreciate your feedback and contributions to help make llm-chain the best language model library out there.

Upgrade to llm-chain v0.8.0 today and take your language models to the next level!

· 2 min read
will rudenmalm

We are thrilled to announce the release of llm-chain v0.6.0, which introduces significant enhancements to our library. This update focuses on making the llm-chain more robust and versatile, allowing developers to build even more advanced applications with ease.

Major updates

1. The switch to the tera template language

One of the most significant changes in this release is the introduction of the tera template language. This powerful and flexible templating system enables developers to create dynamic and complex templates for their projects. The tera language allows for more advanced control structures and filters, making it a substantial upgrade from the previous templating system.

2. Improved prompt system

Another notable update is the revamped prompt system. With llm-chain v0.6.0, the prompt system now supports both Chat and completion-style models. This improvement means developers no longer need to worry about whether they are using a completion or chat model when crafting prompts. This unified approach simplifies the development process and makes it easier to work with various types of language models.

3. Updated LLaMA.cpp

The latest version of LLaMA.cpp has been integrated into this release, ensuring better performance and stability for your projects.

Other improvements

1. Safer error handling

In addition to the major updates, llm-chain v0.6.0 also brings improvements to error handling. Templates now return Result rather than panicking on errors, making it more convenient to handle any issues that may arise during development. Similarly, Executors also return Result instead of panicking on errors, providing a more consistent and safer API.

Time to move on from the old templating system

With the introduction of the tera template language, we strongly recommend moving away from the old templating system. This update provides a solid foundation for building even more advanced applications using the llm-chain library.

We hope you're as excited about these enhancements as we are! As always, we appreciate your feedback and support. If you have any questions or need help, please don't hesitate to reach out on Discord!

Happy coding! 🚀

- + \ No newline at end of file diff --git a/blog/tags/tera/index.html b/blog/tags/tera/index.html index 3b2b74c2..05c5d26c 100644 --- a/blog/tags/tera/index.html +++ b/blog/tags/tera/index.html @@ -13,13 +13,13 @@ - +

2 posts tagged with "tera"

View All Tags

· 2 min read
will rudenmalm

We're excited to announce the release of llm-chain v0.8.0, a significant update to our LLM library. This release introduces a host of improvements and new features, including a completely revamped Prompt system and more streamlined handling of Parameters. Let's dive into the details!

Revamped Prompt System

Our new Prompt system has been redesigned from the ground up to provide greater flexibility and efficiency in working with language models. In llm-chain v0.8.0, we've introduced new structs and enums to better represent chat messages and their roles, such as ChatMessage, ChatMessageCollection, and ChatRole. The Data enum has also been introduced to represent either a collection of chat messages or a single text, making it easier to work with different types of data.

Furthermore, we've created a more powerful PromptTemplate system that allows you to format prompts with a set of parameters. This enables you to dynamically generate prompts for your language models without the need for cumbersome string manipulation.

Executors No Longer Handle Parameters

With the release of llm-chain v0.8.0, we've shifted the responsibility of handling Parameters from the executors to the main llm-chain crate. This change simplifies the process of working with executors, allowing developers to focus more on the core functionality of their language models.

What's Next?

This release marks a significant step forward in the evolution of llm-chain. However, we're not stopping here! We'll continue to refine and expand the capabilities of llm-chain, making it even more powerful and user-friendly.

We encourage you to check out llm-chain v0.8.0 and experience the benefits of the improved Prompt system and streamlined handling of Parameters. As always, we appreciate your feedback and contributions to help make llm-chain the best language model library out there.

Upgrade to llm-chain v0.8.0 today and take your language models to the next level!

· 2 min read
will rudenmalm

We are thrilled to announce the release of llm-chain v0.6.0, which introduces significant enhancements to our library. This update focuses on making the llm-chain more robust and versatile, allowing developers to build even more advanced applications with ease.

Major updates

1. The switch to the tera template language

One of the most significant changes in this release is the introduction of the tera template language. This powerful and flexible templating system enables developers to create dynamic and complex templates for their projects. The tera language allows for more advanced control structures and filters, making it a substantial upgrade from the previous templating system.
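As a small, illustrative sketch of tera-style templating through the macros from the current docs (the parameter names here are made up, and exactly which tera features are available inside a prompt depends on how llm-chain feeds Parameters into tera):

use llm_chain::{executor, parameters, prompt, step::Step};

#[tokio::main(flavor = "current_thread")]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let exec = executor!()?;
    // The user prompt uses a tera filter (`upper`); tera also offers control
    // structures such as {% if %} and {% for %}.
    let step = Step::for_prompt_template(prompt!(
        "You are a bot for making personalized greetings",
        "Make a short personalized greeting for {{ name | upper }} from the {{ team }} team."
    ));
    let res = step
        .run(&parameters!("name" => "emil", "team" => "docs"), &exec)
        .await?;
    println!("{}", res);
    Ok(())
}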

2. Improved prompt system

Another notable update is the revamped prompt system. With llm-chain v0.6.0, the prompt system now supports both Chat and completion-style models. This improvement means developers no longer need to worry about whether they are using a completion or chat model when crafting prompts. This unified approach simplifies the development process and makes it easier to work with various types of language models.

3. Updated LLaMA.cpp

The latest version of LLaMA.cpp has been integrated into this release, ensuring better performance and stability for your projects.

Other improvements

1. Safer error handling

In addition to the major updates, llm-chain v0.6.0 also brings improvements to error handling. Templates now return Result rather than panicking on errors, making it more convenient to handle any issues that may arise during development. Similarly, Executors also return Result instead of panicking on errors, providing a more consistent and safer API.

Time to move on from the old templating system

With the introduction of the tera template language, we strongly recommend moving away from the old templating system. This update provides a solid foundation for building even more advanced applications using the llm-chain library.

We hope you're as excited about these enhancements as we are! As always, we appreciate your feedback and support. If you have any questions or need help, please don't hesitate to reach out on Discord!

Happy coding! 🚀

- + \ No newline at end of file diff --git a/blog/tags/update/index.html b/blog/tags/update/index.html index 39d32871..ba1ae7f0 100644 --- a/blog/tags/update/index.html +++ b/blog/tags/update/index.html @@ -13,13 +13,13 @@ - +

2 posts tagged with "update"

View All Tags

· 2 min read
will rudenmalm

We're excited to announce the release of llm-chain v0.8.0, a significant update to our LLM library. This release introduces a host of improvements and new features, including a completely revamped Prompt system and more streamlined handling of Parameters. Let's dive into the details!

Revamped Prompt System

Our new Prompt system has been redesigned from the ground up to provide greater flexibility and efficiency in working with language models. In llm-chain v0.8.0, we've introduced new structs and enums to better represent chat messages and their roles, such as ChatMessage, ChatMessageCollection, and ChatRole. The Data enum has also been introduced to represent either a collection of chat messages or a single text, making it easier to work with different types of data.

Furthermore, we've created a more powerful PromptTemplate system that allows you to format prompts with a set of parameters. This enables you to dynamically generate prompts for your language models without the need for cumbersome string manipulation.

Executors No Longer Handle Parameters

With the release of llm-chain v0.8.0, we've shifted the responsibility of handling Parameters from the executors to the main llm-chain crate. This change simplifies the process of working with executors, allowing developers to focus more on the core functionality of their language models.

What's Next?

This release marks a significant step forward in the evolution of llm-chain. However, we're not stopping here! We'll continue to refine and expand the capabilities of llm-chain, making it even more powerful and user-friendly.

We encourage you to check out llm-chain v0.8.0 and experience the benefits of the improved Prompt system and streamlined handling of Parameters. As always, we appreciate your feedback and contributions to help make llm-chain the best language model library out there.

Upgrade to llm-chain v0.8.0 today and take your language models to the next level!

· 2 min read
will rudenmalm

We are thrilled to announce the release of llm-chain v0.6.0, which introduces significant enhancements to our library. This update focuses on making the llm-chain more robust and versatile, allowing developers to build even more advanced applications with ease.

Major updates

1. The switch to the tera template language

One of the most significant changes in this release is the introduction of the tera template language. This powerful and flexible templating system enables developers to create dynamic and complex templates for their projects. The tera language allows for more advanced control structures and filters, making it a substantial upgrade from the previous templating system.

2. Improved prompt system

Another notable update is the revamped prompt system. With llm-chain v0.6.0, the prompt system now supports both Chat and completion-style models. This improvement means developers no longer need to worry about whether they are using a completion or chat model when crafting prompts. This unified approach simplifies the development process and makes it easier to work with various types of language models.

3. Updated LLaMA.cpp

The latest version of LLaMA.cpp has been integrated into this release, ensuring better performance and stability for your projects.

Other improvements

1. Safer error handling

In addition to the major updates, llm-chain v0.6.0 also brings improvements to error handling. Templates now return Result rather than panicking on errors, making it more convenient to handle any issues that may arise during development. Similarly, Executors also return Result instead of panicking on errors, providing a more consistent and safer API.

Time to move on from the old templating system

With the introduction of the tera template language, we strongly recommend moving away from the old templating system. This update provides a solid foundation for building even more advanced applications using the llm-chain library.

We hope you're as excited about these enhancements as we are! As always, we appreciate your feedback and support. If you have any questions or need help, please don't hesitate to reach out on Discord!

Happy coding! 🚀

- + \ No newline at end of file diff --git a/blog/using-chatgpt-in-rust/index.html b/blog/using-chatgpt-in-rust/index.html index 0f750f7b..85c2323d 100644 --- a/blog/using-chatgpt-in-rust/index.html +++ b/blog/using-chatgpt-in-rust/index.html @@ -13,13 +13,13 @@ - +

Using ChatGPT in Rust with llm-chain

· 2 min read
will rudenmalm

In this blog post, we'll explore how to use ChatGPT in Rust with the help of the llm-chain library. We will walk through a simple example that demonstrates how to generate responses using OpenAI's ChatGPT model.

Getting Started

First, let's start by installing the necessary packages using cargo add. You will need the llm-chain and llm-chain-openai libraries:

cargo add llm-chain llm-chain-openai

Now, let's dive into the code:


use llm_chain::{traits::StepExt, Parameters};
use llm_chain_openai::chatgpt::{Executor, Model, Role, Step};

#[tokio::main(flavor = "current_thread")]
async fn main() {
let exec = Executor::new_default();
let chain = Step::new(
Model::ChatGPT3_5Turbo,
[
(
Role::System,
"You are a helpful assistant",
),
(Role::User, "Tell me about the Rust programming language"),
],
)
.to_chain();
let res = chain.run(Parameters::new(), &exec).await.unwrap();
println!("{:?}", res);
}

In the code snippet above, we begin by importing the necessary modules and functions from the llm-chain and llm-chain-openai libraries. We then define a simple main function that uses the Executor and Step structs to create a conversational chain.

The Model::ChatGPT3_5Turbo model is used as the language model in this example. We also define two steps in the conversation: the first one sets the role of the assistant and the second one asks a question about the Rust programming language.

Finally, we execute the conversation chain using the run method and print the generated response.

Wrapping Up

As you can see, using ChatGPT in Rust with llm-chain is a straightforward and efficient process. The library makes it easy to build and manage conversational agents in Rust, allowing developers to focus on creating more powerful and interactive applications.

To continue learning about ChatGPT in Rust and how to make the most of the llm-chain library, try our tutorial.

- + \ No newline at end of file diff --git a/docs/category/tutorial/index.html b/docs/category/tutorial/index.html index 356f45f8..e61fbdfc 100644 --- a/docs/category/tutorial/index.html +++ b/docs/category/tutorial/index.html @@ -13,13 +13,13 @@ - +
-
- +
+ \ No newline at end of file diff --git a/docs/chains/conversational/index.html b/docs/chains/conversational/index.html index acf05c13..a1bd6ff1 100644 --- a/docs/chains/conversational/index.html +++ b/docs/chains/conversational/index.html @@ -13,13 +13,13 @@ - +

Conversational Chains

Conversational chains enable you to have an ongoing conversation with a large language model (LLM). They keep track of the conversation history and manage the context, ensuring that the LLM's responses remain relevant and coherent throughout the conversation. Conversational chains are particularly useful for chatbot applications, multi-step interactions, and any scenario where context is essential.

In this guide, we'll walk you through an example of a conversational chain with an AI assistant that creates personalized greetings for different individuals. The example demonstrates how to initiate a conversation, send multiple messages, and manage the conversation context.

use llm_chain::{
chains::conversation::Chain, executor, output::Output, parameters, prompt, step::Step,
};
use tokio;

#[tokio::main(flavor = "current_thread")]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
// Create a new ChatGPT executor.
let exec = executor!()?;

// Create a new Chain with the executor.
let mut chain = Chain::new(
prompt!(system: "You are a robot assistant for making personalized greetings."),
)?;

// Define the conversation steps.
let step1 = Step::for_prompt_template(prompt!(user: "Make a personalized greeting for Joe."));
let step2 =
Step::for_prompt_template(prompt!(user: "Now, create a personalized greeting for Jane."));
let step3 = Step::for_prompt_template(
prompt!(user: "Finally, create a personalized greeting for Alice."),
);

let step4 = Step::for_prompt_template(prompt!(user: "Remind me who did we just greet."));

// Execute the conversation steps.
let res1 = chain.send_message(step1, &parameters!(), &exec).await?;
println!("Step 1: {}", res1.primary_textual_output().await.unwrap());

let res2 = chain.send_message(step2, &parameters!(), &exec).await?;
println!("Step 2: {}", res2.primary_textual_output().await.unwrap());

let res3 = chain.send_message(step3, &parameters!(), &exec).await?;
println!("Step 3: {}", res3.primary_textual_output().await.unwrap());

let res4 = chain.send_message(step4, &parameters!(), &exec).await?;
println!("Step 4: {}", res4.primary_textual_output().await.unwrap());

Ok(())
}
- + \ No newline at end of file diff --git a/docs/chains/map-reduce-chains/index.html b/docs/chains/map-reduce-chains/index.html index 7f5489cc..7a88ced5 100644 --- a/docs/chains/map-reduce-chains/index.html +++ b/docs/chains/map-reduce-chains/index.html @@ -13,13 +13,13 @@ - +
-

Map-Reduce Chains

Map-Reduce chains are a powerful way to process large amounts of text using large language models (LLMs). They consist of two main steps: a "map" step, which processes each text chunk independently, and a "reduce" step, which combines the results of the map step into a single output. This approach enables the efficient processing of large documents that exceed the LLM's context window size.

In this guide, we'll explain how to create and execute a map-reduce chain using an example. The example demonstrates how to summarize a Wikipedia article into bullet points using a two-step process:

  1. The "map" step summarizes each chunk of the article into bullet points.
  2. The "reduce" step combines all bullet point summaries into a single summary.
use llm_chain::chains::map_reduce::Chain;
use llm_chain::step::Step;
use llm_chain::{executor, parameters, prompt, Parameters};

#[tokio::main(flavor = "current_thread")]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
// Create a new ChatGPT executor with the default settings
let exec = executor!()?;

// Create the "map" step to summarize an article into bullet points
let map_prompt = Step::for_prompt_template(prompt!(
"You are a bot for summarizing wikipedia articles, you are terse and focus on accuracy",
"Summarize this article into bullet points:\n{{text}}"
));

// Create the "reduce" step to combine multiple summaries into one
let reduce_prompt = Step::for_prompt_template(prompt!(
"You are a diligent bot that summarizes text",
"Please combine the articles below into one summary as bullet points:\n{{text}}"
));

// Create a map-reduce chain with the map and reduce steps
let chain = Chain::new(map_prompt, reduce_prompt);

// Load the content of the article to be summarized
let article = include_str!("article_to_summarize.md");

// Create a vector with the Parameters object containing the text of the article
let docs = vec![parameters!(article)];

// Run the chain with the provided documents and an empty Parameters object for the "reduce" step
let res = chain.run(docs, Parameters::new(), &exec).await.unwrap();

// Print the result to the console
println!("{}", res);
Ok(())
}

In this example, we start by importing the necessary modules and defining the main function. We then create a new ChatGPT executor using the executor!() macro.

Next, we create the "map" and "reduce" steps using Step::for_prompt_template(). The "map" step is responsible for summarizing each article chunk, while the "reduce" step combines the summaries into a single output.

After defining the steps, we create a new Chain object by passing in the "map" and "reduce" steps. We then load the content of the article to be summarized and create a Parameters object containing the text.

Finally, we execute the map-reduce chain using the chain.run() method, passing in the documents, an empty Parameters object for the "reduce" step, and the executor. The result is printed to the console.

Map-Reduce chains offer an effective way to handle large documents or multiple documents using LLMs. By breaking the text into manageable chunks and combining the results, you can create efficient pipelines for text processing tasks such as summarization, translation, and analysis.

- +

Map-Reduce Chains

Map-Reduce chains are a powerful way to process large amounts of text using large language models (LLMs). They consist of two main steps: a "map" step, which processes each text chunk independently, and a "reduce" step, which combines the results of the map step into a single output. This approach enables the efficient processing of large documents that exceed the LLM's context window size.

In this guide, we'll explain how to create and execute a map-reduce chain using an example. The example demonstrates how to summarize a Wikipedia article into bullet points using a two-step process:

  1. The "map" step summarizes each chunk of the article into bullet points.
  2. The "reduce" step combines all bullet point summaries into a single summary.
use llm_chain::chains::map_reduce::Chain;
use llm_chain::step::Step;
use llm_chain::{executor, parameters, prompt, Parameters};

#[tokio::main(flavor = "current_thread")]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
// Create a new ChatGPT executor with the default settings
let exec = executor!()?;

// Create the "map" step to summarize an article into bullet points
let map_prompt = Step::for_prompt_template(prompt!(
"You are a bot for summarizing wikipedia articles, you are terse and focus on accuracy",
"Summarize this article into bullet points:\n{{text}}"
));

// Create the "reduce" step to combine multiple summaries into one
let reduce_prompt = Step::for_prompt_template(prompt!(
"You are a diligent bot that summarizes text",
"Please combine the articles below into one summary as bullet points:\n{{text}}"
));

// Create a map-reduce chain with the map and reduce steps
let chain = Chain::new(map_prompt, reduce_prompt);

// Load the content of the article to be summarized
let article = include_str!("article_to_summarize.md");

// Create a vector with the Parameters object containing the text of the article
let docs = vec![parameters!(article)];

// Run the chain with the provided documents and an empty Parameters object for the "reduce" step
let res = chain.run(docs, Parameters::new(), &exec).await.unwrap();

// Print the result to the console
println!("{}", res);
Ok(())
}

In this example, we start by importing the necessary modules and defining the main function. We then create a new ChatGPT executor using the executor!() macro.

Next, we create the "map" and "reduce" steps using Step::for_prompt_template(). The "map" step is responsible for summarizing each article chunk, while the "reduce" step combines the summaries into a single output.

After defining the steps, we create a new Chain object by passing in the "map" and "reduce" steps. We then load the content of the article to be summarized and create a Parameters object containing the text.

Finally, we execute the map-reduce chain using the chain.run() method, passing in the documents, an empty Parameters object for the "reduce" step, and the executor. The result is printed to the console.

Map-Reduce chains offer an effective way to handle large documents or multiple documents using LLMs. By breaking the text into manageable chunks and combining the results, you can create efficient pipelines for text processing tasks such as summarization, translation, and analysis.

+ \ No newline at end of file diff --git a/docs/chains/sequential-chains/index.html b/docs/chains/sequential-chains/index.html index 54808f36..18158710 100644 --- a/docs/chains/sequential-chains/index.html +++ b/docs/chains/sequential-chains/index.html @@ -13,14 +13,14 @@ - +

Sequential Chains

Sequential chains are a convenient way to apply large language models (LLMs) to a sequence of tasks. They connect multiple steps together, where the output of the first step becomes the input of the second step, and so on. This method allows for straightforward processing of information, where each step builds upon the results of the previous one.

In this guide, we'll explain how to create and execute a sequential chain using an example. The example demonstrates a two-step process, where the first step generates a personalized birthday email, and the second step summarizes the email into a tweet.

use llm_chain::parameters;
use llm_chain::step::Step;
use llm_chain::traits::Executor as ExecutorTrait;
use llm_chain::{chains::sequential::Chain, prompt};
use llm_chain_openai::chatgpt::Executor;

#[tokio::main(flavor = "current_thread")]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
// Create a new ChatGPT executor with the default settings
let exec = Executor::new()?;

// Create a chain of steps with two prompts
let chain: Chain<Executor> = Chain::new(vec![
// First step: make a personalized birthday email
Step::for_prompt_template(
prompt!("You are a bot for making personalized greetings", "Make personalized birthday e-mail to the whole company for {{name}} who has their birthday on {{date}}. Include their name")
),

// Second step: summarize the email into a tweet. Importantly, the text parameter becomes the result of the previous prompt.
Step::for_prompt_template(
prompt!( "You are an assistant for managing social media accounts for a company", "Summarize this email into a tweet to be sent by the company, use emoji if you can. \n--\n{{text}}")
)
]);

// Run the chain with the provided parameters
let res = chain
.run(
// Create a Parameters object with key-value pairs for the placeholders
parameters!("name" => "Emil", "date" => "February 30th 2023"),
&exec,
)
.await
.unwrap();

// Print the result to the console
println!("{:?}", res);
Ok(())
}

In this example, we start by importing the necessary modules and defining the main function. Then, we create a new ChatGPT executor using the Executor::new() function. The executor is responsible for running the LLM.

Next, we create a new Chain object by passing in a vector of Step objects. Each step represents a separate LLM prompt. In this case, we have two steps:

  1. The first step generates a personalized birthday email using the provided name and date parameters.
  2. The second step summarizes the previously generated email into a tweet. Note that the {{text}} placeholder in the prompt is automatically filled with the result of the previous step.

After defining the chain, we execute it using the chain.run() method. We provide a Parameters object containing key-value pairs for the placeholders in the prompts (e.g., name and date) and the executor.

Finally, we print the result of the chain to the console.

Sequential chains offer an efficient and straightforward way to perform a series of tasks using LLMs. By organizing the steps in a specific order, you can create complex processing pipelines that leverage the capabilities of LLMs effectively.

- + \ No newline at end of file diff --git a/docs/chains/what-are-chains/index.html b/docs/chains/what-are-chains/index.html index 96703335..da7727c6 100644 --- a/docs/chains/what-are-chains/index.html +++ b/docs/chains/what-are-chains/index.html @@ -13,13 +13,13 @@ - +

What are LLM chains and why are they useful?

Chains are a concept in the world of language models designed to model common patterns for applying large language models (LLMs) to a sequence of tasks. Although the term "chain" might suggest that it strictly involves chaining together LLM steps, the name has stuck, and it is now used more broadly.

Chains provide a convenient abstraction for organizing and executing a series of LLM steps in various ways to achieve desired outcomes. In this document, we will explore three main types of chains: Sequential, MapReduce, and Conversation chains. Each chain has its unique characteristics and serves specific purposes in applying LLMs.

Sequential Chains

Sequential chains are a simple yet powerful approach to applying LLMs. They connect multiple steps together in a sequence, where the output of the first step becomes the input of the second step, and so on. This method allows for straightforward processing of information, where each step builds upon the results of the previous one.

MapReduce Chains

MapReduce chains are designed to work with one or more documents. They split the documents into chunks that fit the LLM's context window and then apply the Map prompt to each chunk. After processing the chunks, a Reduce prompt is used to combine the results into a final output.

This approach is particularly useful when working with large documents or multiple documents, as it enables parallel processing and efficient combination of results.

Conversation Chains

Conversation chains are tailored for chat-style use cases, where maintaining a conversation history with the LLM is essential. This chain type keeps building up a history of chat messages, dropping the oldest messages first once the history no longer fits the context window. The conversation chain allows for more dynamic and interactive experiences when working with LLMs.

In summary, chains are a useful concept in applying LLMs, as they provide a structured way of organizing and executing LLM steps for various tasks. Each chain type has its unique characteristics and advantages, and choosing the right chain for your specific use case can significantly improve the effectiveness of your LLM application.

- + \ No newline at end of file diff --git a/docs/dev-setup/index.html b/docs/dev-setup/index.html index a566d196..f75fda28 100644 --- a/docs/dev-setup/index.html +++ b/docs/dev-setup/index.html @@ -13,13 +13,13 @@ - +

Contributing to llm-chain

First of all, thank you for considering contributing to our project! 🎉 We are delighted to have you here and truly appreciate your interest in making our project even better. Your contributions and ideas are highly valued.

Getting Started

  1. Make your own fork of llm-chain.
  2. git clone your fork to your local machine.
  3. Follow the instructions on the rustup website to install Rust.
  4. Fetch the project's submodules with git submodule update --init
  5. Test that everything went well with cargo test (steps 2-5 are collected as a command sketch after this list)
  6. Make your changes in a new branch, following the coding guidelines and best practices.
  7. Commit and push your changes to your fork.
  8. Open a pull request against the main repository. 🚀
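A sketch of steps 2-5 as shell commands; the fork URL and directory name are placeholders for your own fork:

git clone https://github.com/<your-username>/llm-chain.git
cd llm-chain
git submodule update --init
cargo test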

Before You Contribute

We are open to new ideas and contributions that align with the project's goals and vision. However, if you're planning on working on something significantly different from what's already in the project, we strongly recommend getting in touch with us before you start.

You can reach out to us by opening an issue, starting a discussion, or sending an email. This way, we can discuss your ideas, provide guidance, and ensure that your efforts are more likely to be merged into the project.

- + \ No newline at end of file diff --git a/docs/getting-started-tutorial/building-a-multi-step-chain/index.html b/docs/getting-started-tutorial/building-a-multi-step-chain/index.html index c597d72f..481468bc 100644 --- a/docs/getting-started-tutorial/building-a-multi-step-chain/index.html +++ b/docs/getting-started-tutorial/building-a-multi-step-chain/index.html @@ -3,7 +3,7 @@ -Creating Your First Sequential Chain | llm-chain +Creating Your First Sequential Chain | llm-chain @@ -13,13 +13,13 @@ - +
-

Creating Your First Sequential Chain

tip

Having problems? Don't worry, reach out on discord and we will help you out.

Sequential chains in LLM-Chain allow you to execute a series of steps, with the output of each step feeding into the next one. This tutorial will guide you through creating a sequential chain, extending it with more steps, and provide some best practices and tips.

Here's a Rust program that demonstrates how to create a sequential chain:

use llm_chain::parameters;
use llm_chain::step::Step;
use llm_chain::traits::Executor as ExecutorTrait;
use llm_chain::{chains::sequential::Chain, prompt};
use llm_chain_openai::chatgpt::Executor;

#[tokio::main(flavor = "current_thread")]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
// Create a new ChatGPT executor with the default settings
let exec = Executor::new()?;

// Create a chain of steps with two prompts
let chain: Chain = Chain::new(vec![
// First step: make a personalized birthday email
Step::for_prompt_template(
prompt!("You are a bot for making personalized greetings", "Make personalized birthday e-mail to the whole company for {{name}} who has their birthday on {{date}}. Include their name")
),

// Second step: summarize the email into a tweet. Importantly, the text parameter becomes the result of the previous prompt.
Step::for_prompt_template(
prompt!( "You are an assistant for managing social media accounts for a company", "Summarize this email into a tweet to be sent by the company, use emoji if you can. \n--\n{{text}}")
)
]);

// Run the chain with the provided parameters
let res = chain
.run(
// Create a Parameters object with key-value pairs for the placeholders
parameters!("name" => "Emil", "date" => "February 30th 2023"),
&exec,
)
.await
.unwrap();

// Print the result to the console
println!("{}", res.to_immediate().await?.as_content());
Ok(())
}
  1. We start by importing the necessary modules from the llm_chain and llm_chain_openai libraries.

  2. The main async function is defined, using Tokio as the runtime.

  3. We create a new Executor with the default settings.

  4. We create a Chain that contains two steps, each with a different prompt:

    • The first step has a prompt to make a personalized birthday email for a company.
    • The second step has a prompt to summarize the email into a tweet.

    Both prompts use placeholders (e.g., {{name}}, {{date}}, and {{text}}) that will be replaced with specific values later. Importantly, the value of {{text}} will be replaced by the result of the first step in the chain.

  5. We run the Chain with the provided parameters:

    • We create a Parameters object with key-value pairs for the placeholders: ("name", "Emil") and ("date", "February 30th 2023").
    • We pass the Parameters object and the Executor to the run() method.
  6. We unwrap the result and print it to the console.

Best Practices and Tips

When working with sequential chains, consider the following tips and best practices:

  1. Use descriptive and clear instructions for the system role to help guide the LLM.
  2. Keep the chain as short and simple as possible. Longer chains are harder to manage and debug.
  3. Test each step independently before testing the entire sequence.

For the next tutorial we will switch our focus from sequential to map-reduce chains. Map-reduce chains are more complicated than sequential chains but allow us to do things that sequential chains can't. Stay tuned!

- +

Creating Your First Sequential Chain

tip

Having problems? Don't worry, reach out on discord and we will help you out.

Sequential chains in LLM-Chain allow you to execute a series of steps, with the output of each step feeding into the next one. This tutorial will guide you through creating a sequential chain, extending it with more steps, and provide some best practices and tips.

Here's a Rust program that demonstrates how to create a sequential chain:

use llm_chain::parameters;
use llm_chain::step::Step;
use llm_chain::traits::Executor as ExecutorTrait;
use llm_chain::{chains::sequential::Chain, prompt};
use llm_chain_openai::chatgpt::Executor;

#[tokio::main(flavor = "current_thread")]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
// Create a new ChatGPT executor with the default settings
let exec = Executor::new()?;

// Create a chain of steps with two prompts
let chain: Chain = Chain::new(vec![
// First step: make a personalized birthday email
Step::for_prompt_template(
prompt!("You are a bot for making personalized greetings", "Make personalized birthday e-mail to the whole company for {{name}} who has their birthday on {{date}}. Include their name")
),

// Second step: summarize the email into a tweet. Importantly, the text parameter becomes the result of the previous prompt.
Step::for_prompt_template(
prompt!( "You are an assistant for managing social media accounts for a company", "Summarize this email into a tweet to be sent by the company, use emoji if you can. \n--\n{{text}}")
)
]);

// Run the chain with the provided parameters
let res = chain
.run(
// Create a Parameters object with key-value pairs for the placeholders
parameters!("name" => "Emil", "date" => "February 30th 2023"),
&exec,
)
.await
.unwrap();

// Print the result to the console
println!("{}", res.to_immediate().await?.as_content());
Ok(())
}
  1. We start by importing the necessary modules from the llm_chain and llm_chain_openai libraries.

  2. The main async function is defined, using Tokio as the runtime.

  3. We create a new Executor with the default settings.

  4. We create a Chain that contains two steps, each with a different prompt:

    • The first step has a prompt to make a personalized birthday email for a company.
    • The second step has a prompt to summarize the email into a tweet.

    Both prompts use placeholders (e.g., {{name}}, {{date}}, and {{text}}) that will be replaced with specific values later. Importantly, the value of {{text}} will be replaced by the result of the first step in the chain.

  5. We run the Chain with the provided parameters:

    • We create a Parameters object with key-value pairs for the placeholders: ("name", "Emil") and ("date", "February 30th 2023").
    • We pass the Parameters object and the Executor to the run() method.
  6. We unwrap the result and print it to the console.

Best Practices and Tips

When working with sequential chains, consider the following tips and best practices:

  1. Use descriptive and clear instructions for the system role to help guide the LLM.
  2. Keep the chain as short and simple as possible. Longer chains are harder to manage and debug.
  3. Test each step independently before testing the entire sequence.

For the next tutorial we will switch our focus from sequential to map-reduce chains. Map-reduce chains are more complicated than sequential chains but allow us to do things that sequential chains can't. Stay tuned!

+ \ No newline at end of file diff --git a/docs/getting-started-tutorial/generating-your-first-llm-output/index.html b/docs/getting-started-tutorial/generating-your-first-llm-output/index.html index 4abcfaf8..4596dee3 100644 --- a/docs/getting-started-tutorial/generating-your-first-llm-output/index.html +++ b/docs/getting-started-tutorial/generating-your-first-llm-output/index.html @@ -3,7 +3,7 @@ -Generating Your First LLM Output | llm-chain +Generating Your First LLM Output | llm-chain @@ -13,13 +13,13 @@ - +
-

Generating Your First LLM Output

tip

Having problems? Don't worry, reach out on discord and we will help you out.

First, we need to install tokio in our project. Since this is a tutorial, we will install the full tokio crate; in production, of course, we should be more selective about which features we install.

cargo add tokio --features full

Now, let's write a simple Rust program that generates an LLM output using LLM-Chain and the OpenAI driver:

use llm_chain::{executor, parameters, prompt};

// Declare an async main function
#[tokio::main(flavor = "current_thread")]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
// Create a new ChatGPT executor
let exec = executor!()?;
// Create our prompt...
let res = prompt!(
"You are a robot assistant for making personalized greetings",
"Make a personalized greeting for Joe"
)
.run(&parameters!(), &exec) // ...and run it
.await?;
println!("{}", res);
Ok(())
}

Understanding LLM Response

When you run the program, you'll receive an LLM response. The response contains the generated text and other metadata.

Error Handling and Common Issues

One common issue you might encounter is forgetting to set the OpenAI API key. Make sure you have set the API key in your OPENAI_API_KEY environment variable.

export OPENAI_API_KEY="YOUR_OPEN_AI_KEY" # TIP: It starts with sk-

If you don't want to set an environment variable, or you want to use multiple API keys, you can use a different macro like this:

use llm_chain::{executor, options, parameters, prompt};
use tokio;

// Declare an async main function
#[tokio::main(flavor = "current_thread")]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
// Create a new ChatGPT executor
let options = options! {
ApiKey: "sk-proj-..."
};

let exec = executor!(chatgpt, options);
match exec {
Ok(exec) => {

let res = prompt!(
"You are a robot assistant for making personalized greetings",
"Make a personalized greeting for Joe"
)
.run(&parameters!(), &exec) // ...and run it
.await?;
println!("{}", res);
}
Err(err) => panic!("Unable to create executor: {}", err),
}
// Create our step containing our prompt template

Ok(())
}

In the next tutorial, we'll cover adding parameters to customize the LLM prompt to create more complicated interactions.

- +

Generating Your First LLM Output

tip

Having problems? Don't worry, reach out on discord and we will help you out.

First, we need to install tokio in our project. Since this is a tutorial, we will install the full tokio crate; in production, of course, we should be more selective about which features we install.

cargo add tokio --features full

Now, let's write a simple Rust program that generates an LLM output using LLM-Chain and the OpenAI driver:

use llm_chain::{executor, parameters, prompt};

// Declare an async main function
#[tokio::main(flavor = "current_thread")]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
// Create a new ChatGPT executor
let exec = executor!()?;
// Create our prompt...
let res = prompt!(
"You are a robot assistant for making personalized greetings",
"Make a personalized greeting for Joe"
)
.run(&parameters!(), &exec) // ...and run it
.await?;
println!("{}", res);
Ok(())
}

Understanding LLM Response

When you run the program, you'll receive an LLM response. The response contains the generated text and other metadata.

Error Handling and Common Issues

One common issue you might encounter is forgetting to set the OpenAI API key. Make sure you have set the API key in your OPENAI_API_KEY environment variable.

export OPENAI_API_KEY="YOUR_OPEN_AI_KEY" # TIP: It starts with sk-

If you don't want to set an environment variable, or you want to use multiple API keys, you can use a different macro like this:

use llm_chain::{executor, options, parameters, prompt};
use tokio;

// Declare an async main function
#[tokio::main(flavor = "current_thread")]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
// Create a new ChatGPT executor
let options = options! {
ApiKey: "sk-proj-..."
};

let exec = executor!(chatgpt, options);
match exec {
Ok(exec) => {

let res = prompt!(
"You are a robot assistant for making personalized greetings",
"Make a personalized greeting for Joe"
)
.run(&parameters!(), &exec) // ...and run it
.await?;
println!("{}", res);
}
Err(err) => panic!("Unable to create executor: {}", err),
}
// Create our step containing our prompt template

Ok(())
}

In the next tutorial, we'll cover adding parameters to customize the LLM prompt to create more complicated interactions.

+ \ No newline at end of file diff --git a/docs/getting-started-tutorial/index/index.html b/docs/getting-started-tutorial/index/index.html index baa8b941..c2bfc648 100644 --- a/docs/getting-started-tutorial/index/index.html +++ b/docs/getting-started-tutorial/index/index.html @@ -13,13 +13,13 @@ - +

Getting started

Welcome to the Getting Started tutorial for llm-chain! This series of articles will guide you through the process of installing, setting up, and using the llm-chain library to make cool applications for LLMs. As you progress through the tutorials, you'll learn about generating text, using prompt templates, creating sequential chains, and summarizing text with map-reduce. We hope these tutorials provide you with a solid foundation to build upon and inspire you to create unique and innovative solutions using llm-chain. Let's get started!

- + \ No newline at end of file diff --git a/docs/getting-started-tutorial/setting-up-a-project/index.html b/docs/getting-started-tutorial/setting-up-a-project/index.html index 7299fbe9..e2248592 100644 --- a/docs/getting-started-tutorial/setting-up-a-project/index.html +++ b/docs/getting-started-tutorial/setting-up-a-project/index.html @@ -3,7 +3,7 @@ -Setting up a project with llm-chain | llm-chain +Setting up a project with llm-chain | llm-chain @@ -13,13 +13,13 @@ - +
-

Setting up a project with llm-chain

tip

Having problems? Don't worry, reach out on discord and we will help you out.

Welcome to llm-chain, a Rust library designed to simplify working with large language models (LLMs) and help you create powerful applications. In this tutorial, we'll walk you through installing Rust, setting up a new project, and getting started with LLM-Chain.

Installing Rust

To begin, you'll need to install Rust on your machine. We recommend using rustup , the official Rust toolchain manager, to ensure you have the latest version and can manage your installations easily.

You need Rust 1.65.0 or higher. If you see errors about unstable features or about dependencies requiring a newer Rust version, please update your Rust version.

  1. Follow the instructions on the rustup website to install Rust (a typical Unix command is shown below).
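On Linux and macOS the rustup site's installer is a single command (double-check it against the site before running; on Windows, use the rustup-init installer from the same site):

curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh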

Creating a New Rust Project

Now that you have Rust installed, it's time to create a new Rust project. Run the following command to set up a new binary project:


cargo new --bin my-llm-project

This command will create a new directory called my-llm-project with the necessary files and directories for a Rust project.

Installing LLM-Chain

With your Rust project set up, it's time to add LLM-Chain as a dependency. To do this, run the following command:


cd my-llm-project
cargo add llm-chain

This will add LLM-Chain to your project's Cargo.toml file.

Choosing a Driver: LLAMA vs OpenAI

LLM-Chain supports multiple drivers for working with different LLMs. You can choose between the LLAMA driver (which runs a LLaMA LLM on your computer) and the OpenAI driver (which connects to the OpenAI API). For ease of use and getting started quickly, we'll be using the OpenAI driver in this tutorial. To install it, run:

cargo add llm-chain-openai
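If you would rather experiment with the LLAMA driver later, the corresponding crate is added the same way. The crate name below follows the project's naming convention; check the repository for the exact crate and its extra build requirements:

cargo add llm-chain-llama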

In the next tutorial, we'll cover generating your first LLM output using the OpenAI driver.

- +

Setting up a project with llm-chain

tip

Having problems? Don't worry, reach out on discord and we will help you out.

Welcome to llm-chain, a Rust library designed to simplify working with large language models (LLMs) and help you create powerful applications. In this tutorial, we'll walk you through installing Rust, setting up a new project, and getting started with LLM-Chain.

Installing Rust

To begin, you'll need to install Rust on your machine. We recommend using rustup , the official Rust toolchain manager, to ensure you have the latest version and can manage your installations easily.

You need Rust 1.65.0 or higher. If you see errors about unstable features or about dependencies requiring a newer Rust version, please update your Rust version.

  1. Follow the instructions on the rustup website to install Rust.

Creating a New Rust Project

Now that you have Rust installed, it's time to create a new Rust project. Run the following command to set up a new binary project:


cargo new --bin my-llm-project

This command will create a new directory called my-llm-project with the necessary files and directories for a Rust project.

Installing LLM-Chain

With your Rust project set up, it's time to add LLM-Chain as a dependency. To do this, run the following command:


cd my-llm-project
cargo add llm-chain

This will add LLM-Chain to your project's Cargo.toml file.

Choosing a Driver: LLAMA vs OpenAI

LLM-Chain supports multiple drivers for working with different LLMs. You can choose between the LLAMA driver (which runs a LLaMA LLM on your computer) and the OpenAI driver (which connects to the OpenAI API). For ease of use and getting started quickly, we'll be using the OpenAI driver in this tutorial. To install it, run:

cargo add llm-chain-openai

In the next tutorial, we'll cover generating your first LLM output using the OpenAI driver.

+ \ No newline at end of file diff --git a/docs/getting-started-tutorial/summarizing-text-with-map-reduce/index.html b/docs/getting-started-tutorial/summarizing-text-with-map-reduce/index.html index 99773f63..23d09c59 100644 --- a/docs/getting-started-tutorial/summarizing-text-with-map-reduce/index.html +++ b/docs/getting-started-tutorial/summarizing-text-with-map-reduce/index.html @@ -3,7 +3,7 @@ -Summarizing Text with Map-Reduce in LLM-Chain | llm-chain +Summarizing Text with Map-Reduce in LLM-Chain | llm-chain @@ -13,15 +13,15 @@ - +
-

Summarizing Text with Map-Reduce in LLM-Chain

tip

Having problems? Don't worry, reach out on discord and we will help you out.

Map-reduce is a powerful technique for processing and aggregating data in parallel. In this tutorial, we'll explore how to use map-reduce in llm-chain to summarize text effectively. We'll cover implementing a basic map-reduce for text summarization.

To start, create a file named "article_to_summarize.md", take the content of a wikipedia article and paste it in there.

Here's a Rust program that demonstrates how to create a map-reduce chain for summarizing text:

use llm_chain::chains::map_reduce::Chain;
use llm_chain::step::Step;
use llm_chain::{executor, parameters, prompt, Parameters};

#[tokio::main(flavor = "current_thread")]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
// Create a new ChatGPT executor with the default settings
let exec = executor!()?;

// Create the "map" step to summarize an article into bullet points
let map_prompt = Step::for_prompt_template(prompt!(
"You are a bot for summarizing wikipedia articles, you are terse and focus on accuracy",
"Summarize this article into bullet points:\n{{text}}"
));

// Create the "reduce" step to combine multiple summaries into one
let reduce_prompt = Step::for_prompt_template(prompt!(
"You are a diligent bot that summarizes text",
"Please combine the articles below into one summary as bullet points:\n{{text}}"
));

// Create a map-reduce chain with the map and reduce steps
let chain = Chain::new(map_prompt, reduce_prompt);

// Load the content of the article to be summarized
let article = include_str!("article_to_summarize.md");

// Create a vector with the Parameters object containing the text of the article
let docs = vec![parameters!(article)];

// Run the chain with the provided documents and an empty Parameters object for the "reduce" step
let res = chain.run(docs, Parameters::new(), &exec).await.unwrap();

// Print the result to the console
println!("{}", res.to_immediate().await?.as_content());
Ok(())
}

Let's break down the code and understand the different parts:

  1. Define the map and reduce prompts as Step objects:

    Summarizing Text with Map-Reduce in LLM-Chain

    tip

    Having problems? Don't worry, reach out on discord and we will help you out.

    Map-reduce is a powerful technique for processing and aggregating data in parallel. In this tutorial, we'll explore how to use map-reduce in llm-chain to summarize text effectively. We'll cover implementing a basic map-reduce for text summarization.

    To start, create a file named "article_to_summarize.md", take the content of a wikipedia article and paste it in there.

    Here's a Rust program that demonstrates how to create a map-reduce chain for summarizing text:

    use llm_chain::chains::map_reduce::Chain;
    use llm_chain::step::Step;
    use llm_chain::{executor, parameters, prompt, Parameters};

    #[tokio::main(flavor = "current_thread")]
    async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Create a new ChatGPT executor with the default settings
    let exec = executor!()?;

    // Create the "map" step to summarize an article into bullet points
    let map_prompt = Step::for_prompt_template(prompt!(
    "You are a bot for summarizing wikipedia articles, you are terse and focus on accuracy",
    "Summarize this article into bullet points:\n{{text}}"
    ));

    // Create the "reduce" step to combine multiple summaries into one
    let reduce_prompt = Step::for_prompt_template(prompt!(
    "You are a diligent bot that summarizes text",
    "Please combine the articles below into one summary as bullet points:\n{{text}}"
    ));

    // Create a map-reduce chain with the map and reduce steps
    let chain = Chain::new(map_prompt, reduce_prompt);

    // Load the content of the article to be summarized
    let article = include_str!("article_to_summarize.md");

    // Create a vector with the Parameters object containing the text of the article
    let docs = vec![parameters!(article)];

    // Run the chain with the provided documents and an empty Parameters object for the "reduce" step
    let res = chain.run(docs, Parameters::new(), &exec).await.unwrap();

    // Print the result to the console
    println!("{}", res.to_immediate().await?.as_content());
    Ok(())
    }

    Let's break down the code and understand the different parts:

    1. Define the map and reduce prompts as Step objects:
       a. The map_prompt summarizes a given article into bullet points.
       b. The reduce_prompt combines multiple summaries into a single summary as bullet points.
    2. Create a new map-reduce Chain by providing the map_prompt and reduce_prompt.
    3. Load the article to be summarized and create a Parameters object with the text.
    4. Execute the map-reduce Chain with the provided Parameters and store the result in res.
    5. Print the LLM response to the console.

    This should be able to summarize any wikipedia article you might find. Play around with the prompt templates to make them best fit your use case.


    That's it, folks! Thanks for following along with the tutorial. You are now ready to use llm-chain for something useful. Don't forget to stop by discord and share what you are making.

- + \ No newline at end of file diff --git a/docs/getting-started-tutorial/using-prompt-templates-and-parameters/index.html b/docs/getting-started-tutorial/using-prompt-templates-and-parameters/index.html index 37bbdbd9..8a928c94 100644 --- a/docs/getting-started-tutorial/using-prompt-templates-and-parameters/index.html +++ b/docs/getting-started-tutorial/using-prompt-templates-and-parameters/index.html @@ -3,7 +3,7 @@ -Using Prompt Templates and Parameters | llm-chain +Using Prompt Templates and Parameters | llm-chain @@ -13,13 +13,13 @@ - +
-

Using Prompt Templates and Parameters

tip

Having problems? Don't worry, reach out on discord and we will help you out.

In this part of the tutorial series, we'll explore how to use prompt templates and parameters with llm-chain. Prompt templates allow you to create dynamic prompts, and parameters are the text strings you put into your templates.

Here's a simple Rust program demonstrating how to use prompt templates and parameters:

use llm_chain::{executor, parameters, prompt, step::Step};

#[tokio::main(flavor = "current_thread")]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
// Create a new ChatGPT executor
let exec = executor!()?;
// Create our step containing our prompt template
let step = Step::for_prompt_template(prompt!(
"You are a bot for making personalized greetings",
"Make a personalized greeting tweet for {{text}}" // Text is the default parameter name, but you can use whatever you want
));

// A greeting for emil!
let res = step.run(&parameters!("Emil"), &exec).await?;
println!("{}", res);

// A greeting for you
let res = step.run(&parameters!("Your Name Here"), &exec).await?;

println!("{}", res.to_immediate().await?.as_content());

Ok(())
}

Let's break down the different parts of the code:

  1. We start with importing the necessary libraries, including the traits and structs required for our program.
  2. The main async function is defined, using Tokio as the runtime.
  3. We create a new Executor with the default settings.
  4. A Step is created containing our prompt template with a placeholder ({{text}}) that will be replaced with a specific value later.
  5. We create a Parameters object with the value "Emil" to replace the placeholder in the prompt template.
  6. We execute the Step with the provided parameters and store the result in res, then print the response to the console.
  7. We create another Parameters object, this time with the value "Your Name Here" to replace the placeholder.
  8. We execute the Step again with the new parameters, store the result in res, and print the response to the console.

In the next tutorial, we will combine multiple LLM invocations to solve more complicated problems.

- +

Using Prompt Templates and Parameters

tip

Having problems? Don't worry, reach out on discord and we will help you out.

In this part of the tutorial series, we'll explore how to use prompt templates and parameters with llm-chain. Prompt templates allow you to create dynamic prompts, and parameters are the text strings you put into your templates.

Here's a simple Rust program demonstrating how to use prompt templates and parameters:

use llm_chain::{executor, parameters, prompt, step::Step};

#[tokio::main(flavor = "current_thread")]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Create a new ChatGPT executor
    let exec = executor!()?;
    // Create our step containing our prompt template
    let step = Step::for_prompt_template(prompt!(
        "You are a bot for making personalized greetings",
        "Make a personalized greeting tweet for {{text}}" // text is the default parameter name, but you can use whatever you want
    ));

    // A greeting for Emil!
    let res = step.run(&parameters!("Emil"), &exec).await?;
    println!("{}", res.to_immediate().await?.as_content());

    // A greeting for you
    let res = step.run(&parameters!("Your Name Here"), &exec).await?;
    println!("{}", res.to_immediate().await?.as_content());

    Ok(())
}

Let's break down the different parts of the code:

  1. We start with importing the necessary libraries, including the traits and structs required for our program.
  2. The main async function is defined, using Tokio as the runtime.
  3. We create a new Executor with the default settings.
  4. A Step is created containing our prompt template with a placeholder ({{text}}) that will be replaced with a specific value later.
  5. We create a Parameters object with the value "Emil" to replace the placeholder in the prompt template.
  6. We execute the Step with the provided parameters and store the result in res, then print the response to the console.
  7. We create another Parameters object, this time with the value "Your Name Here" to replace the placeholder.
  8. We execute the Step again with the new parameters, store the result in res, and print the response to the console.
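
The {{text}} placeholder above is the default parameter name. If you want several named placeholders, the parameters! macro also accepts key-value pairs. Here is a brief, hypothetical sketch that assumes the same executor setup as above; the name and hobby keys are purely illustrative.

// A prompt template with two named placeholders instead of the default {{text}}
let step = Step::for_prompt_template(prompt!(
    "You are a bot for making personalized greetings",
    "Make a personalized greeting for {{name}}, who loves {{hobby}}"
));
// Fill both placeholders by name when running the step
let res = step
    .run(&parameters!("name" => "Emil", "hobby" => "Rust"), &exec)
    .await?;
println!("{}", res.to_immediate().await?.as_content());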

In the next tutorial, we will combine multiple LLM invocations to solve more complicated problems.


Welcome to LLM-chain

LLM-chain is a collection of Rust crates designed to help you work with Large Language Models (LLMs) more effectively. Our primary focus is on providing robust support for prompt templates and chaining together prompts in multi-step chains, enabling complex tasks that LLMs can't handle in a single step. This includes, but is not limited to, summarizing lengthy texts or performing advanced data processing tasks.

Features

  • Prompt templates: Create reusable and easily customizable prompt templates for consistent and structured interactions with LLMs.
  • Chains: Build powerful chains of prompts that allow you to execute more complex tasks, step by step, leveraging the full potential of LLMs.
  • ChatGPT support: Supports ChatGPT models, with plans to add OpenAI's other models in the future.
  • LLaMa support: Provides seamless integration with LLaMa models, enabling natural language understanding and generation tasks with Facebook's research models.
  • Alpaca support: Incorporates support for Stanford's Alpaca models, expanding the range of available language models for advanced AI applications.
  • Tools: Enhance your AI agents' capabilities by giving them access to various tools, such as running Bash commands, executing Python scripts, or performing web searches, enabling more complex and powerful interactions.
  • Extensibility: Designed with extensibility in mind, making it easy to integrate additional LLMs as the ecosystem grows.
  • Community-driven: We welcome and encourage contributions from the community to help improve and expand the capabilities of LLM-chain.

Getting Started

To start using LLM-chain, add it as a dependency:

cargo add llm-chain llm-chain-openai
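
To check that everything is wired up, here is a minimal sketch of a first program. It assumes you have also added an async runtime with cargo add tokio --features full and exported the OPENAI_API_KEY environment variable; the prompt text is only an illustration.

use llm_chain::{executor, parameters, prompt};

#[tokio::main(flavor = "current_thread")]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Create a ChatGPT executor; it reads OPENAI_API_KEY from the environment
    let exec = executor!()?;
    // Run a single prompt and print the model's reply
    let res = prompt!(
        "You are a helpful assistant",
        "Write a one-sentence welcome message for a new llm-chain user"
    )
    .run(&parameters!(), &exec)
    .await?;
    println!("{}", res.to_immediate().await?.as_content());
    Ok(())
}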

Connect with Us

We're always excited to hear from our users and learn about your experiences with LLM-chain. If you have any questions, suggestions, or feedback, feel free to open an issue or join our community discussions.

We hope you enjoy using LLM-chain to unlock the full potential of Large Language Models in your projects. Happy coding! 🎉


Tutorial: Getting Started using the LLAMA driver

In this tutorial, you will learn how to set up an llm-chain project using the LLAMA driver. If you wish to use one of the other drivers, you can skip this part of the tutorial.

Prerequisites

To follow this tutorial, you will need:

  • Ubuntu Linux 18.04 or higher
  • Rust 1.71.0 or higher
  • Cargo, the Rust package manager
  • GCC/G++ 8 or higher
  • A Hugging Face account
  • Git and Git LFS
  • Pyenv, a Python version manager
  • Python 3.11.3 or higher
  • cmake, libclang-dev

We tested with these exact software versions, but you should be able to get similar results with the latest versions. There are also many alternative ways to install and use these tools; for example, you can install Python via your Linux distribution's package manager. Feel free to adapt this tutorial to your environment.

Step 1: Create a new Rust project

First, you will create a new Rust project using Cargo. To create a new project, open a terminal and run the following command:

cargo new --bin llm-chain-demo
cd llm-chain-demo

This will create a new directory called llm-chain-demo with the following structure:

llm-chain-demo
├── Cargo.toml
└── src
    └── main.rs

The Cargo.toml file contains the metadata and dependencies of your project. The src/main.rs file contains the main source code of your project.
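
For now, src/main.rs contains only the hello-world program that Cargo generates, which you will replace at the end of this tutorial:

fn main() {
    println!("Hello, world!");
}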

Step 2: Add dependencies

Your project needs the llm-chain crate and the Tokio async runtime. To add these dependencies, run the following commands in your terminal:

cargo add llm-chain
cargo add tokio --features full

Step 3: Update Rust

The minimum version required to run this tutorial is Rust 1.65.0. At the time of this writing, the latest stable version is 1.71.0, so we'll use that.

To switch to Rust 1.71.0, you need to use rustup, which is a tool that helps you manage multiple versions of Rust on your system.

To install rustup, follow the instructions at https://rustup.rs/

To install Rust 1.71.0 using rustup, run the following command in your terminal:

rustup install 1.71.0

To switch to Rust 1.71.0 as the default version for your project, run the following command in your terminal:

rustup default 1.71.0

You can verify that you are using Rust 1.71.0 by running the following command in your terminal:

rustc --version

This should output something like this:

rustc 1.71.0 (8ede3aae2 2023-07-12)

Step 4: Install and run llama.cpp

Now that you have set up your Rust project and switched to the correct version of Rust, you need to install and run llama.cpp, which is a C++ implementation of LLaMa models for inference.

llama.cpp requires GCC and G++ 8 or newer. If your distribution uses GCC/G++ 7 by default, use these commands to update:

sudo apt install gcc-8 g++-8
sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-8 10
sudo update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-8 10
sudo update-alternatives --install /usr/bin/cc cc /usr/bin/gcc 30
sudo update-alternatives --set cc /usr/bin/gcc

sudo update-alternatives --install /usr/bin/c++ c++ /usr/bin/g++ 30
sudo update-alternatives --set c++ /usr/bin/g++

To install llama.cpp, you need to clone its repository from GitHub and build it from source. To do that, run the following commands in your terminal:

git clone https://github.com/ggerganov/llama.cpp
cd llama.cpp
make

Notice that we clone the llama.cpp folder inside the llm-chain-demo folder.

To run llama.cpp, you need to download a LLaMa model and convert it to a binary format that llama.cpp can read. In this tutorial, you will use the Alpaca model.

To download the Alpaca model, you need to have a Hugging Face account and install Git LFS. Hugging Face is a platform that hosts and distributes various natural language processing models, including LLaMa models. Git LFS is an extension for Git that lets repositories store large files. Because these LLMs are usually quite big, Hugging Face uses Git LFS so that you can download them using git.

To create a Hugging Face account, go to Hugging Face and sign up.

To install Git LFS, run the following commands in your terminal:

curl -s https://packagecloud.io/install/repositories/github/git-lfs/script.deb.sh | sudo bash
sudo apt install git-lfs
git lfs install

This will download and install Git LFS on your system.

To download the Alpaca model, run the following commands in your terminal:

cd ./models
git clone https://huggingface.co/chavinlo/alpaca-native

This will clone the Alpaca model repository to your models directory.

To convert the Alpaca model to the format llama.cpp accepts, you need to install Python and run a conversion script. In this tutorial, you will use Python 3.11.3, which is the latest stable version of Python at the time of writing this tutorial.

To install Python 3.11.3, you need to use pyenv, which is a tool that helps you manage multiple versions of Python on your system.

To install pyenv, run the following command in your terminal:

curl https://pyenv.run | bash

Then, you need to install the pyenv-virtualenv plugin to let pyenv manage virtualenv for you. Run this command to install pyenv-virtualenv:

git clone https://github.com/pyenv/pyenv-virtualenv.git $(pyenv root)/plugins/pyenv-virtualenv

If you use zsh, add the following lines to your ~/.zshrc and reload it by running the following commands in your terminal:

echo 'eval "$(pyenv init -)"' >> ~/.zshrc
echo 'eval "$(pyenv virtualenv-init -)"' >> ~/.zshrc
source ~/.zshrc

If you prefer bash, replace ~/.zshrc with ~/.bashrc in the commands above.

This will enable pyenv to manage your Python versions and virtual environments.

To install Python 3.11.3 using pyenv, run the following command in your terminal:

pyenv install 3.11.3

To create a virtual environment for Python 3.11.3 using pyenv, run the following command in your terminal:

pyenv virtualenv 3.11.3 llama

To activate the virtual environment, run the following command in your terminal:

pyenv activate llama

This will activate the virtual environment and change your prompt to indicate that you are using it.

To install the required Python packages for the conversion script, run the following command in your terminal:

# in the llama.cpp root directory
pip install -r requirements.txt

With the Python dependencies installed, you need to run the conversion script that will convert the Alpaca model to a binary format that llama.cpp can read. To do that, run the following command in your terminal:

python convert.py ./models/alpaca-native

This will run the convert.py script that is located in the llama.cpp directory. The script will take the Alpaca model directory as an argument and output a binary file called ggml-model-f32.bin in the same directory.

To test llama.cpp with the Alpaca model, run the following command in your terminal:

./main -m models/alpaca-native/ggml-model-f32.bin -n 128 -p "I love Rust because"

This will run the main executable that is located in the llama.cpp directory. The executable takes three arguments: -m, -n, and -p. The -m argument specifies the path to the model binary file. The -n argument specifies the number of tokens to generate for each input. The -p argument specifies the prompt you send to the LLM.

The model may take some time to load. You should see something like this in your terminal:

main: build = 849 (d01bccd)
main: seed = 1690205678
llama.cpp: loading model from models/alpaca-native/ggml-model-f32.bin
llama_model_load_internal: format = ggjt v1 (pre #1405)
...
llama_new_context_with_model: kv self size = 256.00 MB

system_info: n_threads = 4 / 8 | AVX = 1 | AVX2 = 1 | AVX512 = 1 | AVX512_VBMI = 0 | AVX512_VNNI = 0 | FMA = 1 | NEON = 0 | ARM_FMA = 0 | F16C = 1 | FP16_VA = 0 | WASM_SIMD = 0 | BLAS = 0 | SSE3 = 1 | VSX = 0 |
sampling: repeat_last_n = 64, repeat_penalty = 1.100000, presence_penalty = 0.000000, frequency_penalty = 0.000000, top_k = 40, tfs_z = 1.000000, top_p = 0.950000, typical_p = 1.000000, temp = 0.800000, mirostat = 0, mirostat_lr = 0.100000, mirostat_ent = 5.000000
generate: n_ctx = 512, n_batch = 512, n_predict = 128, n_keep = 0

I love Rust because it allows me to create things that are both practical and beautiful. I can design objects that are functional, reliable, and secure - all while still looking great. It’s also a fun language to work with, as it encourages creativity through its focus on code spelunking and efficient algorithmic thinking. [end of text]

llama_print_timings: load time = 113290.18 ms
...
llama_print_timings: total time = 44284.74 ms

This means that llama.cpp has successfully loaded the Alpaca model and generated text based on your prompt.

Step 5: Add the llama.cpp driver

Now we can get back to the llm-chain-demo Rust project. To use the LLAMA driver, you need to add it as a dependency to your Rust project. You can run the following command in the terminal:

cargo add llm-chain-llama

Step 6: Run the example code

To run the example code, you need to copy and paste it into your src/main.rs file. The example code creates a LLAMA executor with the Alpaca model and generates text for a given prompt.

use llm_chain::executor;
use llm_chain::{parameters, prompt};
use llm_chain::options::*;
use llm_chain::options;

#[tokio::main(flavor = "current_thread")]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
let opts = options!(
Model: ModelRef::from_path("./llama.cpp/models/alpaca-native/ggml-model-f32.bin"), // Notice that we reference the model binary path
ModelType: "llama",
MaxContextSize: 512_usize,
NThreads: 4_usize,
MaxTokens: 0_usize,
TopK: 40_i32,
TopP: 0.95,
TfsZ: 1.0,
TypicalP: 1.0,
Temperature: 0.8,
RepeatPenalty: 1.1,
RepeatPenaltyLastN: 64_usize,
FrequencyPenalty: 0.0,
PresencePenalty: 0.0,
Mirostat: 0_i32,
MirostatTau: 5.0,
MirostatEta: 0.1,
PenalizeNl: true,
StopSequence: vec!["\n".to_string()]
);
let exec = executor!(llama, opts)?;
let res = prompt!("I love Rust because")
.run(
&parameters!(),
&exec,
)
.await?;
println!("{}", res.to_immediate().await?);
Ok(())
}

When we set up llama.cpp, we used make to compile it, but llama.cpp can also be compiled with cmake. The llm-chain-llama crate uses llm-chain-llama-sys internally, and llm-chain-llama-sys uses cmake to compile the bindings for llama.cpp. Before you run the example code, you may therefore need to install some additional packages, such as libclang-dev and cmake. To install libclang-dev, run the following command in your terminal:

sudo apt install libclang-dev

This will install the Clang library development files on your system.

To install cmake, you can use the PPA from Kitware, which provides the latest version of cmake. You can also compile cmake from source if you have concerns about using a third-party PPA. To add the Kitware PPA and install cmake, run the following commands in your terminal:

wget -O - https://apt.kitware.com/keys/kitware-archive-latest.asc 2>/dev/null | gpg --dearmor - | sudo tee /etc/apt/trusted.gpg.d/kitware.gpg >/dev/null
sudo apt-add-repository "deb https://apt.kitware.com/ubuntu/ $(lsb_release -cs) main"
sudo apt update
sudo apt install kitware-archive-keyring
sudo apt update
sudo apt install cmake

To run your program, run the following command in your terminal:

cargo run

You should see something like this in your terminal:

   Compiling llm-chain-demo v0.1.0 (/home/ubuntu/environment/llm-chain-demo)
Finished dev [unoptimized + debuginfo] target(s) in 9.05s
Running `target/debug/llm-chain-demo`
...
I love Rust because it allows me to create things that are both practical and beautiful. I can design objects that are functional, reliable, and secure - all while still looking great. It’s also a fun language to work with, as it encourages creativity through its focus on code spelunking and efficient algorithmic thinking. [end of text]
...

This is the text generated through the llm-chain and the LLAMA driver based on your prompt.

Congratulations! You have successfully run the example code using the llama.cpp driver. You can experiment with different models, model parameters and prompts.


llm-chain

Unlock the full potential of Large Language Models

Tools

Unleash LLMs in the real world with a set of tools that allow your LLMs to perform actions like running Python code.

Chains

Build powerful chains of prompts that allow you to execute more complex tasks, step by step, leveraging the full potential of LLMs.

Extensibility

Designed with extensibility in mind, making it easy to integrate additional LLMs as the ecosystem grows.


llm::casual

Join us for the LLM-Casual event, an unconference style session focused on building stuff with LLMs. Don't miss this opportunity to connect with like-minded people, learn new things, and have fun!

If you are interested in presenting a topic, reach out on discord and make it happen.

Event Details

  • Date: May 3rd
  • Time: 17:00 CEST - 19:00 CEST
  • Duration: 2 hours
  • Location: Discord

How to Register

To secure your spot at the llm::casual event, simply click the button below:

Register for the event

We look forward to seeing you there!
