From 0d9e16dfc6ac1d91c93119520a7826b33f89cbd5 Mon Sep 17 00:00:00 2001 From: Jeongho Nam Date: Tue, 31 Dec 2024 02:22:32 +0900 Subject: [PATCH] Detailed description of `typia.llm.parameters()` function. --- website/pages/docs/llm/application.mdx | 6 +- website/pages/docs/llm/parameters.mdx | 153 ++++++++++++++++++++++++- 2 files changed, 155 insertions(+), 4 deletions(-) diff --git a/website/pages/docs/llm/application.mdx b/website/pages/docs/llm/application.mdx index da2cb4be71..1d1fdb01a7 100644 --- a/website/pages/docs/llm/application.mdx +++ b/website/pages/docs/llm/application.mdx @@ -771,7 +771,9 @@ LLM function calling application schema with validators. This validator function is useful when implementing the actual LLM function calling feature. It's because LLM function calling sometimes takes a mistake that composing wrong typed arguments. In that case, you can correct the arguments by delivering the return value of the `ILlmFunctionOfValidate.validate()` function to the LLM provider. Then, the LLM provider will correct the arguments at the next function calling. -Here is an actual program code correcting the OpenAI (ChatGPT) function calling. +Here is an actual program code correcting the OpenAI (ChatGPT) function calling. + +Note that, if you are developing an A.I. chatbot project, such validation feedback strategy is essential for both LLM function calling and structured output features. Tends to my experiments, even though the LLM makes a wrong typed structured data, it always be corrected just by only one validation feedback step. 
```typescript filename="ChatGptFunctionCaller.ts" showLineNumbers {22, 33-43, 72} copy import OpenAI from "openai"; @@ -795,7 +797,7 @@ export namespace ChatGptFunctionCaller { const step = async ( props: IProps, - previous?: IValidation.IFailure, + previous?: IValidation.IFailure | undefined, ): Promise> => { const client: OpenAI = new OpenAI({ apiKey: "YOUR-SECRET-KEY", diff --git a/website/pages/docs/llm/parameters.mdx b/website/pages/docs/llm/parameters.mdx index 8ce48051b7..b0bd9a2a4f 100644 --- a/website/pages/docs/llm/parameters.mdx +++ b/website/pages/docs/llm/parameters.mdx @@ -443,12 +443,161 @@ Structured output is another feature of LLM. The "structured output" means that -## Specialization +## Structured Output +```typescript filename="src/examples/llm.parameters.ts" copy showLineNumbers {4-10, 36} +import OpenAI from "openai"; +import typia, { tags } from "typia"; +interface IMember { + email: string & tags.Format<"email">; + name: string; + age: number; + hobbies: string[]; + joined_at: string & tags.Format<"date">; +} + +const main = async (): Promise => { + const client: OpenAI = new OpenAI({ + apiKey: TestGlobal.env.CHATGPT_API_KEY, + // apiKey: "", + }); + const completion: OpenAI.ChatCompletion = + await client.chat.completions.create({ + model: "gpt-4o", + messages: [ + { + role: "user", + content: [ + "I am a new member of the community.", + "", + "My name is John Doe, and I am 25 years old.", + "I like playing basketball and reading books,", + "and joined to this community at 2022-01-01.", + ].join("\n"), + }, + ], + response_format: { + type: "json_schema", + json_schema: { + name: "member", + schema: typia.llm.parameters() as any, + }, + }, + }); + console.log(JSON.parse(completion.choices[0].message.content!)); +}; +main().catch(console.error); +``` + +> ```bash filename="Terminal" +> { +> email: 'john.doe@example.com', +> name: 'John Doe', +> age: 25, +> hobbies: [ 'playing basketball', 'reading books' ], +> joined_at: '2022-01-01' +> } 
+> ``` + +You can utilize the `typia.llm.parameters()` function to generate structured output like above. + +Just configure output mode as JSON schema, and deliver the `typia.llm.parameters()` function returned value to the LLM provider like OpenAI (ChatGPT). Then, the LLM provider will automatically transform the output conversation into a structured data format of the `Parameters` type. + + + + +## Validation Feedback +```typescript filename="src/examples/llm.parameters.ts" showLineNumbers copy +import OpenAI from "openai"; +import typia, { IValidation, tags } from "typia"; + +interface IMember { + email: string & tags.Format<"email">; + name: string; + age: number; + hobbies: string[]; + joined_at: string & tags.Format<"date">; +} + +const step = async ( + failure?: IValidation.IFailure | undefined, +): Promise> => { + const client: OpenAI = new OpenAI({ + apiKey: "", + }); + const completion: OpenAI.ChatCompletion = + await client.chat.completions.create({ + model: "gpt-4o", + messages: [ + { + role: "user", + content: [ + "I am a new member of the community.", + "", + "My name is John Doe, and I am 25 years old.", + "I like playing basketball and reading books,", + "and joined to this community at 2022-01-01.", + ].join("\n"), + }, + ...(failure + ? [ + { + role: "system", + content: [ + "You A.I. 
agent had taken a mistake that",
                  "returning wrong typed structured data.",
                  "",
                  "Here is the detailed list of type errors.",
                  "Review and correct them at the next step.",
                  "",
                  "```json",
                  JSON.stringify(failure.errors, null, 2),
                  "```",
                ].join("\n"),
              } satisfies OpenAI.ChatCompletionSystemMessageParam,
            ]
          : []),
      ],
      response_format: {
        type: "json_schema",
        json_schema: {
          name: "member",
          schema: typia.llm.parameters<IMember, "chatgpt">() as any,
        },
      },
    });
  const member: IMember = JSON.parse(completion.choices[0].message.content!);
  return typia.validate<IMember>(member);
};

const main = async (): Promise<void> => {
  let result: IValidation<IMember> | undefined = undefined;
  for (let i: number = 0; i < 2; ++i) {
    if (result && result.success === true) break;
    result = await step(result);
  }
  console.log(result);
};

main().catch(console.error);
```

> ```bash filename="Terminal"
> {
>   email: 'john.doe@example.com',
>   name: 'John Doe',
>   age: 25,
>   hobbies: [ 'playing basketball', 'reading books' ],
>   joined_at: '2022-01-01'
> }
> ```

Sometimes, the LLM makes a mistake and composes wrongly typed structured data.
In that case, you can guide the LLM (Large Language Model) to generate the correctly typed structured data at the next step just by delivering the validation error message of the [`typia.validate()`](../validators/validate) function as a system prompt like above.

Note that, if you are developing an A.I. chatbot project, such a validation feedback strategy is essential for both LLM function calling and structured output features. According to my experiments, even when the LLM composes wrongly typed structured data, it is always corrected with just one validation feedback step.



-## Customziation