From 82a4c8c80293fa95bd334cd7e9da467d587de959 Mon Sep 17 00:00:00 2001 From: dominhnhut01 Date: Fri, 1 Mar 2024 14:13:39 -0500 Subject: [PATCH 01/14] add custom queue for conversation memory --- .../custome-queue/custom-queue.spec.ts | 76 +++++++++++++++++++ .../custome-queue/custom-queue.ts | 64 ++++++++++++++++ 2 files changed, 140 insertions(+) create mode 100644 new-chatbot-app/src/shared/custom-data-structures/custome-queue/custom-queue.spec.ts create mode 100644 new-chatbot-app/src/shared/custom-data-structures/custome-queue/custom-queue.ts diff --git a/new-chatbot-app/src/shared/custom-data-structures/custome-queue/custom-queue.spec.ts b/new-chatbot-app/src/shared/custom-data-structures/custome-queue/custom-queue.spec.ts new file mode 100644 index 0000000..be800f5 --- /dev/null +++ b/new-chatbot-app/src/shared/custom-data-structures/custome-queue/custom-queue.spec.ts @@ -0,0 +1,76 @@ +import { Test, TestingModule } from '@nestjs/testing'; +import { CustomQueue } from './custom-queue'; + +describe('CustomQueue', () => { + let queue: CustomQueue<number>; + + beforeEach(async () => { + const module: TestingModule = await Test.createTestingModule({ + providers: [CustomQueue], + }).compile(); + + queue = module.get<CustomQueue<number>>(CustomQueue); + }); + + it('should create a queue with a maximum size of 5 and enqueue items', () => { + queue.setMaxSize(5); + expect(queue.size()).toEqual(0); + expect(queue.isEmpty()).toEqual(true); + expect(queue.isFull()).toEqual(false); + + queue.enqueue(1); + queue.enqueue(2); + queue.enqueue(3); + queue.enqueue(4); + queue.enqueue(5); + + expect(queue.size()).toEqual(5); + expect(queue.isFull()).toEqual(true); + }); + + it('should dequeue items', () => { + queue.enqueue(1); + queue.enqueue(2); + queue.enqueue(3); + + expect(queue.dequeue()).toEqual(1); + expect(queue.size()).toEqual(2); + expect(queue.peek()).toEqual(2); + }); + + it('should update maximum size and dequeue excess items', () => { + queue.setMaxSize(5); + + queue.enqueue(1); + queue.enqueue(2); + queue.enqueue(3); + queue.enqueue(4); + queue.enqueue(5); + + expect(queue.size()).toEqual(5); + expect(queue.isFull()).toEqual(true); + + queue.setMaxSize(3); + + expect(queue.size()).toEqual(3); + expect(queue.peek()).toEqual(3); + }); + + it('should slice the queue correctly', () => { + queue.enqueue(1); + queue.enqueue(2); + queue.enqueue(3); + queue.enqueue(4); + queue.enqueue(5); + + // Test slicing the whole queue + expect(queue.slice()).toEqual([1, 2, 3, 4, 5]); + + expect(queue.slice(1, 4)).toEqual([2, 3, 4]); + + expect(queue.slice(0, 3)).toEqual([1, 2, 3]); + + // Test slicing from index 3 to end of queue + expect(queue.slice(3)).toEqual([4, 5]); + }); +}); diff --git a/new-chatbot-app/src/shared/custom-data-structures/custome-queue/custom-queue.ts b/new-chatbot-app/src/shared/custom-data-structures/custome-queue/custom-queue.ts new file mode 100644 index 0000000..ca2ee18 --- /dev/null +++ b/new-chatbot-app/src/shared/custom-data-structures/custome-queue/custom-queue.ts @@ -0,0 +1,64 @@ +export class CustomQueue<T> { + private queue: T[]; + private maxSize: number | undefined; + + constructor(maxSize?: number) { + if (maxSize !== undefined && maxSize <= 0) { + throw new Error('Maximum size must be greater than zero.'); + } + this.queue = []; + this.maxSize = maxSize; + } + + enqueue(item: T): void { + if (this.maxSize !== undefined && this.queue.length === this.maxSize) { + this.dequeue(); // Remove oldest item if queue is full + } + this.queue.push(item); + } + + dequeue(): T | undefined { + return
this.queue.shift(); + } + + peek(): T | undefined { + return this.queue.length > 0 ? this.queue[0] : undefined; + } + + size(): number { + return this.queue.length; + } + + isEmpty(): boolean { + return this.size() === 0; + } + + isFull(): boolean { + return this.size() === this.maxSize; + } + + getMaxSize(): number | undefined { + return this.maxSize; + } + + setMaxSize(newMaxSize: number | undefined): void { + if (newMaxSize !== undefined && newMaxSize <= 0) { + throw new Error('Maximum size must be greater than zero.'); + } + this.maxSize = newMaxSize; + if (newMaxSize === undefined) return; + while (this.queue.length > newMaxSize) { + this.dequeue(); + } + } + + slice(start?: number, end?: number): T[] { + try { + const slicedQueue = this.queue.slice(start, end); + return slicedQueue; + } catch (error: any) { + const errorMsg: string = `${error.message}. Keep in mind queue max size is ${this.maxSize}`; + throw new Error(errorMsg); + } + } +} From 7a5f1b7356459b461b64b5a9b49540542ca553a7 Mon Sep 17 00:00:00 2001 From: dominhnhut01 Date: Sun, 3 Mar 2024 22:50:06 -0500 Subject: [PATCH 02/14] update .gitignore --- new-chatbot-app/.gitignore | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/new-chatbot-app/.gitignore b/new-chatbot-app/.gitignore index 4b56acf..b5faa26 100644 --- a/new-chatbot-app/.gitignore +++ b/new-chatbot-app/.gitignore @@ -1,3 +1,8 @@ +#visualization +graph.png +graph.jpg +graph.jpeg + # compiled output /dist /node_modules From f9ce541a92380a06035ed20e2e9fc0d443077817 Mon Sep 17 00:00:00 2001 From: dominhnhut01 Date: Mon, 4 Mar 2024 01:26:43 -0500 Subject: [PATCH 03/14] added memory with conversation summarization and resolved circular dependency --- new-chatbot-app/src/app.module.ts | 12 +- .../src/llm-chain/agent/agent.module.ts | 4 - .../src/llm-chain/llm-chain.module.ts | 2 - .../src/llm-chain/llm/llm.interface.ts | 2 +- .../src/llm-chain/llm/llm.module.ts | 11 +- .../src/llm-chain/llm/llm.service.ts | 7 +- .../llm/openai-api/openai-api.service.ts | 2 +- .../conversation-memory.service.spec.ts | 23 +++ .../conversation-memory.service.ts | 147 ++++++++++++++++++ .../src/llm-chain/memory/memory.interface.ts | 2 +- .../src/llm-chain/memory/memory.module.ts | 12 +- ...ersation-prompt-with-tools.service.spec.ts | 2 + ...-conversation-prompt-with-tools.service.ts | 14 +- ...ation-summarization-prompt.service.spec.ts | 20 +++ ...nversation-summarization-prompt.service.ts | 27 ++++ .../src/llm-chain/prompt/prompt.interface.ts | 2 +- .../src/llm-chain/prompt/prompt.module.ts | 13 +- .../token-usage/token-usage.service.spec.ts | 21 +++ .../token-usage/token-usage.service.ts | 1 + new-chatbot-app/src/shared/shared.module.ts | 14 +- new-chatbot-app/src/shared/shared.types.ts | 16 -- 21 files changed, 297 insertions(+), 57 deletions(-) delete mode 100644 new-chatbot-app/src/llm-chain/agent/agent.module.ts create mode 100644 new-chatbot-app/src/llm-chain/memory/conversation-memory/conversation-memory.service.spec.ts create mode 100644 new-chatbot-app/src/llm-chain/memory/conversation-memory/conversation-memory.service.ts create mode 100644 new-chatbot-app/src/llm-chain/prompt/conversation-summarization-prompt/conversation-summarization-prompt.service.spec.ts create mode 100644 new-chatbot-app/src/llm-chain/prompt/conversation-summarization-prompt/conversation-summarization-prompt.service.ts delete mode 100644 new-chatbot-app/src/shared/shared.types.ts diff --git a/new-chatbot-app/src/app.module.ts b/new-chatbot-app/src/app.module.ts index 306c7cf..b8a6898 100644 --- 
a/new-chatbot-app/src/app.module.ts +++ b/new-chatbot-app/src/app.module.ts @@ -1,20 +1,14 @@ import { Module } from '@nestjs/common'; import { LlmChainModule } from './llm-chain/llm-chain.module'; import { LibraryApiModule } from './library-api/library-api.module'; -import { ConfigModule } from '@nestjs/config'; -import { RetrieveEnvironmentVariablesService } from './shared/services/retrieve-environment-variables/retrieve-environment-variables.service'; import { ChatGateway } from './chat/chat.gateway'; -import { NetworkService } from './shared/services/network/network.service'; - -import { TokenUsageService } from './shared/services/token-usage/token-usage.service'; +import { SharedModule } from './shared/shared.module'; @Module({ - imports: [ConfigModule.forRoot(), LlmChainModule, LibraryApiModule], + imports: [LlmChainModule, LibraryApiModule, SharedModule], providers: [ - RetrieveEnvironmentVariablesService, ChatGateway, - NetworkService, - TokenUsageService, ], }) + export class AppModule {} diff --git a/new-chatbot-app/src/llm-chain/agent/agent.module.ts b/new-chatbot-app/src/llm-chain/agent/agent.module.ts deleted file mode 100644 index 9ae43f7..0000000 --- a/new-chatbot-app/src/llm-chain/agent/agent.module.ts +++ /dev/null @@ -1,4 +0,0 @@ -import { Module } from '@nestjs/common'; - -@Module({}) -export class AgentModule {} diff --git a/new-chatbot-app/src/llm-chain/llm-chain.module.ts b/new-chatbot-app/src/llm-chain/llm-chain.module.ts index 84d0f6f..3490425 100644 --- a/new-chatbot-app/src/llm-chain/llm-chain.module.ts +++ b/new-chatbot-app/src/llm-chain/llm-chain.module.ts @@ -1,6 +1,5 @@ import { Module } from '@nestjs/common'; import { LlmToolboxModule } from './llm-toolbox/llm-toolbox.module'; -import { AgentModule } from './agent/agent.module'; import { MemoryModule } from './memory/memory.module'; import { PromptModule } from './prompt/prompt.module'; import { LlmChainService } from './llm-chain.service'; @@ -10,7 +9,6 @@ import { LlmModule } from './llm/llm.module'; imports: [ LlmModule, LlmToolboxModule, - AgentModule, MemoryModule, PromptModule, ], diff --git a/new-chatbot-app/src/llm-chain/llm/llm.interface.ts b/new-chatbot-app/src/llm-chain/llm/llm.interface.ts index a15ee5b..4494e50 100644 --- a/new-chatbot-app/src/llm-chain/llm/llm.interface.ts +++ b/new-chatbot-app/src/llm-chain/llm/llm.interface.ts @@ -1,4 +1,4 @@ -import { TokenUsage } from 'src/shared/services/token-usage/token-usage.service'; +import { TokenUsage } from '../../shared/services/token-usage/token-usage.service'; export interface LlmInterface { getModelResponse( diff --git a/new-chatbot-app/src/llm-chain/llm/llm.module.ts b/new-chatbot-app/src/llm-chain/llm/llm.module.ts index bb300a8..05182da 100644 --- a/new-chatbot-app/src/llm-chain/llm/llm.module.ts +++ b/new-chatbot-app/src/llm-chain/llm/llm.module.ts @@ -1,7 +1,10 @@ import { Module } from '@nestjs/common'; import { LlmService } from './llm.service'; -import { OpenaiApiService } from './openai-api/openai-api.service'; -import { TokenUsageService } from 'src/shared/services/token-usage/token-usage.service'; +import { + OpenaiApiService, + OpenAiModelType, +} from './openai-api/openai-api.service'; +import { SharedModule } from '../../shared/shared.module'; export type LlmModelType = OpenAiModelType | CohereModelType; @@ -18,6 +21,8 @@ export type LlmModelSetting = { }; @Module({ - providers: [LlmService, OpenaiApiService, TokenUsageService], + imports: [SharedModule], + providers: [LlmService, OpenaiApiService], + exports: [LlmService], }) 
export class LlmModule {} diff --git a/new-chatbot-app/src/llm-chain/llm/llm.service.ts b/new-chatbot-app/src/llm-chain/llm/llm.service.ts index 7a5dba1..fd83c32 100644 --- a/new-chatbot-app/src/llm-chain/llm/llm.service.ts +++ b/new-chatbot-app/src/llm-chain/llm/llm.service.ts @@ -3,13 +3,13 @@ import { Injectable, Logger } from '@nestjs/common'; import { Prompt } from '../prompt/prompt.interface'; import { TokenUsage, - TokenUsageService, } from '../../shared/services/token-usage/token-usage.service'; import { LlmInterface } from './llm.interface'; import { OpenaiApiService, OpenAiModelType, } from './openai-api/openai-api.service'; +import { LlmModelType } from './llm.module'; /** * Service for getting response from different LLM model and number of token used @@ -19,7 +19,6 @@ export class LlmService { private readonly logger = new Logger(LlmService.name); constructor( - private tokenUsageService: TokenUsageService, private openaiApiService: OpenaiApiService, ) {} @@ -59,8 +58,8 @@ export class LlmService { let model: LlmInterface; try { model = this.getModel(modelType); - } catch (e: any) { - throw new Error(e); + } catch (error: any) { + throw error; } // Get the prompt from the prompt object diff --git a/new-chatbot-app/src/llm-chain/llm/openai-api/openai-api.service.ts b/new-chatbot-app/src/llm-chain/llm/openai-api/openai-api.service.ts index 187fdc9..657721f 100644 --- a/new-chatbot-app/src/llm-chain/llm/openai-api/openai-api.service.ts +++ b/new-chatbot-app/src/llm-chain/llm/openai-api/openai-api.service.ts @@ -34,7 +34,7 @@ export class OpenaiApiService implements LlmInterface { private tokenUsageService: TokenUsageService, ) { const modelConfiguration = new Configuration({ - organization: this.retrieveEnvironmentVariablesService.retrieve( + organization: this.retrieveEnvironmentVariablesService.retrieve( 'OPENAI_ORGANIZATION_ID', ), apiKey: diff --git a/new-chatbot-app/src/llm-chain/memory/conversation-memory/conversation-memory.service.spec.ts b/new-chatbot-app/src/llm-chain/memory/conversation-memory/conversation-memory.service.spec.ts new file mode 100644 index 0000000..14b9337 --- /dev/null +++ b/new-chatbot-app/src/llm-chain/memory/conversation-memory/conversation-memory.service.spec.ts @@ -0,0 +1,23 @@ +import { Test, TestingModule } from '@nestjs/testing'; +import { ConversationMemoryService } from './conversation-memory.service'; +import { PromptModule } from '../../../llm-chain/prompt/prompt.module'; +import { MemoryModule } from '../memory.module'; +import { LlmModule } from '../../../llm-chain/llm/llm.module'; +import { SharedModule } from '../../../shared/shared.module'; + +describe('ConversationMemoryService', () => { + let service: ConversationMemoryService; + + beforeEach(async () => { + const module: TestingModule = await Test.createTestingModule({ + imports: [PromptModule, MemoryModule, LlmModule, SharedModule], + providers: [ConversationMemoryService], + }).compile(); + + service = module.get(ConversationMemoryService); + }); + + it('should be defined', () => { + expect(service).toBeDefined(); + }); +}); diff --git a/new-chatbot-app/src/llm-chain/memory/conversation-memory/conversation-memory.service.ts b/new-chatbot-app/src/llm-chain/memory/conversation-memory/conversation-memory.service.ts new file mode 100644 index 0000000..9038818 --- /dev/null +++ b/new-chatbot-app/src/llm-chain/memory/conversation-memory/conversation-memory.service.ts @@ -0,0 +1,147 @@ +import { Injectable } from '@nestjs/common'; +import { ConversationMemory, Role } from 
'../memory.interface'; +import { CustomQueue } from '../../../shared/custom-data-structures/custome-queue/custom-queue'; +import { + TokenUsage, + TokenUsageService, +} from '../../../shared/services/token-usage/token-usage.service'; +import { ConversationSummarizationPromptService } from '../../prompt/conversation-summarization-prompt/conversation-summarization-prompt.service'; +import { LlmService } from '../../../llm-chain/llm/llm.service'; +import { OpenAiModelType } from '../../../llm-chain/llm/openai-api/openai-api.service'; + +@Injectable() +export class ConversationMemoryService implements ConversationMemory { + private conversationQueue: CustomQueue<[Role | null, string]> = + new CustomQueue<[Role | null, string]>(); + + /** + * Maximum number of conversation lines kept in the memory. The oldest conversation lines are tossed once the queue exceeds maxContextWindow. In other words, this is the size of the fixed-size queue for the conversation. If null, the queue can grow to any size + */ + private maxContextWindow: number | undefined; + /** + * Number of conversation line (most recent) that we would not summarize, allowing model to have the full context of most recent conversation + */ + private conversationBufferSize: number | undefined; + private conversationSummarizationMode: boolean = false; + private tokenUsage: TokenUsage = {}; + + constructor( + private conversationSummarizationPromptService: ConversationSummarizationPromptService, + private llmService: LlmService, + private tokenUsageService: TokenUsageService, + ) {} + + public setConversationSummarizationMode(shouldSummarize: boolean): void { + this.conversationSummarizationMode = shouldSummarize; + } + + public getConversationSummarizationMode(): boolean { + return this.conversationSummarizationMode; + } + + public setMaxContextWindow(contextWindowSize: number | undefined) { + this.maxContextWindow = contextWindowSize; + this.conversationQueue.setMaxSize(this.maxContextWindow); + } + + public addToConversation(role: Role, message: string): void { + this.conversationQueue.enqueue([role, message]); + } + + public setConversationBufferSize(bufferSize: number | undefined) { + this.conversationBufferSize = bufferSize; + } + + public getConversationBufferSize(): number | undefined { + return this.conversationBufferSize; + } + + /** + * Turn conversation into a string + * @param conversation + * @returns conversation as string + */ + private stringifyConversation(conversation: [Role | null, string][]): string { + const conversationString: string = conversation.reduce( + (prevString: string, curValue: [Role | null, string]) => { + const role: string = curValue[0] as string; + const response: string = curValue[1]; + return prevString + `${role}: ${response}\n`; + }, + '', + ); + return conversationString; + } + + /** + * Summarize the input conversation + * @param conversation + * @returns + */ + private async summarizeConversation( + conversation: [Role | null, string][], + ): Promise<string> { + return new Promise<string>(async (resolve, rejects) => { + if (conversation.length === 0) { + resolve(''); + return; + } + + const conversationString = this.stringifyConversation(conversation); + this.conversationSummarizationPromptService.setConversation( + conversationString, + ); + const { + response: conversationSummary, + tokenUsage: tokenUsageFromSummarization, + } = await this.llmService.getModelResponse( + this.conversationSummarizationPromptService, + OpenAiModelType.GPT_3_5_TURBO, + ); + + //Update TokenUsage information + this.tokenUsage =
this.tokenUsageService.combineTokenUsage( + tokenUsageFromSummarization, + this.tokenUsage, + ); + resolve(this.stringifyConversation(conversation)); + }); + } + + public getConversationAsString(start: number, end: number): Promise<string> { + return new Promise<string>(async (resolve, rejects) => { + const slicedConversation = this.conversationQueue.slice(start, end); + + //If bufferSize is undefined, we don't summarize anything + const conversationToUnchange = + this.conversationBufferSize !== undefined + ? slicedConversation.slice((start = -this.conversationBufferSize)) + : slicedConversation; + const conversationToSummarize = + this.conversationBufferSize !== undefined + ? slicedConversation.slice( + (start = 0), + (end = -this.conversationBufferSize), + ) + : []; + + const conversationSummary = this.summarizeConversation( + conversationToSummarize, + ); + + resolve( + `${conversationSummary}\n${this.stringifyConversation(conversationToUnchange)}`, + ); + }); + } + + public getTokenUsage(): TokenUsage { + return { + 'gpt-4-0314': { + totalTokens: 0, + completionTokens: 0, + promptTokens: 0, + }, + }; + } +} diff --git a/new-chatbot-app/src/llm-chain/memory/memory.interface.ts b/new-chatbot-app/src/llm-chain/memory/memory.interface.ts index 0081d57..b3ea74c 100644 --- a/new-chatbot-app/src/llm-chain/memory/memory.interface.ts +++ b/new-chatbot-app/src/llm-chain/memory/memory.interface.ts @@ -1,4 +1,4 @@ -import { TokenUsage } from 'src/shared/services/token-usage/token-usage.service'; +import { TokenUsage } from '../../shared/services/token-usage/token-usage.service'; export enum Role { AIAgent = 'AIAgent', diff --git a/new-chatbot-app/src/llm-chain/memory/memory.module.ts b/new-chatbot-app/src/llm-chain/memory/memory.module.ts index 11aa07c..34a01ff 100644 --- a/new-chatbot-app/src/llm-chain/memory/memory.module.ts +++ b/new-chatbot-app/src/llm-chain/memory/memory.module.ts @@ -1,4 +1,14 @@ import { Module } from '@nestjs/common'; +import { ConversationMemoryService } from './conversation-memory/conversation-memory.service'; +import { PromptModule } from '../prompt/prompt.module'; +import { LlmModule } from '../llm/llm.module'; +import { SharedModule } from '../../shared/shared.module'; -@Module({}) +@Module({ + imports: [PromptModule, LlmModule, SharedModule], + providers: [ + ConversationMemoryService, + ], + exports: [ConversationMemoryService], +}) export class MemoryModule {} diff --git a/new-chatbot-app/src/llm-chain/prompt/chatbot-conversation-prompt-with-tools/chatbot-conversation-prompt-with-tools.service.spec.ts b/new-chatbot-app/src/llm-chain/prompt/chatbot-conversation-prompt-with-tools/chatbot-conversation-prompt-with-tools.service.spec.ts index bcbc7dc..bfc2c43 100644 --- a/new-chatbot-app/src/llm-chain/prompt/chatbot-conversation-prompt-with-tools/chatbot-conversation-prompt-with-tools.service.spec.ts +++ b/new-chatbot-app/src/llm-chain/prompt/chatbot-conversation-prompt-with-tools/chatbot-conversation-prompt-with-tools.service.spec.ts @@ -2,6 +2,7 @@ import { Test, TestingModule } from '@nestjs/testing'; import { ConversationMemory } from 'src/llm-chain/memory/memory.interface'; import { ChatbotConversationPromptWithToolsService } from './chatbot-conversation-prompt-with-tools.service'; import { ConfigService } from '@nestjs/config'; +import { SharedModule } from '../../../shared/shared.module'; describe('ChatbotConversationPromptWithToolsService', () => { let chatbotConversationPromptService: ChatbotConversationPromptWithToolsService; @@ -9,6 +10,7 @@
describe('ChatbotConversationPromptWithToolsService', () => { beforeEach(async () => { const moduleRef: TestingModule = await Test.createTestingModule({ + imports: [SharedModule], providers: [ChatbotConversationPromptWithToolsService, ConfigService], }).compile(); diff --git a/new-chatbot-app/src/llm-chain/prompt/chatbot-conversation-prompt-with-tools/chatbot-conversation-prompt-with-tools.service.ts b/new-chatbot-app/src/llm-chain/prompt/chatbot-conversation-prompt-with-tools/chatbot-conversation-prompt-with-tools.service.ts index fa36ab9..a3716e7 100644 --- a/new-chatbot-app/src/llm-chain/prompt/chatbot-conversation-prompt-with-tools/chatbot-conversation-prompt-with-tools.service.ts +++ b/new-chatbot-app/src/llm-chain/prompt/chatbot-conversation-prompt-with-tools/chatbot-conversation-prompt-with-tools.service.ts @@ -2,7 +2,7 @@ import { Injectable, Scope } from '@nestjs/common'; import { Prompt } from '../prompt.interface'; import { ConversationMemory } from 'src/llm-chain/memory/memory.interface'; import { LlmTool } from 'src/llm-chain/llm-toolbox/llm-tool.interface'; -import { ConfigService } from '@nestjs/config'; +import { RetrieveEnvironmentVariablesService } from '../../../shared/services/retrieve-environment-variables/retrieve-environment-variables.service'; /** * This service is for getting prompt for sending to LLM model. This prompt is aware of conversation history, context, and which tools are available to use. @@ -22,7 +22,7 @@ export class ChatbotConversationPromptWithToolsService implements Prompt { private reActModelDescription: string = ''; private modelScratchpad: string; - constructor(private configService: ConfigService) { + constructor(private retrieveEnvironmentVariablesService: RetrieveEnvironmentVariablesService) { const date = new Date(); this.modelDescription = @@ -207,7 +207,7 @@ export class ChatbotConversationPromptWithToolsService implements Prompt { * @param newScratchpad */ public _testSetScratchpad(newScratchpad: string): void { - if (this.configService.get('NODE_ENV') !== 'test') { + if (this.retrieveEnvironmentVariablesService.retrieve('NODE_ENV') !== 'test') { throw new Error('This method is for testing purposes only'); } this.modelScratchpad = newScratchpad; @@ -218,21 +218,21 @@ export class ChatbotConversationPromptWithToolsService implements Prompt { * @param newScratchpad */ public _testGetScratchpad(): string { - if (this.configService.get('NODE_ENV') !== 'test') { + if (this.retrieveEnvironmentVariablesService.retrieve('NODE_ENV') !== 'test') { throw new Error('This method is for testing purposes only'); } return this.modelScratchpad; } public _testSetModelDescription(modelDescription: string): void { - if (this.configService.get('NODE_ENV') !== 'test') { + if (this.retrieveEnvironmentVariablesService.retrieve('NODE_ENV') !== 'test') { throw new Error('This method is for testing purposes only'); } this.modelDescription = modelDescription; } public _testSetReActModelDescription(reActModelDescription: string): void { - if (this.configService.get('NODE_ENV') !== 'test') { + if (this.retrieveEnvironmentVariablesService.retrieve('NODE_ENV') !== 'test') { throw new Error('This method is for testing purposes only'); } @@ -240,7 +240,7 @@ export class ChatbotConversationPromptWithToolsService implements Prompt { } public _testSetToolsDescription(toolsDescription: string): void { - if (this.configService.get('NODE_ENV') !== 'test') { + if (this.retrieveEnvironmentVariablesService.retrieve('NODE_ENV') !== 'test') { throw new Error('This method is for 
testing purposes only'); } diff --git a/new-chatbot-app/src/llm-chain/prompt/conversation-summarization-prompt/conversation-summarization-prompt.service.spec.ts b/new-chatbot-app/src/llm-chain/prompt/conversation-summarization-prompt/conversation-summarization-prompt.service.spec.ts new file mode 100644 index 0000000..65d5f90 --- /dev/null +++ b/new-chatbot-app/src/llm-chain/prompt/conversation-summarization-prompt/conversation-summarization-prompt.service.spec.ts @@ -0,0 +1,20 @@ +import { Test, TestingModule } from '@nestjs/testing'; +import { ConversationSummarizationPromptService } from './conversation-summarization-prompt.service'; +import { SharedModule } from '../../../shared/shared.module'; + +describe('ConversationSummarizationPromptService', () => { + let service: ConversationSummarizationPromptService; + + beforeEach(async () => { + const module: TestingModule = await Test.createTestingModule({ + imports: [SharedModule], + providers: [ConversationSummarizationPromptService], + }).compile(); + + service = module.get(ConversationSummarizationPromptService); + }); + + it('should be defined', () => { + expect(service).toBeDefined(); + }); +}); diff --git a/new-chatbot-app/src/llm-chain/prompt/conversation-summarization-prompt/conversation-summarization-prompt.service.ts b/new-chatbot-app/src/llm-chain/prompt/conversation-summarization-prompt/conversation-summarization-prompt.service.ts new file mode 100644 index 0000000..62359f3 --- /dev/null +++ b/new-chatbot-app/src/llm-chain/prompt/conversation-summarization-prompt/conversation-summarization-prompt.service.ts @@ -0,0 +1,27 @@ +import { Injectable } from '@nestjs/common'; +import { Prompt } from '../prompt.interface'; + +@Injectable() +export class ConversationSummarizationPromptService implements Prompt { + private modelDescription: string; + private conversation: string = ''; + constructor() { + this.modelDescription = + 'You are trying to shorten the following conversation by summarizing it.Include any vital details like email,name,code,date,etc in the summary.\n'; + } + getSystemDescription(): string { + return this.modelDescription; + } + + /** + * The conversation you wish to summarize + * @param conversationString + */ + setConversation(conversationString: string) { + this.conversation = conversationString; + } + + getPrompt(): string { + return this.conversation; + } +} diff --git a/new-chatbot-app/src/llm-chain/prompt/prompt.interface.ts b/new-chatbot-app/src/llm-chain/prompt/prompt.interface.ts index cf86210..aa9fd8e 100644 --- a/new-chatbot-app/src/llm-chain/prompt/prompt.interface.ts +++ b/new-chatbot-app/src/llm-chain/prompt/prompt.interface.ts @@ -6,5 +6,5 @@ export interface Prompt { /** * Get the prompt */ - getPrompt(): Promise; + getPrompt(): Promise | string; } diff --git a/new-chatbot-app/src/llm-chain/prompt/prompt.module.ts b/new-chatbot-app/src/llm-chain/prompt/prompt.module.ts index 25464ec..5885dd0 100644 --- a/new-chatbot-app/src/llm-chain/prompt/prompt.module.ts +++ b/new-chatbot-app/src/llm-chain/prompt/prompt.module.ts @@ -1,8 +1,17 @@ import { Module } from '@nestjs/common'; import { ChatbotConversationPromptWithToolsService } from './chatbot-conversation-prompt-with-tools/chatbot-conversation-prompt-with-tools.service'; -import { ConfigService } from '@nestjs/config'; +import { ConversationSummarizationPromptService } from './conversation-summarization-prompt/conversation-summarization-prompt.service'; +import { SharedModule } from '../../shared/shared.module'; @Module({ - providers: 
[ChatbotConversationPromptWithToolsService, ConfigService], + imports: [SharedModule], + providers: [ + ConversationSummarizationPromptService, + ChatbotConversationPromptWithToolsService, + ], + exports: [ + ConversationSummarizationPromptService, + ChatbotConversationPromptWithToolsService, + ], }) export class PromptModule {} diff --git a/new-chatbot-app/src/shared/services/token-usage/token-usage.service.spec.ts b/new-chatbot-app/src/shared/services/token-usage/token-usage.service.spec.ts index 58c8c82..071d750 100644 --- a/new-chatbot-app/src/shared/services/token-usage/token-usage.service.spec.ts +++ b/new-chatbot-app/src/shared/services/token-usage/token-usage.service.spec.ts @@ -126,5 +126,26 @@ describe('TokenUsageService', () => { expect(service.combineTokenUsage(mockTokenUsage1, mockTokenUsage2)).toEqual( expectedTokenUsageResult, ); + + //Should be able to deal with empty object + mockTokenUsage1 = {}; + mockTokenUsage2 = { + 'gpt-4-0314': { + totalTokens: 140, + promptTokens: 110, + completionTokens: 30, + }, + }; + + expectedTokenUsageResult = { + 'gpt-4-0314': { + totalTokens: 140, + promptTokens: 110, + completionTokens: 30, + }, + }; + expect(service.combineTokenUsage(mockTokenUsage1, mockTokenUsage2)).toEqual( + expectedTokenUsageResult, + ); }); }); diff --git a/new-chatbot-app/src/shared/services/token-usage/token-usage.service.ts b/new-chatbot-app/src/shared/services/token-usage/token-usage.service.ts index 01bf0f5..723860e 100644 --- a/new-chatbot-app/src/shared/services/token-usage/token-usage.service.ts +++ b/new-chatbot-app/src/shared/services/token-usage/token-usage.service.ts @@ -1,6 +1,7 @@ import { Global, Injectable } from '@nestjs/common'; import { AxiosResponse } from 'axios'; import { CreateChatCompletionResponse } from 'openai'; +import { OpenAiModelType } from 'src/llm-chain/llm/openai-api/openai-api.service'; export type ModelTokenUsage = { totalTokens: number; diff --git a/new-chatbot-app/src/shared/shared.module.ts b/new-chatbot-app/src/shared/shared.module.ts index c9e95bd..8f0b43d 100644 --- a/new-chatbot-app/src/shared/shared.module.ts +++ b/new-chatbot-app/src/shared/shared.module.ts @@ -1,20 +1,24 @@ import { Module } from '@nestjs/common'; +import { RetrieveEnvironmentVariablesService } from './services/retrieve-environment-variables/retrieve-environment-variables.service'; +import { ConfigModule } from '@nestjs/config'; import { NetworkService } from './services/network/network.service'; import { TokenUsageService } from './services/token-usage/token-usage.service'; -import { RetrieveEnvironmentVariablesService } from './services/retrieve-environment-variables/retrieve-environment-variables.service'; -import { ConfigService } from '@nestjs/config'; @Module({ + imports: [ + ConfigModule.forRoot({ + envFilePath: '.env', + }), + ], providers: [ + RetrieveEnvironmentVariablesService, NetworkService, TokenUsageService, - RetrieveEnvironmentVariablesService, - ConfigService, ], exports: [ + RetrieveEnvironmentVariablesService, NetworkService, TokenUsageService, - RetrieveEnvironmentVariablesService, ], }) export class SharedModule {} diff --git a/new-chatbot-app/src/shared/shared.types.ts b/new-chatbot-app/src/shared/shared.types.ts deleted file mode 100644 index 5cea10b..0000000 --- a/new-chatbot-app/src/shared/shared.types.ts +++ /dev/null @@ -1,16 +0,0 @@ -type LlmModelType = OpenAiModelType | CohereModelType; - -enum CohereModelType { - Generate, - Embed, - Summarize, -} - -enum OpenAiModelType { - GPT_3_5_TURBO = 'gpt-3.5-turbo', - 
GPT_3_5_TURBO_0613 = 'gpt-3.5-turbo-0613', - GPT_3_5_TURBO_0301 = 'gpt-3.5-turbo-0301', - GPT_4 = 'gpt-4', - GPT_4_0613 = 'gpt-4-0613', - GPT_4_0314 = 'gpt-4-0314', -} From 2ce037a0ea95dd880191aad3b5c3b4f7a7043a65 Mon Sep 17 00:00:00 2001 From: dominhnhut01 Date: Wed, 6 Mar 2024 01:00:29 -0500 Subject: [PATCH 04/14] finish add unit test for memory --- new-chatbot-app/package.json | 1 + .../src/llm-chain/llm/llm.module.ts | 2 +- .../src/llm-chain/llm/llm.service.ts | 2 +- .../conversation-memory.service.spec.ts | 190 +++++++++++++++++- .../conversation-memory.service.ts | 78 +++++-- ...ation-summarization-prompt.service.spec.ts | 18 +- ...nversation-summarization-prompt.service.ts | 11 +- 7 files changed, 276 insertions(+), 26 deletions(-) diff --git a/new-chatbot-app/package.json b/new-chatbot-app/package.json index 6350894..7124bec 100644 --- a/new-chatbot-app/package.json +++ b/new-chatbot-app/package.json @@ -18,6 +18,7 @@ "test:watch": "NODE_ENV=test jest --watch", "test:cov": "NODE_ENV=test jest --coverage", "test:debug": "NODE_ENV=test node --inspect-brk -r tsconfig-paths/register -r ts-node/register node_modules/.bin/jest --runInBand", + "test:clear-cache": "NODE_ENV=test jest --clearCache", "test:e2e": "NODE_ENV=test jest --config ./test/jest-e2e.json" }, "dependencies": { diff --git a/new-chatbot-app/src/llm-chain/llm/llm.module.ts b/new-chatbot-app/src/llm-chain/llm/llm.module.ts index 05182da..3ca96d0 100644 --- a/new-chatbot-app/src/llm-chain/llm/llm.module.ts +++ b/new-chatbot-app/src/llm-chain/llm/llm.module.ts @@ -23,6 +23,6 @@ export type LlmModelSetting = { @Module({ imports: [SharedModule], providers: [LlmService, OpenaiApiService], - exports: [LlmService], + exports: [LlmService, OpenaiApiService], }) export class LlmModule {} diff --git a/new-chatbot-app/src/llm-chain/llm/llm.service.ts b/new-chatbot-app/src/llm-chain/llm/llm.service.ts index fd83c32..ab471e7 100644 --- a/new-chatbot-app/src/llm-chain/llm/llm.service.ts +++ b/new-chatbot-app/src/llm-chain/llm/llm.service.ts @@ -46,7 +46,7 @@ export class LlmService { * @param top_p * @returns */ - async getModelResponse( + public async getModelResponse( prompt: Prompt, modelType: LlmModelType, temperature: number = 0.0, diff --git a/new-chatbot-app/src/llm-chain/memory/conversation-memory/conversation-memory.service.spec.ts b/new-chatbot-app/src/llm-chain/memory/conversation-memory/conversation-memory.service.spec.ts index 14b9337..21b7b34 100644 --- a/new-chatbot-app/src/llm-chain/memory/conversation-memory/conversation-memory.service.spec.ts +++ b/new-chatbot-app/src/llm-chain/memory/conversation-memory/conversation-memory.service.spec.ts @@ -2,22 +2,202 @@ import { Test, TestingModule } from '@nestjs/testing'; import { ConversationMemoryService } from './conversation-memory.service'; import { PromptModule } from '../../../llm-chain/prompt/prompt.module'; import { MemoryModule } from '../memory.module'; -import { LlmModule } from '../../../llm-chain/llm/llm.module'; +import { LlmService } from '../../../llm-chain/llm/llm.service'; + import { SharedModule } from '../../../shared/shared.module'; +import { Role } from '../memory.interface'; +import { ConversationSummarizationPromptService } from '../../../llm-chain/prompt/conversation-summarization-prompt/conversation-summarization-prompt.service'; +import { LlmModule } from '../../../llm-chain/llm/llm.module'; + +import { + TokenUsage, + TokenUsageService, +} from '../../../shared/services/token-usage/token-usage.service'; describe('ConversationMemoryService', () => { 
let service: ConversationMemoryService; + let llmServiceMock: LlmService; beforeEach(async () => { const module: TestingModule = await Test.createTestingModule({ - imports: [PromptModule, MemoryModule, LlmModule, SharedModule], - providers: [ConversationMemoryService], + imports: [PromptModule, MemoryModule, SharedModule, LlmModule], + providers: [ + ConversationMemoryService, + LlmService, + ConversationSummarizationPromptService, + TokenUsageService, + ], }).compile(); service = module.get(ConversationMemoryService); + llmServiceMock = module.get(LlmService); }); - it('should be defined', () => { expect(service).toBeDefined(); }); -}); + + it('should set and get conversation summarization mode correctly', () => { + service.setConversationSummarizationMode(true); + expect(service.getConversationSummarizationMode()).toBe(true); + + service.setConversationSummarizationMode(false); + expect(service.getConversationSummarizationMode()).toBe(false); + }); + + it('should set max context window size and adjust conversation queue size', () => { + expect(service.getMaxContextWindow()).toBeUndefined(); + service.setMaxContextWindow(5); + expect(service.getMaxContextWindow()).toBe(5); + + service.setMaxContextWindow(undefined); + expect(service.getMaxContextWindow()).toBeUndefined(); + }); + + it('should add to conversation queue correctly', async () => { + service.setConversationSummarizationMode(false); + service.setMaxContextWindow(undefined); + service.addToConversation(Role.AIAgent, 'AI message1'); + service.addToConversation(Role.Customer, 'Customer message1'); + + expect(await service.getConversationAsString()).toEqual( + '\nAIAgent: AI message1\nCustomer: Customer message1\n', + ); + + service.addToConversation(Role.Customer, 'Customer message2'); + expect(await service.getConversationAsString()).toEqual( + '\nAIAgent: AI message1\nCustomer: Customer message1\nCustomer: Customer message2\n', + ); + }); + + it('should set and get conversation buffer size correctly', () => { + service.setConversationBufferSize(10); + expect(service.getConversationBufferSize()).toBe(10); + + service.setConversationBufferSize(undefined); + expect(service.getConversationBufferSize()).toBeUndefined(); + }); + + it('should split conversation summary and unchanged conversation correctly', async () => { + service.setConversationSummarizationMode(true); + service.setMaxContextWindow(4); + service.setConversationBufferSize(1); + service.addToConversation(Role.AIAgent, 'AI message1'); + service.addToConversation(Role.Customer, 'Customer message1'); + service.addToConversation(Role.AIAgent, 'AI message2'); + service.addToConversation(Role.Customer, 'Customer message2'); + + //Mock getModelResponse + jest + .spyOn(llmServiceMock, 'getModelResponse') + .mockImplementation(async () => ({ + response: 'This is summary', + tokenUsage: { + 'gpt-4': { + totalTokens: 100, + promptTokens: 70, + completionTokens: 30, + }, + }, + })); + + let expectedResult = 'This is summary\nCustomer: Customer message2\n'; + + expect(await service.getConversationAsString()).toEqual(expectedResult); + + service.setConversationBufferSize(2); + expectedResult = + 'This is summary\nAIAgent: AI message2\nCustomer: Customer message2\n'; + expect(await service.getConversationAsString()).toEqual(expectedResult); + + //undefined bufferSize + service.setConversationBufferSize(undefined); + expectedResult = + '\nAIAgent: AI message1\nCustomer: Customer message1\nAIAgent: AI message2\nCustomer: Customer message2\n'; + expect(await 
service.getConversationAsString()).toEqual(expectedResult); + }); + + it('should not accept invalid buffer size and maxContextWindow when setting', () => { + //bufferSize larger than maxContextWindow + service.setMaxContextWindow(6); + expect(() => { + service.setConversationBufferSize(7); + }).toThrow(Error); + + //maxContextWindow smaller than bufferSize + service.setConversationBufferSize(5); + expect(() => { + service.setMaxContextWindow(4); + }).toThrow(Error); + }); + + it('shoud return correct token usage', async () => { + service.setConversationSummarizationMode(true); + service.setMaxContextWindow(4); + service.setConversationBufferSize(2); + service.addToConversation(Role.AIAgent, 'AI message1'); + service.addToConversation(Role.Customer, 'Customer message1'); + service.addToConversation(Role.AIAgent, 'AI message2'); + service.addToConversation(Role.Customer, 'Customer message2'); + + //Mock getModelResponse + jest + .spyOn(llmServiceMock, 'getModelResponse') + .mockImplementation(async () => ({ + response: 'This is summary', + tokenUsage: { + 'gpt-4': { + totalTokens: 100, + promptTokens: 70, + completionTokens: 30, + }, + }, + })); + + //Token should be accumulate after each time call getConversationAsString + await service.getConversationAsString(); + let expectedTokenUsage = { + 'gpt-4': { + totalTokens: 100, + promptTokens: 70, + completionTokens: 30, + }, + }; + expect(service.getTokenUsage()).toEqual(expectedTokenUsage); + + //Token should be accumulate after each time call getConversationAsString + await service.getConversationAsString(); + expectedTokenUsage = { + 'gpt-4': { + totalTokens: 200, + promptTokens: 140, + completionTokens: 60, + }, + }; + expect(service.getTokenUsage()).toEqual(expectedTokenUsage); + + //Mock getModelResponse + jest + .spyOn(llmServiceMock, 'getModelResponse') + .mockImplementation(async () => ({ + response: 'This is summary', + tokenUsage: { + 'gpt-4': { + totalTokens: 50, + promptTokens: 30, + completionTokens: 20, + }, + }, + })); + + //Token should be accumulate after each time call getConversationAsString + await service.getConversationAsString(); + expectedTokenUsage = { + 'gpt-4': { + totalTokens: 250, + promptTokens: 170, + completionTokens: 80, + }, + }; + expect(service.getTokenUsage()).toEqual(expectedTokenUsage); + }); +}); \ No newline at end of file diff --git a/new-chatbot-app/src/llm-chain/memory/conversation-memory/conversation-memory.service.ts b/new-chatbot-app/src/llm-chain/memory/conversation-memory/conversation-memory.service.ts index 9038818..74d6640 100644 --- a/new-chatbot-app/src/llm-chain/memory/conversation-memory/conversation-memory.service.ts +++ b/new-chatbot-app/src/llm-chain/memory/conversation-memory/conversation-memory.service.ts @@ -22,6 +22,10 @@ export class ConversationMemoryService implements ConversationMemory { * Number of conversation line (most recent) that we would not summarize, allowing model to have the full context of most recent conversation */ private conversationBufferSize: number | undefined; + + /** + * True: will summarize the conversation based on maxContextWindow and conversationBufferSize. 
False: will not summarize + */ private conversationSummarizationMode: boolean = false; private tokenUsage: TokenUsage = {}; @@ -39,16 +43,50 @@ export class ConversationMemoryService implements ConversationMemory { return this.conversationSummarizationMode; } + /** + * Set max context window for the memory + * @param contextWindowSize + * @throws Error in case contextWindowSize < conversation buffer size (if defined) + */ public setMaxContextWindow(contextWindowSize: number | undefined) { + if ( + contextWindowSize !== undefined && + this.conversationBufferSize !== undefined && + contextWindowSize < this.conversationBufferSize + ) { + throw new Error( + 'Context window size cannot be smaller than conversation buffer size', + ); + } + this.maxContextWindow = contextWindowSize; this.conversationQueue.setMaxSize(this.maxContextWindow); } + public getMaxContextWindow(): number | undefined { + return this.maxContextWindow; + } + public addToConversation(role: Role, message: string): void { this.conversationQueue.enqueue([role, message]); } + /** + * + * @param bufferSize + * @throws Error in case bufferSize is larger than this.maxContextWindow (if defined) + */ public setConversationBufferSize(bufferSize: number | undefined) { + if ( + bufferSize !== undefined && + this.maxContextWindow !== undefined && + bufferSize > this.maxContextWindow + ) { + throw new Error( + 'Conversation Buffer size cannot be larger than max context window', + ); + } + this.conversationBufferSize = bufferSize; } @@ -56,6 +94,10 @@ export class ConversationMemoryService implements ConversationMemory { return this.conversationBufferSize; } + private setTokenUsage(tokenUsage: TokenUsage) { + this.tokenUsage = tokenUsage; + } + /** * Turn conversation into a string * @param conversation @@ -100,15 +142,27 @@ export class ConversationMemoryService implements ConversationMemory { ); //Update TokenUsage information - this.tokenUsage = this.tokenUsageService.combineTokenUsage( - tokenUsageFromSummarization, - this.tokenUsage, + this.setTokenUsage( + this.tokenUsageService.combineTokenUsage( + tokenUsageFromSummarization, + this.getTokenUsage(), + ), ); - resolve(this.stringifyConversation(conversation)); + + resolve(conversationSummary); }); } + /** + * get + * @param start The beginning index of the specified portion of the array. If start is undefined, then the slice begins at index 0. - public getConversationAsString(start: number, end: number): Promise<string> { + * @param end The end index of the specified portion of the array. This is exclusive of the element at the index 'end'. If end is undefined, then the slice extends to the end of the array. + * @returns + */ + public async getConversationAsString( + start: number = 0, + end?: number, + ): Promise<string> { return new Promise<string>(async (resolve, rejects) => { const slicedConversation = this.conversationQueue.slice(start, end); @@ -125,9 +179,9 @@ export class ConversationMemoryService implements ConversationMemory { ) : []; - const conversationSummary = this.summarizeConversation( - conversationToSummarize, - ); + const conversationSummary = this.conversationSummarizationMode + ?
await this.summarizeConversation(conversationToSummarize) + : this.stringifyConversation(conversationToSummarize); resolve( `${conversationSummary}\n${this.stringifyConversation(conversationToUnchange)}`, @@ -136,12 +190,6 @@ export class ConversationMemoryService implements ConversationMemory { } public getTokenUsage(): TokenUsage { - return { - 'gpt-4-0314': { - totalTokens: 0, - completionTokens: 0, - promptTokens: 0, - }, - }; + return this.tokenUsage; } } diff --git a/new-chatbot-app/src/llm-chain/prompt/conversation-summarization-prompt/conversation-summarization-prompt.service.spec.ts b/new-chatbot-app/src/llm-chain/prompt/conversation-summarization-prompt/conversation-summarization-prompt.service.spec.ts index 65d5f90..5366004 100644 --- a/new-chatbot-app/src/llm-chain/prompt/conversation-summarization-prompt/conversation-summarization-prompt.service.spec.ts +++ b/new-chatbot-app/src/llm-chain/prompt/conversation-summarization-prompt/conversation-summarization-prompt.service.spec.ts @@ -11,10 +11,26 @@ describe('ConversationSummarizationPromptService', () => { providers: [ConversationSummarizationPromptService], }).compile(); - service = module.get(ConversationSummarizationPromptService); + service = module.get( + ConversationSummarizationPromptService, + ); }); it('should be defined', () => { expect(service).toBeDefined(); }); + + it('should set and get the correct system description', () => { + const expectedSystemDescription = 'Mocked System Description'; + service.setSystemDescription(expectedSystemDescription); + + expect(service.getSystemDescription()).toEqual(expectedSystemDescription); + }); + + it('should set and get correct conversation', () => { + const expectedConversation = 'Mocked conversation'; + + service.setConversation(expectedConversation); + expect(service.getPrompt()).toEqual(expectedConversation); + }); }); diff --git a/new-chatbot-app/src/llm-chain/prompt/conversation-summarization-prompt/conversation-summarization-prompt.service.ts b/new-chatbot-app/src/llm-chain/prompt/conversation-summarization-prompt/conversation-summarization-prompt.service.ts index 62359f3..e89d4e8 100644 --- a/new-chatbot-app/src/llm-chain/prompt/conversation-summarization-prompt/conversation-summarization-prompt.service.ts +++ b/new-chatbot-app/src/llm-chain/prompt/conversation-summarization-prompt/conversation-summarization-prompt.service.ts @@ -9,7 +9,12 @@ export class ConversationSummarizationPromptService implements Prompt { this.modelDescription = 'You are trying to shorten the following conversation by summarizing it.Include any vital details like email,name,code,date,etc in the summary.\n'; } - getSystemDescription(): string { + + public setSystemDescription(systemDescription: string) { + this.modelDescription = systemDescription; + } + + public getSystemDescription(): string { return this.modelDescription; } @@ -17,11 +22,11 @@ export class ConversationSummarizationPromptService implements Prompt { * The conversation you wish to summarize * @param conversationString */ - setConversation(conversationString: string) { + public setConversation(conversationString: string) { this.conversation = conversationString; } - getPrompt(): string { + public getPrompt(): string { return this.conversation; } } From 1dbf171236d5b739770a6ffac29d457fc370d72c Mon Sep 17 00:00:00 2001 From: dominhnhut01 Date: Wed, 6 Mar 2024 01:13:10 -0500 Subject: [PATCH 05/14] prettier format --- new-chatbot-app/package.json | 1 - new-chatbot-app/src/app.module.ts | 5 +--- .../src/llm-chain/llm-chain.module.ts 
| 7 +---- .../src/llm-chain/llm/llm.service.ts | 8 ++--- .../conversation-memory.service.spec.ts | 2 +- .../src/llm-chain/memory/memory.module.ts | 4 +-- ...-conversation-prompt-with-tools.service.ts | 29 +++++++++++++++---- .../custome-queue/custom-queue.spec.ts | 2 +- 8 files changed, 30 insertions(+), 28 deletions(-) diff --git a/new-chatbot-app/package.json b/new-chatbot-app/package.json index 7124bec..5ceac75 100644 --- a/new-chatbot-app/package.json +++ b/new-chatbot-app/package.json @@ -7,7 +7,6 @@ "license": "UNLICENSED", "scripts": { "build": "nest build", - "format:write": "prettier --write \"src/**/*.ts\" \"test/**/*.ts\"", "format:check": "prettier --check \"src/**/*.ts\" \"test/**/*.ts\"", "start": "nest start", "start:dev": "nest start --watch", diff --git a/new-chatbot-app/src/app.module.ts b/new-chatbot-app/src/app.module.ts index b8a6898..c5076f3 100644 --- a/new-chatbot-app/src/app.module.ts +++ b/new-chatbot-app/src/app.module.ts @@ -6,9 +6,6 @@ import { SharedModule } from './shared/shared.module'; @Module({ imports: [LlmChainModule, LibraryApiModule, SharedModule], - providers: [ - ChatGateway, - ], + providers: [ChatGateway], }) - export class AppModule {} diff --git a/new-chatbot-app/src/llm-chain/llm-chain.module.ts b/new-chatbot-app/src/llm-chain/llm-chain.module.ts index 3490425..4741c3a 100644 --- a/new-chatbot-app/src/llm-chain/llm-chain.module.ts +++ b/new-chatbot-app/src/llm-chain/llm-chain.module.ts @@ -6,12 +6,7 @@ import { LlmChainService } from './llm-chain.service'; import { LlmModule } from './llm/llm.module'; @Module({ - imports: [ - LlmModule, - LlmToolboxModule, - MemoryModule, - PromptModule, - ], + imports: [LlmModule, LlmToolboxModule, MemoryModule, PromptModule], providers: [LlmChainService], exports: [LlmChainService], }) diff --git a/new-chatbot-app/src/llm-chain/llm/llm.service.ts b/new-chatbot-app/src/llm-chain/llm/llm.service.ts index ab471e7..a7251d2 100644 --- a/new-chatbot-app/src/llm-chain/llm/llm.service.ts +++ b/new-chatbot-app/src/llm-chain/llm/llm.service.ts @@ -1,9 +1,7 @@ import { Injectable, Logger } from '@nestjs/common'; import { Prompt } from '../prompt/prompt.interface'; -import { - TokenUsage, -} from '../../shared/services/token-usage/token-usage.service'; +import { TokenUsage } from '../../shared/services/token-usage/token-usage.service'; import { LlmInterface } from './llm.interface'; import { OpenaiApiService, @@ -18,9 +16,7 @@ import { LlmModelType } from './llm.module'; export class LlmService { private readonly logger = new Logger(LlmService.name); - constructor( - private openaiApiService: OpenaiApiService, - ) {} + constructor(private openaiApiService: OpenaiApiService) {} /** * Get the LLM Model from input model name diff --git a/new-chatbot-app/src/llm-chain/memory/conversation-memory/conversation-memory.service.spec.ts b/new-chatbot-app/src/llm-chain/memory/conversation-memory/conversation-memory.service.spec.ts index 21b7b34..944697f 100644 --- a/new-chatbot-app/src/llm-chain/memory/conversation-memory/conversation-memory.service.spec.ts +++ b/new-chatbot-app/src/llm-chain/memory/conversation-memory/conversation-memory.service.spec.ts @@ -200,4 +200,4 @@ describe('ConversationMemoryService', () => { }; expect(service.getTokenUsage()).toEqual(expectedTokenUsage); }); -}); \ No newline at end of file +}); diff --git a/new-chatbot-app/src/llm-chain/memory/memory.module.ts b/new-chatbot-app/src/llm-chain/memory/memory.module.ts index 34a01ff..74a4a11 100644 --- a/new-chatbot-app/src/llm-chain/memory/memory.module.ts 
+++ b/new-chatbot-app/src/llm-chain/memory/memory.module.ts @@ -6,9 +6,7 @@ import { SharedModule } from '../../shared/shared.module'; @Module({ imports: [PromptModule, LlmModule, SharedModule], - providers: [ - ConversationMemoryService, - ], + providers: [ConversationMemoryService], exports: [ConversationMemoryService], }) export class MemoryModule {} diff --git a/new-chatbot-app/src/llm-chain/prompt/chatbot-conversation-prompt-with-tools/chatbot-conversation-prompt-with-tools.service.ts b/new-chatbot-app/src/llm-chain/prompt/chatbot-conversation-prompt-with-tools/chatbot-conversation-prompt-with-tools.service.ts index a3716e7..94416d5 100644 --- a/new-chatbot-app/src/llm-chain/prompt/chatbot-conversation-prompt-with-tools/chatbot-conversation-prompt-with-tools.service.ts +++ b/new-chatbot-app/src/llm-chain/prompt/chatbot-conversation-prompt-with-tools/chatbot-conversation-prompt-with-tools.service.ts @@ -22,7 +22,9 @@ export class ChatbotConversationPromptWithToolsService implements Prompt { private reActModelDescription: string = ''; private modelScratchpad: string; - constructor(private retrieveEnvironmentVariablesService: RetrieveEnvironmentVariablesService) { + constructor( + private retrieveEnvironmentVariablesService: RetrieveEnvironmentVariablesService, + ) { const date = new Date(); this.modelDescription = @@ -207,7 +209,10 @@ export class ChatbotConversationPromptWithToolsService implements Prompt { * @param newScratchpad */ public _testSetScratchpad(newScratchpad: string): void { - if (this.retrieveEnvironmentVariablesService.retrieve('NODE_ENV') !== 'test') { + if ( + this.retrieveEnvironmentVariablesService.retrieve('NODE_ENV') !== + 'test' + ) { throw new Error('This method is for testing purposes only'); } this.modelScratchpad = newScratchpad; @@ -218,21 +223,30 @@ export class ChatbotConversationPromptWithToolsService implements Prompt { * @param newScratchpad */ public _testGetScratchpad(): string { - if (this.retrieveEnvironmentVariablesService.retrieve('NODE_ENV') !== 'test') { + if ( + this.retrieveEnvironmentVariablesService.retrieve('NODE_ENV') !== + 'test' + ) { throw new Error('This method is for testing purposes only'); } return this.modelScratchpad; } public _testSetModelDescription(modelDescription: string): void { - if (this.retrieveEnvironmentVariablesService.retrieve('NODE_ENV') !== 'test') { + if ( + this.retrieveEnvironmentVariablesService.retrieve('NODE_ENV') !== + 'test' + ) { throw new Error('This method is for testing purposes only'); } this.modelDescription = modelDescription; } public _testSetReActModelDescription(reActModelDescription: string): void { - if (this.retrieveEnvironmentVariablesService.retrieve('NODE_ENV') !== 'test') { + if ( + this.retrieveEnvironmentVariablesService.retrieve('NODE_ENV') !== + 'test' + ) { throw new Error('This method is for testing purposes only'); } @@ -240,7 +254,10 @@ export class ChatbotConversationPromptWithToolsService implements Prompt { } public _testSetToolsDescription(toolsDescription: string): void { - if (this.retrieveEnvironmentVariablesService.retrieve('NODE_ENV') !== 'test') { + if ( + this.retrieveEnvironmentVariablesService.retrieve('NODE_ENV') !== + 'test' + ) { throw new Error('This method is for testing purposes only'); } diff --git a/new-chatbot-app/src/shared/custom-data-structures/custome-queue/custom-queue.spec.ts b/new-chatbot-app/src/shared/custom-data-structures/custome-queue/custom-queue.spec.ts index be800f5..002ad02 100644 --- 
a/new-chatbot-app/src/shared/custom-data-structures/custome-queue/custom-queue.spec.ts +++ b/new-chatbot-app/src/shared/custom-data-structures/custome-queue/custom-queue.spec.ts @@ -40,7 +40,7 @@ describe('CustomQueue', () => { it('should update maximum size and dequeue excess items', () => { queue.setMaxSize(5); - + queue.enqueue(1); queue.enqueue(2); queue.enqueue(3); From 06afd4de767f497baaf1f99644a6c7f4e95b0c05 Mon Sep 17 00:00:00 2001 From: dominhnhut01 Date: Wed, 6 Mar 2024 11:58:31 -0500 Subject: [PATCH 06/14] add env variable to github action --- .github/workflows/formating-and-testing.yml | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/.github/workflows/formating-and-testing.yml b/.github/workflows/formating-and-testing.yml index e770846..6fa44a4 100644 --- a/.github/workflows/formating-and-testing.yml +++ b/.github/workflows/formating-and-testing.yml @@ -28,8 +28,13 @@ jobs: - name: Build run: npm run build + - name: Run tests + env: + OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} + OPENAI_ORGANIZATION_ID: ${{ secrets.OPENAI_ORGANIZATION_ID }} + run: npm test + - name: Check formatting run: npm run format:check - - name: Run tests - run: npm test + From 82a08a0da296c057916c63ac1f04289a11e5b163 Mon Sep 17 00:00:00 2001 From: dominhnhut01 Date: Wed, 6 Mar 2024 12:58:07 -0500 Subject: [PATCH 07/14] add request scope to conversation memory service --- .../conversation-memory/conversation-memory.service.spec.ts | 5 ++--- .../conversation-memory/conversation-memory.service.ts | 4 ++-- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/new-chatbot-app/src/llm-chain/memory/conversation-memory/conversation-memory.service.spec.ts b/new-chatbot-app/src/llm-chain/memory/conversation-memory/conversation-memory.service.spec.ts index 944697f..0761385 100644 --- a/new-chatbot-app/src/llm-chain/memory/conversation-memory/conversation-memory.service.spec.ts +++ b/new-chatbot-app/src/llm-chain/memory/conversation-memory/conversation-memory.service.spec.ts @@ -10,7 +10,6 @@ import { ConversationSummarizationPromptService } from '../../../llm-chain/promp import { LlmModule } from '../../../llm-chain/llm/llm.module'; import { - TokenUsage, TokenUsageService, } from '../../../shared/services/token-usage/token-usage.service'; @@ -29,8 +28,8 @@ describe('ConversationMemoryService', () => { ], }).compile(); - service = module.get(ConversationMemoryService); - llmServiceMock = module.get(LlmService); + service = await module.resolve(ConversationMemoryService); + llmServiceMock = await module.resolve(LlmService); }); it('should be defined', () => { expect(service).toBeDefined(); diff --git a/new-chatbot-app/src/llm-chain/memory/conversation-memory/conversation-memory.service.ts b/new-chatbot-app/src/llm-chain/memory/conversation-memory/conversation-memory.service.ts index 74d6640..4ad379c 100644 --- a/new-chatbot-app/src/llm-chain/memory/conversation-memory/conversation-memory.service.ts +++ b/new-chatbot-app/src/llm-chain/memory/conversation-memory/conversation-memory.service.ts @@ -1,4 +1,4 @@ -import { Injectable } from '@nestjs/common'; +import { Injectable, Scope } from '@nestjs/common'; import { ConversationMemory, Role } from '../memory.interface'; import { CustomQueue } from '../../../shared/custom-data-structures/custome-queue/custom-queue'; import { @@ -9,7 +9,7 @@ import { ConversationSummarizationPromptService } from '../../prompt/conversatio import { LlmService } from '../../../llm-chain/llm/llm.service'; import { OpenAiModelType } from 
'../../../llm-chain/llm/openai-api/openai-api.service'; -@Injectable() +@Injectable({scope: Scope.REQUEST}) export class ConversationMemoryService implements ConversationMemory { private conversationQueue: CustomQueue<[Role | null, string]> = new CustomQueue<[Role | null, string]>(); From 5a70bb713e1de4ea8f4a8eaa9f1cdfe07ab871b0 Mon Sep 17 00:00:00 2001 From: dominhnhut01 Date: Wed, 6 Mar 2024 12:59:23 -0500 Subject: [PATCH 08/14] prettier format --- .../conversation-memory.service.spec.ts | 8 ++++---- .../conversation-memory/conversation-memory.service.ts | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/new-chatbot-app/src/llm-chain/memory/conversation-memory/conversation-memory.service.spec.ts b/new-chatbot-app/src/llm-chain/memory/conversation-memory/conversation-memory.service.spec.ts index 0761385..10e1e02 100644 --- a/new-chatbot-app/src/llm-chain/memory/conversation-memory/conversation-memory.service.spec.ts +++ b/new-chatbot-app/src/llm-chain/memory/conversation-memory/conversation-memory.service.spec.ts @@ -9,9 +9,7 @@ import { Role } from '../memory.interface'; import { ConversationSummarizationPromptService } from '../../../llm-chain/prompt/conversation-summarization-prompt/conversation-summarization-prompt.service'; import { LlmModule } from '../../../llm-chain/llm/llm.module'; -import { - TokenUsageService, -} from '../../../shared/services/token-usage/token-usage.service'; +import { TokenUsageService } from '../../../shared/services/token-usage/token-usage.service'; describe('ConversationMemoryService', () => { let service: ConversationMemoryService; @@ -28,7 +26,9 @@ describe('ConversationMemoryService', () => { ], }).compile(); - service = await module.resolve(ConversationMemoryService); + service = await module.resolve( + ConversationMemoryService, + ); llmServiceMock = await module.resolve(LlmService); }); it('should be defined', () => { diff --git a/new-chatbot-app/src/llm-chain/memory/conversation-memory/conversation-memory.service.ts b/new-chatbot-app/src/llm-chain/memory/conversation-memory/conversation-memory.service.ts index 4ad379c..ec41ac3 100644 --- a/new-chatbot-app/src/llm-chain/memory/conversation-memory/conversation-memory.service.ts +++ b/new-chatbot-app/src/llm-chain/memory/conversation-memory/conversation-memory.service.ts @@ -9,7 +9,7 @@ import { ConversationSummarizationPromptService } from '../../prompt/conversatio import { LlmService } from '../../../llm-chain/llm/llm.service'; import { OpenAiModelType } from '../../../llm-chain/llm/openai-api/openai-api.service'; -@Injectable({scope: Scope.REQUEST}) +@Injectable({ scope: Scope.REQUEST }) export class ConversationMemoryService implements ConversationMemory { private conversationQueue: CustomQueue<[Role | null, string]> = new CustomQueue<[Role | null, string]>(); From bcc551691ec3f1b615cd3f8f3db8d19d260c59f0 Mon Sep 17 00:00:00 2001 From: dominhnhut01 Date: Fri, 8 Mar 2024 14:28:28 -0500 Subject: [PATCH 09/14] added llmGateWay and LlmChain and updated OpenAI newest version --- new-chatbot-app/.vscode/launch.json | 0 new-chatbot-app/.vscode/settings.json | 2 + new-chatbot-app/package-lock.json | 200 +++++++++++++++--- new-chatbot-app/package.json | 2 +- new-chatbot-app/src/app.module.ts | 10 +- new-chatbot-app/src/chat/chat.gateway.ts | 22 -- .../{ => gateway}/chat/chat.gateway.spec.ts | 3 +- .../src/gateway/chat/chat.gateway.ts | 41 ++++ .../connection/llm-connection.gateway.spec.ts | 18 ++ .../connection/llm-connection.gateway.ts | 33 +++ 
new-chatbot-app/src/gateway/gateway.module.ts | 13 ++ .../src/llm-chain/llm-chain.module.ts | 3 +- .../src/llm-chain/llm-chain.service.ts | 49 ++++- .../src/llm-chain/llm/llm.service.ts | 45 ++-- .../llm/openai-api/openai-api.service.ts | 99 ++++----- .../conversation-memory.service.ts | 6 +- ...-conversation-prompt-with-tools.service.ts | 2 +- ...ation-summarization-prompt.service.spec.ts | 2 +- ...nversation-summarization-prompt.service.ts | 4 +- new-chatbot-app/src/main.ts | 2 +- .../services/network/network.service.ts | 2 +- .../token-usage/token-usage.service.ts | 17 +- 22 files changed, 417 insertions(+), 158 deletions(-) create mode 100644 new-chatbot-app/.vscode/launch.json create mode 100644 new-chatbot-app/.vscode/settings.json delete mode 100644 new-chatbot-app/src/chat/chat.gateway.ts rename new-chatbot-app/src/{ => gateway}/chat/chat.gateway.spec.ts (75%) create mode 100644 new-chatbot-app/src/gateway/chat/chat.gateway.ts create mode 100644 new-chatbot-app/src/gateway/connection/llm-connection.gateway.spec.ts create mode 100644 new-chatbot-app/src/gateway/connection/llm-connection.gateway.ts create mode 100644 new-chatbot-app/src/gateway/gateway.module.ts diff --git a/new-chatbot-app/.vscode/launch.json b/new-chatbot-app/.vscode/launch.json new file mode 100644 index 0000000..e69de29 diff --git a/new-chatbot-app/.vscode/settings.json b/new-chatbot-app/.vscode/settings.json new file mode 100644 index 0000000..7a73a41 --- /dev/null +++ b/new-chatbot-app/.vscode/settings.json @@ -0,0 +1,2 @@ +{ +} \ No newline at end of file diff --git a/new-chatbot-app/package-lock.json b/new-chatbot-app/package-lock.json index adc6029..3615bb7 100644 --- a/new-chatbot-app/package-lock.json +++ b/new-chatbot-app/package-lock.json @@ -16,7 +16,7 @@ "@nestjs/platform-socket.io": "^10.3.3", "@nestjs/websockets": "^10.3.3", "eslint-plugin-unused-imports": "^3.1.0", - "openai": "^3.3.0", + "openai": "^4.28.4", "reflect-metadata": "^0.1.13", "rxjs": "^7.8.1" }, @@ -2227,6 +2227,15 @@ "undici-types": "~5.26.4" } }, + "node_modules/@types/node-fetch": { + "version": "2.6.11", + "resolved": "https://registry.npmjs.org/@types/node-fetch/-/node-fetch-2.6.11.tgz", + "integrity": "sha512-24xFj9R5+rfQJLRyM56qh+wnVSYhyXC2tkoBndtY0U+vubqNsYXGjufB2nn8Q6gt0LrARwL6UBtMCSVCwl4B1g==", + "dependencies": { + "@types/node": "*", + "form-data": "^4.0.0" + } + }, "node_modules/@types/qs": { "version": "6.9.11", "resolved": "https://registry.npmjs.org/@types/qs/-/qs-6.9.11.tgz", @@ -2661,6 +2670,17 @@ "integrity": "sha512-NuHqBY1PB/D8xU6s/thBgOAiAP7HOYDQ32+BFZILJ8ivkUkAHQnWfn6WhL79Owj1qmUnoN/YPhktdIoucipkAQ==", "dev": true }, + "node_modules/abort-controller": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/abort-controller/-/abort-controller-3.0.0.tgz", + "integrity": "sha512-h8lQ8tacZYnR3vNQTgibj+tODHI5/+l06Au2Pcriv/Gmet0eaj4TwWH41sO9wnHDiQsEj19q0drzdWdeAHtweg==", + "dependencies": { + "event-target-shim": "^5.0.0" + }, + "engines": { + "node": ">=6.5" + } + }, "node_modules/accepts": { "version": "1.3.8", "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.8.tgz", @@ -2710,6 +2730,17 @@ "node": ">=0.4.0" } }, + "node_modules/agentkeepalive": { + "version": "4.5.0", + "resolved": "https://registry.npmjs.org/agentkeepalive/-/agentkeepalive-4.5.0.tgz", + "integrity": "sha512-5GG/5IbQQpC9FpkRGsSvZI5QYeSCzlJHdpBQntCsuTOxhKD8lqKhrleg2Yi7yvMIf82Ycmmqln9U8V9qwEiJew==", + "dependencies": { + "humanize-ms": "^1.2.1" + }, + "engines": { + "node": ">= 8.0.0" + } + }, "node_modules/ajv": { "version": 
"8.12.0", "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.12.0.tgz", @@ -2873,14 +2904,6 @@ "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==" }, - "node_modules/axios": { - "version": "0.26.1", - "resolved": "https://registry.npmjs.org/axios/-/axios-0.26.1.tgz", - "integrity": "sha512-fPwcX4EvnSHuInCMItEhAGnaSEXRBjtzh9fOtsE6E1G6p7vl7edEeZe11QHf18+6+9gR5PbKV/sGKNaD8YaMeA==", - "dependencies": { - "follow-redirects": "^1.14.8" - } - }, "node_modules/babel-jest": { "version": "29.7.0", "resolved": "https://registry.npmjs.org/babel-jest/-/babel-jest-29.7.0.tgz", @@ -3002,6 +3025,11 @@ "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==" }, + "node_modules/base-64": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/base-64/-/base-64-0.1.0.tgz", + "integrity": "sha512-Y5gU45svrR5tI2Vt/X9GPd3L0HNIKzGu202EjxrXMpuc2V2CiKgemAbUUsqYmZJvPtCXoUKjNZwBJzsNScUbXA==" + }, "node_modules/base64-js": { "version": "1.5.1", "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz", @@ -3307,6 +3335,14 @@ "integrity": "sha512-mT8iDcrh03qDGRRmoA2hmBJnxpllMR+0/0qlzjqZES6NdiWDcZkCNAk4rPFZ9Q85r27unkiNNg8ZOiwZXBHwcA==", "dev": true }, + "node_modules/charenc": { + "version": "0.0.2", + "resolved": "https://registry.npmjs.org/charenc/-/charenc-0.0.2.tgz", + "integrity": "sha512-yrLQ/yVUFXkzg7EDQsPieE/53+0RlaWTs+wBrvW36cyilJ2SaDWfl4Yj7MtLTXleV9uEKefbAGUPv2/iWSooRA==", + "engines": { + "node": "*" + } + }, "node_modules/chokidar": { "version": "3.6.0", "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.6.0.tgz", @@ -3677,6 +3713,14 @@ "node": ">= 8" } }, + "node_modules/crypt": { + "version": "0.0.2", + "resolved": "https://registry.npmjs.org/crypt/-/crypt-0.0.2.tgz", + "integrity": "sha512-mCxBlsHFYh9C+HVpiEacem8FEBnMXgU9gy4zmNC+SXAZNB/1idgp/aulFJ4FgCi7GPEVbfyng092GqL2k2rmow==", + "engines": { + "node": "*" + } + }, "node_modules/debug": { "version": "4.3.4", "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", @@ -3811,6 +3855,15 @@ "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, + "node_modules/digest-fetch": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/digest-fetch/-/digest-fetch-1.3.0.tgz", + "integrity": "sha512-CGJuv6iKNM7QyZlM2T3sPAdZWd/p9zQiRNS9G+9COUCwzWFTs0Xp8NF5iePx7wtvhDykReiRRrSeNb4oMmB8lA==", + "dependencies": { + "base-64": "^0.1.0", + "md5": "^2.3.0" + } + }, "node_modules/dir-glob": { "version": "3.0.1", "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz", @@ -4280,6 +4333,14 @@ "node": ">= 0.6" } }, + "node_modules/event-target-shim": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/event-target-shim/-/event-target-shim-5.0.1.tgz", + "integrity": "sha512-i/2XbnSz/uxRCU6+NdVJgKWDTM427+MqYbkQzD321DuCQJUqOuJKIA0IM2+W2xtYHdKOmZ4dR6fExsd4SXL+WQ==", + "engines": { + "node": ">=6" + } + }, "node_modules/events": { "version": "3.3.0", "resolved": "https://registry.npmjs.org/events/-/events-3.3.0.tgz", @@ -4675,25 +4736,6 @@ "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.2.9.tgz", "integrity": "sha512-36yxDn5H7OFZQla0/jFJmbIKTdZAQHngCedGxiMmpNfEZM0sdEeT+WczLQrjK6D7o2aiyLYDnkw0R3JK0Qv1RQ==" }, - "node_modules/follow-redirects": { - "version": "1.15.5", - "resolved": 
"https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.5.tgz", - "integrity": "sha512-vSFWUON1B+yAw1VN4xMfxgn5fTUiaOzAJCKBwIIgT/+7CuGy9+r+5gITvP62j3RmaD5Ph65UaERdOSRGUzZtgw==", - "funding": [ - { - "type": "individual", - "url": "https://github.com/sponsors/RubenVerborgh" - } - ], - "engines": { - "node": ">=4.0" - }, - "peerDependenciesMeta": { - "debug": { - "optional": true - } - } - }, "node_modules/foreground-child": { "version": "3.1.1", "resolved": "https://registry.npmjs.org/foreground-child/-/foreground-child-3.1.1.tgz", @@ -4773,6 +4815,31 @@ "node": ">= 6" } }, + "node_modules/form-data-encoder": { + "version": "1.7.2", + "resolved": "https://registry.npmjs.org/form-data-encoder/-/form-data-encoder-1.7.2.tgz", + "integrity": "sha512-qfqtYan3rxrnCk1VYaA4H+Ms9xdpPqvLZa6xmMgFvhO32x7/3J/ExcTd6qpxM0vH2GdMI+poehyBZvqfMTto8A==" + }, + "node_modules/formdata-node": { + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/formdata-node/-/formdata-node-4.4.1.tgz", + "integrity": "sha512-0iirZp3uVDjVGt9p49aTaqjk84TrglENEDuqfdlZQ1roC9CWlPk6Avf8EEnZNcAqPonwkG35x4n3ww/1THYAeQ==", + "dependencies": { + "node-domexception": "1.0.0", + "web-streams-polyfill": "4.0.0-beta.3" + }, + "engines": { + "node": ">= 12.20" + } + }, + "node_modules/formdata-node/node_modules/web-streams-polyfill": { + "version": "4.0.0-beta.3", + "resolved": "https://registry.npmjs.org/web-streams-polyfill/-/web-streams-polyfill-4.0.0-beta.3.tgz", + "integrity": "sha512-QW95TCTaHmsYfHDybGMwO5IJIM93I/6vTRk+daHTWFPhwh+C8Cg7j7XyKrwrj8Ib6vYXe0ocYNrmzY4xAAN6ug==", + "engines": { + "node": ">= 14" + } + }, "node_modules/formidable": { "version": "2.1.2", "resolved": "https://registry.npmjs.org/formidable/-/formidable-2.1.2.tgz", @@ -5104,6 +5171,14 @@ "node": ">=10.17.0" } }, + "node_modules/humanize-ms": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/humanize-ms/-/humanize-ms-1.2.1.tgz", + "integrity": "sha512-Fl70vYtsAFb/C06PTS9dZBo7ihau+Tu/DNCk/OyHhea07S+aeMWpFFkUaXRa8fI+ScZbEI8dfSxwY7gxZ9SAVQ==", + "dependencies": { + "ms": "^2.0.0" + } + }, "node_modules/iconv-lite": { "version": "0.4.24", "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", @@ -5260,6 +5335,11 @@ "node": ">=8" } }, + "node_modules/is-buffer": { + "version": "1.1.6", + "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-1.1.6.tgz", + "integrity": "sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w==" + }, "node_modules/is-core-module": { "version": "2.13.1", "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.13.1.tgz", @@ -6371,6 +6451,16 @@ "tmpl": "1.0.5" } }, + "node_modules/md5": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/md5/-/md5-2.3.0.tgz", + "integrity": "sha512-T1GITYmFaKuO91vxyoQMFETst+O71VUPEU3ze5GNzDm0OWdP8v1ziTaAEPUr/3kLsY3Sftgz242A1SetQiDL7g==", + "dependencies": { + "charenc": "0.0.2", + "crypt": "0.0.2", + "is-buffer": "~1.1.6" + } + }, "node_modules/media-typer": { "version": "0.3.0", "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz", @@ -6579,6 +6669,24 @@ "integrity": "sha512-AGK2yQKIjRuqnc6VkX2Xj5d+QW8xZ87pa1UK6yA6ouUyuxfHuMP6umE5QK7UmTeOAymo+Zx1Fxiuw9rVx8taHQ==", "dev": true }, + "node_modules/node-domexception": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/node-domexception/-/node-domexception-1.0.0.tgz", + "integrity": "sha512-/jKZoMpw0F8GRwl4/eLROPA3cfcXtLApP0QzLmUT/HuPCZWyB7IY9ZrMeKw2O/nFIqPQB3PVM9aYm0F312AXDQ==", + 
"funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/jimmywarting" + }, + { + "type": "github", + "url": "https://paypal.me/jimmywarting" + } + ], + "engines": { + "node": ">=10.5.0" + } + }, "node_modules/node-emoji": { "version": "1.11.0", "resolved": "https://registry.npmjs.org/node-emoji/-/node-emoji-1.11.0.tgz", @@ -6699,12 +6807,30 @@ } }, "node_modules/openai": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/openai/-/openai-3.3.0.tgz", - "integrity": "sha512-uqxI/Au+aPRnsaQRe8CojU0eCR7I0mBiKjD3sNMzY6DaC1ZVrc85u98mtJW6voDug8fgGN+DIZmTDxTthxb7dQ==", + "version": "4.28.4", + "resolved": "https://registry.npmjs.org/openai/-/openai-4.28.4.tgz", + "integrity": "sha512-RNIwx4MT/F0zyizGcwS+bXKLzJ8QE9IOyigDG/ttnwB220d58bYjYFp0qjvGwEFBO6+pvFVIDABZPGDl46RFsg==", + "dependencies": { + "@types/node": "^18.11.18", + "@types/node-fetch": "^2.6.4", + "abort-controller": "^3.0.0", + "agentkeepalive": "^4.2.1", + "digest-fetch": "^1.3.0", + "form-data-encoder": "1.7.2", + "formdata-node": "^4.3.2", + "node-fetch": "^2.6.7", + "web-streams-polyfill": "^3.2.1" + }, + "bin": { + "openai": "bin/cli" + } + }, + "node_modules/openai/node_modules/@types/node": { + "version": "18.19.22", + "resolved": "https://registry.npmjs.org/@types/node/-/node-18.19.22.tgz", + "integrity": "sha512-p3pDIfuMg/aXBmhkyanPshdfJuX5c5+bQjYLIikPLXAUycEogij/c50n/C+8XOA5L93cU4ZRXtn+dNQGi0IZqQ==", "dependencies": { - "axios": "^0.26.0", - "form-data": "^4.0.0" + "undici-types": "~5.26.4" } }, "node_modules/optionator": { @@ -8675,6 +8801,14 @@ "defaults": "^1.0.3" } }, + "node_modules/web-streams-polyfill": { + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/web-streams-polyfill/-/web-streams-polyfill-3.3.3.tgz", + "integrity": "sha512-d2JWLCivmZYTSIoge9MsgFCZrt571BikcWGYkjC1khllbTeDlGqZ2D8vD8E/lJa8WGWbb7Plm8/XJYV7IJHZZw==", + "engines": { + "node": ">= 8" + } + }, "node_modules/webidl-conversions": { "version": "3.0.1", "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz", diff --git a/new-chatbot-app/package.json b/new-chatbot-app/package.json index 5ceac75..8a406a8 100644 --- a/new-chatbot-app/package.json +++ b/new-chatbot-app/package.json @@ -28,7 +28,7 @@ "@nestjs/platform-socket.io": "^10.3.3", "@nestjs/websockets": "^10.3.3", "eslint-plugin-unused-imports": "^3.1.0", - "openai": "^3.3.0", + "openai": "^4.28.4", "reflect-metadata": "^0.1.13", "rxjs": "^7.8.1" }, diff --git a/new-chatbot-app/src/app.module.ts b/new-chatbot-app/src/app.module.ts index c5076f3..3db9e9c 100644 --- a/new-chatbot-app/src/app.module.ts +++ b/new-chatbot-app/src/app.module.ts @@ -1,11 +1,15 @@ import { Module } from '@nestjs/common'; import { LlmChainModule } from './llm-chain/llm-chain.module'; import { LibraryApiModule } from './library-api/library-api.module'; -import { ChatGateway } from './chat/chat.gateway'; import { SharedModule } from './shared/shared.module'; +import { GatewayModule } from './gateway/gateway.module'; @Module({ - imports: [LlmChainModule, LibraryApiModule, SharedModule], - providers: [ChatGateway], + imports: [ + LlmChainModule, + LibraryApiModule, + SharedModule, + GatewayModule, + ], }) export class AppModule {} diff --git a/new-chatbot-app/src/chat/chat.gateway.ts b/new-chatbot-app/src/chat/chat.gateway.ts deleted file mode 100644 index cc59a10..0000000 --- a/new-chatbot-app/src/chat/chat.gateway.ts +++ /dev/null @@ -1,22 +0,0 @@ -import { - SubscribeMessage, - WebSocketGateway, - MessageBody, - ConnectedSocket, -} from 
'@nestjs/websockets'; -import { Socket } from 'socket.io'; - -@WebSocketGateway() -export class ChatGateway { - handleConnection(client: Socket) { - // Handle connection event - } - @SubscribeMessage('message') - handleEvent( - @MessageBody() data: string, - @ConnectedSocket() client: Socket, - ): void { - console.log(data); - client.emit('message', `Received ${data}`); - } -} diff --git a/new-chatbot-app/src/chat/chat.gateway.spec.ts b/new-chatbot-app/src/gateway/chat/chat.gateway.spec.ts similarity index 75% rename from new-chatbot-app/src/chat/chat.gateway.spec.ts rename to new-chatbot-app/src/gateway/chat/chat.gateway.spec.ts index 34daca9..300821b 100644 --- a/new-chatbot-app/src/chat/chat.gateway.spec.ts +++ b/new-chatbot-app/src/gateway/chat/chat.gateway.spec.ts @@ -1,12 +1,13 @@ import { Test, TestingModule } from '@nestjs/testing'; import { ChatGateway } from './chat.gateway'; +import { LlmConnectionGateway } from '../connection/llm-connection.gateway'; describe('ChatGateway', () => { let gateway: ChatGateway; beforeEach(async () => { const module: TestingModule = await Test.createTestingModule({ - providers: [ChatGateway], + providers: [ChatGateway, LlmConnectionGateway], }).compile(); gateway = module.get(ChatGateway); diff --git a/new-chatbot-app/src/gateway/chat/chat.gateway.ts b/new-chatbot-app/src/gateway/chat/chat.gateway.ts new file mode 100644 index 0000000..b4327c6 --- /dev/null +++ b/new-chatbot-app/src/gateway/chat/chat.gateway.ts @@ -0,0 +1,41 @@ +import { + SubscribeMessage, + WebSocketGateway, + MessageBody, + ConnectedSocket, + OnGatewayDisconnect, +} from '@nestjs/websockets'; +import { Socket } from 'socket.io'; +import { LlmConnectionGateway } from '../connection/llm-connection.gateway'; +import { LlmChainService } from 'src/llm-chain/llm-chain.service'; +import { Logger } from '@nestjs/common'; + +@WebSocketGateway() +export class ChatGateway implements OnGatewayDisconnect { + private readonly logger = new Logger(ChatGateway.name); + + constructor(private llmConnnectionGateway: LlmConnectionGateway) {} + @SubscribeMessage('message') + async handleEvent( + @MessageBody() userMessage: string, + @ConnectedSocket() client: Socket, + ): Promise { + const llmChain: LlmChainService = + await this.llmConnnectionGateway.getLlmChainForCurrentSocket(client); + + const modelResponse = await llmChain.getModelResponse(userMessage); + client.emit('message', modelResponse); + } + + async handleDisconnect(client: any): Promise { + const llmChain: LlmChainService = + await this.llmConnnectionGateway.getLlmChainForCurrentSocket(client); + + const totalTokenUsage = llmChain.getTokenUsage(); + this.logger.log( + `Total Token Used for the chat session is ${JSON.stringify(totalTokenUsage)}`, + ); + + this.llmConnnectionGateway.closeLlmChainForCurrentSocket(client); + } +} diff --git a/new-chatbot-app/src/gateway/connection/llm-connection.gateway.spec.ts b/new-chatbot-app/src/gateway/connection/llm-connection.gateway.spec.ts new file mode 100644 index 0000000..8ad25e4 --- /dev/null +++ b/new-chatbot-app/src/gateway/connection/llm-connection.gateway.spec.ts @@ -0,0 +1,18 @@ +import { Test, TestingModule } from '@nestjs/testing'; +import { LlmConnectionGateway } from '../connection/llm-connection.gateway'; + +describe('LlmConnectionGateway', () => { + let gateway: LlmConnectionGateway; + + beforeEach(async () => { + const module: TestingModule = await Test.createTestingModule({ + providers: [LlmConnectionGateway], + }).compile(); + + gateway = module.get(LlmConnectionGateway); + }); + + 
it('should be defined', () => { + expect(gateway).toBeDefined(); + }); +}); diff --git a/new-chatbot-app/src/gateway/connection/llm-connection.gateway.ts b/new-chatbot-app/src/gateway/connection/llm-connection.gateway.ts new file mode 100644 index 0000000..76cd317 --- /dev/null +++ b/new-chatbot-app/src/gateway/connection/llm-connection.gateway.ts @@ -0,0 +1,33 @@ +import { Injectable } from '@nestjs/common'; +import { ContextId, ContextIdFactory, ModuleRef } from '@nestjs/core'; +import { LlmChainService } from '../../llm-chain/llm-chain.service'; + +import { Socket } from 'socket.io'; + +@Injectable() +export class LlmConnectionGateway { + private socketContextIdMapping: Map = new Map< + Socket, + ContextId + >(); + constructor(private readonly moduleRef: ModuleRef) {} + + /** + * This function returns a unique LlmChain for each socket (or each new socket client connects) + * @param socket + * @returns + */ + public getLlmChainForCurrentSocket(socket: Socket): Promise { + if (!this.socketContextIdMapping.has(socket)) { + const contextId = ContextIdFactory.create(); + this.moduleRef.registerRequestByContextId(LlmChainService, contextId); + this.socketContextIdMapping.set(socket, contextId); + } + const contextId: ContextId = this.socketContextIdMapping.get(socket)!; + return this.moduleRef.resolve(LlmChainService, contextId, {strict: false}); + } + + public closeLlmChainForCurrentSocket(socket: Socket) { + this.socketContextIdMapping.delete(socket); + } +} diff --git a/new-chatbot-app/src/gateway/gateway.module.ts b/new-chatbot-app/src/gateway/gateway.module.ts new file mode 100644 index 0000000..f621aa1 --- /dev/null +++ b/new-chatbot-app/src/gateway/gateway.module.ts @@ -0,0 +1,13 @@ +import { Module } from '@nestjs/common'; +import { LlmChainModule } from '../llm-chain/llm-chain.module'; +import { SharedModule } from '../shared/shared.module'; +import { LlmConnectionGateway } from './connection/llm-connection.gateway'; +import { ChatGateway } from './chat/chat.gateway'; + + +@Module({ + imports: [LlmChainModule, SharedModule], + providers: [ChatGateway, LlmConnectionGateway], + exports: [ChatGateway, LlmConnectionGateway] +}) +export class GatewayModule {} diff --git a/new-chatbot-app/src/llm-chain/llm-chain.module.ts b/new-chatbot-app/src/llm-chain/llm-chain.module.ts index 4741c3a..9f02f50 100644 --- a/new-chatbot-app/src/llm-chain/llm-chain.module.ts +++ b/new-chatbot-app/src/llm-chain/llm-chain.module.ts @@ -4,9 +4,10 @@ import { MemoryModule } from './memory/memory.module'; import { PromptModule } from './prompt/prompt.module'; import { LlmChainService } from './llm-chain.service'; import { LlmModule } from './llm/llm.module'; +import { SharedModule } from '../shared/shared.module'; @Module({ - imports: [LlmModule, LlmToolboxModule, MemoryModule, PromptModule], + imports: [LlmModule, LlmToolboxModule, MemoryModule, PromptModule, SharedModule], providers: [LlmChainService], exports: [LlmChainService], }) diff --git a/new-chatbot-app/src/llm-chain/llm-chain.service.ts b/new-chatbot-app/src/llm-chain/llm-chain.service.ts index 9549196..f8b1a2c 100644 --- a/new-chatbot-app/src/llm-chain/llm-chain.service.ts +++ b/new-chatbot-app/src/llm-chain/llm-chain.service.ts @@ -1,4 +1,47 @@ -import { Injectable } from '@nestjs/common'; +import { Injectable, Scope } from '@nestjs/common'; +import { LlmService } from './llm/llm.service'; +import { ChatbotConversationPromptWithToolsService } from './prompt/chatbot-conversation-prompt-with-tools/chatbot-conversation-prompt-with-tools.service'; 
+import { ConversationMemoryService } from './memory/conversation-memory/conversation-memory.service'; +import { Role } from './memory/memory.interface'; +import { OpenAiModelType } from './llm/openai-api/openai-api.service'; +import { TokenUsage, TokenUsageService } from '../shared/services/token-usage/token-usage.service'; -@Injectable() -export class LlmChainService {} +/** + * Service for using the LLM Chain + */ +@Injectable({ scope: Scope.REQUEST }) +export class LlmChainService { + totalLlmTokenUsage: TokenUsage = {} + constructor( + private llmService: LlmService, + private promptService: ChatbotConversationPromptWithToolsService, + private memoryService: ConversationMemoryService, + private tokenUsageService: TokenUsageService + ) { + this.promptService.setConversationMemory(memoryService); + } + + /** + * Get model response + * @param userMessage + * @returns model response + */ + public async getModelResponse( + userMessage: string, + ): Promise { + this.memoryService.addToConversation(Role.Customer, userMessage); + const { response, tokenUsage } = await this.llmService.getModelResponse( + this.promptService, + OpenAiModelType.GPT_4, + ); + + //Update total llm token usage + this.totalLlmTokenUsage = this.tokenUsageService.combineTokenUsage(this.totalLlmTokenUsage, tokenUsage); + + return response; + } + + public getTokenUsage(): TokenUsage { + return this.tokenUsageService.combineTokenUsage(this.totalLlmTokenUsage, this.memoryService.getTokenUsage()); + } +} diff --git a/new-chatbot-app/src/llm-chain/llm/llm.service.ts b/new-chatbot-app/src/llm-chain/llm/llm.service.ts index a7251d2..9379b1b 100644 --- a/new-chatbot-app/src/llm-chain/llm/llm.service.ts +++ b/new-chatbot-app/src/llm-chain/llm/llm.service.ts @@ -48,32 +48,29 @@ export class LlmService { temperature: number = 0.0, top_p: number = 0.1, ): Promise<{ response: string; tokenUsage: TokenUsage }> { - return new Promise<{ response: string; tokenUsage: TokenUsage }>( - async (resolve, reject) => { - //Get the appropriate model - let model: LlmInterface; - try { - model = this.getModel(modelType); - } catch (error: any) { - throw error; - } + //Get the appropriate model + let model: LlmInterface; + try { + model = this.getModel(modelType); + } catch (error: any) { + throw error; + } - // Get the prompt from the prompt object - const promptString = await prompt.getPrompt(); + // Get the prompt from the prompt object + const promptString = await prompt.getPrompt(); - const { response, tokenUsage: responseTokenUsage } = - await model.getModelResponse( - promptString, - prompt.getSystemDescription(), - temperature, - top_p, - ); + const { response, tokenUsage: responseTokenUsage } = + await model.getModelResponse( + promptString, + prompt.getSystemDescription(), + modelType, + temperature, + top_p, + ); - resolve({ - response: response, - tokenUsage: responseTokenUsage, - }); - }, - ); + return { + response: response, + tokenUsage: responseTokenUsage, + }; } } diff --git a/new-chatbot-app/src/llm-chain/llm/openai-api/openai-api.service.ts b/new-chatbot-app/src/llm-chain/llm/openai-api/openai-api.service.ts index 657721f..549ae57 100644 --- a/new-chatbot-app/src/llm-chain/llm/openai-api/openai-api.service.ts +++ b/new-chatbot-app/src/llm-chain/llm/openai-api/openai-api.service.ts @@ -1,7 +1,6 @@ import { Injectable, Logger } from '@nestjs/common'; -import { Configuration, CreateChatCompletionResponse, OpenAIApi } from 'openai'; -import { AxiosResponse } from 'axios'; +import OpenAI from 'openai'; import { NetworkService } from 
'../../../shared/services/network/network.service'; import { RetrieveEnvironmentVariablesService } from '../../../shared/services/retrieve-environment-variables/retrieve-environment-variables.service'; @@ -10,6 +9,7 @@ import { TokenUsageService, } from '../../../shared/services/token-usage/token-usage.service'; import { LlmInterface } from '../llm.interface'; +import { ChatCompletion } from 'openai/resources'; export enum OpenAiModelType { GPT_3_5_TURBO = 'gpt-3.5-turbo', @@ -26,21 +26,20 @@ export enum OpenAiModelType { @Injectable() export class OpenaiApiService implements LlmInterface { private readonly logger = new Logger(OpenaiApiService.name); - private readonly model: OpenAIApi; + private readonly openai: OpenAI; constructor( private networkService: NetworkService, private retrieveEnvironmentVariablesService: RetrieveEnvironmentVariablesService, private tokenUsageService: TokenUsageService, ) { - const modelConfiguration = new Configuration({ + this.openai = new OpenAI({ organization: this.retrieveEnvironmentVariablesService.retrieve( 'OPENAI_ORGANIZATION_ID', ), apiKey: this.retrieveEnvironmentVariablesService.retrieve('OPENAI_API_KEY'), }); - this.model = new OpenAIApi(modelConfiguration); } /** @@ -56,59 +55,51 @@ export class OpenaiApiService implements LlmInterface { async getModelResponse( systemPrompt: string, userPrompt: string, - modelName: OpenAiModelType = OpenAiModelType.GPT_4, + model: OpenAiModelType = OpenAiModelType.GPT_4, temperature: number = 0.0, top_p: number = 0.1, ): Promise<{ response: string; tokenUsage: TokenUsage }> { - return new Promise<{ response: string; tokenUsage: TokenUsage }>( - async (resolve, reject) => { - // Get the prompt from the prompt object - let response; // The response from the model, as a result to be returned - try { - response = await this.networkService.retryWithMaxAttempts< - AxiosResponse - >((): Promise> => { - return new Promise< - AxiosResponse - >((resolve, reject) => { - try { - const chatResponse = this.model.createChatCompletion({ - model: modelName, - temperature: temperature, - top_p: top_p, - messages: [ - { - role: 'system', - content: systemPrompt, - }, - { role: 'user', content: userPrompt }, - ], - }) as Promise>; - resolve(chatResponse); - } catch (error: any) { - reject(error); - } - }); - }); - } catch (error: any) { - reject(error); - return; - } + const modelName: string = model as string; + + // Get the prompt from the prompt object + let response; // The response from the model, as a result to be returned - if (response.data.choices[0].message?.content) { - // Extract the usage information from the response - const openAiApiTokenUsage: TokenUsage = - this.tokenUsageService.getTokenUsageFromOpenAiApiResponse(response); - resolve({ - response: response.data.choices[0].message?.content, - tokenUsage: openAiApiTokenUsage, + try { + response = await this.networkService.retryWithMaxAttempts( + async (): Promise => { + const chatResponse = await this.openai.chat.completions.create({ + model: modelName as string, + temperature: temperature, + top_p: top_p, + messages: [ + { + role: 'system', + content: systemPrompt, + }, + { role: 'user', content: userPrompt }, + ], }); - } else { - const errorMsg = 'Error: No response from the model'; - this.logger.error(errorMsg); - throw new Error(errorMsg); - } - }, - ); + return chatResponse; + }, + 5, + ); + } catch (error: any) { + console.log(error); + throw error; + } + + if (response.choices[0].message?.content) { + // Extract the usage information from the response + const 
openAiApiTokenUsage: TokenUsage = + this.tokenUsageService.getTokenUsageFromOpenAiApiResponse(response); + return { + response: response.choices[0].message?.content, + tokenUsage: openAiApiTokenUsage, + }; + } else { + const errorMsg = 'Error: No response from the model'; + this.logger.error(errorMsg); + throw new Error(errorMsg); + } } } diff --git a/new-chatbot-app/src/llm-chain/memory/conversation-memory/conversation-memory.service.ts b/new-chatbot-app/src/llm-chain/memory/conversation-memory/conversation-memory.service.ts index ec41ac3..2fc7697 100644 --- a/new-chatbot-app/src/llm-chain/memory/conversation-memory/conversation-memory.service.ts +++ b/new-chatbot-app/src/llm-chain/memory/conversation-memory/conversation-memory.service.ts @@ -9,7 +9,7 @@ import { ConversationSummarizationPromptService } from '../../prompt/conversatio import { LlmService } from '../../../llm-chain/llm/llm.service'; import { OpenAiModelType } from '../../../llm-chain/llm/openai-api/openai-api.service'; -@Injectable({ scope: Scope.REQUEST }) +@Injectable({ scope: Scope.TRANSIENT }) export class ConversationMemoryService implements ConversationMemory { private conversationQueue: CustomQueue<[Role | null, string]> = new CustomQueue<[Role | null, string]>(); @@ -189,6 +189,10 @@ export class ConversationMemoryService implements ConversationMemory { }); } + /** + * Get the total token usage used for this memory so far + * @returns + */ public getTokenUsage(): TokenUsage { return this.tokenUsage; } diff --git a/new-chatbot-app/src/llm-chain/prompt/chatbot-conversation-prompt-with-tools/chatbot-conversation-prompt-with-tools.service.ts b/new-chatbot-app/src/llm-chain/prompt/chatbot-conversation-prompt-with-tools/chatbot-conversation-prompt-with-tools.service.ts index 94416d5..41742c7 100644 --- a/new-chatbot-app/src/llm-chain/prompt/chatbot-conversation-prompt-with-tools/chatbot-conversation-prompt-with-tools.service.ts +++ b/new-chatbot-app/src/llm-chain/prompt/chatbot-conversation-prompt-with-tools/chatbot-conversation-prompt-with-tools.service.ts @@ -9,7 +9,7 @@ import { RetrieveEnvironmentVariablesService } from '../../../shared/services/re * * Scope: Request */ -@Injectable({ scope: Scope.REQUEST }) +@Injectable({ scope: Scope.TRANSIENT }) export class ChatbotConversationPromptWithToolsService implements Prompt { private modelDescription: string; private conversationMemory: ConversationMemory | undefined = undefined; diff --git a/new-chatbot-app/src/llm-chain/prompt/conversation-summarization-prompt/conversation-summarization-prompt.service.spec.ts b/new-chatbot-app/src/llm-chain/prompt/conversation-summarization-prompt/conversation-summarization-prompt.service.spec.ts index 5366004..aa6172f 100644 --- a/new-chatbot-app/src/llm-chain/prompt/conversation-summarization-prompt/conversation-summarization-prompt.service.spec.ts +++ b/new-chatbot-app/src/llm-chain/prompt/conversation-summarization-prompt/conversation-summarization-prompt.service.spec.ts @@ -11,7 +11,7 @@ describe('ConversationSummarizationPromptService', () => { providers: [ConversationSummarizationPromptService], }).compile(); - service = module.get( + service = await module.resolve( ConversationSummarizationPromptService, ); }); diff --git a/new-chatbot-app/src/llm-chain/prompt/conversation-summarization-prompt/conversation-summarization-prompt.service.ts b/new-chatbot-app/src/llm-chain/prompt/conversation-summarization-prompt/conversation-summarization-prompt.service.ts index e89d4e8..cb6da72 100644 --- 
a/new-chatbot-app/src/llm-chain/prompt/conversation-summarization-prompt/conversation-summarization-prompt.service.ts +++ b/new-chatbot-app/src/llm-chain/prompt/conversation-summarization-prompt/conversation-summarization-prompt.service.ts @@ -1,7 +1,7 @@ -import { Injectable } from '@nestjs/common'; +import { Injectable, Scope } from '@nestjs/common'; import { Prompt } from '../prompt.interface'; -@Injectable() +@Injectable({scope: Scope.TRANSIENT}) export class ConversationSummarizationPromptService implements Prompt { private modelDescription: string; private conversation: string = ''; diff --git a/new-chatbot-app/src/main.ts b/new-chatbot-app/src/main.ts index 13cad38..e53b840 100644 --- a/new-chatbot-app/src/main.ts +++ b/new-chatbot-app/src/main.ts @@ -2,7 +2,7 @@ import { NestFactory } from '@nestjs/core'; import { AppModule } from './app.module'; async function bootstrap() { - const app = await NestFactory.create(AppModule); + const app = await NestFactory.create(AppModule, {}); await app.listen(3000); } bootstrap(); diff --git a/new-chatbot-app/src/shared/services/network/network.service.ts b/new-chatbot-app/src/shared/services/network/network.service.ts index f9159f1..9e9c8c9 100644 --- a/new-chatbot-app/src/shared/services/network/network.service.ts +++ b/new-chatbot-app/src/shared/services/network/network.service.ts @@ -35,7 +35,7 @@ export class NetworkService { resolve(response); } else { const errorMessage = - 'Cannot establish connection with the target network.'; + 'Cannot establish connection with the target network.\n' + error.message; this.logger.error(errorMessage); throw new Error(errorMessage); } diff --git a/new-chatbot-app/src/shared/services/token-usage/token-usage.service.ts b/new-chatbot-app/src/shared/services/token-usage/token-usage.service.ts index 723860e..5b4e84e 100644 --- a/new-chatbot-app/src/shared/services/token-usage/token-usage.service.ts +++ b/new-chatbot-app/src/shared/services/token-usage/token-usage.service.ts @@ -1,6 +1,5 @@ import { Global, Injectable } from '@nestjs/common'; -import { AxiosResponse } from 'axios'; -import { CreateChatCompletionResponse } from 'openai'; +import { ChatCompletion } from 'openai/resources'; import { OpenAiModelType } from 'src/llm-chain/llm/openai-api/openai-api.service'; export type ModelTokenUsage = { @@ -23,24 +22,24 @@ export class TokenUsageService { * @returns */ getTokenUsageFromOpenAiApiResponse( - openaiApiResponse: AxiosResponse, + openaiApiResponse: ChatCompletion, ): TokenUsage { const tokenUsage: TokenUsage = {}; - if (openaiApiResponse.data && openaiApiResponse.data.usage) { + if (openaiApiResponse && openaiApiResponse.usage) { // Extract the usage information from the openaiApiResponse - const openAIModelType: OpenAiModelType = openaiApiResponse.data - .model as OpenAiModelType; + const openAIModelType: OpenAiModelType = + openaiApiResponse.model as OpenAiModelType; tokenUsage[openAIModelType] = { totalTokens: 0, promptTokens: 0, completionTokens: 0, }; tokenUsage[openAIModelType]!.totalTokens += - openaiApiResponse.data.usage!.total_tokens; + openaiApiResponse.usage.total_tokens; tokenUsage[openAIModelType]!.completionTokens += - openaiApiResponse.data.usage!.completion_tokens; + openaiApiResponse.usage.completion_tokens; tokenUsage[openAIModelType]!.promptTokens += - openaiApiResponse.data.usage!.prompt_tokens; + openaiApiResponse.usage.prompt_tokens; } return tokenUsage; } From 82959142630e6c5c8e154946fcab298044e5470f Mon Sep 17 00:00:00 2001 From: dominhnhut01 Date: Fri, 8 Mar 2024 14:58:10 
-0500
Subject: [PATCH 10/14] small fix in slicing conversation in memory

---
 new-chatbot-app/src/llm-chain/llm-chain.service.ts       | 2 +-
 .../src/llm-chain/llm/openai-api/openai-api.service.ts   | 1 +
 new-chatbot-app/src/llm-chain/memory/memory.interface.ts | 2 +-
 .../chatbot-conversation-prompt-with-tools.service.ts    | 4 ++--
 4 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/new-chatbot-app/src/llm-chain/llm-chain.service.ts b/new-chatbot-app/src/llm-chain/llm-chain.service.ts
index f8b1a2c..517b166 100644
--- a/new-chatbot-app/src/llm-chain/llm-chain.service.ts
+++ b/new-chatbot-app/src/llm-chain/llm-chain.service.ts
@@ -32,7 +32,7 @@ export class LlmChainService {
     this.memoryService.addToConversation(Role.Customer, userMessage);
     const { response, tokenUsage } = await this.llmService.getModelResponse(
       this.promptService,
-      OpenAiModelType.GPT_4,
+      OpenAiModelType.GPT_4_TURBO,
     );
 
     //Update total llm token usage
diff --git a/new-chatbot-app/src/llm-chain/llm/openai-api/openai-api.service.ts b/new-chatbot-app/src/llm-chain/llm/openai-api/openai-api.service.ts
index 549ae57..2d9f4e8 100644
--- a/new-chatbot-app/src/llm-chain/llm/openai-api/openai-api.service.ts
+++ b/new-chatbot-app/src/llm-chain/llm/openai-api/openai-api.service.ts
@@ -18,6 +18,7 @@ export enum OpenAiModelType {
   GPT_4 = 'gpt-4',
   GPT_4_0613 = 'gpt-4-0613',
   GPT_4_0314 = 'gpt-4-0314',
+  GPT_4_TURBO = 'gpt-4-0125-preview',
 }
 
 /**
diff --git a/new-chatbot-app/src/llm-chain/memory/memory.interface.ts b/new-chatbot-app/src/llm-chain/memory/memory.interface.ts
index b3ea74c..710f398 100644
--- a/new-chatbot-app/src/llm-chain/memory/memory.interface.ts
+++ b/new-chatbot-app/src/llm-chain/memory/memory.interface.ts
@@ -33,7 +33,7 @@ export interface ConversationMemory {
   * @returns the whole conversation as a string.
* @throws Error if start and end indices are not appropriate */ - getConversationAsString(start: number, end: number): Promise; + getConversationAsString(start?: number, end?: number): Promise; /** * Get the total token usage for the whole memory (in case when we use LLM inside this object to summarize the conversation) diff --git a/new-chatbot-app/src/llm-chain/prompt/chatbot-conversation-prompt-with-tools/chatbot-conversation-prompt-with-tools.service.ts b/new-chatbot-app/src/llm-chain/prompt/chatbot-conversation-prompt-with-tools/chatbot-conversation-prompt-with-tools.service.ts index 41742c7..a39697e 100644 --- a/new-chatbot-app/src/llm-chain/prompt/chatbot-conversation-prompt-with-tools/chatbot-conversation-prompt-with-tools.service.ts +++ b/new-chatbot-app/src/llm-chain/prompt/chatbot-conversation-prompt-with-tools/chatbot-conversation-prompt-with-tools.service.ts @@ -31,7 +31,7 @@ export class ChatbotConversationPromptWithToolsService implements Prompt { "You are a helpful assistant.You should try your best to answer the question.Unfortunately,you don't know anything about the library,books,and articles so you have to always rely on the tool or the given context for library-related,book-related,or article-related questions.\n" + `For context,the current time is ${date.toLocaleString('en-US', { timeZone: 'America/New_York', - })}\n`; + })}\nONLY include your answer in your final answer`; this.modelScratchpad = ''; } @@ -185,7 +185,7 @@ export class ChatbotConversationPromptWithToolsService implements Prompt { return new Promise(async (resolve, reject) => { // Get the conversation summary string const conversationString = - await this.conversationMemory?.getConversationAsString(0, 0); + await this.conversationMemory?.getConversationAsString(0); const wholePrompt: string = `\nThis is the conversation so far(delimited by the triple dashes)\n---\n${conversationString}\n---\n` + From 3651d6253e38ceefbede382ce4d352541fb3511b Mon Sep 17 00:00:00 2001 From: dominhnhut01 Date: Fri, 8 Mar 2024 15:26:50 -0500 Subject: [PATCH 11/14] small fix in the llm-chain --- .../src/llm-chain/llm-chain.service.ts | 33 +++++-- .../llm/openai-api/openai-api.service.ts | 61 ++++++------ .../conversation-memory.service.ts | 95 +++++++++---------- ...-conversation-prompt-with-tools.service.ts | 18 ++-- .../services/network/network.service.ts | 41 ++++---- 5 files changed, 126 insertions(+), 122 deletions(-) diff --git a/new-chatbot-app/src/llm-chain/llm-chain.service.ts b/new-chatbot-app/src/llm-chain/llm-chain.service.ts index 517b166..eb15432 100644 --- a/new-chatbot-app/src/llm-chain/llm-chain.service.ts +++ b/new-chatbot-app/src/llm-chain/llm-chain.service.ts @@ -4,44 +4,57 @@ import { ChatbotConversationPromptWithToolsService } from './prompt/chatbot-conv import { ConversationMemoryService } from './memory/conversation-memory/conversation-memory.service'; import { Role } from './memory/memory.interface'; import { OpenAiModelType } from './llm/openai-api/openai-api.service'; -import { TokenUsage, TokenUsageService } from '../shared/services/token-usage/token-usage.service'; +import { + TokenUsage, + TokenUsageService, +} from '../shared/services/token-usage/token-usage.service'; /** * Service for using the LLM Chain */ @Injectable({ scope: Scope.REQUEST }) export class LlmChainService { - totalLlmTokenUsage: TokenUsage = {} + totalLlmTokenUsage: TokenUsage = {}; constructor( private llmService: LlmService, private promptService: ChatbotConversationPromptWithToolsService, private memoryService: 
ConversationMemoryService, - private tokenUsageService: TokenUsageService + private tokenUsageService: TokenUsageService, ) { + this.memoryService.setMaxContextWindow(6); + this.memoryService.setConversationBufferSize(3); + this.memoryService.setConversationSummarizationMode(true); this.promptService.setConversationMemory(memoryService); } /** - * Get model response + * Get model response * @param userMessage * @returns model response */ - public async getModelResponse( - userMessage: string, - ): Promise { + public async getModelResponse(userMessage: string): Promise { this.memoryService.addToConversation(Role.Customer, userMessage); const { response, tokenUsage } = await this.llmService.getModelResponse( this.promptService, OpenAiModelType.GPT_4_TURBO, ); - + + //Add AI response to conversation + this.memoryService.addToConversation(Role.AIAgent, response); + //Update total llm token usage - this.totalLlmTokenUsage = this.tokenUsageService.combineTokenUsage(this.totalLlmTokenUsage, tokenUsage); + this.totalLlmTokenUsage = this.tokenUsageService.combineTokenUsage( + this.totalLlmTokenUsage, + tokenUsage, + ); return response; } public getTokenUsage(): TokenUsage { - return this.tokenUsageService.combineTokenUsage(this.totalLlmTokenUsage, this.memoryService.getTokenUsage()); + return this.tokenUsageService.combineTokenUsage( + this.totalLlmTokenUsage, + this.memoryService.getTokenUsage(), + ); } } diff --git a/new-chatbot-app/src/llm-chain/llm/openai-api/openai-api.service.ts b/new-chatbot-app/src/llm-chain/llm/openai-api/openai-api.service.ts index 2d9f4e8..660bace 100644 --- a/new-chatbot-app/src/llm-chain/llm/openai-api/openai-api.service.ts +++ b/new-chatbot-app/src/llm-chain/llm/openai-api/openai-api.service.ts @@ -54,51 +54,52 @@ export class OpenaiApiService implements LlmInterface { * @throws Throw error if doesn't receive any message from the OpenAI API */ async getModelResponse( - systemPrompt: string, userPrompt: string, - model: OpenAiModelType = OpenAiModelType.GPT_4, + systemPrompt?: string, + modelName: OpenAiModelType = OpenAiModelType.GPT_4, temperature: number = 0.0, top_p: number = 0.1, ): Promise<{ response: string; tokenUsage: TokenUsage }> { - const modelName: string = model as string; - - // Get the prompt from the prompt object - let response; // The response from the model, as a result to be returned - + let modelResponse; try { - response = await this.networkService.retryWithMaxAttempts( - async (): Promise => { - const chatResponse = await this.openai.chat.completions.create({ - model: modelName as string, - temperature: temperature, - top_p: top_p, - messages: [ - { - role: 'system', - content: systemPrompt, - }, - { role: 'user', content: userPrompt }, - ], - }); - return chatResponse; - }, - 5, - ); + modelResponse = + await this.networkService.retryWithMaxAttempts( + async (): Promise => { + console.log(systemPrompt); + console.log(userPrompt); + const chatResponse = await this.openai.chat.completions.create({ + model: modelName as string, + temperature: temperature, + top_p: top_p, + messages: [ + { + role: 'system', + content: systemPrompt !== undefined ? 
systemPrompt : '', + }, + { role: 'user', content: userPrompt }, + ], + }); + return chatResponse; + }, + 5, + ); } catch (error: any) { console.log(error); throw error; } - if (response.choices[0].message?.content) { - // Extract the usage information from the response + if (modelResponse.choices[0].message?.content) { + // Extract the usage information from the modelResponse const openAiApiTokenUsage: TokenUsage = - this.tokenUsageService.getTokenUsageFromOpenAiApiResponse(response); + this.tokenUsageService.getTokenUsageFromOpenAiApiResponse( + modelResponse, + ); return { - response: response.choices[0].message?.content, + response: modelResponse.choices[0].message?.content, tokenUsage: openAiApiTokenUsage, }; } else { - const errorMsg = 'Error: No response from the model'; + const errorMsg = 'Error: No modelResponse from the model'; this.logger.error(errorMsg); throw new Error(errorMsg); } diff --git a/new-chatbot-app/src/llm-chain/memory/conversation-memory/conversation-memory.service.ts b/new-chatbot-app/src/llm-chain/memory/conversation-memory/conversation-memory.service.ts index 2fc7697..c14a923 100644 --- a/new-chatbot-app/src/llm-chain/memory/conversation-memory/conversation-memory.service.ts +++ b/new-chatbot-app/src/llm-chain/memory/conversation-memory/conversation-memory.service.ts @@ -123,34 +123,31 @@ export class ConversationMemoryService implements ConversationMemory { private async summarizeConversation( conversation: [Role | null, string][], ): Promise { - return new Promise(async (resolve, rejects) => { - if (conversation.length === 0) { - resolve(''); - return; - } - - const conversationString = this.stringifyConversation(conversation); - this.conversationSummarizationPromptService.setConversation( - conversationString, - ); - const { - response: conversationSummary, - tokenUsage: tokenUsageFromSummarization, - } = await this.llmService.getModelResponse( - this.conversationSummarizationPromptService, - OpenAiModelType.GPT_3_5_TURBO, - ); + if (conversation.length === 0) { + return ''; + } - //Update TokenUsage information - this.setTokenUsage( - this.tokenUsageService.combineTokenUsage( - tokenUsageFromSummarization, - this.getTokenUsage(), - ), - ); + const conversationString = this.stringifyConversation(conversation); + this.conversationSummarizationPromptService.setConversation( + conversationString, + ); + const { + response: conversationSummary, + tokenUsage: tokenUsageFromSummarization, + } = await this.llmService.getModelResponse( + this.conversationSummarizationPromptService, + OpenAiModelType.GPT_3_5_TURBO, + ); - resolve(conversationSummary); - }); + //Update TokenUsage information + this.setTokenUsage( + this.tokenUsageService.combineTokenUsage( + tokenUsageFromSummarization, + this.getTokenUsage(), + ), + ); + + return conversationSummary; } /** * get @@ -163,35 +160,31 @@ export class ConversationMemoryService implements ConversationMemory { start: number = 0, end?: number, ): Promise { - return new Promise(async (resolve, rejects) => { - const slicedConversation = this.conversationQueue.slice(start, end); - - //If bufferSize is undefined, we don't summarize anything - const conversationToUnchange = - this.conversationBufferSize !== undefined - ? slicedConversation.slice((start = -this.conversationBufferSize)) - : slicedConversation; - const conversationToSummarize = - this.conversationBufferSize !== undefined - ? 
slicedConversation.slice( - (start = 0), - (end = -this.conversationBufferSize), - ) - : []; - - const conversationSummary = this.conversationSummarizationMode - ? await this.summarizeConversation(conversationToSummarize) - : this.stringifyConversation(conversationToSummarize); - - resolve( - `${conversationSummary}\n${this.stringifyConversation(conversationToUnchange)}`, - ); - }); + const slicedConversation = this.conversationQueue.slice(start, end); + + //If bufferSize is undefined, we don't summarize anything + const conversationToUnchange = + this.conversationBufferSize !== undefined + ? slicedConversation.slice((start = -this.conversationBufferSize)) + : slicedConversation; + const conversationToSummarize = + this.conversationBufferSize !== undefined + ? slicedConversation.slice( + (start = 0), + (end = -this.conversationBufferSize), + ) + : []; + + const conversationSummary = this.conversationSummarizationMode + ? await this.summarizeConversation(conversationToSummarize) + : this.stringifyConversation(conversationToSummarize); + + return `${conversationSummary}\n${this.stringifyConversation(conversationToUnchange)}`; } /** * Get the total token usage used for this memory so far - * @returns + * @returns */ public getTokenUsage(): TokenUsage { return this.tokenUsage; diff --git a/new-chatbot-app/src/llm-chain/prompt/chatbot-conversation-prompt-with-tools/chatbot-conversation-prompt-with-tools.service.ts b/new-chatbot-app/src/llm-chain/prompt/chatbot-conversation-prompt-with-tools/chatbot-conversation-prompt-with-tools.service.ts index a39697e..46da19f 100644 --- a/new-chatbot-app/src/llm-chain/prompt/chatbot-conversation-prompt-with-tools/chatbot-conversation-prompt-with-tools.service.ts +++ b/new-chatbot-app/src/llm-chain/prompt/chatbot-conversation-prompt-with-tools/chatbot-conversation-prompt-with-tools.service.ts @@ -28,7 +28,7 @@ export class ChatbotConversationPromptWithToolsService implements Prompt { const date = new Date(); this.modelDescription = - "You are a helpful assistant.You should try your best to answer the question.Unfortunately,you don't know anything about the library,books,and articles so you have to always rely on the tool or the given context for library-related,book-related,or article-related questions.\n" + + "You are a helpful assistant.You should try your best to help and talk to the customer based on the conversation below.Unfortunately,you don't know anything about the library,books,and articles so you have to always rely on the tool or the given context for library-related,book-related,or article-related questions.\n" + `For context,the current time is ${date.toLocaleString('en-US', { timeZone: 'America/New_York', })}\nONLY include your answer in your final answer`; @@ -182,16 +182,14 @@ export class ChatbotConversationPromptWithToolsService implements Prompt { * @returns string The whole prompt */ public async getPrompt(): Promise { - return new Promise(async (resolve, reject) => { - // Get the conversation summary string - const conversationString = - await this.conversationMemory?.getConversationAsString(0); + // Get the conversation summary string + const conversationString = + await this.conversationMemory?.getConversationAsString(0); - const wholePrompt: string = - `\nThis is the conversation so far(delimited by the triple dashes)\n---\n${conversationString}\n---\n` + - `This is your scratchpad:\n"""\n${this.modelScratchpad}\n"""\n`; - resolve(wholePrompt); - }); + const wholePrompt: string = + `\nThis is the conversation so far(delimited by the 
triple dashes)\n---\n${conversationString}\n---\n` + + `This is your scratchpad:\n"""\n${this.modelScratchpad}\n"""\n`; + return wholePrompt; } /** diff --git a/new-chatbot-app/src/shared/services/network/network.service.ts b/new-chatbot-app/src/shared/services/network/network.service.ts index 9e9c8c9..554b346 100644 --- a/new-chatbot-app/src/shared/services/network/network.service.ts +++ b/new-chatbot-app/src/shared/services/network/network.service.ts @@ -17,28 +17,27 @@ export class NetworkService { axiosFunc: (...args: any[]) => Promise, maxAttemps: number = 5, ): Promise { - return new Promise(async (resolve, reject) => { - let retries = 0; - let response; - let error: any; - // Retry until the function succeeds or the maximum number of attempts is reached - while (!response && retries < maxAttemps) { - try { - response = await axiosFunc(); - } catch (e: any) { - error = e; - } - retries++; + let retries = 0; + let response; + let error: any; + // Retry until the function succeeds or the maximum number of attempts is reached + while (!response && retries < maxAttemps) { + try { + response = await axiosFunc(); + } catch (e: any) { + error = e; } + retries++; + } - if (response) { - resolve(response); - } else { - const errorMessage = - 'Cannot establish connection with the target network.\n' + error.message; - this.logger.error(errorMessage); - throw new Error(errorMessage); - } - }); + if (response) { + return response; + } else { + const errorMessage = + 'Cannot establish connection with the target network.\n' + + error.message; + this.logger.error(errorMessage); + throw new Error(errorMessage); + } } } From fd11b12b744e1de9401b963edd62a8aac6b2f97e Mon Sep 17 00:00:00 2001 From: dominhnhut01 Date: Mon, 11 Mar 2024 16:17:48 -0400 Subject: [PATCH 12/14] added unit test --- .../connection/llm-connection.gateway.spec.ts | 18 --- .../src/llm-chain/llm-chain.service.spec.ts | 105 ++++++++++++++++++ .../src/llm-chain/llm-chain.service.ts | 8 +- .../llm/openai-api/openai-api.service.ts | 3 +- .../token-usage/token-usage.service.spec.ts | 49 ++++---- .../token-usage/token-usage.service.ts | 1 + 6 files changed, 134 insertions(+), 50 deletions(-) delete mode 100644 new-chatbot-app/src/gateway/connection/llm-connection.gateway.spec.ts create mode 100644 new-chatbot-app/src/llm-chain/llm-chain.service.spec.ts diff --git a/new-chatbot-app/src/gateway/connection/llm-connection.gateway.spec.ts b/new-chatbot-app/src/gateway/connection/llm-connection.gateway.spec.ts deleted file mode 100644 index 8ad25e4..0000000 --- a/new-chatbot-app/src/gateway/connection/llm-connection.gateway.spec.ts +++ /dev/null @@ -1,18 +0,0 @@ -import { Test, TestingModule } from '@nestjs/testing'; -import { LlmConnectionGateway } from '../connection/llm-connection.gateway'; - -describe('LlmConnectionGateway', () => { - let gateway: LlmConnectionGateway; - - beforeEach(async () => { - const module: TestingModule = await Test.createTestingModule({ - providers: [LlmConnectionGateway], - }).compile(); - - gateway = module.get(LlmConnectionGateway); - }); - - it('should be defined', () => { - expect(gateway).toBeDefined(); - }); -}); diff --git a/new-chatbot-app/src/llm-chain/llm-chain.service.spec.ts b/new-chatbot-app/src/llm-chain/llm-chain.service.spec.ts new file mode 100644 index 0000000..696a459 --- /dev/null +++ b/new-chatbot-app/src/llm-chain/llm-chain.service.spec.ts @@ -0,0 +1,105 @@ +import { Test, TestingModule } from '@nestjs/testing'; +import { ConfigService } from '@nestjs/config'; +import { LlmChainService } from 
'./llm-chain.service'; +import { SharedModule } from '../shared/shared.module'; +import { ChatbotConversationPromptWithToolsService } from './prompt/chatbot-conversation-prompt-with-tools/chatbot-conversation-prompt-with-tools.service'; +import { TokenUsageService } from '../shared/services/token-usage/token-usage.service'; +import { MemoryModule } from './memory/memory.module'; +import { LlmModule } from './llm/llm.module'; +import { PromptModule } from './prompt/prompt.module'; +import { LlmService } from './llm/llm.service'; +import { ConversationMemoryService } from './memory/conversation-memory/conversation-memory.service'; +import { LlmChainModule } from './llm-chain.module'; + +describe('LlmChainService', () => { + let llmChainService: LlmChainService; + let mockedLlmService: LlmService; + + beforeEach(async () => { + const moduleRef: TestingModule = await Test.createTestingModule({ + imports: [ + SharedModule, + MemoryModule, + LlmModule, + PromptModule, + LlmChainModule, + ], + providers: [ + LlmChainService, + LlmService, + ConversationMemoryService, + ChatbotConversationPromptWithToolsService, + TokenUsageService, + ], + }).compile(); + + llmChainService = await moduleRef.resolve(LlmChainService); + + mockedLlmService = await moduleRef.resolve(LlmService); + + }); + + it('should be defined', () => { + expect(llmChainService).toBeDefined(); + }); + + it('should get the correct model response', async () => { + const expectedResponse = 'This is response'; + jest + .spyOn(mockedLlmService, 'getModelResponse') + .mockImplementation(async () => ({ + response: expectedResponse, + tokenUsage: { + 'gpt-4': { + totalTokens: 100, + promptTokens: 70, + completionTokens: 30, + }, + }, + })); + + expect(await llmChainService.getModelResponse('Message from user')).toEqual( + expectedResponse, + ); + }); + + it('should get the correct token usage', async () => { + jest + .spyOn(mockedLlmService, 'getModelResponse') + .mockImplementation(async () => ({ + response: '_', + tokenUsage: { + 'gpt-4': { + totalTokens: 100, + promptTokens: 70, + completionTokens: 30, + }, + }, + })); + jest + .spyOn(llmChainService['memoryService'], 'getTokenUsage') + .mockReturnValue({ + 'gpt-3.5-turbo': { + totalTokens: 90, + promptTokens: 60, + completionTokens: 30, + }, + }); + + //Pretend asking for LLM Chain response + await llmChainService.getModelResponse('_'); + + expect(llmChainService.getTokenUsage()).toEqual({ + 'gpt-4': { + totalTokens: 100, + promptTokens: 70, + completionTokens: 30, + }, + 'gpt-3.5-turbo': { + totalTokens: 90, + promptTokens: 60, + completionTokens: 30, + }, + }); + }); +}); diff --git a/new-chatbot-app/src/llm-chain/llm-chain.service.ts b/new-chatbot-app/src/llm-chain/llm-chain.service.ts index eb15432..9b90d84 100644 --- a/new-chatbot-app/src/llm-chain/llm-chain.service.ts +++ b/new-chatbot-app/src/llm-chain/llm-chain.service.ts @@ -14,7 +14,7 @@ import { */ @Injectable({ scope: Scope.REQUEST }) export class LlmChainService { - totalLlmTokenUsage: TokenUsage = {}; + private totalLlmTokenUsage: TokenUsage = {}; constructor( private llmService: LlmService, private promptService: ChatbotConversationPromptWithToolsService, @@ -52,9 +52,11 @@ export class LlmChainService { } public getTokenUsage(): TokenUsage { - return this.tokenUsageService.combineTokenUsage( + const totalTokenUsageFromMemory = this.memoryService.getTokenUsage() + const totalTokenUsage = this.tokenUsageService.combineTokenUsage( this.totalLlmTokenUsage, - this.memoryService.getTokenUsage(), + totalTokenUsageFromMemory, ); 
+ return totalTokenUsage; } } diff --git a/new-chatbot-app/src/llm-chain/llm/openai-api/openai-api.service.ts b/new-chatbot-app/src/llm-chain/llm/openai-api/openai-api.service.ts index 660bace..5c97f88 100644 --- a/new-chatbot-app/src/llm-chain/llm/openai-api/openai-api.service.ts +++ b/new-chatbot-app/src/llm-chain/llm/openai-api/openai-api.service.ts @@ -65,8 +65,7 @@ export class OpenaiApiService implements LlmInterface { modelResponse = await this.networkService.retryWithMaxAttempts( async (): Promise => { - console.log(systemPrompt); - console.log(userPrompt); + const chatResponse = await this.openai.chat.completions.create({ model: modelName as string, temperature: temperature, diff --git a/new-chatbot-app/src/shared/services/token-usage/token-usage.service.spec.ts b/new-chatbot-app/src/shared/services/token-usage/token-usage.service.spec.ts index 071d750..8269145 100644 --- a/new-chatbot-app/src/shared/services/token-usage/token-usage.service.spec.ts +++ b/new-chatbot-app/src/shared/services/token-usage/token-usage.service.spec.ts @@ -1,7 +1,6 @@ import { Test, TestingModule } from '@nestjs/testing'; import { TokenUsage, TokenUsageService } from './token-usage.service'; -import { AxiosResponse } from 'axios'; -import { CreateChatCompletionResponse } from 'openai'; +import { ChatCompletion } from 'openai/resources'; describe('TokenUsageService', () => { let service: TokenUsageService; @@ -19,32 +18,28 @@ describe('TokenUsageService', () => { }); it('should get the right Token Usage from OpenAiApiResponse', () => { - const mockAxiosResponse: AxiosResponse = - { - data: { - choices: [ - { - message: { - role: 'system', - content: 'Sample content', - }, - }, - ], - model: 'gpt-3.5-turbo', - usage: { - total_tokens: 100, - completion_tokens: 60, - prompt_tokens: 40, + const mockOpenaiResponse: ChatCompletion = { + choices: [ + { + index: 0, + message: { + role: 'assistant', + content: 'Sample content', }, - id: 'someID', - object: 'someObj', - created: 1, + logprobs: null, + finish_reason: 'stop', }, - status: 200, - statusText: 'OK', - headers: {}, - config: {}, - }; + ], + model: 'gpt-3.5-turbo', + usage: { + total_tokens: 100, + completion_tokens: 60, + prompt_tokens: 40, + }, + id: 'someID', + object: "chat.completion", + created: 1, + }; const expectedTokenUsageResult: TokenUsage = { 'gpt-3.5-turbo': { totalTokens: 100, @@ -54,7 +49,7 @@ describe('TokenUsageService', () => { }; const tokenUsage = - service.getTokenUsageFromOpenAiApiResponse(mockAxiosResponse); + service.getTokenUsageFromOpenAiApiResponse(mockOpenaiResponse); expect(tokenUsage).toEqual(expectedTokenUsageResult); }); diff --git a/new-chatbot-app/src/shared/services/token-usage/token-usage.service.ts b/new-chatbot-app/src/shared/services/token-usage/token-usage.service.ts index 5b4e84e..bbc35db 100644 --- a/new-chatbot-app/src/shared/services/token-usage/token-usage.service.ts +++ b/new-chatbot-app/src/shared/services/token-usage/token-usage.service.ts @@ -48,6 +48,7 @@ export class TokenUsageService { * Combine 2 tokenUsage object * @param tokenUsage1 * @param tokenUsage2 + * @returns the combine Token Usage */ combineTokenUsage( tokenUsage1: TokenUsage, From cec570855115a1a6723b96e77b59828b1fb2dbbb Mon Sep 17 00:00:00 2001 From: dominhnhut01 Date: Mon, 11 Mar 2024 16:21:56 -0400 Subject: [PATCH 13/14] formatting with prettier --- new-chatbot-app/src/app.module.ts | 7 +------ .../src/gateway/connection/llm-connection.gateway.ts | 4 +++- new-chatbot-app/src/gateway/gateway.module.ts | 3 +-- 
new-chatbot-app/src/llm-chain/llm-chain.module.ts | 8 +++++++- new-chatbot-app/src/llm-chain/llm-chain.service.spec.ts | 1 - new-chatbot-app/src/llm-chain/llm-chain.service.ts | 2 +- .../src/llm-chain/llm/openai-api/openai-api.service.ts | 1 - .../conversation-summarization-prompt.service.ts | 2 +- .../services/token-usage/token-usage.service.spec.ts | 2 +- 9 files changed, 15 insertions(+), 15 deletions(-) diff --git a/new-chatbot-app/src/app.module.ts b/new-chatbot-app/src/app.module.ts index 3db9e9c..4207b63 100644 --- a/new-chatbot-app/src/app.module.ts +++ b/new-chatbot-app/src/app.module.ts @@ -5,11 +5,6 @@ import { SharedModule } from './shared/shared.module'; import { GatewayModule } from './gateway/gateway.module'; @Module({ - imports: [ - LlmChainModule, - LibraryApiModule, - SharedModule, - GatewayModule, - ], + imports: [LlmChainModule, LibraryApiModule, SharedModule, GatewayModule], }) export class AppModule {} diff --git a/new-chatbot-app/src/gateway/connection/llm-connection.gateway.ts b/new-chatbot-app/src/gateway/connection/llm-connection.gateway.ts index 76cd317..93af2ea 100644 --- a/new-chatbot-app/src/gateway/connection/llm-connection.gateway.ts +++ b/new-chatbot-app/src/gateway/connection/llm-connection.gateway.ts @@ -24,7 +24,9 @@ export class LlmConnectionGateway { this.socketContextIdMapping.set(socket, contextId); } const contextId: ContextId = this.socketContextIdMapping.get(socket)!; - return this.moduleRef.resolve(LlmChainService, contextId, {strict: false}); + return this.moduleRef.resolve(LlmChainService, contextId, { + strict: false, + }); } public closeLlmChainForCurrentSocket(socket: Socket) { diff --git a/new-chatbot-app/src/gateway/gateway.module.ts b/new-chatbot-app/src/gateway/gateway.module.ts index f621aa1..92a935e 100644 --- a/new-chatbot-app/src/gateway/gateway.module.ts +++ b/new-chatbot-app/src/gateway/gateway.module.ts @@ -4,10 +4,9 @@ import { SharedModule } from '../shared/shared.module'; import { LlmConnectionGateway } from './connection/llm-connection.gateway'; import { ChatGateway } from './chat/chat.gateway'; - @Module({ imports: [LlmChainModule, SharedModule], providers: [ChatGateway, LlmConnectionGateway], - exports: [ChatGateway, LlmConnectionGateway] + exports: [ChatGateway, LlmConnectionGateway], }) export class GatewayModule {} diff --git a/new-chatbot-app/src/llm-chain/llm-chain.module.ts b/new-chatbot-app/src/llm-chain/llm-chain.module.ts index 9f02f50..69a6b11 100644 --- a/new-chatbot-app/src/llm-chain/llm-chain.module.ts +++ b/new-chatbot-app/src/llm-chain/llm-chain.module.ts @@ -7,7 +7,13 @@ import { LlmModule } from './llm/llm.module'; import { SharedModule } from '../shared/shared.module'; @Module({ - imports: [LlmModule, LlmToolboxModule, MemoryModule, PromptModule, SharedModule], + imports: [ + LlmModule, + LlmToolboxModule, + MemoryModule, + PromptModule, + SharedModule, + ], providers: [LlmChainService], exports: [LlmChainService], }) diff --git a/new-chatbot-app/src/llm-chain/llm-chain.service.spec.ts b/new-chatbot-app/src/llm-chain/llm-chain.service.spec.ts index 696a459..cfc7ecb 100644 --- a/new-chatbot-app/src/llm-chain/llm-chain.service.spec.ts +++ b/new-chatbot-app/src/llm-chain/llm-chain.service.spec.ts @@ -36,7 +36,6 @@ describe('LlmChainService', () => { llmChainService = await moduleRef.resolve(LlmChainService); mockedLlmService = await moduleRef.resolve(LlmService); - }); it('should be defined', () => { diff --git a/new-chatbot-app/src/llm-chain/llm-chain.service.ts 
b/new-chatbot-app/src/llm-chain/llm-chain.service.ts index 9b90d84..41dd496 100644 --- a/new-chatbot-app/src/llm-chain/llm-chain.service.ts +++ b/new-chatbot-app/src/llm-chain/llm-chain.service.ts @@ -52,7 +52,7 @@ export class LlmChainService { } public getTokenUsage(): TokenUsage { - const totalTokenUsageFromMemory = this.memoryService.getTokenUsage() + const totalTokenUsageFromMemory = this.memoryService.getTokenUsage(); const totalTokenUsage = this.tokenUsageService.combineTokenUsage( this.totalLlmTokenUsage, totalTokenUsageFromMemory, diff --git a/new-chatbot-app/src/llm-chain/llm/openai-api/openai-api.service.ts b/new-chatbot-app/src/llm-chain/llm/openai-api/openai-api.service.ts index 5c97f88..0034b24 100644 --- a/new-chatbot-app/src/llm-chain/llm/openai-api/openai-api.service.ts +++ b/new-chatbot-app/src/llm-chain/llm/openai-api/openai-api.service.ts @@ -65,7 +65,6 @@ export class OpenaiApiService implements LlmInterface { modelResponse = await this.networkService.retryWithMaxAttempts( async (): Promise => { - const chatResponse = await this.openai.chat.completions.create({ model: modelName as string, temperature: temperature, diff --git a/new-chatbot-app/src/llm-chain/prompt/conversation-summarization-prompt/conversation-summarization-prompt.service.ts b/new-chatbot-app/src/llm-chain/prompt/conversation-summarization-prompt/conversation-summarization-prompt.service.ts index cb6da72..300f936 100644 --- a/new-chatbot-app/src/llm-chain/prompt/conversation-summarization-prompt/conversation-summarization-prompt.service.ts +++ b/new-chatbot-app/src/llm-chain/prompt/conversation-summarization-prompt/conversation-summarization-prompt.service.ts @@ -1,7 +1,7 @@ import { Injectable, Scope } from '@nestjs/common'; import { Prompt } from '../prompt.interface'; -@Injectable({scope: Scope.TRANSIENT}) +@Injectable({ scope: Scope.TRANSIENT }) export class ConversationSummarizationPromptService implements Prompt { private modelDescription: string; private conversation: string = ''; diff --git a/new-chatbot-app/src/shared/services/token-usage/token-usage.service.spec.ts b/new-chatbot-app/src/shared/services/token-usage/token-usage.service.spec.ts index 8269145..577cba2 100644 --- a/new-chatbot-app/src/shared/services/token-usage/token-usage.service.spec.ts +++ b/new-chatbot-app/src/shared/services/token-usage/token-usage.service.spec.ts @@ -37,7 +37,7 @@ describe('TokenUsageService', () => { prompt_tokens: 40, }, id: 'someID', - object: "chat.completion", + object: 'chat.completion', created: 1, }; const expectedTokenUsageResult: TokenUsage = { From 436b930a8e6f05c564cad4136344c8a4dc8f45c0 Mon Sep 17 00:00:00 2001 From: dominhnhut01 Date: Tue, 12 Mar 2024 15:57:51 -0400 Subject: [PATCH 14/14] small fix and added unit test for llm-connection-gateway --- .../src/gateway/chat/chat.gateway.ts | 2 +- .../connection/llm-connection.gateway.spec.ts | 48 +++++++++++++++++++ .../connection/llm-connection.gateway.ts | 32 ++++++++----- 3 files changed, 70 insertions(+), 12 deletions(-) create mode 100644 new-chatbot-app/src/gateway/connection/llm-connection.gateway.spec.ts diff --git a/new-chatbot-app/src/gateway/chat/chat.gateway.ts b/new-chatbot-app/src/gateway/chat/chat.gateway.ts index b4327c6..8270368 100644 --- a/new-chatbot-app/src/gateway/chat/chat.gateway.ts +++ b/new-chatbot-app/src/gateway/chat/chat.gateway.ts @@ -21,7 +21,7 @@ export class ChatGateway implements OnGatewayDisconnect { @ConnectedSocket() client: Socket, ): Promise { const llmChain: LlmChainService = - await 
this.llmConnnectionGateway.getLlmChainForCurrentSocket(client); + await this.llmConnnectionGateway.getLlmChainForCurrentSocket(client.id); const modelResponse = await llmChain.getModelResponse(userMessage); client.emit('message', modelResponse); diff --git a/new-chatbot-app/src/gateway/connection/llm-connection.gateway.spec.ts b/new-chatbot-app/src/gateway/connection/llm-connection.gateway.spec.ts new file mode 100644 index 0000000..3802948 --- /dev/null +++ b/new-chatbot-app/src/gateway/connection/llm-connection.gateway.spec.ts @@ -0,0 +1,48 @@ +import { Test, TestingModule } from '@nestjs/testing'; +import { LlmConnectionGateway } from './llm-connection.gateway'; +import { GatewayModule } from '../gateway.module'; +import { LlmChainModule } from '../../llm-chain/llm-chain.module'; +import { SharedModule } from '../../shared/shared.module'; + +describe('LlmConnectionGateway', () => { + let gateway: LlmConnectionGateway; + + beforeEach(async () => { + const module: TestingModule = await Test.createTestingModule({ + imports: [GatewayModule, LlmChainModule, SharedModule], + providers: [LlmConnectionGateway], + }).compile(); + + gateway = await module.resolve(LlmConnectionGateway); + }); + it('should be defined', () => { + expect(gateway).toBeDefined(); + }); + + it('should get different LlmChain for different socketId', () => { + const socketId1 = 'id1'; + const socketId2 = 'id2'; + + //Init the LlmChain instance + gateway.getLlmChainForCurrentSocket(socketId1); + gateway.getLlmChainForCurrentSocket(socketId2); + + expect(gateway.getContextId(socketId1)).not.toEqual( + gateway.getContextId(socketId2), + ); + expect(gateway.getContextId(socketId1)).toEqual( + gateway.getContextId(socketId1), + ); + expect(gateway.getContextId(socketId2)).toEqual( + gateway.getContextId(socketId2), + ); + }); + + it('should remove the pointer to the instance', async () => { + const socketId1 = 'id1'; + expect(await gateway.getLlmChainForCurrentSocket(socketId1)).toBeTruthy(); + expect(gateway.getContextId(socketId1)).toBeTruthy(); + gateway.closeLlmChainForCurrentSocket(socketId1); + expect(gateway.getContextId(socketId1)).toBeFalsy(); + }); +}); diff --git a/new-chatbot-app/src/gateway/connection/llm-connection.gateway.ts b/new-chatbot-app/src/gateway/connection/llm-connection.gateway.ts index 93af2ea..622cae2 100644 --- a/new-chatbot-app/src/gateway/connection/llm-connection.gateway.ts +++ b/new-chatbot-app/src/gateway/connection/llm-connection.gateway.ts @@ -1,13 +1,12 @@ import { Injectable } from '@nestjs/common'; import { ContextId, ContextIdFactory, ModuleRef } from '@nestjs/core'; import { LlmChainService } from '../../llm-chain/llm-chain.service'; - -import { Socket } from 'socket.io'; +import { RetrieveEnvironmentVariablesService } from '../../shared/services/retrieve-environment-variables/retrieve-environment-variables.service'; @Injectable() export class LlmConnectionGateway { - private socketContextIdMapping: Map = new Map< - Socket, + private socketIdToContextIdMapping: Map = new Map< + string, ContextId >(); constructor(private readonly moduleRef: ModuleRef) {} @@ -17,19 +16,30 @@ export class LlmConnectionGateway { * @param socket * @returns */ - public getLlmChainForCurrentSocket(socket: Socket): Promise { - if (!this.socketContextIdMapping.has(socket)) { + public async getLlmChainForCurrentSocket( + socketId: string, + ): Promise { + if (!this.socketIdToContextIdMapping.has(socketId)) { const contextId = ContextIdFactory.create(); this.moduleRef.registerRequestByContextId(LlmChainService, 
contextId);
-      this.socketContextIdMapping.set(socket, contextId);
+      this.socketIdToContextIdMapping.set(socketId, contextId);
     }
-    const contextId: ContextId = this.socketContextIdMapping.get(socket)!;
-    return this.moduleRef.resolve(LlmChainService, contextId, {
+    const contextId: ContextId = this.socketIdToContextIdMapping.get(socketId)!;
+    return await this.moduleRef.resolve(LlmChainService, contextId, {
       strict: false,
     });
   }

-  public closeLlmChainForCurrentSocket(socket: Socket) {
-    this.socketContextIdMapping.delete(socket);
+  /**
+   * Get the ContextId associated with the given socket id, if one exists.
+   * @param socketId
+   * @returns the ContextId mapped to this socketId, or undefined if none exists
+   */
+  public getContextId(socketId: string): ContextId | undefined {
+    return this.socketIdToContextIdMapping.get(socketId);
+  }
+
+  public closeLlmChainForCurrentSocket(socketId: string): void {
+    this.socketIdToContextIdMapping.delete(socketId);
+  }
 }
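
For context on how the reworked per-socket wiring above is meant to be consumed, here is a minimal TypeScript sketch of a disconnect handler that releases the per-socket chain. It is illustrative only: only handleUserMessage appears in the chat.gateway.ts diffs, so the shape of handleDisconnect below is an assumption, while the closeLlmChainForCurrentSocket call, the client.id argument, and the llmConnnectionGateway field name are taken from the patches above.

    import { WebSocketGateway, OnGatewayDisconnect } from '@nestjs/websockets';
    import { Socket } from 'socket.io';
    import { LlmConnectionGateway } from '../connection/llm-connection.gateway';

    @WebSocketGateway()
    export class ChatGateway implements OnGatewayDisconnect {
      constructor(private readonly llmConnnectionGateway: LlmConnectionGateway) {}

      // Assumed cleanup hook: dropping the socketId -> ContextId mapping lets the
      // request-scoped LlmChainService (and its conversation memory) be released
      // once the client disconnects.
      handleDisconnect(client: Socket): void {
        this.llmConnnectionGateway.closeLlmChainForCurrentSocket(client.id);
      }
    }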