Skip to content

Commit

Permalink
Merge branch 'master' into 7569-add-ai-models-assistant-configs-on-pl…
Browse files Browse the repository at this point in the history
…anet-creation
  • Loading branch information
Mutugiii committed Sep 30, 2024
2 parents e6e3144 + 3e5ae00 commit 9b5006e
Show file tree
Hide file tree
Showing 21 changed files with 411 additions and 109 deletions.
42 changes: 41 additions & 1 deletion chatapi/readme.md → chatapi/README.md
Original file line number Diff line number Diff line change
@@ -1,3 +1,41 @@
## Couchdb Notes

Ensure you have set the following configs in the `configurations` database in couchdb:

```
"keys": {
"openai": "sk-mm",
"perplexity": "pplx-21",
"gemini": "AIza"
},
"models": {
"openai": "gpt-3.5-turbo",
"perplexity": "llama-3-sonar-small-32k-online",
"gemini": "gemini-pro"
},
"assistant": {
"name": "Planet Context",
"instructions": "You are a brainstorming manager for Open Learning Exchange (OLE) - https://ole.org/, you have specialised knowledge in Planet(web app) and myPlanet(mobile app) applications developed by OLE. You are designed to generate innovative ideas and provide suggestions and help the community members so as to ensure OLE's mission of empowering communities. Emphasize on terms like 'learning,' 'learner,' 'coach,' 'leader,' 'community,' 'power,' 'team,' and 'enterprises,' and avoids overly technical jargon. You are to embody OLE's ethos of self-reliance, mentoring, and community leadership, steering clear of concepts that contradict these values. Communicates in a formal tone, treating users with respect and professionalism, and maintaining a supportive, solution-oriented approach. Ask for clarifications when necessary to ensure contributions are accurate and relevant, and always encourages community-focused, empowering brainstorming."
}
```

Note: This applies to both the production and development environments.

## Development Notes
For the development environment, add a `.env` file in the `chatapi` directory.

Add the following configs in the .env file:
```
SERVE_PORT=5000
COUCHDB_HOST=http://localhost:2200
COUCHDB_USER=username
COUCHDB_PASS=password
```

In the production environment these configs are set in the `planet.yml` file.

## API Overview

### HTTP requests

##### GET /
Expand All @@ -23,7 +61,8 @@
"data": {
"user": "admin",
"content": "Hello",
"assistant": true,
"assistant": false,
"context": "",
"aiProvider": {
"name": "openai",
"model"?: "gpt-3.5-turbo",
Expand All @@ -38,6 +77,7 @@
- **user**: string(required) -> Provide the planet/myPlanet username
- **content**: string(required) -> The latest prompt for the AI to answer
- **assistant**: boolean(required) -> Set to true if you want to use the assistants endpoint
- **context**: string(optional) -> The text context you would like to pre-load the AI Assistant with
- **aiProvider**: Object(required)
- **name**: string(required) -> Name of the API provider to choose from, i.e. openai, perplexity, or gemini.
- **model**: string(optional) -> Name of the specific provider model to use. Defaults to gpt-3.5-turbo for _openai_, llama-3-sonar-small-32k-online for _perplexity_, and gemini-pro for _Google Gemini_
Expand Down
80 changes: 56 additions & 24 deletions chatapi/src/config/ai-providers.config.ts
Original file line number Diff line number Diff line change
@@ -1,28 +1,18 @@
import { GoogleGenerativeAI } from '@google/generative-ai';
import OpenAI from 'openai';
import dotenv from 'dotenv';

import { configurationDB } from './nano.config';
import { ModelConfigDocument } from '../models/ai-providers.model';
import { ModelsDocument } from '../models/ai-providers.model';

dotenv.config();
let keys: Record<string, any> = {};
let models: Record<string, any> = {};
let assistant: Record<string, any> = {};

const gemini = new GoogleGenerativeAI(process.env.GEMINI_API_KEY || '');

const openai = new OpenAI({
'apiKey': process.env.OPENAI_API_KEY || '',
});

const perplexity = new OpenAI({
'apiKey': process.env.PERPLEXITY_API_KEY || '',
'baseURL': 'https://api.perplexity.ai',
});

async function getModelsConfig() {
async function getConfig(): Promise<ModelsDocument | undefined> {
try {
const allDocs = await configurationDB.list({ 'include_docs': true });
if (allDocs.rows.length > 0) {
const doc = allDocs.rows[0].doc;
const doc = allDocs.rows[0].doc as unknown as ModelsDocument;
return doc;
} else {
throw new Error('No documents found in configurationDB');
Expand All @@ -34,18 +24,60 @@ async function getModelsConfig() {

const initializeProviders = async () => {
try {
const doc = await getModelsConfig() as ModelConfigDocument | undefined;
if (!doc || !doc.modelsConfig) {
throw new Error('Models configuration not found');
const doc = await getConfig();
if (!doc || !doc.keys) {
throw new Error('API Keys configuration not found');
}
return {
'openai': { 'ai': openai, 'defaultModel': doc.modelsConfig.openai || 'gpt-3.5-turbo' },
'perplexity': { 'ai': perplexity, 'defaultModel': doc.modelsConfig.perplexity || 'llama-3.1-sonar-huge-128k-online ' },
'gemini': { 'ai': gemini, 'defaultModel': doc.modelsConfig.gemini || 'gemini-pro' },
keys = {
'openai': new OpenAI({
'apiKey': doc.keys.openai || '',
}),
'perplexity': new OpenAI({
'apiKey': doc.keys.perplexity || '',
'baseURL': 'https://api.perplexity.ai',
}),
'gemini': new GoogleGenerativeAI(doc.keys.gemini || '')
};
} catch (error: any) {
throw new Error(`Error initializing providers: ${error.message}`);
}
};

export { openai, perplexity, gemini, getModelsConfig, initializeProviders };
/**
 * Populates the module-level `models` map from the `models` section of the
 * configuration document, pairing each provider's client (from `keys`) with
 * its default model name. Falls back to hard-coded model names when the
 * config omits one.
 * @throws Error when the config document or its `models` section is absent,
 *         or when fetching the configuration fails.
 */
const getModels = async () => {
  try {
    const doc = await getConfig();
    if (!doc || !doc.models) {
      throw new Error('Models configuration not found');
    }
    models = {
      'openai': { 'ai': keys.openai, 'defaultModel': doc.models.openai || 'gpt-3.5-turbo' },
      'perplexity': { 'ai': keys.perplexity, 'defaultModel': doc.models.perplexity || 'llama-3-sonar-small-32k-online' },
      'gemini': { 'ai': keys.gemini, 'defaultModel': doc.models.gemini || 'gemini-pro' },
    };
  } catch (error) {
    // Catch values are `unknown`; narrow explicitly instead of typing as `any`.
    const message = error instanceof Error ? error.message : String(error);
    throw new Error(`Error getting provider models: ${message}`);
  }
};

/**
 * Populates the module-level `assistant` config (name + instructions) from
 * the `assistant` section of the configuration document. These values are
 * later passed to the OpenAI assistants API when creating an assistant.
 * @throws Error when the config document or its `assistant` section is
 *         absent, or when fetching the configuration fails.
 */
const getAssistant = async () => {
  try {
    const doc = await getConfig();
    if (!doc || !doc.assistant) {
      throw new Error('Assistant configuration not found');
    }
    assistant = {
      'name': doc.assistant.name,
      'instructions': doc.assistant.instructions,
    };
  } catch (error) {
    // Catch values are `unknown`; narrow explicitly instead of typing as `any`.
    const message = error instanceof Error ? error.message : String(error);
    throw new Error(`Error getting assistant configs: ${message}`);
  }
};

// Load provider clients, model defaults, and assistant config at module load.
// The `.catch` prevents a config/DB failure from surfacing as an unhandled
// promise rejection (which terminates the process on modern Node versions
// with no useful context).
(async () => {
  await initializeProviders();
  await getModels();
  await getAssistant();
})().catch((error) => {
  const message = error instanceof Error ? error.message : String(error);
  // eslint-disable-next-line no-console
  console.error(`AI provider initialization failed: ${message}`);
});

export { keys, models, assistant };
10 changes: 4 additions & 6 deletions chatapi/src/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -5,8 +5,7 @@ import http from 'http';
import WebSocket from 'ws';

import { chat, chatNoSave } from './services/chat.service';
import { getModelsConfig } from './config/ai-providers.config';
import { ModelConfigDocument } from './models/ai-providers.model';
import { keys } from './config/ai-providers.config';

dotenv.config();

Expand Down Expand Up @@ -86,11 +85,10 @@ app.post('/', async (req: any, res: any) => {
});

app.get('/checkproviders', async (req: any, res: any) => {
const doc = await getModelsConfig() as ModelConfigDocument | undefined;
res.status(200).json({
'openai': doc?.modelsConfig.openai ? true : false,
'perplexity': doc?.modelsConfig.perplexity ? true : false,
'gemini': doc?.modelsConfig.gemini ? true : false
'openai': keys.openai.apiKey ? true : false,
'perplexity': keys.perplexity.apiKey ? true : false,
'gemini': keys.gemini.apiKey ? true : false
});
});

Expand Down
13 changes: 10 additions & 3 deletions chatapi/src/models/ai-providers.model.ts
Original file line number Diff line number Diff line change
Expand Up @@ -5,12 +5,19 @@ export interface AIProvider {
model?: string;
}

interface ModelsConfig {
// Assistant settings sourced from the `assistant` object in the couchdb
// `configurations` database; fed to the OpenAI assistants.create call
// (see chat-assistant.utils.ts) as the assistant's name and system prompt.
interface Assistant {
name: string;
instructions: string;
}

// One optional string per supported provider. Reused for two purposes in
// ModelsDocument: API keys (`keys`) and default model names (`models`).
interface Providers {
openai?: string;
perplexity?: string;
gemini?: string;
}

export interface ModelConfigDocument {
modelsConfig: ModelsConfig;
export interface ModelsDocument {
models: Providers;
keys: Providers;
assistant?: Assistant;
}
30 changes: 14 additions & 16 deletions chatapi/src/utils/chat-assistant.utils.ts
Original file line number Diff line number Diff line change
@@ -1,28 +1,26 @@
import { openai } from '../config/ai-providers.config';
import dotenv from 'dotenv';

dotenv.config();
import { keys } from '../config/ai-providers.config';
import { assistant } from '../config/ai-providers.config';

/**
* Creates an assistant with the specified model
* @param model - Model to use for assistant
* @returns Assistant object
*/
export async function createAssistant(model: string) {
return await openai.beta.assistants.create({
'name': process.env.ASSISTANT_NAME,
'instructions': process.env.ASSISTANT_INSTRUCTIONS,
return await keys.openai.beta.assistants.create({
'name': assistant?.name,
'instructions': assistant?.instructions,
'tools': [{ 'type': 'code_interpreter' }],
model,
});
}

export async function createThread() {
return await openai.beta.threads.create();
return await keys.openai.beta.threads.create();
}

export async function addToThread(threadId: any, message: string) {
return await openai.beta.threads.messages.create(
return await keys.openai.beta.threads.messages.create(
threadId,
{
'role': 'user',
Expand All @@ -32,7 +30,7 @@ export async function addToThread(threadId: any, message: string) {
}

export async function createRun(threadID: any, assistantID: any, instructions?: string) {
return await openai.beta.threads.runs.create(
return await keys.openai.beta.threads.runs.create(
threadID,
{
'assistant_id': assistantID,
Expand All @@ -42,16 +40,16 @@ export async function createRun(threadID: any, assistantID: any, instructions?:
}

export async function waitForRunCompletion(threadId: any, runId: any) {
let runStatus = await openai.beta.threads.runs.retrieve(threadId, runId);
let runStatus = await keys.openai.beta.threads.runs.retrieve(threadId, runId);
while (runStatus.status !== 'completed') {
await new Promise((resolve) => setTimeout(resolve, 1000));
runStatus = await openai.beta.threads.runs.retrieve(threadId, runId);
runStatus = await keys.openai.beta.threads.runs.retrieve(threadId, runId);
}
return runStatus;
}

export async function retrieveResponse(threadId: any): Promise<string> {
const messages = await openai.beta.threads.messages.list(threadId);
const messages = await keys.openai.beta.threads.messages.list(threadId);
for (const msg of messages.data) {
if ('text' in msg.content[0] && msg.role === 'assistant') {
return msg.content[0].text.value;
Expand All @@ -67,18 +65,18 @@ export async function createAndHandleRunWithStreaming(
let completionText = '';

return new Promise((resolve, reject) => {
openai.beta.threads.runs.stream(threadID, {
keys.openai.beta.threads.runs.stream(threadID, {
'assistant_id': assistantID
})
.on('textDelta', (textDelta) => {
.on('textDelta', (textDelta: { value: string }) => {
if (textDelta && textDelta.value) {
completionText += textDelta.value;
if (callback) {
callback(textDelta.value);
}
}
})
.on('toolCallDelta', (toolCallDelta) => {
.on('toolCallDelta', (toolCallDelta: { type: string; code_interpreter: { input: string; outputs: any[] } } ) => {
if (toolCallDelta.type === 'code_interpreter') {
if (toolCallDelta && toolCallDelta.code_interpreter && toolCallDelta.code_interpreter.input) {
completionText += toolCallDelta.code_interpreter.input;
Expand Down
20 changes: 9 additions & 11 deletions chatapi/src/utils/chat-helpers.utils.ts
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
import { gemini } from '../config/ai-providers.config';
import { initializeProviders } from '../config/ai-providers.config';
import { keys } from '../config/ai-providers.config';
import { models } from '../config/ai-providers.config';
import { AIProvider } from '../models/ai-providers.model';
import { ChatMessage, GeminiMessage } from '../models/chat-message.model';
import { Attachment } from '../models/db-doc.model';
Expand All @@ -15,7 +15,7 @@ import {
} from './chat-assistant.utils';
import { extractTextFromDocument } from './text-extraction.utils';

const getProviders = async () => await initializeProviders();
// const getProviders = async () => await initializeProviderModels();

/**
* Uses geminis's multimodal endpoint to generate chat completions
Expand All @@ -27,7 +27,7 @@ async function handleGemini(
messages: ChatMessage[],
model: string
): Promise<string> {
const geminiModel = gemini.getGenerativeModel({ model });
const geminiModel = keys.gemini.getGenerativeModel({ model });

const msg = messages[messages.length - 1].content;

Expand Down Expand Up @@ -65,8 +65,7 @@ export async function aiChatStream(
assistant: boolean,
callback?: (response: string) => void
): Promise<string> {
const providers = await getProviders();
const provider = providers[aiProvider.name];
const provider = models[aiProvider.name];
if (!provider) {
throw new Error('Unsupported AI provider');
}
Expand All @@ -84,7 +83,7 @@ export async function aiChatStream(

return completionText;
} catch (error) {
throw new Error('Error processing request');
throw new Error(`Error processing request ${error}`);
}
}

Expand Down Expand Up @@ -125,10 +124,9 @@ export async function aiChatNonStream(
messages: ChatMessage[],
aiProvider: AIProvider,
assistant: boolean,
context: any,
context: any = '',
): Promise<string> {
const providers = await getProviders();
const provider = providers[aiProvider.name];
const provider = models[aiProvider.name];
if (!provider) {
throw new Error('Unsupported AI provider');
}
Expand Down Expand Up @@ -157,7 +155,7 @@ export async function aiChatNonStream(

return await retrieveResponse(thread.id);
} catch (error) {
return 'Error processing request';
return `Error processing request ${error}`;
}
}

Expand Down
3 changes: 0 additions & 3 deletions docker/chat.env.example

This file was deleted.

6 changes: 3 additions & 3 deletions package.json
Original file line number Diff line number Diff line change
@@ -1,10 +1,10 @@
{
"name": "planet",
"license": "AGPL-3.0",
"version": "0.14.74",
"version": "0.14.80",
"myplanet": {
"latest": "v0.19.21",
"min": "v0.18.21"
"latest": "v0.19.65",
"min": "v0.18.65"
},
"scripts": {
"ng": "ng",
Expand Down
Loading

0 comments on commit 9b5006e

Please sign in to comment.