From 74462cb06628cf80adb71a95cb2bcf90e9f73c9a Mon Sep 17 00:00:00 2001 From: Saif Ali Shaik Date: Wed, 8 May 2024 11:57:02 +0530 Subject: [PATCH] feat: tests for newer models in anyscale and openai --- jest.config.ts | 3 ++ tests/chat/anyscale.test.ts | 72 ++++++++++++++++++++++++++++++++++++- tests/chat/openai.test.ts | 7 ++++ 3 files changed, 81 insertions(+), 1 deletion(-) diff --git a/jest.config.ts b/jest.config.ts index 6547888..d944209 100644 --- a/jest.config.ts +++ b/jest.config.ts @@ -57,6 +57,9 @@ const config: Config = { // "enableGlobally": false // }, + // Default timeout of a test in milliseconds + // testTimeout: 20000, + // Force coverage collection from ignored files using an array of glob patterns // forceCoverageMatch: [], diff --git a/tests/chat/anyscale.test.ts b/tests/chat/anyscale.test.ts index 3ba056e..a088f4b 100644 --- a/tests/chat/anyscale.test.ts +++ b/tests/chat/anyscale.test.ts @@ -30,7 +30,11 @@ describe('Anyscale ChatCompletions APIs', () => { }); test('model: codellama/CodeLlama-34b-Instruct-hf', async () => { - const completion = await client.chat.completions.create({ model: 'codellama/CodeLlama-34b-Instruct-hf', messages: [{ "role": "user", "content": "Say this is a test" }] }); + const completion = await client.chat.completions.create({ + model: 'codellama/CodeLlama-34b-Instruct-hf', + messages: [{ "role": "user", "content": "Say this is a test" }], + max_tokens: 30 + }); expect(completion).toBeDefined(); expect(completion.choices).toBeDefined(); expect(completion.choices.length).toBeGreaterThan(0); @@ -42,4 +46,70 @@ describe('Anyscale ChatCompletions APIs', () => { expect(completion.choices).toBeDefined(); expect(completion.choices.length).toBeGreaterThan(0); }); + + test('model: google/gemma-7b-it', async () => { + const completion = await client.chat.completions.create({ + model: 'google/gemma-7b-it', + messages: [{ "role": "user", "content": "Say this is a test" }], + max_tokens: 25 + }); + expect(completion).toBeDefined();
+ expect(completion.choices).toBeDefined(); + expect(completion.choices.length).toBeGreaterThan(0); + }); + + test('model: meta-llama/Meta-Llama-3-8B-Instruct', async () => { + const completion = await client.chat.completions.create({ + model: 'meta-llama/Meta-Llama-3-8B-Instruct', + messages: [{ "role": "user", "content": "Say this is a test" }], + max_tokens: 25 + }); + expect(completion).toBeDefined(); + expect(completion.choices).toBeDefined(); + expect(completion.choices.length).toBeGreaterThan(0); + }); + + test('model: meta-llama/Meta-Llama-3-70B-Instruct', async () => { + const completion = await client.chat.completions.create({ + model: 'meta-llama/Meta-Llama-3-70B-Instruct', + messages: [{ role: 'user', content: 'Say this is a test' }], + max_tokens: 25, + }); + expect(completion).toBeDefined(); + expect(completion.choices).toBeDefined(); + expect(completion.choices.length).toBeGreaterThan(0); + }); + + test('model: mistralai/Mixtral-8x7B-Instruct-v0.1', async () => { + const completion = await client.chat.completions.create({ + model: 'mistralai/Mixtral-8x7B-Instruct-v0.1', + messages: [{ role: 'user', content: 'Say this is a test' }], + max_tokens: 25, + }); + expect(completion).toBeDefined(); + expect(completion.choices).toBeDefined(); + expect(completion.choices.length).toBeGreaterThan(0); + }); + + test('model: mistralai/Mixtral-8x22B-Instruct-v0.1', async () => { + const completion = await client.chat.completions.create({ + model: 'mistralai/Mixtral-8x22B-Instruct-v0.1', + messages: [{ role: 'user', content: 'Say this is a test' }], + max_tokens: 25, + }); + expect(completion).toBeDefined(); + expect(completion.choices).toBeDefined(); + expect(completion.choices.length).toBeGreaterThan(0); + }); + + test('model: mlabonne/NeuralHermes-2.5-Mistral-7B', async () => { + const completion = await client.chat.completions.create({ + model: 'mlabonne/NeuralHermes-2.5-Mistral-7B', + messages: [{ role: 'user', content: 'Say this is a test' }], + max_tokens: 25, + }); + expect(completion).toBeDefined(); + expect(completion.choices).toBeDefined(); + expect(completion.choices.length).toBeGreaterThan(0); + }); }); \ No newline at end of file diff --git a/tests/chat/openai.test.ts b/tests/chat/openai.test.ts index 20a7120..1d71de5 100644 --- a/tests/chat/openai.test.ts +++ b/tests/chat/openai.test.ts @@ -70,4 +70,11 @@ describe('Openai ChatCompletions APIs', () => { expect(completion.choices).toBeDefined(); expect(completion.choices.length).toBeGreaterThan(0); }); + + test('model: gpt-4-turbo-2024-04-09', async () => { + const completion = await client.chat.completions.create({ model: 'gpt-4-turbo-2024-04-09', messages: [{ "role": "user", "content": "Say this is a test" }] }); + expect(completion).toBeDefined(); + expect(completion.choices).toBeDefined(); + expect(completion.choices.length).toBeGreaterThan(0); + }); }); \ No newline at end of file