Commit

models
Zaki-1052 committed Jul 23, 2024
1 parent 58410f7 commit 62613ba
Showing 2 changed files with 27 additions and 7 deletions.
13 changes: 8 additions & 5 deletions public/portal.html
@@ -33,11 +33,11 @@
<button id="model-claude-3.5-sonnet" data-value="claude-3-5-sonnet-20240620">Claude 3.5 Sonnet</button>
<button id="model-gpt-4o" data-value="gpt-4o">GPT-4o: Latest</button>
<button id="model-gpt-4o-mini" data-value="gpt-4o-mini">GPT-4o Mini: Cheapest</button>
<button id="model-llama-3.1-405b" data-value="llama-3.1-405b-reasoning">Llama3.1 405bb</button>
<button id="model-gpt-4-turbo" data-value="gpt-4-turbo-">GPT-4-Turbo</button>
<button id="model-gpt-3.5" data-value="gpt-3.5-turbo-0125">GPT-3.5-Turbo</button>
<button id="model-claude-haiku" data-value="claude-3-haiku-20240307">Claude Haiku</button>
<button id="model-gemini-1.5-pro" data-value="gemini-1.5-pro">Gemini-1.5 Pro</button>
<button id="model-llama-3.1-405b" data-value="llama-3.1-405b-reasoning">Llama3.1 405b: New</button>
<button id="model-gpt-4-turbo" data-value="gpt-4-turbo-">GPT-4-Turbo: Standard</button>
<button id="model-gpt-3.5" data-value="gpt-3.5-turbo-0125">GPT-3.5-Turbo: Old</button>
<button id="model-claude-haiku" data-value="claude-3-haiku-20240307">Claude Haiku: Cheap</button>
<button id="model-gemini-1.5-pro" data-value="gemini-1.5-pro">Gemini-1.5 Pro: Best</button>
<button id="model-llama-3.1-70b" data-value="llama-3.1-70b-versatile">Llama3.1 70b</button>
<button id="model-llama-3.1-8b" data-value="llama-3.1-8b-instant">Llama3.1 8b</button>
<button id="model-mistral-large" data-value="mistral-large-latest">Mistral Large</button>
@@ -65,6 +65,9 @@
<button id="model-gemini-ultra" data-value="gemini-1.0-ultra">Gemini Ultra</button>
<!-- Add other models as options -->
<!-- Text Models: Show Tokens per $ -->
<button id="open-router-model-meta-llama-llama-3.1-70b-instruct" data-value="meta-llama/llama-3.1-70b-instruct">Meta: Llama 3.1 70B Instruct</button>
<button id="open-router-model-meta-llama-llama-3.1-8b-instruct" data-value="meta-llama/llama-3.1-8b-instruct">Meta: Llama 3.1 8B Instruct</button>
<button id="open-router-model-meta-llama-llama-3.1-405b-instruct" data-value="meta-llama/llama-3.1-405b-instruct">Meta: Llama 3.1 405B Instruct</button>
<button id="open-router-model-cognitivecomputations-dolphin-llama-3-70b" data-value="cognitivecomputations/dolphin-llama-3-70b">Dolphin Llama 3 70B 🐬</button>
<button id="open-router-model-mistralai-codestral-mamba" data-value="mistralai/codestral-mamba">Mistral: Codestral Mamba</button>
<button id="open-router-model-mistralai-mistral-nemo" data-value="mistralai/mistral-nemo">Mistral: Mistral Nemo</button>
21 changes: 19 additions & 2 deletions public/script.js
@@ -70,6 +70,9 @@ fetchConfig();
"Llama 3.1 70B": "llama-3.1-70b-versatile",
"Llama 3.1 405B": "llama-3.1-405b-reasoning",
// Open Router Models
"Meta: Llama 3.1 70B Instruct": "meta-llama/llama-3.1-70b-instruct",
"Meta: Llama 3.1 8B Instruct": "meta-llama/llama-3.1-8b-instruct",
"Meta: Llama 3.1 405B Instruct": "meta-llama/llama-3.1-405b-instruct",
"Dolphin Llama 3 70B \ud83d\udc2c": "cognitivecomputations/dolphin-llama-3-70b",
"Mistral: Codestral Mamba": "mistralai/codestral-mamba",
"Mistral: Mistral Nemo": "mistralai/mistral-nemo",
@@ -283,6 +286,9 @@ fetchConfig();
"llama-3.1-70b-versatile": "Llama 3.1 70B",
"llama-3.1-405b-reasoning": "Llama 3.1 405B",
// Open Router Models
"meta-llama/llama-3.1-70b-instruct": "Meta: Llama 3.1 70B Instruct",
"meta-llama/llama-3.1-8b-instruct": "Meta: Llama 3.1 8B Instruct",
"meta-llama/llama-3.1-405b-instruct": "Meta: Llama 3.1 405B Instruct",
"cognitivecomputations/dolphin-llama-3-70b": "Dolphin Llama 3 70B \ud83d\udc2c",
"mistralai/codestral-mamba": "Mistral: Codestral Mamba",
"mistralai/mistral-nemo": "Mistral: Mistral Nemo",
@@ -618,15 +624,15 @@ const selectedModelDisplayName = document.getElementById('selected-model').textC
"gpt-4o": "GPT-4o: Latest OpenAI Intelligent Model",
"gpt-4-32k": "GPT-4-32k: Longer Context Window — Higher Price",
"gpt-4-turbo": "GPT-4-Turbo: ChatGPT-Plus Model — 128k Tokens",
"gpt-3.5-turbo-0125": "GPT-3.5-Turbo: Cheapest Option Available",
"gpt-3.5-turbo-0125": "GPT-3.5-Turbo: Older Cheap Option",
"claude-3-5-sonnet-20240620": "Most Advanced Anthropic Model",
"gemini-pro": "Gemini-Pro: Google Bard Model — 3.5 Equivalent",
"gemini-pro-vision": "Gemini-Vision: View Images — One-Time Use",
"gemini-1.5-pro": "Gemini-Pro-1.5: Best Gemini Model — 2 Million Tokens",
"gemini-1.5-flash": "Gemini-Flash-1.5: Fastest & Cheapest Google Model",
"gemini-1.0-ultra": "Gemini-Ultra: Largest Google Model — Unreleased",
"claude-3-opus-20240229": "Claude-Opus: Very Powerful — GPT-4 Level",
"claude-3-sonnet-20240229": "Claude-Sonnet: Hard-Working — 3.5 Level",
"claude-3-sonnet-20240229": "Claude-Sonnet: Hard-Working — Turbo Level",
"claude-3-haiku-20240307": "Claude-Haiku: Light, Cheap, & Fast — New",
"claude-2.1": "Claude-2.1: Best Instant Model — 200k Tokens",
"claude-2.0": "Claude-2.0: Average Cheap Model — 100k Tokens",
@@ -644,7 +650,12 @@
"open-codestral-mamba": "Codestral Mamba: A Mamba 2 language model specialized in code generation",
"mathstral-temp-id": "Mathstral: A math-specific 7B model designed for math reasoning and scientific tasks",
"open-mistral-nemo": "Mistral NeMo: A 12B model built in partnership with Nvidia, easy to use and a drop-in replacement for Mistral 7B",
"gpt-4o-mini": "GPT-4o-Mini: Small, fast, and cheap model from OpenAI with relatively high intelligence.",
"mixtral-8x7b-32768": "Qroq API: Free Mixtral 8x7b",
// open router models
"meta-llama/llama-3.1-70b-instruct": "Meta's latest class of model (Llama 3.1) launched with a variety of sizes & flavors. This 70B instruct-tuned version is optimized for high quality dialogue usecases.\n\nIt has demonstrated strong performance compared to leading closed-source models in human evaluations.\n\nTo read more about the model release, [click here](https://ai.meta.com/blog/meta-llama-3/). Usage of this model is subject to [Meta's Acceptable Use Policy](https://llama.meta.com/llama3/use-policy/).",
"meta-llama/llama-3.1-8b-instruct": "Meta's latest class of model (Llama 3.1) launched with a variety of sizes & flavors. This 8B instruct-tuned version is fast and efficient.\n\nIt has demonstrated strong performance compared to leading closed-source models in human evaluations.\n\nTo read more about the model release, [click here](https://ai.meta.com/blog/meta-llama-3/). Usage of this model is subject to [Meta's Acceptable Use Policy](https://llama.meta.com/llama3/use-policy/).",
"meta-llama/llama-3.1-405b-instruct": "The highly anticipated 400B class of Llama3 is here! Clocking in at 128k context with impressive eval scores, the Meta AI team continues to push the frontier of open-source LLMs.\n\nMeta's latest class of model (Llama 3.1) launched with a variety of sizes & flavors. This 405B instruct-tuned version is optimized for high quality dialogue usecases.\n\nIt has demonstrated strong performance compared to leading closed-source models in human evaluations.\n\nTo read more about the model release, [click here](https://ai.meta.com/blog/meta-llama-3/). Usage of this model is subject to [Meta's Acceptable Use Policy](https://llama.meta.com/llama3/use-policy/).",
"cognitivecomputations/dolphin-llama-3-70b": "Dolphin 2.9 is designed for instruction following, conversational, and coding. This model is a fine-tune of [Llama 3 70B](/models/meta-llama/llama-3-70b-instruct). It demonstrates improvements in instruction, conversation, coding, and function calling abilities, when compared to the original.\n\nUncensored and is stripped of alignment and bias, it requires an external alignment layer for ethical use. Users are cautioned to use this highly compliant model responsibly, as detailed in a blog post about uncensored models at [erichartford.com/uncensored-models](https://erichartford.com/uncensored-models).\n\nUsage of this model is subject to [Meta's Acceptable Use Policy](https://llama.meta.com/llama3/use-policy/).",
"mistralai/codestral-mamba": "A 7.3B parameter Mamba-based model designed for code and reasoning tasks.\n\n- Linear time inference, allowing for theoretically infinite sequence lengths\n- 256k token context window\n- Optimized for quick responses, especially beneficial for code productivity\n- Performs comparably to state-of-the-art transformer models in code and reasoning tasks\n- Available under the Apache 2.0 license for free use, modification, and distribution",
"mistralai/mistral-nemo": "A 12B parameter model with a 128k token context length built by Mistral in collaboration with NVIDIA.\n\nThe model is multilingual, supporting English, French, German, Spanish, Italian, Portuguese, Chinese, Japanese, Korean, Arabic, and Hindi.\n\nIt supports function calling and is released under the Apache 2.0 license.",
@@ -999,6 +1010,9 @@ document.getElementById('model-claude-3.5-sonnet').addEventListener('mouseover',
// select open router models lol

// Event listeners for additional models
document.getElementById('open-router-model-meta-llama-llama-3.1-70b-instruct').addEventListener('click', () => selectModel('meta-llama/llama-3.1-70b-instruct'));
document.getElementById('open-router-model-meta-llama-llama-3.1-8b-instruct').addEventListener('click', () => selectModel('meta-llama/llama-3.1-8b-instruct'));
document.getElementById('open-router-model-meta-llama-llama-3.1-405b-instruct').addEventListener('click', () => selectModel('meta-llama/llama-3.1-405b-instruct'));
document.getElementById('open-router-model-cognitivecomputations-dolphin-llama-3-70b').addEventListener('click', () => selectModel('cognitivecomputations/dolphin-llama-3-70b'));
document.getElementById('open-router-model-mistralai-codestral-mamba').addEventListener('click', () => selectModel('mistralai/codestral-mamba'));
document.getElementById('open-router-model-mistralai-mistral-nemo').addEventListener('click', () => selectModel('mistralai/mistral-nemo'));
@@ -1175,6 +1189,9 @@ document.getElementById('open-router-model-openai-gpt-3.5-turbo-0125').addEventL
document.getElementById('open-router-model-openai-gpt-3.5-turbo').addEventListener('click', () => selectModel('openai/gpt-3.5-turbo'));

// event listeners for descriptions
document.getElementById('open-router-model-meta-llama-llama-3.1-70b-instruct').addEventListener('mouseover', (event) => showCustomTooltip(modelDescriptions['meta-llama/llama-3.1-70b-instruct'], event.currentTarget));
document.getElementById('open-router-model-meta-llama-llama-3.1-8b-instruct').addEventListener('mouseover', (event) => showCustomTooltip(modelDescriptions['meta-llama/llama-3.1-8b-instruct'], event.currentTarget));
document.getElementById('open-router-model-meta-llama-llama-3.1-405b-instruct').addEventListener('mouseover', (event) => showCustomTooltip(modelDescriptions['meta-llama/llama-3.1-405b-instruct'], event.currentTarget));
document.getElementById('open-router-model-cognitivecomputations-dolphin-llama-3-70b').addEventListener('mouseover', (event) => showCustomTooltip(modelDescriptions['cognitivecomputations/dolphin-llama-3-70b'], event.currentTarget));
document.getElementById('open-router-model-mistralai-codestral-mamba').addEventListener('mouseover', (event) => showCustomTooltip(modelDescriptions['mistralai/codestral-mamba'], event.currentTarget));
document.getElementById('open-router-model-mistralai-mistral-nemo').addEventListener('mouseover', (event) => showCustomTooltip(modelDescriptions['mistralai/mistral-nemo'], event.currentTarget));
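The click and mouseover listeners above repeat the same pattern for every model, and each element id is just the slug with "/" replaced by "-" under an open-router-model- prefix. A hedged alternative sketch that derives both listeners from the slug list (selectModel, showCustomTooltip, and modelDescriptions are the functions and object this diff already references; the loop itself is not how the repository is written today):

// Sketch: generate the per-model wiring instead of repeating it by hand.
const openRouterSlugs = [
  'meta-llama/llama-3.1-70b-instruct',
  'meta-llama/llama-3.1-8b-instruct',
  'meta-llama/llama-3.1-405b-instruct',
];
openRouterSlugs.forEach((slug) => {
  const button = document.getElementById('open-router-model-' + slug.replace(/\//g, '-'));
  button.addEventListener('click', () => selectModel(slug));
  button.addEventListener('mouseover', (event) =>
    showCustomTooltip(modelDescriptions[slug], event.currentTarget));
});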
