diff --git a/README.md b/README.md
index 17a1beb..be41b01 100644
--- a/README.md
+++ b/README.md
@@ -47,7 +47,7 @@ View full benchmark results for Rubra models and other models here: https://docs
| [**Rubra Llama-3 8B Instruct**](https://huggingface.co/rubra-ai/Meta-Llama-3-8B-Instruct) | 89.28% | 64.39 | 31.70 | 68.99 | 23.76 | 8.03 |
| [**Rubra Qwen2 7B Instruct**](https://huggingface.co/rubra-ai/Qwen2-7B-Instruct) | 85.71% | 68.88 | 30.36 | 75.82 | 28.72 | 8.08 |
| [**Rubra Mistral 7B Instruct v0.3**](https://huggingface.co/rubra-ai/Mistral-7B-Instruct-v0.3) | 73.57% | 59.12 | 29.91 | 43.29 | 11.14 | 7.69 |
-| [**Rubra Phi-3 Mini 128k Instruct**](https://huggingface.co/rubra-ai/Phi-3-mini-128k-instruct) | 65.71% | 66.66 | 29.24 | 74.09 | 26.84 | 7.45 |
+| [**Rubra Phi-3 Mini 128k Instruct**](https://huggingface.co/rubra-ai/Phi-3-mini-128k-instruct) | 70.00% | 66.66 | 29.24 | 74.09 | 26.84 | 7.45 |
| [**Rubra Mistral 7B Instruct v0.2**](https://huggingface.co/rubra-ai/Mistral-7B-Instruct-v0.2) | 69.28% | 58.90 | 29.91 | 34.12 | 8.36 | 7.36 |
| [**Rubra Gemma-1.1 2B Instruct**](https://huggingface.co/rubra-ai/gemma-1.1-2b-it) | 45.00% | 38.85 | 24.55 | 6.14 | 2.38 | 5.75 |
diff --git a/docs/docs/models/Phi.md b/docs/docs/models/Phi.md
index 4e5c8cd..bf8b762 100644
--- a/docs/docs/models/Phi.md
+++ b/docs/docs/models/Phi.md
@@ -43,32 +43,34 @@ Phi-3 is a state of the art, lightweight model. It performs exceptionally well d
-| Phi-3 Mini 128k Instruct | - | 68.17 | 30.58 | 80.44 | 28.12 | 7.92 | 51 | 45 | 64 | 0.31875 | 0.28125 | 0.51875 |
+| Phi-3 Mini 128k Instruct | - | 69.36 | 27.01 | 83.7 | 32.92 | 8.02 | 21 | 72 | 67 | 0.13125 | 0.45000 | 0.340625 |
-| Rubra Enhanced Phi-3 Mini 128k Instruct | 65.71% | 66.66 | 29.24 | 74.09 | 26.84 | 7.45 | 45 | 51 | 64 | 0.28125 | 0.31875 | 0.48125 |
+| Rubra Enhanced Phi-3 Mini 128k Instruct | 70.0% | 67.87 | 29.69 | 79.45 | 30.80 | 8.21 | 72 | 21 | 67 | 0.45000 | 0.13125 | 0.659375 |
-
\ No newline at end of file
+
+
+* The numbers above are based on the Phi-3 Mini that Microsoft updated in June 2024. The original Phi-3 Mini was released in April 2024, and the Rubra-enhanced model has been trained on both versions.
\ No newline at end of file
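The last six columns of the rows above (counts such as 21/72/67 and rates such as 0.340625) are not labeled inside this hunk, but they are internally consistent with pairwise win/loss/tie counts over 160 MT-Bench comparisons, where each rate is count / 160 and the adjusted win rate adds half the tie rate. A minimal JavaScript sketch of that arithmetic, assuming this reading of the columns (the interpretation is an inference from the numbers, not stated in the diff):

// Verify the apparent relationship between the trailing columns of the updated rows.
// Column meaning (win/loss/tie out of 160 comparisons) is an assumption, not from the PR.
const rows = [
  { win: 21, loss: 72, tie: 67 }, // updated Phi-3 Mini 128k Instruct row
  { win: 72, loss: 21, tie: 67 }, // updated Rubra Enhanced row
];
for (const { win, loss, tie } of rows) {
  const total = win + loss + tie;             // 160 in every row
  const winRate = win / total;                // e.g. 21 / 160 = 0.13125
  const lossRate = loss / total;              // e.g. 72 / 160 = 0.45
  const adjusted = winRate + tie / total / 2; // e.g. 0.13125 + 0.209375 = 0.340625
  console.log(winRate, lossRate, adjusted);
}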
diff --git a/docs/src/components/BenchmarkTable.js b/docs/src/components/BenchmarkTable.js
index a7432a8..846c633 100644
--- a/docs/src/components/BenchmarkTable.js
+++ b/docs/src/components/BenchmarkTable.js
@@ -117,21 +117,21 @@ const data = [
model: 'Phi-3 Mini 128k Instruct',
params: 3.82,
functionCalling: '-',
- mmlu: '68.17',
- gpqa: '30.58',
- gsm8k: '80.44',
- math: '28.12',
- mtBench: '7.92',
+ mmlu: '69.36',
+ gpqa: '27.01',
+ gsm8k: '83.7',
+ math: '32.92',
+ mtBench: '8.02',
},
{
model: 'Rubra Phi-3 Mini 128k Instruct',
- params: 4.27,
- functionCalling: '65.71%',
- mmlu: '66.66',
- gpqa: '29.24',
- gsm8k: '74.09',
- math: '26.84',
- mtBench: '7.45',
+ params: 4.73,
+ functionCalling: '70.00%',
+ mmlu: '67.87',
+ gpqa: '29.69',
+ gsm8k: '79.45',
+ math: '30.80',
+ mtBench: '8.21',
},
{
model: 'Qwen2-7B-Instruct',