diff --git a/README.md b/README.md
index 17a1beb..be41b01 100644
--- a/README.md
+++ b/README.md
@@ -47,7 +47,7 @@ View full benchmark results for Rubra models and other models here: https://docs
 | [**Rubra Llama-3 8B Instruct**](https://huggingface.co/rubra-ai/Meta-Llama-3-8B-Instruct) | 89.28% | 64.39 | 31.70 | 68.99 | 23.76 | 8.03 |
 | [**Rubra Qwen2 7B Instruct**](https://huggingface.co/rubra-ai/Qwen2-7B-Instruct) | 85.71% | 68.88 | 30.36 | 75.82 | 28.72 | 8.08 |
 | [**Rubra Mistral 7B Instruct v0.3**](https://huggingface.co/rubra-ai/Mistral-7B-Instruct-v0.3) | 73.57% | 59.12 | 29.91 | 43.29 | 11.14 | 7.69 |
-| [**Rubra Phi-3 Mini 128k Instruct**](https://huggingface.co/rubra-ai/Phi-3-mini-128k-instruct) | 65.71% | 66.66 | 29.24 | 74.09 | 26.84 | 7.45 |
+| [**Rubra Phi-3 Mini 128k Instruct**](https://huggingface.co/rubra-ai/Phi-3-mini-128k-instruct) | 70.00% | 66.66 | 29.24 | 74.09 | 26.84 | 7.45 |
 | [**Rubra Mistral 7B Instruct v0.2**](https://huggingface.co/rubra-ai/Mistral-7B-Instruct-v0.2) | 69.28% | 58.90 | 29.91 | 34.12 | 8.36 | 7.36 |
 | [**Rubra Gemma-1.1 2B Instruct**](https://huggingface.co/rubra-ai/gemma-1.1-2b-it) | 45.00% | 38.85 | 24.55 | 6.14 | 2.38 | 5.75 |

diff --git a/docs/src/components/BenchmarkTable.js b/docs/src/components/BenchmarkTable.js
index a7432a8..c58e4c8 100644
--- a/docs/src/components/BenchmarkTable.js
+++ b/docs/src/components/BenchmarkTable.js
@@ -126,7 +126,7 @@ const data = [
   {
     model: 'Rubra Phi-3 Mini 128k Instruct',
     params: 4.27,
-    functionCalling: '65.71%',
+    functionCalling: '70.00%',
     mmlu: '66.66',
     gpqa: '29.24',
     gsm8k: '74.09',