---
license: cc-by-nc-4.0
language:
- ro
base_model:
- OpenLLM-Ro/RoLlama3-8b-Instruct-2025-04-23
datasets:
- OpenLLM-Ro/ro_dpo_helpsteer
- OpenLLM-Ro/ro_dpo_ultrafeedback
- OpenLLM-Ro/ro_dpo_magpie
- OpenLLM-Ro/ro_dpo_argilla_magpie
- OpenLLM-Ro/ro_dpo_helpsteer2
model-index:
- name: OpenLLM-Ro/RoLlama3-8b-Instruct-DPO-2025-04-23
  results:
  - task:
      type: text-generation
    dataset:
      name: RoMT-Bench
      type: RoMT-Bench
    metrics:
    - name: Score
      type: Score
      value: 6.67
  - task:
      type: text-generation
    dataset:
      name: RoCulturaBench
      type: RoCulturaBench
    metrics:
    - name: Score
      type: Score
      value: 4.83
  - task:
      type: text-generation
    dataset:
      name: Romanian_Academic_Benchmarks
      type: Romanian_Academic_Benchmarks
    metrics:
    - name: Average accuracy
      type: accuracy
      value: 55.86
  - task:
      type: text-generation
    dataset:
      name: OpenLLM-Ro/ro_arc_challenge
      type: OpenLLM-Ro/ro_arc_challenge
    metrics:
    - name: Average accuracy
      type: accuracy
      value: 52.26
  - task:
      type: text-generation
    dataset:
      name: OpenLLM-Ro/ro_mmlu
      type: OpenLLM-Ro/ro_mmlu
    metrics:
    - name: Average accuracy
      type: accuracy
      value: 55.35
  - task:
      type: text-generation
    dataset:
      name: OpenLLM-Ro/ro_winogrande
      type: OpenLLM-Ro/ro_winogrande
    metrics:
    - name: Average accuracy
      type: accuracy
      value: 66.62
  - task:
      type: text-generation
    dataset:
      name: OpenLLM-Ro/ro_hellaswag
      type: OpenLLM-Ro/ro_hellaswag
    metrics:
    - name: Average accuracy
      type: accuracy
      value: 59.93
  - task:
      type: text-generation
    dataset:
      name: OpenLLM-Ro/ro_gsm8k
      type: OpenLLM-Ro/ro_gsm8k
    metrics:
    - name: Average accuracy
      type: accuracy
      value: 43.95
  - task:
      type: text-generation
    dataset:
      name: OpenLLM-Ro/ro_truthfulqa
      type: OpenLLM-Ro/ro_truthfulqa
    metrics:
    - name: Average accuracy
      type: accuracy
      value: 57.06
  - task:
      type: text-generation
    dataset:
      name: LaRoSeDa_binary
      type: LaRoSeDa_binary
    metrics:
    - name: Average macro-f1
      type: macro-f1
      value: 97.60
  - task:
      type: text-generation
    dataset:
      name: LaRoSeDa_multiclass
      type: LaRoSeDa_multiclass
    metrics:
    - name: Average macro-f1
      type: macro-f1
      value: 62.16
  - task:
      type: text-generation
    dataset:
      name: WMT_EN-RO
      type: WMT_EN-RO
    metrics:
    - name: Average bleu
      type: bleu
      value: 18.14
  - task:
      type: text-generation
    dataset:
      name: WMT_RO-EN
      type: WMT_RO-EN
    metrics:
    - name: Average bleu
      type: bleu
      value: 14.13
  - task:
      type: text-generation
    dataset:
      name: XQuAD
      type: XQuAD
    metrics:
    - name: Average exact_match
      type: exact_match
      value: 30.65
  - task:
      type: text-generation
    dataset:
      name: XQuAD
      type: XQuAD
    metrics:
    - name: Average f1
      type: f1
      value: 46.29
  - task:
      type: text-generation
    dataset:
      name: STS
      type: STS
    metrics:
    - name: Average spearman
      type: spearman
      value: 67.62
  - task:
      type: text-generation
    dataset:
      name: STS
      type: STS
    metrics:
    - name: Average pearson
      type: pearson
      value: 67.82
  - task:
      type: text-generation
    dataset:
      name: RoMT-Bench
      type: RoMT-Bench
    metrics:
    - name: First turn
      type: Score
      value: 6.81
    - name: Second turn
      type: Score
      value: 6.54
  - task:
      type: text-generation
    dataset:
      name: OpenLLM-Ro/ro_arc_challenge
      type: OpenLLM-Ro/ro_arc_challenge
    metrics:
    - name: 0-shot
      type: accuracy
      value: 48.76
    - name: 1-shot
      type: accuracy
      value: 49.70
    - name: 3-shot
      type: accuracy
      value: 52.70
    - name: 5-shot
      type: accuracy
      value: 54.07
    - name: 10-shot
      type: accuracy
      value: 53.90
    - name: 25-shot
      type: accuracy
      value: 54.41
  - task:
      type: text-generation
    dataset:
      name: OpenLLM-Ro/ro_mmlu
      type: OpenLLM-Ro/ro_mmlu
    metrics:
    - name: 0-shot
      type: accuracy
      value: 55.78
    - name: 1-shot
      type: accuracy
      value: 55.09
    - name: 3-shot
      type: accuracy
      value: 55.39
    - name: 5-shot
      type: accuracy
      value: 55.15
  - task:
      type: text-generation
    dataset:
      name: OpenLLM-Ro/ro_winogrande
      type: OpenLLM-Ro/ro_winogrande
    metrics:
    - name: 0-shot
      type: accuracy
      value: 65.19
    - name: 1-shot
      type: accuracy
      value: 64.25
    - name: 3-shot
      type: accuracy
      value: 68.59
    - name: 5-shot
      type: accuracy
      value: 68.43
  - task:
      type: text-generation
    dataset:
      name: OpenLLM-Ro/ro_hellaswag
      type: OpenLLM-Ro/ro_hellaswag
    metrics:
    - name: 0-shot
      type: accuracy
      value: 60.31
    - name: 1-shot
      type: accuracy
      value: 59.88
    - name: 3-shot
      type: accuracy
      value: 59.17
    - name: 5-shot
      type: accuracy
      value: 59.89
    - name: 10-shot
      type: accuracy
      value: 60.40
  - task:
      type: text-generation
    dataset:
      name: OpenLLM-Ro/ro_gsm8k
      type: OpenLLM-Ro/ro_gsm8k
    metrics:
    - name: 1-shot
      type: accuracy
      value: 32.37
    - name: 3-shot
      type: accuracy
      value: 46.70
    - name: 5-shot
      type: accuracy
      value: 52.77
  - task:
      type: text-generation
    dataset:
      name: LaRoSeDa_binary
      type: LaRoSeDa_binary
    metrics:
    - name: 0-shot
      type: macro-f1
      value: 95.79
    - name: 1-shot
      type: macro-f1
      value: 97.87
    - name: 3-shot
      type: macro-f1
      value: 98.30
    - name: 5-shot
      type: macro-f1
      value: 98.43
  - task:
      type: text-generation
    dataset:
      name: LaRoSeDa_multiclass
      type: LaRoSeDa_multiclass
    metrics:
    - name: 0-shot
      type: macro-f1
      value: 64.86
    - name: 1-shot
      type: macro-f1
      value: 64.46
    - name: 3-shot
      type: macro-f1
      value: 58.36
    - name: 5-shot
      type: macro-f1
      value: 60.97
  - task:
      type: text-generation
    dataset:
      name: WMT_EN-RO
      type: WMT_EN-RO
    metrics:
    - name: 0-shot
      type: bleu
      value: 5.57
    - name: 1-shot
      type: bleu
      value: 26.05
    - name: 3-shot
      type: bleu
      value: 24.71
    - name: 5-shot
      type: bleu
      value: 16.22
  - task:
      type: text-generation
    dataset:
      name: WMT_RO-EN
      type: WMT_RO-EN
    metrics:
    - name: 0-shot
      type: bleu
      value: 3.01
    - name: 1-shot
      type: bleu
      value: 22.63
    - name: 3-shot
      type: bleu
      value: 19.43
    - name: 5-shot
      type: bleu
      value: 11.47
  - task:
      type: text-generation
    dataset:
      name: XQuAD_EM
      type: XQuAD_EM
    metrics:
    - name: 0-shot
      type: exact_match
      value: 16.55
    - name: 1-shot
      type: exact_match
      value: 31.76
    - name: 3-shot
      type: exact_match
      value: 35.97
    - name: 5-shot
      type: exact_match
      value: 38.32
  - task:
      type: text-generation
    dataset:
      name: XQuAD_F1
      type: XQuAD_F1
    metrics:
    - name: 0-shot
      type: f1
      value: 33.31
    - name: 1-shot
      type: f1
      value: 46.85
    - name: 3-shot
      type: f1
      value: 50.73
    - name: 5-shot
      type: f1
      value: 54.29
  - task:
      type: text-generation
    dataset:
      name: STS_Spearman
      type: STS_Spearman
    metrics:
    - name: 1-shot
      type: spearman
      value: 66.56
    - name: 3-shot
      type: spearman
      value: 58.64
    - name: 5-shot
      type: spearman
      value: 77.66
  - task:
      type: text-generation
    dataset:
      name: STS_Pearson
      type: STS_Pearson
    metrics:
    - name: 1-shot
      type: pearson
      value: 70.09
    - name: 3-shot
      type: pearson
      value: 56.39
    - name: 5-shot
      type: pearson
      value: 76.97
---

# Model Card for RoLlama3-8b-Instruct-DPO
|
*Built with Meta Llama 3*
|
This model points to, and is identical to, [RoLlama3-8b-Instruct-DPO-2025-04-23](https://huggingface.co/OpenLLM-Ro/RoLlama3-8b-Instruct-DPO-2025-04-23).
|
RoLlama3 is a family of pretrained and fine-tuned generative text models for Romanian. This is the repository for the **human-aligned instruct 8B model**. Links to other models can be found at the bottom of this page.
|
## Model Details

### Model Description

<!-- Provide a longer summary of what this model is. -->
OpenLLM-Ro represents the first open-source effort to build an LLM specialized for Romanian. OpenLLM-Ro has developed and publicly released a collection of Romanian LLMs, comprising both foundational models and instruct and chat variants.
|

- **Developed by:** OpenLLM-Ro
- **Language(s):** Romanian
- **License:** cc-by-nc-4.0
- **Finetuned from model:** [RoLlama3-8b-Instruct-2025-04-23](https://huggingface.co/OpenLLM-Ro/RoLlama3-8b-Instruct-2025-04-23)
- **Trained using:** [RoHelpSteer](https://huggingface.co/datasets/OpenLLM-Ro/ro_dpo_helpsteer), [RoUltraFeedback](https://huggingface.co/datasets/OpenLLM-Ro/ro_dpo_ultrafeedback), [RoMagpieDPO](https://huggingface.co/datasets/OpenLLM-Ro/ro_dpo_magpie), [RoArgillaMagpie](https://huggingface.co/datasets/OpenLLM-Ro/ro_dpo_argilla_magpie), [RoHelpSteer2](https://huggingface.co/datasets/OpenLLM-Ro/ro_dpo_helpsteer2)
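
The checkpoint was aligned with Direct Preference Optimization (DPO) on the Romanian preference datasets listed above, starting from the instruct model; training itself was run with the LLaMA-Factory fork linked under Model Sources. Purely as an illustration of this stage, here is a minimal DPO sketch using TRL's `DPOTrainer` (the hyperparameters, dataset split, and column names are assumptions, not the released configuration):

```python
# Illustrative only: the released model was trained with LLaMA-Factory, not this script.
from datasets import load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer
from trl import DPOConfig, DPOTrainer  # TRL >= 0.12

base = "OpenLLM-Ro/RoLlama3-8b-Instruct-2025-04-23"
tokenizer = AutoTokenizer.from_pretrained(base)
model = AutoModelForCausalLM.from_pretrained(base)

# One of the preference datasets listed above; DPOTrainer expects
# prompt/chosen/rejected columns (split and column names assumed here).
train_dataset = load_dataset("OpenLLM-Ro/ro_dpo_helpsteer", split="train")

config = DPOConfig(output_dir="rollama3-8b-instruct-dpo", beta=0.1, per_device_train_batch_size=1)
trainer = DPOTrainer(model=model, args=config, train_dataset=train_dataset, processing_class=tokenizer)
trainer.train()
```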
|
### Model Sources

<!-- Provide the basic links for the model. -->

- **Repository:** https://github.com/OpenLLM-Ro/LLaMA-Factory
- **Paper:** https://arxiv.org/abs/2406.18266
|
## Intended Use

### Intended Use Cases

RoLlama3 is intended for research use in Romanian. Base models can be adapted for a variety of natural language tasks, while instruction- and chat-tuned models are intended for assistant-like chat.

### Out-of-Scope Use

<!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->

Use in any manner that violates the license or any applicable laws or regulations, as well as use in languages other than Romanian.

## How to Get Started with the Model

Use the code below to get started with the model.

```python
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the tokenizer and model weights (this repo points to the 2025-04-23 DPO checkpoint).
tokenizer = AutoTokenizer.from_pretrained("OpenLLM-Ro/RoLlama3-8b-Instruct-DPO")
model = AutoModelForCausalLM.from_pretrained("OpenLLM-Ro/RoLlama3-8b-Instruct-DPO")

# "What is the highest mountain peak in Romania?"
instruction = "Care este cel mai înalt vârf muntos din România?"
chat = [
    # Romanian system prompt: a helpful, respectful, honest assistant that avoids
    # toxic, racist, sexist, dangerous, or illegal answers.
    {"role": "system", "content": "Ești un asistent folositor, respectuos și onest. Încearcă să ajuți cât mai mult prin informațiile oferite, excluzând răspunsuri toxice, rasiste, sexiste, periculoase și ilegale."},
    {"role": "user", "content": instruction},
]
# add_generation_prompt appends the assistant header so the model answers
# instead of continuing the user turn.
prompt = tokenizer.apply_chat_template(chat, tokenize=False, add_generation_prompt=True)

inputs = tokenizer.encode(prompt, add_special_tokens=False, return_tensors="pt")
outputs = model.generate(input_ids=inputs, max_new_tokens=128)
print(tokenizer.decode(outputs[0]))
```
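
For an 8B model, loading in half precision on a GPU is usually preferable to the default float32. A variant of the loading step above (assuming a CUDA-capable machine and the `accelerate` package, which `device_map="auto"` requires):

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "OpenLLM-Ro/RoLlama3-8b-Instruct-DPO"
tokenizer = AutoTokenizer.from_pretrained(model_id)
# bfloat16 halves memory versus float32; device_map="auto" places the
# weights on the available GPU(s).
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,
    device_map="auto",
)
```

With `device_map="auto"`, move the encoded prompt to the model's device before generating, e.g. `inputs = inputs.to(model.device)`.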
|
## Academic Benchmarks

<table>
  <tbody>
    <tr>
      <td><strong>Model</strong></td>
      <td><strong><center>Average</center></strong></td>
      <td><strong><center>ARC</center></strong></td>
      <td><strong><center>MMLU</center></strong></td>
      <td><strong><center>Winogrande</center></strong></td>
      <td><strong><center>Hellaswag</center></strong></td>
      <td><strong><center>GSM8k</center></strong></td>
      <td><strong><center>TruthfulQA</center></strong></td>
    </tr>
    <tr>
      <td>Llama-3-8B-Instruct</td><td><center>50.62</center></td><td><center>43.69</center></td><td><center>52.04</center></td><td><center>59.33</center></td><td><center>53.19</center></td><td><center>43.87</center></td><td><center>51.59</center></td>
    </tr>
    <tr>
      <td>RoLlama3-8b-Instruct-2024-06-28</td><td><center>50.56</center></td><td><center>44.70</center></td><td><center>52.19</center></td><td><center><strong>67.23</strong></center></td><td><center>57.69</center></td><td><center>30.23</center></td><td><center>51.34</center></td>
    </tr>
    <tr>
      <td>RoLlama3-8b-Instruct-2024-10-09</td><td><center>52.21</center></td><td><center>47.94</center></td><td><center>53.50</center></td><td><center>66.06</center></td><td><center>59.72</center></td><td><center>40.16</center></td><td><center>45.90</center></td>
    </tr>
    <tr>
      <td>RoLlama3-8b-Instruct-2025-04-23</td><td><center>54.66</center></td><td><center>50.31</center></td><td><center><strong>55.91</strong></center></td><td><center>67.01</center></td><td><center><strong>61.73</strong></center></td><td><center><strong>47.41</strong></center></td><td><center>45.61</center></td>
    </tr>
    <tr>
      <td>RoLlama3-8b-Instruct-DPO-2024-10-09</td><td><center>49.96</center></td><td><center>46.29</center></td><td><center>53.29</center></td><td><center>65.57</center></td><td><center>58.15</center></td><td><center>34.77</center></td><td><center>41.70</center></td>
    </tr>
    <tr>
      <td><em>RoLlama3-8b-Instruct-DPO-2025-04-23</em></td><td><center><em><strong>55.86</strong></em></center></td><td><center><em><strong>52.26</strong></em></center></td><td><center><em>55.35</em></center></td><td><center><em>66.62</em></center></td><td><center><em>59.93</em></center></td><td><center><em>43.95</em></center></td><td><center><em><strong>57.06</strong></em></center></td>
    </tr>
  </tbody>
</table>
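
The averages reported here (and in the model-index metadata above) are plain means over the few-shot settings evaluated for each benchmark, as a quick check confirms:

```python
# Few-shot accuracies from the model-index metadata for this model.
shot_scores = {
    "ARC":       [48.76, 49.70, 52.70, 54.07, 53.90, 54.41],  # 0/1/3/5/10/25-shot
    "MMLU":      [55.78, 55.09, 55.39, 55.15],                # 0/1/3/5-shot
    "Hellaswag": [60.31, 59.88, 59.17, 59.89, 60.40],         # 0/1/3/5/10-shot
    "GSM8k":     [32.37, 46.70, 52.77],                       # 1/3/5-shot
}
for task, scores in shot_scores.items():
    print(f"{task}: {sum(scores) / len(scores):.2f}")
# ARC: 52.26, MMLU: 55.35, Hellaswag: 59.93, GSM8k: 43.95 -- the table values above.
```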
|
## Downstream tasks

<table>
  <tbody>
    <tr>
      <td></td>
      <td colspan="4"><center><strong>LaRoSeDa</strong></center></td>
      <td colspan="4"><center><strong>WMT</strong></center></td>
    </tr>
    <tr>
      <td></td>
      <td colspan="2"><center><strong>Few-shot</strong></center></td>
      <td colspan="2"><center><strong>Finetuned</strong></center></td>
      <td colspan="2"><center><strong>Few-shot</strong></center></td>
      <td colspan="2"><center><strong>Finetuned</strong></center></td>
    </tr>
    <tr>
      <td><strong>Model</strong></td>
      <td><center><strong>Binary<br>(Macro F1)</strong></center></td>
      <td><center><strong>Multiclass<br>(Macro F1)</strong></center></td>
      <td><center><strong>Binary<br>(Macro F1)</strong></center></td>
      <td><center><strong>Multiclass<br>(Macro F1)</strong></center></td>
      <td><center><strong>EN-RO<br>(Bleu)</strong></center></td>
      <td><center><strong>RO-EN<br>(Bleu)</strong></center></td>
      <td><center><strong>EN-RO<br>(Bleu)</strong></center></td>
      <td><center><strong>RO-EN<br>(Bleu)</strong></center></td>
    </tr>
    <tr>
      <td>Llama-3-8B-Instruct</td><td><center>95.88</center></td><td><center>56.21</center></td><td><center><strong>98.53</strong></center></td><td><center>86.19</center></td><td><center>18.88</center></td><td><center><strong>30.98</strong></center></td><td><center><strong>28.02</strong></center></td><td><center>40.28</center></td>
    </tr>
    <tr>
      <td>RoLlama3-8b-Instruct-2024-06-28</td><td><center>97.52</center></td><td><center><strong>67.41</strong></center></td><td><center>94.15</center></td><td><center>87.13</center></td><td><center><strong>24.01</strong></center></td><td><center>27.36</center></td><td><center>26.53</center></td><td><center>40.36</center></td>
    </tr>
    <tr>
      <td>RoLlama3-8b-Instruct-2024-10-09</td><td><center>95.58</center></td><td><center>61.20</center></td><td><center>96.46</center></td><td><center><strong>87.26</strong></center></td><td><center>22.92</center></td><td><center>24.28</center></td><td><center>27.31</center></td><td><center><strong>40.52</strong></center></td>
    </tr>
    <tr>
      <td>RoLlama3-8b-Instruct-2025-04-23</td><td><center>96.21</center></td><td><center>59.15</center></td><td><center>-</center></td><td><center>-</center></td><td><center>23.32</center></td><td><center>22.50</center></td><td><center>-</center></td><td><center>-</center></td>
    </tr>
    <tr>
      <td>RoLlama3-8b-Instruct-DPO-2024-10-09</td><td><center>97.48</center></td><td><center>54.00</center></td><td><center>-</center></td><td><center>-</center></td><td><center>22.09</center></td><td><center>23.00</center></td><td><center>-</center></td><td><center>-</center></td>
    </tr>
    <tr>
      <td><em>RoLlama3-8b-Instruct-DPO-2025-04-23</em></td><td><center><em><strong>97.60</strong></em></center></td><td><center><em>62.16</em></center></td><td><center><em>-</em></center></td><td><center><em>-</em></center></td><td><center><em>18.14</em></center></td><td><center><em>14.13</em></center></td><td><center><em>-</em></center></td><td><center><em>-</em></center></td>
    </tr>
  </tbody>
</table>
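
The LaRoSeDa columns report macro-averaged F1 and the WMT columns corpus-level BLEU. As a reference point for the metrics themselves, a minimal sketch with standard libraries (toy inputs; this is not the project's evaluation harness, whose prompting and post-processing differ):

```python
from sacrebleu import corpus_bleu
from sklearn.metrics import f1_score

# Macro F1 weights every class equally, as in the LaRoSeDa columns.
gold_labels = [0, 1, 2, 2, 0, 1]
pred_labels = [0, 1, 2, 0, 0, 1]
print(f1_score(gold_labels, pred_labels, average="macro"))

# Corpus BLEU over detokenized hypotheses and references, as in the WMT columns.
hypotheses = ["Pisica stă pe covor."]
references = [["Pisica stă pe covor."]]  # one reference set, parallel to hypotheses
print(corpus_bleu(hypotheses, references).score)  # 100.0 for an exact match
```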

<table>
  <tbody>
    <tr>
      <td></td>
      <td colspan="4"><center><strong>XQuAD</strong></center></td>
      <td colspan="4"><center><strong>STS</strong></center></td>
    </tr>
    <tr>
      <td></td>
      <td colspan="2"><center><strong>Few-shot</strong></center></td>
      <td colspan="2"><center><strong>Finetuned</strong></center></td>
      <td colspan="2"><center><strong>Few-shot</strong></center></td>
      <td colspan="2"><center><strong>Finetuned</strong></center></td>
    </tr>
    <tr>
      <td><strong>Model</strong></td>
      <td><center><strong>(EM)</strong></center></td>
      <td><center><strong>(F1)</strong></center></td>
      <td><center><strong>(EM)</strong></center></td>
      <td><center><strong>(F1)</strong></center></td>
      <td><center><strong>(Spearman)</strong></center></td>
      <td><center><strong>(Pearson)</strong></center></td>
      <td><center><strong>(Spearman)</strong></center></td>
      <td><center><strong>(Pearson)</strong></center></td>
    </tr>
    <tr>
      <td>Llama-3-8B-Instruct</td><td><center><strong>39.47</strong></center></td><td><center>58.67</center></td><td><center><strong>67.65</strong></center></td><td><center><strong>82.77</strong></center></td><td><center>73.04</center></td><td><center>72.36</center></td><td><center>83.49</center></td><td><center>84.06</center></td>
    </tr>
    <tr>
      <td>RoLlama3-8b-Instruct-2024-06-28</td><td><center>39.43</center></td><td><center><strong>59.50</strong></center></td><td><center>44.45</center></td><td><center>59.76</center></td><td><center>77.20</center></td><td><center>77.87</center></td><td><center>85.80</center></td><td><center>86.05</center></td>
    </tr>
    <tr>
      <td>RoLlama3-8b-Instruct-2024-10-09</td><td><center>18.89</center></td><td><center>31.79</center></td><td><center>50.84</center></td><td><center>65.18</center></td><td><center>77.60</center></td><td><center>76.86</center></td><td><center><strong>86.70</strong></center></td><td><center><strong>87.09</strong></center></td>
    </tr>
    <tr>
      <td>RoLlama3-8b-Instruct-2025-04-23</td><td><center>11.01</center></td><td><center>23.55</center></td><td><center>-</center></td><td><center>-</center></td><td><center>76.78</center></td><td><center>74.36</center></td><td><center>-</center></td><td><center>-</center></td>
    </tr>
    <tr>
      <td>RoLlama3-8b-Instruct-DPO-2024-10-09</td><td><center>26.05</center></td><td><center>42.77</center></td><td><center>-</center></td><td><center>-</center></td><td><center><strong>79.64</strong></center></td><td><center><strong>79.52</strong></center></td><td><center>-</center></td><td><center>-</center></td>
    </tr>
    <tr>
      <td><em>RoLlama3-8b-Instruct-DPO-2025-04-23</em></td><td><center><em>30.65</em></center></td><td><center><em>46.29</em></center></td><td><center><em>-</em></center></td><td><center><em>-</em></center></td><td><center><em>67.62</em></center></td><td><center><em>67.82</em></center></td><td><center><em>-</em></center></td><td><center><em>-</em></center></td>
    </tr>
  </tbody>
</table>
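
The STS columns are Spearman and Pearson correlations (reported on a 0-100 scale) between the model's predicted similarity scores and the gold annotations; with SciPy, for example (toy values, not the actual evaluation pipeline):

```python
from scipy.stats import pearsonr, spearmanr

# Toy STS-style data: gold similarity annotations vs. model predictions.
gold = [4.5, 2.0, 3.2, 0.5, 5.0]
pred = [4.1, 2.5, 2.2, 1.0, 4.8]

rho, _ = spearmanr(gold, pred)  # rank (monotonic) agreement
r, _ = pearsonr(gold, pred)     # linear agreement
print(round(100 * rho, 2), round(100 * r, 2))
```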
|

## Romanian MT-Bench

<table>
  <tbody>
    <tr>
      <td><strong>Model</strong></td>
      <td><strong><center>Average</center></strong></td>
      <td><strong><center>1st turn</center></strong></td>
      <td><strong><center>2nd turn</center></strong></td>
      <td><strong><center>Answers in Ro</center></strong></td>
    </tr>
    <tr>
      <td>Llama-3-8B-Instruct</td><td><center>5.96</center></td><td><center>6.16</center></td><td><center>5.76</center></td><td><center>158/160</center></td>
    </tr>
    <tr>
      <td>RoLlama3-8b-Instruct-2024-06-28</td><td><center>5.15</center></td><td><center>6.03</center></td><td><center>4.28</center></td><td><center><strong>160/160</strong></center></td>
    </tr>
    <tr>
      <td>RoLlama3-8b-Instruct-2024-10-09</td><td><center>5.38</center></td><td><center>6.09</center></td><td><center>4.67</center></td><td><center><strong>160/160</strong></center></td>
    </tr>
    <tr>
      <td>RoLlama3-8b-Instruct-2025-04-23</td><td><center>6.39</center></td><td><center><strong>7.12</strong></center></td><td><center>5.66</center></td><td><center><strong>160/160</strong></center></td>
    </tr>
    <tr>
      <td>RoLlama3-8b-Instruct-DPO-2024-10-09</td><td><center>5.87</center></td><td><center>6.22</center></td><td><center>5.49</center></td><td><center><strong>160/160</strong></center></td>
    </tr>
    <tr>
      <td><em>RoLlama3-8b-Instruct-DPO-2025-04-23</em></td><td><center><em><strong>6.67</strong></em></center></td><td><center><em>6.81</em></center></td><td><center><em><strong>6.54</strong></em></center></td><td><center><em><strong>160/160</strong></em></center></td>
    </tr>
  </tbody>
</table>

## RoCulturaBench

<table>
  <tbody>
    <tr>
      <td><strong>Model</strong></td>
      <td><strong><center>Average</center></strong></td>
      <td><strong><center>Answers in Ro</center></strong></td>
    </tr>
    <tr>
      <td>Llama-3-8B-Instruct</td><td><center>4.62</center></td><td><center><strong>100/100</strong></center></td>
    </tr>
    <tr>
      <td>RoLlama3-8b-Instruct-2024-06-28</td><td><center>3.71</center></td><td><center><strong>100/100</strong></center></td>
    </tr>
    <tr>
      <td>RoLlama3-8b-Instruct-2024-10-09</td><td><center>3.81</center></td><td><center><strong>100/100</strong></center></td>
    </tr>
    <tr>
      <td>RoLlama3-8b-Instruct-2025-04-23</td><td><center>4.05</center></td><td><center><strong>100/100</strong></center></td>
    </tr>
    <tr>
      <td>RoLlama3-8b-Instruct-DPO-2024-10-09</td><td><center>4.40</center></td><td><center><strong>100/100</strong></center></td>
    </tr>
    <tr>
      <td><em>RoLlama3-8b-Instruct-DPO-2025-04-23</em></td><td><center><em><strong>4.83</strong></em></center></td><td><center><em><strong>100/100</strong></em></center></td>
    </tr>
  </tbody>
</table>

## RoLlama3 Model Family

| Model              | Link |
|--------------------|:--------:|
|RoLlama3-8b-Base-2024-05-14 | [link](https://huggingface.co/OpenLLM-Ro/RoLlama3-8b-Base-2024-05-14) |
|RoLlama3-8b-Instruct-2024-05-14 | [link](https://huggingface.co/OpenLLM-Ro/RoLlama3-8b-Instruct-2024-05-14) |
|RoLlama3-8b-Instruct-2024-10-09| [link](https://huggingface.co/OpenLLM-Ro/RoLlama3-8b-Instruct-2024-10-09) |
|RoLlama3-8b-Instruct-2025-04-23| [link](https://huggingface.co/OpenLLM-Ro/RoLlama3-8b-Instruct-2025-04-23) |
|RoLlama3-8b-Instruct-DPO-2024-10-09| [link](https://huggingface.co/OpenLLM-Ro/RoLlama3-8b-Instruct-DPO-2024-10-09) |
|*RoLlama3-8b-Instruct-DPO-2025-04-23*| [link](https://huggingface.co/OpenLLM-Ro/RoLlama3-8b-Instruct-DPO-2025-04-23) |

## Citation

```
@misc{masala2024vorbecstiromanecsterecipetrain,
      title={"Vorbe\c{s}ti Rom\^ane\c{s}te?" A Recipe to Train Powerful Romanian LLMs with English Instructions},
      author={Mihai Masala and Denis C. Ilie-Ablachim and Alexandru Dima and Dragos Corlatescu and Miruna Zavelca and Ovio Olaru and Simina Terian-Dan and Andrei Terian-Dan and Marius Leordeanu and Horia Velicu and Marius Popescu and Mihai Dascalu and Traian Rebedea},
      year={2024},
      eprint={2406.18266},
      archivePrefix={arXiv},
      primaryClass={cs.CL},
      url={https://arxiv.org/abs/2406.18266},
}
```