---
license: mit
datasets:
- openai/MMMLU
- argilla/FinePersonas-v0.1
- SkunkworksAI/reasoning-0.01
- fka/awesome-chatgpt-prompts
metrics:
- accuracy
- bleu
- bertscore
- bleurt
base_model:
- meta-llama/Llama-3.2-11B-Vision-Instruct
- nvidia/NVLM-D-72B
- openai/whisper-large-v3-turbo
new_version: meta-llama/Llama-3.1-8B-Instruct
pipeline_tag: question-answering
library_name: adapter-transformers
---
"""Minimal extractive question-answering fine-tuning/evaluation example.

Install the dependencies first from a shell (this is NOT Python code):

    pip install transformers datasets
"""
from transformers import AutoModelForQuestionAnswering, Trainer, TrainingArguments

# Load a pretrained QA model; replace "your-model-name" with a real
# Hugging Face Hub checkpoint id (e.g. one of the base models listed
# in this card's metadata).
model = AutoModelForQuestionAnswering.from_pretrained("your-model-name")

training_args = TrainingArguments(
    output_dir='./results',
    num_train_epochs=3,
    per_device_train_batch_size=16,
    evaluation_strategy="epoch"
)

# NOTE(review): the original snippet called `trainer.evaluate()` without
# ever constructing a Trainer. A real run must also pass `train_dataset`
# and `eval_dataset` (plus a tokenizer/data collator) — omitted here for
# brevity.
trainer = Trainer(
    model=model,
    args=training_args,
    # train_dataset=...,  # TODO: supply a tokenized QA dataset
    # eval_dataset=...,
)

eval_results = trainer.evaluate()
print(eval_results)