# Configuration for the Unsloth model-arena backend:
# loads the evaluation datasets and declares the base / LoRA-finetuned
# model registries served by the arena UI.
import os
from dotenv import load_dotenv
from datasets import load_dataset
# Pull environment variables from a local .env file (e.g. ARENA_AUTH_TOKEN).
load_dotenv()
# --- CONFIGURATION ---
MODAL_BASE_URL = "https://mohdfanis--unsloth-model-arena-backend" # Base URL
# Shared auth token for the backend; os.environ.get returns None when unset —
# NOTE(review): no fallback or warning here, so requests would presumably go
# out unauthenticated in that case — confirm against the caller.
MY_AUTH_TOKEN = os.environ.get("ARENA_AUTH_TOKEN")
# --- DATASETS ---
# Fetch both evaluation datasets up front; on any failure fall back to empty
# lists so the rest of the module can still be imported.
print("Loading Hugging Face datasets...")
math_dataset, bio_dataset = [], []
try:
    math_dataset = load_dataset("microsoft/orca-math-word-problems-200k", split="train")
    bio_dataset = load_dataset("bio-nlp-umass/bioinstruct", split="train")
except Exception as e:
    # Reset both: a partial load (math ok, bio failed) is treated as a full failure,
    # matching the all-or-nothing behavior of loading them in one try block.
    math_dataset, bio_dataset = [], []
    print(f"❌ Failed to load datasets: {e}")
else:
    print("✅ Datasets loaded successfully.")
# Per-domain dataset schema: which columns hold the prompt pieces and the
# reference answer. "Math" uses a single question column; "Bio" splits the
# prompt into an instruction plus optional input.
DATASET_CONFIG = {
    "Math": dict(
        dataset=math_dataset,
        question_col="question",
        answer_col="answer",
    ),
    "Bio": dict(
        dataset=bio_dataset,
        instruction_col="instruction",
        input_col="input",
        answer_col="output",
    ),
}
# --- MODEL DEFINITIONS ---
# Display name -> Hugging Face repo id of the 4-bit (bnb) quantized base model.
BASE_MODELS = {
    "Base Llama-3.1 8B Instruct": "unsloth/llama-3.1-8b-instruct-bnb-4bit",
    "Base Llama-3 8B Instruct": "unsloth/llama-3-8b-instruct-bnb-4bit",
    "Base Llama-2 7B Chat": "unsloth/llama-2-7b-chat-bnb-4bit",
    "Base Mistral 7B Instruct": "unsloth/mistral-7b-v0.3-instruct-bnb-4bit",
    # NOTE(review): mixed-case "7B" below is inconsistent with every other
    # all-lowercase repo id here; HF repo names are case-sensitive — verify
    # this repo id actually resolves.
    "Base Qwen-2 7B Instruct": "unsloth/qwen2-7B-instruct-bnb-4bit",
    "Base Gemma-2 9B Instruct": "unsloth/gemma-2-9b-it-bnb-4bit",
    "Base Gemma 7B Instruct": "unsloth/gemma-7b-it-bnb-4bit",
}
# (display label, repo slug) pairs for the math-finetuned LoRA adapters.
_MATH_VARIANTS = [
    ("Llama-3.1 8B", "llama-3.1-8b"),
    ("Llama-3 8B", "llama-3-8b"),
    ("Llama-2 7B", "llama-2-7b"),
    ("Mistral 7B", "mistral-7b-v0.3"),
    ("Qwen-2 7B", "qwen-2-7b"),
    ("Gemma-2 9B", "gemma-2-9b"),
    ("Gemma 7B", "gemma-7b"),
]
# Display name -> LoRA adapter repo; e3/e1 = number of training epochs.
# Order matters for the UI dropdown: 3-epoch variant first for each model.
FINETUNED_MATH = {
    f"Finetuned {label} ({epochs}) - MATH": f"farhananis005/lora-{slug}-Math-{epochs}"
    for label, slug in _MATH_VARIANTS
    for epochs in ("e3", "e1")
}
# (display label, repo slug) pairs for the bio-finetuned LoRA adapters.
_BIO_VARIANTS = [
    ("Llama-3.1 8B", "llama-3.1-8b"),
    ("Llama-3 8B", "llama-3-8b"),
    ("Llama-2 7B", "llama-2-7b"),
    ("Mistral 7B", "mistral-7b-v0.3"),
    ("Qwen-2 7B", "qwen-2-7b"),
    ("Gemma-2 9B", "gemma-2-9b"),
    ("Gemma 7B", "gemma-7b"),
]
# Display name -> LoRA adapter repo; e3/e1 = number of training epochs.
# Order matters for the UI dropdown: 3-epoch variant first for each model.
FINETUNED_BIO = {
    f"Finetuned {label} ({epochs}) - BIO": f"farhananis005/lora-{slug}-Bio-{epochs}"
    for label, slug in _BIO_VARIANTS
    for epochs in ("e3", "e1")
}
# Per-domain model menu for the UI: a placeholder entry (mapped to None) that
# the frontend treats as "nothing selected", followed by the shared base
# models and that domain's finetuned adapters.
# Fix: removed a stray trailing " |" (copy/paste artifact) that made this
# dict literal — and therefore the whole module — a syntax error.
ALL_MODELS = {
    "Math": {"-- Select Math Model --": None, **BASE_MODELS, **FINETUNED_MATH},
    "Bio": {"-- Select Bio Model --": None, **BASE_MODELS, **FINETUNED_BIO},
}