# LLM-Judge / models_config.py
# (Hugging Face Space page metadata removed — the raw header lines were not
# valid Python and would break import of this module.)
# models_config.py
# Roster of candidate models to be evaluated.
# Each entry: "id" (HF hub repo id), "name" (display label),
# "quantized" (whether a quantized variant is used), "provider" (inference backend).
MODELS = [
    {
        "id": "meta-llama/Meta-Llama-3.1-8B-Instruct",
        "name": "Llama-3.1-8B-Instruct",
        "quantized": False,
        # Groq usually reliable for Llama-3.1
        "provider": "groq",
    },
    {
        "id": "google/gemma-2-9b-it",
        "name": "Gemma-2-9B-it",
        "quantized": False,
        # Nebius AI – currently active for Gemma-2-9b-it.
        # Alternative: "featherless-ai" if Nebius has issues.
        "provider": "nebius",
    },
    # Add more if needed (avoid deprecated combinations).
]
# Model used as the judge when scoring candidate outputs.
JUDGE_MODEL = "meta-llama/Meta-Llama-3.1-70B-Instruct"
# Optional provider override for the judge; uncomment if the default
# provider routing fails for the judge model.
# JUDGE_PROVIDER = "groq"