# deep_research/model_config.py
import os
from dotenv import load_dotenv
# AsyncOpenAI / OpenAIChatCompletionsModel come from the `agents` SDK, which
# re-exports an OpenAI-compatible async client and chat-completions model wrapper.
from agents import AsyncOpenAI, OpenAIChatCompletionsModel

# Load .env at import time; override=True makes values from the .env file win
# over variables already set in the process environment.
load_dotenv(override=True)
def get_model(model_choice: str):
    """
    Build an OpenAIChatCompletionsModel for the user's radio selection.

    Args:
        model_choice: Label from the UI radio (e.g., "Gemini 2.5 Flash").
            Matching is by substring, so minor label variants still resolve.

    Returns:
        OpenAIChatCompletionsModel wired to the matching provider's
        OpenAI-compatible endpoint. Unrecognized choices fall back to
        gemini-2.5-flash.
    """
    gemini_base_url = "https://generativelanguage.googleapis.com/v1beta/openai/"
    groq_base_url = "https://api.groq.com/openai/v1"

    # Substring-keyed routes: (needle, (model_name, env_var, base_url)).
    # First match wins, preserving the original if/elif precedence
    # ("2.5 Flash" before "2.0 Pro" before "2.0 Flash").
    routes = (
        ("2.5 Flash", ("gemini-2.5-flash", "GEMINI_API_KEY", gemini_base_url)),
        # NOTE(review): "2.0 Pro" intentionally kept mapped to a flash-thinking
        # experimental model, matching the original code — confirm this is desired.
        ("2.0 Pro", ("gemini-2.0-flash-thinking-exp-01-21", "GEMINI_API_KEY", gemini_base_url)),
        ("2.0 Flash", ("gemini-2.0-flash-exp", "GEMINI_API_KEY", gemini_base_url)),
        ("Llama 3.3", ("llama-3.3-70b-versatile", "GROQ_API_KEY", groq_base_url)),
    )
    default = ("gemini-2.5-flash", "GEMINI_API_KEY", gemini_base_url)

    model_name, env_var, base_url = next(
        (cfg for needle, cfg in routes if needle in model_choice),
        default,
    )

    # NOTE(review): os.getenv returns None when the key is unset; the client is
    # still constructed and only fails at request time — confirm desired.
    api_key = os.getenv(env_var)

    client = AsyncOpenAI(
        api_key=api_key,
        base_url=base_url,
    )
    return OpenAIChatCompletionsModel(
        model=model_name,
        openai_client=client,
    )
def get_model_display_name(model_choice: str) -> str:
    """Return the user-facing label for *model_choice* unchanged.

    The radio options are already human-readable, so no mapping is required.
    """
    return model_choice