Spaces:
Sleeping
Sleeping
File size: 3,384 Bytes
ddabbe4 8d5b1f0 ece54a0 8d5b1f0 ddabbe4 8d5b1f0 ac32153 8d5b1f0 ddabbe4 ece54a0 ac32153 ece54a0 8d5b1f0 ece54a0 ddabbe4 8d5b1f0 ddabbe4 8d5b1f0 ddabbe4 8d5b1f0 ddabbe4 8d5b1f0 ece54a0 8d5b1f0 ece54a0 8d5b1f0 ece54a0 8d5b1f0 ece54a0 8d5b1f0 ece54a0 8d5b1f0 d2aa9b5 8d5b1f0 d2aa9b5 fea19b5 ece54a0 8d5b1f0 ece54a0 8d5b1f0 ddabbe4 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 |
"""
Configuration settings for the NBA data analysis project.
"""
import os
from crewai import LLM
# NBA Data Configuration
NBA_DATA_PATH = "nba24-25.csv"
# LLM Configuration - Choose your provider
# Options: "openai", "ollama", "litellm", "openrouter", "huggingface"
# Default to huggingface for best open-source model
LLM_PROVIDER = os.getenv("LLM_PROVIDER", "huggingface")
# OpenAI Configuration (no default: None when unset; get_llm() raises then)
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
OPENAI_MODEL = os.getenv("OPENAI_MODEL", "gpt-4o")
# Ollama Configuration (for local open-source models)
OLLAMA_BASE_URL = os.getenv("OLLAMA_BASE_URL", "http://localhost:11434/v1") # /v1 is Ollama's OpenAI-compatible API
OLLAMA_MODEL = os.getenv("OLLAMA_MODEL", "mistral") # Model name (Ollama handles :latest automatically)
# LiteLLM Configuration (for Hugging Face or other providers)
# NOTE: this is a full LiteLLM model string, provider prefix included
LITELLM_MODEL = os.getenv("LITELLM_MODEL", "huggingface/meta-llama/Llama-3.2-3B-Instruct")
LITELLM_API_KEY = os.getenv("LITELLM_API_KEY", "") # Optional, depends on provider
# Hugging Face Configuration (for using HF Inference API)
HF_API_KEY = os.getenv("HF_API_KEY", "") # Get from https://huggingface.co/settings/tokens
HF_MODEL = os.getenv("HF_MODEL", "Qwen/Qwen2.5-7B-Instruct") # Strong open-source model (7B params)
# OpenRouter Configuration (backup option, no default: None when unset)
OPENROUTER_API_KEY = os.getenv("OPENROUTER_API_KEY")
OPENROUTER_MODEL = os.getenv("OPENROUTER_MODEL", "google/gemma-2-2b-it:free")
def get_llm() -> LLM:
    """
    Create and return a CrewAI LLM instance based on the configured provider.

    The provider is chosen via the LLM_PROVIDER environment variable
    ("ollama", "huggingface", "litellm", "openrouter"); any other value,
    including the explicit "openai", falls through to OpenAI.

    Returns:
        LLM: Configured CrewAI LLM instance

    Raises:
        ValueError: If the API key required by the chosen provider is not set
    """
    if LLM_PROVIDER == "ollama":
        # Ollama (local models only, not for cloud deployment).
        # OLLAMA_BASE_URL points at Ollama's OpenAI-compatible /v1 endpoint;
        # "ollama" is a placeholder api_key (the endpoint ignores it).
        # NOTE(review): a bare model name (e.g. "mistral") may need a
        # provider prefix for LiteLLM routing — confirm against CrewAI docs.
        return LLM(
            model=OLLAMA_MODEL,
            base_url=OLLAMA_BASE_URL,
            api_key="ollama"
        )
    elif LLM_PROVIDER == "huggingface":
        # Hugging Face Inference API - Best open-source models
        if not HF_API_KEY:
            raise ValueError(
                "HF_API_KEY environment variable is not set. "
                "Get a free token from https://huggingface.co/settings/tokens"
            )
        return LLM(
            model=f"huggingface/{HF_MODEL}",
            api_key=HF_API_KEY
        )
    elif LLM_PROVIDER == "litellm":
        # LiteLLM (alternative provider). LITELLM_MODEL is already a complete
        # LiteLLM model string with its provider prefix (default:
        # "huggingface/meta-llama/..."), so pass it through unchanged.
        # Prepending "litellm/" would form an unknown provider prefix and
        # make every request fail with a provider-resolution error.
        return LLM(
            model=LITELLM_MODEL,
            api_key=LITELLM_API_KEY if LITELLM_API_KEY else "dummy"
        )
    elif LLM_PROVIDER == "openrouter":
        # OpenRouter (backup option if HF is unavailable)
        if not OPENROUTER_API_KEY:
            raise ValueError(
                "OPENROUTER_API_KEY environment variable is not set. "
                "Get a free key at https://openrouter.ai"
            )
        return LLM(
            model=f"openrouter/{OPENROUTER_MODEL}",
            api_key=OPENROUTER_API_KEY,
            temperature=0.3  # low temperature for more deterministic analysis
        )
    else:
        # OpenAI (paid option) — also the fallback for unrecognized providers
        if not OPENAI_API_KEY:
            raise ValueError(
                "OPENAI_API_KEY environment variable is not set. "
                "Please set it using: export OPENAI_API_KEY='your-api-key'"
            )
        return LLM(
            model=OPENAI_MODEL,
            api_key=OPENAI_API_KEY
        )
|