shekkari21 commited on
Commit
fea19b5
·
1 Parent(s): d2aa9b5

changed to a different model

Browse files
Files changed (1) hide show
  1. config.py +6 -2
config.py CHANGED
@@ -26,7 +26,9 @@ LITELLM_API_KEY = os.getenv("LITELLM_API_KEY", "") # Optional, depends on provi
26
 
27
  # OpenRouter Configuration (access to many open-source models)
28
  OPENROUTER_API_KEY = os.getenv("OPENROUTER_API_KEY")
29
- OPENROUTER_MODEL = os.getenv("OPENROUTER_MODEL", "meta-llama/llama-3.2-3b-instruct:free") # Free tier available
 
 
30
 
31
 
32
  def get_llm() -> LLM:
@@ -67,9 +69,11 @@ def get_llm() -> LLM:
67
  "Get a free key at https://openrouter.ai"
68
  )
69
  # CrewAI uses LiteLLM internally, so we need to format as openrouter/model-name
 
70
  return LLM(
71
  model=f"openrouter/{OPENROUTER_MODEL}",
72
- api_key=OPENROUTER_API_KEY
 
73
  )
74
 
75
  else:
 
26
 
27
  # OpenRouter Configuration (access to many open-source models)
28
  OPENROUTER_API_KEY = os.getenv("OPENROUTER_API_KEY")
29
+ # Try different free models if one is rate-limited
30
+ OPENROUTER_MODEL = os.getenv("OPENROUTER_MODEL", "google/gemma-2-2b-it:free") # Alternative free model
31
+ # Other free options: "meta-llama/llama-3.2-3b-instruct:free", "mistralai/mistral-7b-instruct:free"
32
 
33
 
34
  def get_llm() -> LLM:
 
69
  "Get a free key at https://openrouter.ai"
70
  )
71
  # CrewAI uses LiteLLM internally, so we need to format as openrouter/model-name
72
+ # Using gemma-2-2b as default (less likely to be rate-limited than llama-3.2)
73
  return LLM(
74
  model=f"openrouter/{OPENROUTER_MODEL}",
75
+ api_key=OPENROUTER_API_KEY,
76
+ temperature=0.3 # Add temperature for better responses
77
  )
78
 
79
  else: