# NOTE: Hugging Face Spaces status banner ("Spaces: Sleeping") was captured
# into this file by a copy/export artifact; preserved here as a comment so
# the module remains valid Python.
"""Central LLM client configuration.

Loads API keys from a ``.env`` file one directory above this module and
constructs the shared LLM client instances used across the project.
"""

import os
from pathlib import Path

from crewai import LLM
from dotenv import load_dotenv
from langchain_openai import ChatOpenAI

# .env lives in the parent directory of this file's directory.
env_path = Path(__file__).resolve().parents[1] / ".env"

# Load variables from the .env file into the process environment.
load_dotenv(dotenv_path=env_path)

# API keys pulled from the environment; any may be None if unset.
gemini_key = os.getenv("GEMINI_API_KEY")
tavily_key = os.getenv("TAVILY_API_KEY")
openrouter_key = os.getenv("OPENROUTER_API_KEY")

# Some Anthropic-model integrations read ANTHROPIC_API_KEY from the
# environment. Guard the assignment: os.environ values must be str, so
# assigning None (key unset) would raise TypeError at import time.
if openrouter_key is not None:
    os.environ["ANTHROPIC_API_KEY"] = openrouter_key

# All OpenRouter-backed clients share the same API endpoint.
OPENROUTER_BASE_URL = "https://openrouter.ai/api/v1"

# llm = LLM(model="gemini/gemini-2.5-flash", temperature=0, api_key=gemini_key)

# Default LLM: free gpt-oss-20b via OpenRouter.
llm = LLM(
    model="openrouter/openai/gpt-oss-20b:free",
    api_key=openrouter_key,
    base_url=OPENROUTER_BASE_URL,
)

# Alias client for the same free gpt-oss-20b model (kept for callers that
# reference it by this name).
llm_oss = LLM(
    model="openrouter/openai/gpt-oss-20b:free",
    api_key=openrouter_key,
    base_url=OPENROUTER_BASE_URL,
)

# Gemini 2.5 Flash routed through OpenRouter; temperature=0 for
# deterministic output.
llm_g = LLM(
    model="openrouter/google/gemini-2.5-flash",
    temperature=0,
    api_key=openrouter_key,
    base_url=OPENROUTER_BASE_URL,
)

# OpenAI models addressed directly (LLM resolves the provider's own
# credentials from the environment, e.g. OPENAI_API_KEY).
llm_5m = LLM(
    model="gpt-5-mini-2025-08-07",
)
llm_4m = LLM(
    model="gpt-4.1-mini-2025-04-14",
)
llm_5n = LLM(
    model="gpt-5-nano-2025-08-07",
)

# Anthropic Claude models via OpenRouter, non-streaming. Uses the already
# fetched openrouter_key for consistency with the clients above (the
# previous os.environ["OPENROUTER_API_KEY"] lookup raised KeyError when
# the variable was unset).
llm_open_c3 = LLM(
    api_key=openrouter_key,
    base_url=OPENROUTER_BASE_URL,
    model="openrouter/anthropic/claude-3-haiku",
    streaming=False,
)
llm_open_c35 = LLM(
    api_key=openrouter_key,
    base_url=OPENROUTER_BASE_URL,
    model="openrouter/anthropic/claude-3.5-haiku",
    streaming=False,
)

# LangChain ChatOpenAI client pointed at OpenRouter's OpenAI-compatible
# endpoint, serving Gemini 2.5 Flash (not an OpenAI model, despite the
# class name).
model = ChatOpenAI(
    model="google/gemini-2.5-flash",
    temperature=0,
    api_key=openrouter_key,
    base_url=OPENROUTER_BASE_URL,
)

# # check llm
# response=llm_g.call("Hello, how are you?")
# print(response)