Adding Gemini 2.5 & Deepseek
Browse files- config/llm_config.py +14 -5
config/llm_config.py
CHANGED
|
@@ -3,11 +3,14 @@ import os
|
|
| 3 |
from langchain_openai import ChatOpenAI
|
| 4 |
from langchain_anthropic import ChatAnthropic
|
| 5 |
from langchain_deepseek import ChatDeepSeek
|
|
|
|
|
|
|
| 6 |
|
| 7 |
# Retrieve API keys from environment variables
|
| 8 |
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
|
| 9 |
ANTHROPIC_API_KEY = os.getenv("ANTHROPIC_API_KEY")
|
| 10 |
DEEPSEEK_API_KEY = os.getenv("DEEPSEEK_API_KEY")
|
|
|
|
| 11 |
|
| 12 |
# Define temperature presets (adjust as needed)
|
| 13 |
ZERO = 0
|
|
@@ -37,7 +40,10 @@ def create_anthropic_reasoning_llm(model_name: str, reasoning_effort: str = None
|
|
| 37 |
return ChatAnthropic(api_key=ANTHROPIC_API_KEY, model_name=model_name)
|
| 38 |
|
| 39 |
def create_deepseek_llm(model_name: str, temperature: float):
|
| 40 |
-
return
|
|
|
|
|
|
|
|
|
|
| 41 |
|
| 42 |
# all of them in one dictionary
|
| 43 |
llms = {
|
|
@@ -69,10 +75,13 @@ llms = {
|
|
| 69 |
"Claude 3.7": create_anthropic_reasoning_llm("claude-3-7-sonnet-latest"),
|
| 70 |
|
| 71 |
# DeepSeek
|
| 72 |
-
"Deepseek R1 (zero temp)🧠":
|
| 73 |
-
"Deepseek R1 (low temp)🧠":
|
| 74 |
-
"Deepseek R1 (mid temp)🧠":
|
| 75 |
-
"Deepseek R1 (high temp)🧠":
|
|
|
|
|
|
|
|
|
|
| 76 |
}
|
| 77 |
|
| 78 |
# specific for Diagnosis tab
|
|
|
|
| 3 |
from langchain_openai import ChatOpenAI
|
| 4 |
from langchain_anthropic import ChatAnthropic
|
| 5 |
from langchain_deepseek import ChatDeepSeek
|
| 6 |
+
from langchain_google_genai import ChatGoogleGenerativeAI
|
| 7 |
+
from openai import api_key  # NOTE(review): unused — looks like an accidental IDE auto-import; consider removing
|
| 8 |
|
| 9 |
# Retrieve API keys from environment variables
|
| 10 |
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
|
| 11 |
ANTHROPIC_API_KEY = os.getenv("ANTHROPIC_API_KEY")
|
| 12 |
DEEPSEEK_API_KEY = os.getenv("DEEPSEEK_API_KEY")
|
| 13 |
+
GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")
|
| 14 |
|
| 15 |
# Define temperature presets (adjust as needed)
|
| 16 |
ZERO = 0
|
|
|
|
| 40 |
return ChatAnthropic(api_key=ANTHROPIC_API_KEY, model_name=model_name)
|
| 41 |
|
| 42 |
def create_deepseek_llm(model_name: str, temperature: float):
    """Build a ChatDeepSeek client for *model_name* with the given sampling temperature.

    Reads the API key from the module-level DEEPSEEK_API_KEY constant.
    """
    # `model_name` is the OpenAI-style alias for `model` on ChatDeepSeek.
    client = ChatDeepSeek(
        api_key=DEEPSEEK_API_KEY,
        model_name=model_name,
        temperature=temperature,
    )
    return client
|
| 44 |
+
|
| 45 |
+
def create_google_reasoning_llm(model_name: str):
    """Build a ChatGoogleGenerativeAI client for a Gemini reasoning model.

    Args:
        model_name: Gemini model identifier, e.g. "gemini-2.5-pro-exp-03-25".

    Returns:
        A configured ChatGoogleGenerativeAI instance (no temperature override;
        the provider default is used).
    """
    # FIX: ChatGoogleGenerativeAI's required field is `model` — unlike the
    # OpenAI-style wrappers there is no `model_name` alias, so passing
    # `model_name=` raises a validation error at construction time.
    return ChatGoogleGenerativeAI(api_key=GOOGLE_API_KEY, model=model_name)
|
| 47 |
|
| 48 |
# all of them in one dictionary
|
| 49 |
llms = {
|
|
|
|
| 75 |
"Claude 3.7": create_anthropic_reasoning_llm("claude-3-7-sonnet-latest"),
|
| 76 |
|
| 77 |
# DeepSeek
|
| 78 |
+
"Deepseek R1 (zero temp)🧠": create_deepseek_llm("deepseek-reasoner", ZERO),
|
| 79 |
+
"Deepseek R1 (low temp)🧠": create_deepseek_llm("deepseek-reasoner", LOW),
|
| 80 |
+
"Deepseek R1 (mid temp)🧠": create_deepseek_llm("deepseek-reasoner", MID),
|
| 81 |
+
"Deepseek R1 (high temp)🧠": create_deepseek_llm("deepseek-reasoner", HIGH),
|
| 82 |
+
|
| 83 |
+
# Google models (Gemini)
|
| 84 |
+
"Gemini 2.5 Pro Experimental (zero temp)🧠": create_google_reasoning_llm(model_name="gemini-2.5-pro-exp-03-25"),
|
| 85 |
}
|
| 86 |
|
| 87 |
# specific for Diagnosis tab
|