hjmmm

Changed files:
- app.py (+52 -72)
- chains/diagnoser_chain.py (+28 -11)
- chains/distractors_chain.py (+13 -7)
- config/chain_configs.py (+10 -8)
- config/llm_config.py (+26 -5)
- config/templates.py (+27 -23)
- requirements.txt (+2 -1)
app.py
CHANGED

@@ -4,67 +4,53 @@ import os
 import asyncio
 import logging
 
-from utils.auth import login as auth_login  #
+from utils.auth import login as auth_login  # Simple authentication
 from config.chain_configs import chain_configs
+from config.llm_config import llms
 
 logger = logging.getLogger(__name__)
 
-
-async def run_chain(chain_name: str, input_variables: dict):
-    """
-    A generic async function to run a given structured chain.
-    Handles prompt formatting, invoking the LLM asynchronously, and returning results.
-
-    Args:
-        chain_name (str): The key for the desired chain in the chain_configs.
-        input_variables (dict): A dictionary of variables to format the chain's prompt.
-
-    Returns:
-        The result of invoking the chain's LLM with the formatted prompt.
-    """
+# A generic async runner for chains.
+async def run_chain(chain_name: str, input_variables: dict, selected_model: str):
     try:
-        # Resolve the chain configuration by its name.
         chain_config = chain_configs.get(chain_name)
         if not chain_config:
-            raise KeyError(f"Chain '{chain_name}' not found")
+            raise KeyError(f"Chain '{chain_name}' not found.")
+
+        # Override the LLM based on user selection.
+        chosen_llm = llms.get(selected_model)
+        if not chosen_llm:
+            raise KeyError(f"LLM '{selected_model}' is not configured.")
+
+        # Instantiate the chain with the chosen LLM.
+        if chain_name == "diagnoser":
+            chain_instance = chain_config["class"](
+                template_standardize=chain_config["template_standardize"],
+                template_diagnose=chain_config["template_diagnose"],
+                llm=chosen_llm,
+            )
+        elif chain_name == "distractors":
+            chain_instance = chain_config["class"](
+                template=chain_config["template"],
+                llm=chosen_llm,
+            )
+        else:
+            raise KeyError(f"Chain '{chain_name}' is not implemented.")
+
+        result = await chain_instance.run(input_variables["user_query"])
         logger.info(f"Chain '{chain_name}' executed successfully.")
         return result
 
-    except KeyError as e:
-        logger.error(f"Chain configuration error: {e}")
-        raise ValueError(f"Invalid chain structure: missing {e}")
-
     except Exception as e:
-        logger.error(f"Error
+        logger.error(f"Error in run_chain for '{chain_name}': {e}")
+        return f"Error: {e}"
 
-async def run_diagnoser(user_query: str) -> str:
-    """
-    Async function to run the diagnoser chain.
-    Prepares the input variables and awaits the chain's result.
-    """
-    input_vars = {"user_query": user_query}
-    result = await run_chain("diagnoser", input_vars)
-    return result
-
-async def run_distractors(user_query: str) -> str:
-    """
-    Async function to run the distractors brainstorm chain.
-    Prepares the input variables and awaits the chain's result.
-    """
-    input_vars = {"user_query": user_query}
-    result = await run_chain("distractors", input_vars)
-    return result
+# Async wrappers for each chain.
+async def run_diagnoser(user_query: str, model_choice: str) -> str:
+    return await run_chain("diagnoser", {"user_query": user_query}, model_choice)
+
+async def run_distractors(user_query: str, model_choice: str) -> str:
+    return await run_chain("distractors", {"user_query": user_query}, model_choice)
 
 # -------------------------------
 # Build the Gradio Interface
@@ -73,59 +59,53 @@ with gr.Blocks() as demo:
     # --- Login Page ---
     with gr.Column(visible=True, elem_id="login_page") as login_container:
         gr.Markdown("## 🔒 Please Login")
-        password_input = gr.Textbox(
-            label="Enter Password",
-            type="password",
-            placeholder="Enter password to access the app"
-        )
+        password_input = gr.Textbox(label="Enter Password", type="password", placeholder="Enter password to access the app")
         login_button = gr.Button("Login")
         login_error = gr.Markdown(value="")
 
    # --- Main App (initially hidden) ---
    with gr.Column(visible=False, elem_id="main_app") as app_container:
        gr.Markdown("## Core Functionalities")
+        # Dropdown for LLM selection.
+        model_choice = gr.Dropdown(
+            choices=list(llms.keys()),
+            value="gpt4o",  # default must be one of the keys in llms
+            label="Select LLM Model",
+            interactive=True,
+        )
        with gr.Tabs():
            with gr.TabItem("Diagnoser"):
                gr.Markdown("### Diagnoser")
-                diagnoser_input = gr.Textbox(
-                    label="Enter Diagnoser Query",
-                    placeholder="Type your query here..."
-                )
+                diagnoser_input = gr.Textbox(label="Enter Diagnoser Query", placeholder="Type your exercise description here...")
                diagnoser_button = gr.Button("Submit")
-                diagnoser_output = gr.Textbox(label="Response", interactive=False)
+                diagnoser_output = gr.Textbox(label="Diagnosis", interactive=False)
            with gr.TabItem("Distractors brainstorm"):
                gr.Markdown("### Distractors brainstorm")
-                distractors_input = gr.Textbox(
-                    label="Enter Brainstorm Query",
-                    placeholder="Type your query here..."
-                )
+                distractors_input = gr.Textbox(label="Enter Brainstorm Query", placeholder="Type your query here...")
                distractors_button = gr.Button("Submit")
                distractors_output = gr.Textbox(label="Response", interactive=False)
 
    # -------------------------------
    # Set Up Interactions
    # -------------------------------
-    # Login button: if the password is correct, hide the login container and reveal the main app.
+    # Login button interaction.
    login_button.click(
        fn=auth_login,
        inputs=[password_input],
        outputs=[login_container, app_container, login_error]
    )
 
-    #
+    # Note: Gradio supports async functions as callbacks.
    diagnoser_button.click(
        fn=run_diagnoser,
-        inputs=[diagnoser_input],
+        inputs=[diagnoser_input, model_choice],
        outputs=[diagnoser_output]
    )
-
-    # Run the Distractors brainstorm chain asynchronously.
    distractors_button.click(
        fn=run_distractors,
-        inputs=[distractors_input],
+        inputs=[distractors_input, model_choice],
        outputs=[distractors_output]
    )
 
-# Launch the
+# Launch the app.
 demo.launch()
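Note: the commit wires `auth_login` to three outputs (the two containers and the error message) but does not include `utils/auth.py`. A minimal sketch of a compatible `login`, assuming the expected password comes from an `APP_PASSWORD` environment variable (the variable name and comparison logic are assumptions, not part of this commit):

# utils/auth.py (hypothetical sketch; not included in this commit)
import os
import gradio as gr

APP_PASSWORD = os.getenv("APP_PASSWORD", "")

def login(password: str):
    # Must return one update per click() output:
    # login_container, app_container, login_error.
    if password == APP_PASSWORD:
        return gr.update(visible=False), gr.update(visible=True), ""
    return gr.update(visible=True), gr.update(visible=False), "❌ Incorrect password."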
chains/diagnoser_chain.py
CHANGED

@@ -1,14 +1,31 @@
+# chains/diagnoser_chain.py
 from pydantic import BaseModel
+from typing import Any
+from langchain_core.prompts.chat import ChatPromptTemplate
 
 class DiagnoserChain(BaseModel):
+    template_standardize: ChatPromptTemplate
+    template_diagnose: ChatPromptTemplate
+    llm: Any  # This will be an LLM instance (e.g. ChatOpenAI or ChatAnthropic)
+
+    async def run(self, user_query: str) -> str:
+        """
+        Runs the composite chain:
+        1. Standardizes the exercise description.
+        2. Generates a diagnosis from the standardized format.
+        """
+        # Step 1: Standardize the exercise description.
+        # Using async formatting/invocation.
+        prompt_std = await self.template_standardize.aformat_prompt(user_input=user_query)
+        std_messages = prompt_std.to_messages()
+        # Extract the text content so a plain string feeds the next prompt.
+        standardized_exercise = (await self.llm.ainvoke(std_messages)).content
+
+        # Step 2: Generate a diagnosis based on the standardized exercise.
+        prompt_diag = await self.template_diagnose.aformat_prompt(standardized_exercise=standardized_exercise)
+        diag_messages = prompt_diag.to_messages()
+        diagnosis = await self.llm.ainvoke(diag_messages)
+        return diagnosis.content
+
+    class Config:
+        arbitrary_types_allowed = True
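For reference, the two-step chain can be exercised on its own. A hedged example (the sample query is illustrative; it assumes the templates and `llms` registry from this commit plus a valid API key in the environment):

# Hypothetical standalone run of DiagnoserChain.
import asyncio
from chains.diagnoser_chain import DiagnoserChain
from config.templates import standardize_template, diagnose_template
from config.llm_config import llms

chain = DiagnoserChain(
    template_standardize=standardize_template,
    template_diagnose=diagnose_template,
    llm=llms["mini"],
)
print(asyncio.run(chain.run("3 sets of 10 push-ups with 60 seconds rest")))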
chains/distractors_chain.py
CHANGED

@@ -1,11 +1,17 @@
+# chains/distractors_chain.py
 from pydantic import BaseModel
+from typing import Any
+from langchain_core.prompts.chat import ChatPromptTemplate
 
 class DistractorsChain(BaseModel):
-    template:
-    llm:
-
-        response = self.llm.call(prompt)
-        return response
+    template: ChatPromptTemplate
+    llm: Any
+
+    async def run(self, user_query: str) -> str:
+        prompt = await self.template.aformat_prompt(user_input=user_query)
+        messages = prompt.to_messages()
+        result = await self.llm.ainvoke(messages)
+        return result.content
 
+    class Config:
+        arbitrary_types_allowed = True
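Because `llm` is typed as `Any`, the chain can be unit-tested without network access by injecting a fake chat model. A sketch assuming `FakeListChatModel` from `langchain_core` (the canned response is illustrative):

# Hypothetical offline test using a canned fake model.
import asyncio
from langchain_core.language_models import FakeListChatModel
from chains.distractors_chain import DistractorsChain
from config.templates import distractors_template

fake_llm = FakeListChatModel(responses=["Distractor A; Distractor B; Distractor C"])
chain = DistractorsChain(template=distractors_template, llm=fake_llm)
assert asyncio.run(chain.run("multiplication of fractions")) == "Distractor A; Distractor B; Distractor C"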
config/chain_configs.py
CHANGED

@@ -1,18 +1,20 @@
-
-from config.templates import dummy_template
+# config/chain_configs.py
+from config.templates import standardize_template, diagnose_template, distractors_template
 from chains.diagnoser_chain import DiagnoserChain
 from chains.distractors_chain import DistractorsChain
+from config.llm_config import llms
 
+# Note: The default LLM here is OpenAI; the UI can override this choice.
 chain_configs = {
     "diagnoser": {
-        "template": dummy_template,
         "class": DiagnoserChain,
+        "template_standardize": standardize_template,
+        "template_diagnose": diagnose_template,
+        "llm": llms["gpt4o"],  # must be an existing key in llms
     },
     "distractors": {
-        "template": dummy_template,  # Replace with a specific template if needed.
         "class": DistractorsChain,
+        "template": distractors_template,
+        "llm": llms["gpt4o"],
     },
 }
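A possible simplification (a sketch, not part of the commit): because the template keys in each config entry match the corresponding chain class's constructor fields, `run_chain` could build any chain generically instead of branching per name.

# Hypothetical generic builder; relies on config keys matching constructor fields.
from config.chain_configs import chain_configs

def build_chain(chain_name: str, chosen_llm):
    cfg = chain_configs[chain_name]
    # Everything except "class" and "llm" is a constructor kwarg (the templates).
    kwargs = {k: v for k, v in cfg.items() if k not in ("class", "llm")}
    return cfg["class"](llm=chosen_llm, **kwargs)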
config/llm_config.py
CHANGED

@@ -1,14 +1,35 @@
+# config/llm_config.py
+import os
 from langchain_openai import ChatOpenAI
+from langchain_anthropic import ChatAnthropic
 
+# Retrieve API keys from environment variables
+OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
+ANTHROPIC_API_KEY = os.getenv("ANTHROPIC_API_KEY")
+DEEPSEEK_API_KEY = os.getenv("DEEPSEEK_API_KEY")
+
+# Define temperature presets (adjust as needed)
 ZERO = 0
 LOW = 0.2
 MID = 0.7
 HIGH = 1.2
 
+# Factory functions for each provider
+def create_openai_llm(model_name: str, temperature: float):
+    return ChatOpenAI(api_key=OPENAI_API_KEY, model_name=model_name, temperature=temperature)
+
+def create_anthropic_llm(model_name: str, temperature: float):
+    return ChatAnthropic(api_key=ANTHROPIC_API_KEY, model_name=model_name, temperature=temperature)
+
+def create_deepseek_llm(model_name: str, temperature: float):
+    # DeepSeek exposes an OpenAI-compatible API, so reuse ChatOpenAI with a custom base_url.
+    return ChatOpenAI(api_key=DEEPSEEK_API_KEY, base_url="https://api.deepseek.com", model_name=model_name, temperature=temperature)
+
 llms = {
-    "gpt4o":
-    "mini":
-    "gpt4o_high_temp":
-    "mini_high_temp":
-    "o1":
+    "gpt4o": create_openai_llm("gpt-4o", LOW),
+    "mini": create_openai_llm("gpt-4o-mini", LOW),
+    "gpt4o_high_temp": create_openai_llm("gpt-4o", HIGH),
+    "mini_high_temp": create_openai_llm("gpt-4o-mini", HIGH),
+    "o1": create_openai_llm("o1", 1),  # o1 models only accept the default temperature
+    "Claude35": create_anthropic_llm("claude-3-5-sonnet-20241022", LOW),
+    "R1": create_deepseek_llm("deepseek-reasoner", LOW),
 }
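A quick way to sanity-check a configured model (assumes the matching API key is set; the `"mini"` key is from the registry above, and the prompt text is illustrative):

# Hypothetical smoke test for the llms registry.
import asyncio
from config.llm_config import llms

async def main():
    reply = await llms["mini"].ainvoke("Reply with the single word: ok")
    print(reply.content)

asyncio.run(main())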
config/templates.py
CHANGED

@@ -1,25 +1,29 @@
+# config/templates.py
+from langchain_core.prompts.chat import ChatPromptTemplate
 
+# Template to standardize the exercise description.
+standardize_template = ChatPromptTemplate.from_messages([
+    ("system", "You are an exercise standardizer. Convert the following exercise description into a standardized format."),
+    ("human", "{user_input}"),
+])
 
+# Template to generate a diagnosis from the standardized exercise.
+diagnose_template = ChatPromptTemplate.from_messages([
+    ("system", "You are a diagnostic assistant. Based on the standardized exercise description, provide a detailed diagnosis of potential issues and improvements."),
+    ("human", "{standardized_exercise}"),
+])
 
+# Template for the distractors brainstorm (a single-step chain).
+distractors_template = ChatPromptTemplate.from_messages([
+    ("system", "You are a brainstorming assistant. Provide creative distractors and brainstorm ideas based on the user input."),
+    ("human", "{user_input}"),
+])
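Since the templates are plain `ChatPromptTemplate`s, variable wiring can be verified locally before any LLM call; the sample input is illustrative:

# Hypothetical render check; no API key required.
from config.templates import standardize_template

messages = standardize_template.format_messages(user_input="3x10 squats at 60 kg")
for message in messages:
    print(f"{message.type}: {message.content}")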
requirements.txt
CHANGED

@@ -1,6 +1,7 @@
 gradio>=3.42.0
 openai==1.57.3
-langchain-openai==0.
+langchain-openai==0.3.4
+langchain-anthropic==0.3.7
 langchain-core==0.3.24
 pydantic==2.9.2
 typing_extensions==4.12.2