BtB-ExpC committed on
Commit
40aa0fe
·
1 Parent(s): 40f7f2d

diagnoser chain fix

Browse files
Files changed (3) hide show
  1. app.py +1 -0
  2. chains/diagnoser_chain.py +1 -1
  3. config/llm_config.py +1 -1
app.py CHANGED
@@ -71,6 +71,7 @@ async def run_diagnoser(user_query: str, chosen_model: str, exercise_format: str
71
  chain_instance = config["class"](
72
  template_standardize=config["template_standardize"],
73
  templates_diagnose=config["templates_diagnose"],
 
74
  llm_standardize=config["llm_standardize"], # Fixed: gpt4o-mini
75
  llm_diagnose=llms.get(chosen_model, config["llm_diagnose"]) # Override or fallback to default
76
  )
 
71
  chain_instance = config["class"](
72
  template_standardize=config["template_standardize"],
73
  templates_diagnose=config["templates_diagnose"],
74
+ template_diagnose_scorecard=config["template_diagnose_scorecard"],
75
  llm_standardize=config["llm_standardize"], # Fixed: gpt4o-mini
76
  llm_diagnose=llms.get(chosen_model, config["llm_diagnose"]) # Override or fallback to default
77
  )
chains/diagnoser_chain.py CHANGED
@@ -8,7 +8,7 @@ from config.exercise_standardizer import standardize_exercise
8
 
9
  class DiagnoserChain(BaseModel):
10
  template_standardize: ChatPromptTemplate
11
- llm_standardize: Any # Fixed LLM for step 1 and 3
12
  templates_diagnose: List[ChatPromptTemplate]
13
  llm_diagnose: Any # User-selectable LLM for step 2
14
  template_diagnose_scorecard: ChatPromptTemplate
 
8
 
9
  class DiagnoserChain(BaseModel):
10
  template_standardize: ChatPromptTemplate
11
+ llm_standardize: Any # Fixed LLM for step 1
12
  templates_diagnose: List[ChatPromptTemplate]
13
  llm_diagnose: Any # User-selectable LLM for step 2
14
  template_diagnose_scorecard: ChatPromptTemplate
config/llm_config.py CHANGED
@@ -30,7 +30,7 @@ def create_deepseek_llm(model_name: str, temperature: float):
30
 
31
  llms = {
32
  "GPT-4o": create_openai_llm("gpt-4o", LOW),
33
- "GPT-4o-mini": create_openai_llm("gpt-4o-mini", LOW),
34
  "GPT-4o_high_temp": create_openai_llm("gpt-4o", HIGH),
35
  "GPT-4o-mini_high_temp": create_openai_llm("gpt-4o-mini", HIGH),
36
  "GPT-4 Turbo": create_openai_llm("gpt-4-turbo-2024-04-09", HIGH),
 
30
 
31
  llms = {
32
  "GPT-4o": create_openai_llm("gpt-4o", LOW),
33
+ "GPT-4o-mini": create_openai_llm("gpt-4o-mini", ZERO),
34
  "GPT-4o_high_temp": create_openai_llm("gpt-4o", HIGH),
35
  "GPT-4o-mini_high_temp": create_openai_llm("gpt-4o-mini", HIGH),
36
  "GPT-4 Turbo": create_openai_llm("gpt-4-turbo-2024-04-09", HIGH),