okokok, changing buttons clicks after all
chains/diagnoser/runner.py    +8 -8
chains/distractors/runner.py  +1 -1
config/llm_config.py          +3 -3
main.py                       +3 -11
chains/diagnoser/runner.py
CHANGED
@@ -6,7 +6,7 @@ from app.helpers.exercise_standardizer import standardize_exercise
 from config.llm_config import llms
 
 
-async def run_diagnoser(user_query: str, model_choice_validate: str, exercise_fo…
+async def run_diagnoser(user_query: str, model_choice_diagnose: str, exercise_format_diagnose: str, sampling_count_diagnose: str) -> tuple:
     """
     Diagnose exercise(s) in parallel using a configured DiagnoserChain.
 
@@ -18,15 +18,15 @@ async def run_diagnoser(user_query: str, model_choice_validate: str, exercise_fo
 
     Args:
         user_query (str): Raw exercise data submitted by the user.
-        …
-        …
-        …
+        model_choice_diagnose (str): The key/name of the chosen LLM for diagnosing.
+        exercise_format_diagnose (str): The desired format for standardizing the exercise.
+        sampling_count_diagnose (str): A string representing how many diagnoses to run concurrently (e.g., "3").
 
     Returns:
         tuple: A tuple of length 10, each containing a diagnosis result (or empty string if not enough samples).
     """
     # figure out how many times to run
-    num_samples = int("".join(filter(str.isdigit, …
+    num_samples = int("".join(filter(str.isdigit, sampling_count_diagnose)))
 
     # Fetch the DiagnoserChain configuration.
     config = chain_configs["diagnoser"]
@@ -34,7 +34,7 @@ async def run_diagnoser(user_query: str, model_choice_validate: str, exercise_fo
     # 1) Standardize the user query exactly once
     standardized_exercise = await standardize_exercise(
         user_query,
-        …
+        exercise_format_diagnose,
         config["template_standardize"],  # Only if you kept them in config
         config["llm_standardize"]
     )
@@ -42,7 +42,7 @@ async def run_diagnoser(user_query: str, model_choice_validate: str, exercise_fo
     # 2) Instantiate the DiagnoserChain using the user-selected LLM for diagnosing
     chain_instance = config["class"](
         templates_diagnose=config["templates_diagnose"],
-        llm_diagnose=llms.get(…
+        llm_diagnose=llms.get(model_choice_diagnose, config["llm_diagnose"]),
         template_diagnose_scorecard=config["template_diagnose_scorecard"],
         llm_4o_mini=config["llm_4o_mini"],
         llm_4o=config["llm_4o"]
@@ -61,4 +61,4 @@ async def run_diagnoser(user_query: str, model_choice_validate: str, exercise_fo
     all_responses = list(responses) + [""] * (10 - len(responses))
 
     # Return a tuple of exactly 5 responses.
-    return tuple(all_responses)
+    return tuple(all_responses) + (standardized_exercise,)
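With this change run_diagnoser returns 11 values rather than 10: the padded diagnosis slots plus standardized_exercise as a trailing element (the "# Return a tuple of exactly 5 responses." comment is stale; the unchanged context already pads to 10). A minimal sketch of the parsing and padding behavior, using hypothetical sample values:

# Sketch with hypothetical values; mirrors the logic in the hunks above.
sampling_count_diagnose = "3 samples"
# keep only the digits, then parse; raises ValueError if the string has none
num_samples = int("".join(filter(str.isdigit, sampling_count_diagnose)))  # -> 3

responses = [f"diagnosis {i}" for i in range(num_samples)]
all_responses = list(responses) + [""] * (10 - len(responses))  # pad to 10 slots

standardized_exercise = "standardized exercise text"
result = tuple(all_responses) + (standardized_exercise,)
assert len(result) == 11  # 10 response fields + 1 state value

The extra trailing element is what the new outputs=... + [dummy_state] wiring in main.py consumes.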
chains/distractors/runner.py
CHANGED
@@ -57,4 +57,4 @@ async def run_distractors(
     # 4) Pad up to 10 outputs to correspond to 10 response fields
     all_responses = list(results) + [""] * (10 - len(results))
 
-    return tuple(all_responses)
+    return tuple(all_responses) + (standardized_exercise)
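One catch in this hunk: unlike the diagnoser runner, (standardized_exercise) here has no trailing comma, so it is just a parenthesized string rather than a one-element tuple, and tuple + str raises TypeError ("can only concatenate tuple (not \"str\") to tuple") when the button is clicked. The presumably intended line, matching the diagnoser version:

return tuple(all_responses) + (standardized_exercise,)  # trailing comma makes this a one-element tuple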
config/llm_config.py
CHANGED
@@ -45,9 +45,9 @@ llms = {
 
     # OpenAI reasoning models (no temperature)
     "o1": create_openai_reasoning_llm("o1-2024-12-17"),
-    "o3-mini (low …
-    "o3-mini (medium …
-    "o3-mini (high …
+    "o3-mini (low reasoning_effort)": create_openai_reasoning_llm("o3-mini", reasoning_effort="low"),
+    "o3-mini (medium reasoning_effort)": create_openai_reasoning_llm("o3-mini", reasoning_effort="medium"),
+    "o3-mini (high reasoning_effort)": create_openai_reasoning_llm("o3-mini", reasoning_effort="high"),
 
     # Anthropic models (Claude)
     "Claude 3.5 (zero temp)": create_anthropic_llm("claude-3-5-sonnet-latest", ZERO),
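The create_openai_reasoning_llm factory itself is outside this diff. A plausible sketch, assuming the helper wraps ChatOpenAI from langchain_openai (which exposes a reasoning_effort kwarg for o-series models) and deliberately omits temperature, which reasoning models reject:

from langchain_openai import ChatOpenAI

def create_openai_reasoning_llm(model: str, reasoning_effort: str | None = None) -> ChatOpenAI:
    # No temperature here: OpenAI reasoning models (o1, o3-mini) do not accept it.
    kwargs: dict = {"model": model}
    if reasoning_effort is not None:
        kwargs["reasoning_effort"] = reasoning_effort  # "low" | "medium" | "high"
    return ChatOpenAI(**kwargs)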
main.py
CHANGED
@@ -93,30 +93,22 @@ with gr.Blocks() as interface:
     diagnoser_button.click(
         fn=run_diagnoser,
         inputs=[diagnoser_input, model_choice_diagnose, exercise_format_diagnose, sampling_count_diagnose],
-        outputs=diagnoser_responses
+        outputs=diagnoser_responses + [dummy_state]
     )
 
     distractors_button.click(
         fn=run_distractors,
         inputs=[
-            # …
-            distractors_input,
-            # 2) model_choice_distractors_1
+            distractors_input,  # user query
             model_choice_distractors_1,
-            # 3) model_choice_distractors_2
             model_choice_distractors_2,
-            # 4) model_choice_distractors_3
             model_choice_distractors_3,
-            # 5) exercise_format_distractors
             exercise_format_distractors,
-            # 6) sampling_count_distractors
             sampling_count_distractors,
-            # 7) intermediate_distractors_specification
             intermediate_distractors_specification,
-            # 8) final_distractors_specification
             final_distractors_specification,
         ],
-        outputs=distractors_responses
+        outputs=distractors_responses + [dummy_state]
     )
 
 # Launch the app.
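For these outputs lists to line up, the component count must match the runners' new return arity: 10 response fields plus one slot for the trailing standardized exercise. dummy_state is defined outside this diff; a minimal runnable sketch of the assumed wiring (labels, dropdown choices, and the stub runner are hypothetical):

import gradio as gr

async def run_diagnoser(query, model, fmt, count):
    # Stub standing in for chains/diagnoser/runner.py: 10 slots + trailing state.
    return tuple([f"diagnosis of {query}"] + [""] * 9) + (query.strip(),)

with gr.Blocks() as interface:
    diagnoser_input = gr.Textbox(label="Exercise")
    model_choice_diagnose = gr.Dropdown(["o1", "o3-mini (low reasoning_effort)"], label="Model")
    exercise_format_diagnose = gr.Dropdown(["multiple choice"], label="Format")
    sampling_count_diagnose = gr.Dropdown(["1", "3", "10"], label="Samples")
    diagnoser_responses = [gr.Textbox(label=f"Diagnosis {i + 1}") for i in range(10)]
    dummy_state = gr.State()  # absorbs the 11th return value (standardized exercise)
    diagnoser_button = gr.Button("Diagnose")

    diagnoser_button.click(
        fn=run_diagnoser,
        inputs=[diagnoser_input, model_choice_diagnose, exercise_format_diagnose, sampling_count_diagnose],
        outputs=diagnoser_responses + [dummy_state],  # 11 components for 11 values
    )

interface.launch()

A gr.State works as the extra slot because it holds any Python value without rendering a visible component.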