BtB-ExpC committed on
Commit
1937b5f
·
1 Parent(s): 2e8cc42

correct input value names

Browse files
chains/distractors/runner.py CHANGED
@@ -30,7 +30,7 @@ async def run_distractors(
30
  # Fetch the DistractorsChain configuration.
31
  config = chain_configs["distractors"]
32
 
33
- # 1) Standardize the user query exactly once
34
  standardized_exercise = await standardize_exercise(
35
  user_query,
36
  exercise_format_distractors,
 
30
  # Fetch the DistractorsChain configuration.
31
  config = chain_configs["distractors"]
32
 
33
+ # 1) Standardize the user query once for all tracks
34
  standardized_exercise = await standardize_exercise(
35
  user_query,
36
  exercise_format_distractors,
chains/exercises/fluster_writing_chain.py CHANGED
@@ -15,7 +15,7 @@ class FlusterWritingChain(BaseModel):
15
  default_llm_a: Any
16
  default_llm_b: Any
17
 
18
- template_refine_distractors: ChatPromptTemplate
19
  llm_refine: Any
20
 
21
  template_sanitize_fluster: ChatPromptTemplate
 
15
  default_llm_a: Any
16
  default_llm_b: Any
17
 
18
+ template_refine_fluster: ChatPromptTemplate
19
  llm_refine: Any
20
 
21
  template_sanitize_fluster: ChatPromptTemplate
chains/exercises/runner.py CHANGED
@@ -27,7 +27,7 @@ async def run_fluster(
27
  llm_a = llms.get(model_choice_1, config["default_llm_a"])
28
  llm_b = llms.get(model_choice_2, config["default_llm_b"])
29
 
30
- template_refine = config["template_refine_distractors"]
31
  llm_refine = config["llm_refine"]
32
 
33
  template_sanitize = config["template_sanitize"]
@@ -60,17 +60,17 @@ async def run_fluster(
60
  gen_llm = llm_b
61
 
62
  # 1) Generate
63
- gen_msg = await gen_template.aformat_prompt(user_input=user_input_text)
64
  gen_resp = await gen_llm.ainvoke(gen_msg.to_messages())
65
  write_fluster_result = getattr(gen_resp, "content", gen_resp)
66
 
67
  # 2) Refine distractors
68
- refine_msg = await template_refine.aformat_prompt(raw_exercise=write_fluster_result)
69
  refine_resp = await llm_refine.ainvoke(refine_msg.to_messages())
70
  refined_output = getattr(refine_resp, "content", refine_resp)
71
 
72
  # 3) Sanitize
73
- sanitize_msg = await template_sanitize.aformat_prompt(raw_output=refined_output)
74
  sanitize_resp = await llm_sanitize.ainvoke(sanitize_msg.to_messages())
75
  sanitized_output = getattr(sanitize_resp, "content", sanitize_resp)
76
 
 
27
  llm_a = llms.get(model_choice_1, config["default_llm_a"])
28
  llm_b = llms.get(model_choice_2, config["default_llm_b"])
29
 
30
+ template_refine = config["template_refine_fluster"]
31
  llm_refine = config["llm_refine"]
32
 
33
  template_sanitize = config["template_sanitize"]
 
60
  gen_llm = llm_b
61
 
62
  # 1) Generate
63
+ gen_msg = await gen_template.aformat_prompt(learning_objective=user_input_text)
64
  gen_resp = await gen_llm.ainvoke(gen_msg.to_messages())
65
  write_fluster_result = getattr(gen_resp, "content", gen_resp)
66
 
67
  # 2) Refine fluster
68
+ refine_msg = await template_refine.aformat_prompt(write_fluster_result=write_fluster_result)
69
  refine_resp = await llm_refine.ainvoke(refine_msg.to_messages())
70
  refined_output = getattr(refine_resp, "content", refine_resp)
71
 
72
  # 3) Sanitize
73
+ sanitize_msg = await template_sanitize.aformat_prompt(refinement_result=refined_output)
74
  sanitize_resp = await llm_sanitize.ainvoke(sanitize_msg.to_messages())
75
  sanitized_output = getattr(sanitize_resp, "content", sanitize_resp)
76
 
config/chain_configs.py CHANGED
@@ -15,7 +15,7 @@ from config.templates import (
15
  template_sanitize_learning_objectives,
16
  template_write_fluster_a,
17
  template_write_fluster_b,
18
- template_refine_distractors,
19
  template_sanitize_fluster,
20
  )
21
  from chains.diagnoser.diagnoser_chain import DiagnoserChain
@@ -72,7 +72,7 @@ chain_configs = {
72
  "default_llm_a": llms["o1 (high reasoning_effort)"],
73
  "default_llm_b": llms["o3-mini (high reasoning_effort)"],
74
  # Prompt & LLM for the refine-distractors step
75
- "template_refine_distractors": template_refine_distractors,
76
  "llm_refine": llms["o1 (high reasoning_effort)"],
77
  "template_sanitize": template_sanitize_fluster,
78
  "llm_sanitize": "GPT-4o-mini (zero temp)",
 
15
  template_sanitize_learning_objectives,
16
  template_write_fluster_a,
17
  template_write_fluster_b,
18
+ template_refine_fluster,
19
  template_sanitize_fluster,
20
  )
21
  from chains.diagnoser.diagnoser_chain import DiagnoserChain
 
72
  "default_llm_a": llms["o1 (high reasoning_effort)"],
73
  "default_llm_b": llms["o3-mini (high reasoning_effort)"],
74
  # Prompt & LLM for the refine-fluster step
75
+ "template_refine_fluster": template_refine_fluster,
76
  "llm_refine": llms["o1 (high reasoning_effort)"],
77
  "template_sanitize": template_sanitize_fluster,
78
  "llm_sanitize": "GPT-4o-mini (zero temp)",
config/system_prompt_texts.py CHANGED
@@ -775,7 +775,7 @@ Be precise. Shun absolute terms like 'never' or 'always', as they imply complete
775
  """
776
 
777
 
778
- template_refine_distractors_text = """
779
  Given some source data containing exercises, critically analyze this with the goal of refining and improving the exercises.
780
  Play devil's advocate here: in what ways is this version of the exercises not perfect? Do some reasoning about this, and then give the improved exercises. If you didn't find anything to improve, just return the exercises as they are.
781
  """
 
775
  """
776
 
777
 
778
+ template_refine_fluster_text = """
779
  Given some source data containing exercises, critically analyze this with the goal of refining and improving the exercises.
780
  Play devil's advocate here: in what ways is this version of the exercises not perfect? Do some reasoning about this, and then give the improved exercises. If you didn't find anything to improve, just return the exercises as they are.
781
  """
config/templates.py CHANGED
@@ -17,7 +17,7 @@ from config.system_prompt_texts import (
17
  template_sanitize_learning_objectives_text,
18
  template_write_fluster_a_text,
19
  template_write_fluster_b_text,
20
- template_refine_distractors_text,
21
  template_sanitize_fluster_text,
22
  )
23
 
@@ -230,9 +230,9 @@ template_write_fluster_b = ChatPromptTemplate(
230
  )
231
 
232
 
233
- template_refine_distractors = ChatPromptTemplate(
234
  messages=[
235
- ("system", template_refine_distractors_text),
236
  ("human", "Here's the source data:\n"
237
  "{write_fluster_result}")
238
  ],
 
17
  template_sanitize_learning_objectives_text,
18
  template_write_fluster_a_text,
19
  template_write_fluster_b_text,
20
+ template_refine_fluster_text,
21
  template_sanitize_fluster_text,
22
  )
23
 
 
230
  )
231
 
232
 
233
+ template_refine_fluster = ChatPromptTemplate(
234
  messages=[
235
+ ("system", template_refine_fluster_text),
236
  ("human", "Here's the source data:\n"
237
  "{write_fluster_result}")
238
  ],