BtB-ExpC committed on
Commit
b60cd75
·
1 Parent(s): c343e35

standardized_format_state

Browse files
chains/learning_objectives_generator/learning_objectives_chain.py ADDED
@@ -0,0 +1,90 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# app/chains/learning_objectives/learning_objectives_chain.py
import asyncio
from pydantic import BaseModel
from typing import Any
from langchain_core.prompts.chat import ChatPromptTemplate


class LearningObjectivesChain(BaseModel):
    """
    Orchestrates multi-step generation of learning objectives:
      1) Two parallel calls to 'learning_objective_generator' (main LLM + alt LLM).
      2) Combine those outputs.
      3) 'learning_objective_eliminator' on the combined output (main LLM).
      4) 'learning_objective_finetuner' on the eliminator output (main LLM).
      5) 'learning_objective_presenter' to finalize (main LLM).

    Each step has its own ChatPromptTemplate and uses the relevant LLM.
    If you want a separate alt LLM for step #3 or #4, expand as needed.
    """

    # Prompt templates, one per pipeline stage.
    template_generator: ChatPromptTemplate
    template_eliminator: ChatPromptTemplate
    template_finetuner: ChatPromptTemplate
    template_presenter: ChatPromptTemplate

    # LLM references
    llm_main: Any  # The "Main LLM", used for every stage.
    llm_alt: Any   # The "Other LLM", used only for the second generator call.

    async def _invoke(self, llm: Any, template: ChatPromptTemplate, **variables: Any) -> str:
        """Format *template* with *variables*, invoke *llm*, and return the text content."""
        prompt = await template.aformat_prompt(**variables)
        response = await llm.ainvoke(prompt.to_messages())
        # LLM responses are usually message objects exposing .content;
        # plain-string responses are passed through unchanged.
        return getattr(response, "content", response)

    async def run(self, standardized_studytext: str) -> str:
        """
        Run the full pipeline once and return the final presenter output.

        The 'standardized_studytext' is assumed to be already standardized
        outside of this chain, so we jump straight to generation.
        """
        # 1) Two parallel generator calls with different LLMs (llm_main vs. llm_alt).
        #    gather() schedules both coroutines concurrently; no explicit
        #    create_task() wrapping is needed.
        gen_main_result, gen_alt_result = await asyncio.gather(
            self._invoke(
                self.llm_main, self.template_generator,
                standardized_studytext=standardized_studytext,  # possibly other variables
                llm_label="MAIN",
            ),
            self._invoke(
                self.llm_alt, self.template_generator,
                standardized_studytext=standardized_studytext,
                llm_label="ALT",
            ),
        )

        # 2) Combine both generator outputs into a single labelled document.
        combined_generators = f"[GEN MAIN]\n{gen_main_result}\n\n[GEN ALT]\n{gen_alt_result}"

        # 3) learning_objective_eliminator (llm_main)
        elim_output = await self._invoke(
            self.llm_main, self.template_eliminator,
            combined_generators=combined_generators,
            standardized_studytext=standardized_studytext,
        )

        # 4) learning_objective_finetuner (llm_main)
        fine_output = await self._invoke(
            self.llm_main, self.template_finetuner,
            elimination_output=elim_output,
            standardized_studytext=standardized_studytext,
        )

        # 5) learning_objective_presenter (llm_main) -- final output.
        return await self._invoke(
            self.llm_main, self.template_presenter,
            finetuned_output=fine_output,
            standardized_studytext=standardized_studytext,
        )

    class Config:
        # LLM clients and prompt templates are not pydantic models.
        arbitrary_types_allowed = True
chains/learning_objectives_generator/runner.py ADDED
File without changes
config/exercise_standardizer.py CHANGED
@@ -1,12 +1,14 @@
1
  # exercise_standardizer.py
 
2
  from langchain_core.prompts import ChatPromptTemplate
3
  from typing import Any
4
  from config.format_mappings import FORMAT_MAPPINGS
 
 
5
 
6
  async def standardize_exercise(user_query: str, exercise_format: str, template: ChatPromptTemplate, llm: Any) -> str:
7
  """
8
- Standardizes an exercise's format using the specified template and LLM.
9
- Uses token streaming for efficiency.
10
  """
11
  if exercise_format == "Raw (original)":
12
  return user_query # No transformation needed
@@ -25,4 +27,7 @@ async def standardize_exercise(user_query: str, exercise_format: str, template:
25
  response = await llm.ainvoke(std_messages)
26
  standardized_exercise = getattr(response, "content", response)
27
 
 
 
28
  return standardized_exercise
 
 
1
  # exercise_standardizer.py
2
+ import gradio as gr
3
  from langchain_core.prompts import ChatPromptTemplate
4
  from typing import Any
5
  from config.format_mappings import FORMAT_MAPPINGS
6
+ from utils.state_manager import standardized_format_state
7
+
8
 
9
  async def standardize_exercise(user_query: str, exercise_format: str, template: ChatPromptTemplate, llm: Any) -> str:
10
  """
11
+ Standardizes an exercise's format using the specified template and LLM, and updates the UI via standardized_format_state.
 
12
  """
13
  if exercise_format == "Raw (original)":
14
  return user_query # No transformation needed
 
27
  response = await llm.ainvoke(std_messages)
28
  standardized_exercise = getattr(response, "content", response)
29
 
30
+ standardized_format_state.value = standardized_exercise
31
+
32
  return standardized_exercise
33
+
main.py CHANGED
@@ -1,7 +1,7 @@
1
  # main.py
2
  import gradio as gr
3
  import logging
4
-
5
  from app.ui.diagnoser_tab import build_diagnoser_tab
6
  from app.ui.distractors_tab import build_distractors_tab
7
  from chains.diagnoser.runner import run_diagnoser
@@ -68,6 +68,9 @@ with gr.Blocks() as interface:
68
 
69
  # --- Main App (initially hidden) ---
70
  with gr.Column(visible=False, elem_id="main_app") as app_container:
 
 
 
71
  gr.Markdown("## Pick the tab for your task of choice")
72
 
73
  with gr.Tabs():
@@ -123,6 +126,12 @@ with gr.Blocks() as interface:
123
  outputs=[login_container, app_container, login_error]
124
  )
125
 
 
 
 
 
 
 
126
  diagnoser_button.click(
127
  fn=run_diagnoser,
128
  inputs=[diagnoser_input, model_choice_diagnose, exercise_format_diagnose, sampling_count_diagnose],
 
1
  # main.py
2
  import gradio as gr
3
  import logging
4
+ from utils.state_manager import standardized_format_state
5
  from app.ui.diagnoser_tab import build_diagnoser_tab
6
  from app.ui.distractors_tab import build_distractors_tab
7
  from chains.diagnoser.runner import run_diagnoser
 
68
 
69
  # --- Main App (initially hidden) ---
70
  with gr.Column(visible=False, elem_id="main_app") as app_container:
71
# --- Standardized Exercise/Studytext Display ---
# Starts out empty and is populated by the standardized_format_state.change
# handler. NOTE(review): the component is created with visible=True even
# though it renders nothing while empty — confirm that is the intent
# (hiding it here would be permanent, since the change handler only
# updates the value and never toggles visibility).
standardized_format_display = gr.Markdown("", visible=True)
74
  gr.Markdown("## Pick the tab for your task of choice")
75
 
76
  with gr.Tabs():
 
126
  outputs=[login_container, app_container, login_error]
127
  )
128
 
129
# Re-render the Markdown display whenever the standardized format changes.
# Only the value is updated; visibility is left untouched.
# Bug fix: interpolate the handler argument `text` (the state's current
# value supplied by Gradio), not the gr.State object itself — the latter
# would render the State's repr instead of the standardized text.
standardized_format_state.change(
    fn=lambda text: gr.update(
        value=f"#### Here's the most recent standardized format\n{text}"
    ),
    inputs=[standardized_format_state],
    outputs=[standardized_format_display],
)
134
+
135
  diagnoser_button.click(
136
  fn=run_diagnoser,
137
  inputs=[diagnoser_input, model_choice_diagnose, exercise_format_diagnose, sampling_count_diagnose],
test exercises.md → test samples.md RENAMED
@@ -1,4 +1,5 @@
1
- ## Fine
 
2
  ### 1
3
  Theorie:
4
  In iedere gemeente zijn maatschappelijk werkers werkzaam.
@@ -7,7 +8,7 @@ Vraag:
7
  Wat is de hoofdtaak van maatschappelijk werkers?
8
 
9
  1. Mensen verwijzen naar professionele hulpverlening
10
- 2. Assisteren van de huisarts
11
  3. De levenskwaliteit van mensen verbeteren
12
  4. Contact onderhouden met hulpverlenende instanties
13
 
@@ -16,6 +17,20 @@ Correct antwoord:
16
 
17
 
18
  ### 2
 
 
 
 
 
 
 
 
 
 
 
 
 
 
19
  ### 3
20
 
21
  ---
@@ -51,3 +66,14 @@ Correct antwoord:
51
  ### 1
52
  ### 2
53
  ### 3
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Exercises
2
+ ## False positives
3
  ### 1
4
  Theorie:
5
  In iedere gemeente zijn maatschappelijk werkers werkzaam.
 
8
  Wat is de hoofdtaak van maatschappelijk werkers?
9
 
10
  1. Mensen verwijzen naar professionele hulpverlening
11
+ 2. **Assisteren van de huisarts**
12
  3. De levenskwaliteit van mensen verbeteren
13
  4. Contact onderhouden met hulpverlenende instanties
14
 
 
17
 
18
 
19
  ### 2
20
+
21
+ ## False negatives
22
+ ### 1
23
+ Vraag:
24
+ Wat betekent observeren in de context van zorgverlening?
25
+
26
+ 1. Het plannen van zorgtaken
27
+ 2. Het uitvoeren van medische handelingen
28
+ 3. Het geven van je mening over de situatie
29
+ 4. **Bewust letten op wat er om je heen gebeurt**
30
+
31
+ Correct antwoord:
32
+ 4. Bewust letten op wat er om je heen gebeurt
33
+ 5.
34
  ### 3
35
 
36
  ---
 
66
  ### 1
67
  ### 2
68
  ### 3
69
+
70
+ # Learning objectives
71
+ ## 1
72
+ ### Text
73
+ Het hart bestaat uit vier holtes: twee boezems aan de bovenkant en twee kamers aan de onderkant.
74
+ ### Good LOs
75
+ - De student weet dat het hart uit vier holtes bestaat.
76
+ - De student weet dat het hart uit twee boezems bestaat.
77
+ - De student weet dat de boezems van het hart aan de bovenkant zitten.
78
+ - De student weet dat het hart uit twee kamers bestaat.
79
+ - De student weet dat de kamers van het hart aan de onderkant zitten.
utils/state_manager.py ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
# utils/state_manager.py
import gradio as gr

# Global gr.State holding the most recently standardized exercise/studytext.
# NOTE(review): a module-level gr.State acts as a single shared template
# object, and assigning .value from backend code does not by itself push a
# .change event to the frontend — confirm this propagates as intended in
# the Gradio version in use.
standardized_format_state = gr.State("")