BtB-ExpC committed on
Commit
3640574
·
1 Parent(s): 7677cf9

sampling implemented

Browse files
app.py CHANGED
@@ -54,7 +54,8 @@ async def run_chain(chain_name: str, input_variables: dict, selected_model: str)
54
  return f"Error: {e}"
55
 
56
  # Async wrappers for each chain.
57
- async def run_diagnoser(user_query: str, chosen_model: str) -> str:
 
58
  # Fetch the DiagnoserChain configuration.
59
  config = chain_configs["diagnoser"]
60
 
@@ -67,7 +68,18 @@ async def run_diagnoser(user_query: str, chosen_model: str) -> str:
67
  llm_standardize=config["llm_standardize"], # Fixed: gpt4o-mini
68
  llm_diagnose=llms.get(chosen_model, config["llm_diagnose"]) # Override or fallback to default
69
  )
70
- return await chain_instance.run(user_query, exercise_format)
 
 
 
 
 
 
 
 
 
 
 
71
 
72
 
73
  async def run_distractors(user_query: str, model_choice: str) -> str:
@@ -116,31 +128,20 @@ with gr.Blocks() as demo:
116
  )
117
  with gr.Tabs():
118
  with gr.TabItem("🩺 Validate exercise"):
119
- # Insert custom CSS to enlarge the tab content
120
- gr.HTML(
121
- """
122
- <style>
123
- .tab-content {
124
- font-size: 1.2em; /* Increase text size */
125
- padding: 20px; /* Add more padding inside the tab */
126
- }
127
- </style>
128
- """
129
- )
130
-
131
  # Insert an HTML info icon with a tooltip at the top of the tab content.
132
  gr.HTML(
133
  """
134
  <div style="margin-bottom: 10px;">
135
- <span style="font-size: 1.5em; cursor: help;" title="Diagnoses potential issues for the given exercise(s).">
136
  ℹ️ <i>← mouseover for more info</i>
137
  </span>
138
  </div>
139
  """
140
  )
141
- diagnoser_input = gr.Textbox(label="Enter exercise(s) in any format", placeholder="Exercise body: <mc:exercise xmlns:mc=...")
142
  diagnoser_button = gr.Button("Submit")
143
- diagnoser_output = gr.Textbox(label="Diagnosis", interactive=False)
 
144
  with gr.TabItem("🤔 Generate distractors"):
145
  # Insert an HTML info icon with a tooltip at the top of the tab content.
146
  gr.HTML(
@@ -152,9 +153,9 @@ with gr.Blocks() as demo:
152
  </div>
153
  """
154
  )
155
- distractors_input = gr.Textbox(label="Enter exercise(s) in any format", placeholder="Paste your exercise here...")
156
  distractors_button = gr.Button("Submit")
157
- distractors_output = gr.Textbox(label="Response", interactive=False)
158
  with gr.TabItem("🚧 Generate learning objectives"):
159
  # Insert an HTML info icon with a tooltip at the top of the tab content.
160
  gr.HTML(
@@ -166,9 +167,9 @@ with gr.Blocks() as demo:
166
  </div>
167
  """
168
  )
169
- learning_objectives_input = gr.Textbox(label="Enter exercise(s) in any format", placeholder="Paste your study text here...")
170
  learning_objectives_button = gr.Button("Submit")
171
- learning_objectives_output = gr.Textbox(label="Response", interactive=False)
172
 
173
  # -------------------------------
174
  # Set Up Interactions
@@ -183,12 +184,12 @@ with gr.Blocks() as demo:
183
  diagnoser_button.click(
184
  fn=run_diagnoser,
185
  inputs=[diagnoser_input, model_choice, exercise_format, sampling_count],
186
- outputs=[diagnoser_output]
187
  )
188
  distractors_button.click(
189
  fn=run_distractors,
190
- inputs=[distractors_input, model_choice],
191
- outputs=[distractors_output]
192
  )
193
 
194
  # Launch the app.
 
54
  return f"Error: {e}"
55
 
56
  # Async wrappers for each chain.
57
+ async def run_diagnoser(user_query: str, chosen_model: str, exercise_format: str, sampling_count: str) -> str:
58
+ num_samples = int(sampling_count)
59
  # Fetch the DiagnoserChain configuration.
60
  config = chain_configs["diagnoser"]
61
 
 
68
  llm_standardize=config["llm_standardize"], # Fixed: gpt4o-mini
69
  llm_diagnose=llms.get(chosen_model, config["llm_diagnose"]) # Override or fallback to default
70
  )
71
+ responses = []
72
+ for i in range(num_samples):
73
+ response = await chain_instance.run(user_query, exercise_format)
74
+ responses.append(response)
75
+
76
+ # Create a list of individual output components (e.g. Textboxes) for each sample.
77
+ output_components = [
78
+ gr.Textbox(value=f"Response {i + 1}:\n{resp}", interactive=False)
79
+ for i, resp in enumerate(responses)
80
+ ]
81
+ # Return an update for the output column with these new children.
82
+ return gr.Column.update(children=output_components)
83
 
84
 
85
  async def run_distractors(user_query: str, model_choice: str) -> str:
 
128
  )
129
  with gr.Tabs():
130
  with gr.TabItem("🩺 Validate exercise"):
 
 
 
 
 
 
 
 
 
 
 
 
131
  # Insert an HTML info icon with a tooltip at the top of the tab content.
132
  gr.HTML(
133
  """
134
  <div style="margin-bottom: 10px;">
135
+ <span style="font-size: 1.5em; cursor: help;" title="Validate exercise: Diagnoses potential issues for the given exercise(s).">
136
  ℹ️ <i>← mouseover for more info</i>
137
  </span>
138
  </div>
139
  """
140
  )
141
+ diagnoser_input = gr.Textbox(label="Enter exercise(s) in any format", placeholder="Exercise body: <mc:exercise xmlns:mc= ...")
142
  diagnoser_button = gr.Button("Submit")
143
+ # Replace the single output textbox with a Column for multiple outputs:
144
+ diagnoser_responses = gr.Column(label="Response(s)")
145
  with gr.TabItem("🤔 Generate distractors"):
146
  # Insert an HTML info icon with a tooltip at the top of the tab content.
147
  gr.HTML(
 
153
  </div>
154
  """
155
  )
156
+ distractors_input = gr.Textbox(label="Enter exercise(s) in any format", placeholder="Stelling: Dit is een ..... voorbeeld van een stelling. A. Mooi B. Lelijk ...")
157
  distractors_button = gr.Button("Submit")
158
+ distractors_responses = gr.Column(label="Response(s)")
159
  with gr.TabItem("🚧 Generate learning objectives"):
160
  # Insert an HTML info icon with a tooltip at the top of the tab content.
161
  gr.HTML(
 
167
  </div>
168
  """
169
  )
170
+ learning_objectives_input = gr.Textbox(label="Enter exercise(s) in any format", placeholder="<h3>Infusie en infuussystemen</h3> <h4>Inleiding</h4> ...")
171
  learning_objectives_button = gr.Button("Submit")
172
+ learning_objectives_responses = gr.Column(label="Response(s)")
173
 
174
  # -------------------------------
175
  # Set Up Interactions
 
184
  diagnoser_button.click(
185
  fn=run_diagnoser,
186
  inputs=[diagnoser_input, model_choice, exercise_format, sampling_count],
187
+ outputs=[diagnoser_responses]
188
  )
189
  distractors_button.click(
190
  fn=run_distractors,
191
+ inputs=[distractors_input, model_choice, exercise_format, sampling_count],
192
+ outputs=[distractors_responses]
193
  )
194
 
195
  # Launch the app.
chains/diagnoser_chain.py CHANGED
@@ -21,7 +21,7 @@ class DiagnoserChain(BaseModel):
21
  else:
22
  mapping = {
23
  "Markdown": "Please format the exercise in Markdown.",
24
- "XML": "Please format the exercise in XML.",
25
  "Plaintext": "Please format the exercise in plain text."
26
  }
27
  formatting_instructions = mapping.get(exercise_format, "Please format the exercise in Markdown.")
 
21
  else:
22
  mapping = {
23
  "Markdown": "Please format the exercise in Markdown.",
24
+ "XML": "Please format the exercise in XML, using '",
25
  "Plaintext": "Please format the exercise in plain text."
26
  }
27
  formatting_instructions = mapping.get(exercise_format, "Please format the exercise in Markdown.")
config/chain_configs.py CHANGED
@@ -10,12 +10,12 @@ chain_configs = {
10
  "class": DiagnoserChain,
11
  "template_standardize": standardize_template,
12
  "template_diagnose": diagnose_template,
13
- "llm_standardize": llms["gpt4o-mini"], # Always fixed
14
- "llm_diagnose": llms["gpt4o"], # Default; can be replaced in UI
15
  },
16
  "distractors": {
17
  "class": DistractorsChain,
18
  "template": distractors_template,
19
- "llm": llms["gpt4o"],
20
  },
21
  }
 
10
  "class": DiagnoserChain,
11
  "template_standardize": standardize_template,
12
  "template_diagnose": diagnose_template,
13
+ "llm_standardize": llms["gpt-4o-mini"], # Always fixed
14
+ "llm_diagnose": llms["gpt-4o"], # Default; can be replaced in UI
15
  },
16
  "distractors": {
17
  "class": DistractorsChain,
18
  "template": distractors_template,
19
+ "llm": llms["gpt-4o"],
20
  },
21
  }
config/llm_config.py CHANGED
@@ -29,11 +29,11 @@ def create_deepseek_llm(model_name: str, temperature: float):
29
  return ChatAnthropic(api_key=ANTHROPIC_API_KEY, model_name=model_name, temperature=temperature)
30
 
31
  llms = {
32
- "gpt4o": create_openai_llm("gpt-4o", LOW),
33
- "gpt4o-mini": create_openai_llm("gpt-4o-mini", LOW),
34
- "gpt4o_high_temp": create_openai_llm("gpt-4o", HIGH),
35
- "gpt4o-mini_high_temp": create_openai_llm("gpt-4o-mini", HIGH),
36
  "o1": create_openai_reasoning_llm("o1"),
37
- "Claude3.5": create_anthropic_llm("claude-3-5-sonnet-latest", LOW),
38
- "DeepseekR1🚧": create_anthropic_llm("deepseek-reasoner", LOW),
39
  }
 
29
  return ChatAnthropic(api_key=ANTHROPIC_API_KEY, model_name=model_name, temperature=temperature)
30
 
31
  llms = {
32
+ "gpt-4o": create_openai_llm("gpt-4o", LOW),
33
+ "gpt-4o-mini": create_openai_llm("gpt-4o-mini", LOW),
34
+ "gpt-4o_high_temp": create_openai_llm("gpt-4o", HIGH),
35
+ "gpt-4o-mini_high_temp": create_openai_llm("gpt-4o-mini", HIGH),
36
  "o1": create_openai_reasoning_llm("o1"),
37
+ "Claude 3.5": create_anthropic_llm("claude-3-5-sonnet-latest", LOW),
38
+ "Deepseek R1🚧": create_anthropic_llm("deepseek-reasoner", LOW),
39
  }