Trying a candidate solution for the rewrite + fix step
Browse files
chains/exercises/run_fluster_with_diagnosis.py
CHANGED
|
@@ -236,55 +236,26 @@ async def diagnose_exercise(ex: Exercise) -> str:
|
|
| 236 |
|
| 237 |
from pydantic import ValidationError
|
| 238 |
|
| 239 |
-
async def fix_exercise(
|
| 240 |
-
|
| 241 |
-
|
| 242 |
-
|
| 243 |
-
|
| 244 |
-
|
| 245 |
-
|
| 246 |
-
|
| 247 |
-
|
| 248 |
-
|
| 249 |
-
|
| 250 |
-
|
| 251 |
-
|
| 252 |
-
|
| 253 |
-
|
| 254 |
-
|
| 255 |
-
|
| 256 |
-
|
| 257 |
-
|
| 258 |
-
|
| 259 |
-
)
|
| 260 |
-
messages = prompt_value.to_messages()
|
| 261 |
-
|
| 262 |
-
# 3) Invoke the LLM
|
| 263 |
-
fix_resp = await llm_fix.ainvoke(messages)
|
| 264 |
-
raw_content = getattr(fix_resp, "content", fix_resp)
|
| 265 |
-
|
| 266 |
-
# 4) We can parse the LLM result if we want a structured object
|
| 267 |
-
# For example, if we told the LLM to return JSON that matches the Exercise schema:
|
| 268 |
-
# ex_fixed_data = parse the JSON
|
| 269 |
-
# ex_fixed = Exercise.model_validate(ex_fixed_data)
|
| 270 |
-
#
|
| 271 |
-
# Or if the LLM just returned plain text, you can do a simpler approach:
|
| 272 |
-
# For now, as a placeholder, let's just say we re-build the prompt field:
|
| 273 |
-
|
| 274 |
-
# If you do structured output, do something like:
|
| 275 |
-
# try:
|
| 276 |
-
# ex_dict = json.loads(raw_content)
|
| 277 |
-
# ex_fixed = Exercise.model_validate(ex_dict)
|
| 278 |
-
# except (JSONDecodeError, ValidationError) as e:
|
| 279 |
-
# # fallback if needed
|
| 280 |
-
# ex_fixed = ex.copy(update={"prompt": ex.prompt + " (fallback fix)"})
|
| 281 |
-
|
| 282 |
-
# For the sake of example, let's do a naive approach:
|
| 283 |
-
ex_fixed = ex.copy(update={"prompt": raw_content})
|
| 284 |
-
|
| 285 |
-
return ex_fixed
|
| 286 |
-
|
| 287 |
-
|
| 288 |
|
| 289 |
|
| 290 |
|
|
|
|
| 236 |
|
| 237 |
from pydantic import ValidationError
|
| 238 |
|
| 239 |
+
async def fix_exercise(ex: Exercise, diag_str: str, cfg: dict) -> Exercise:
    """Repair a flawed exercise in two LLM calls.

    1. A "creative" call rewrites the exercise text, guided by the diagnosis.
    2. A "cast" call coerces the free-form rewrite back into the
       ``Exercise`` schema via structured output.

    Args:
        ex: The exercise to repair.
        diag_str: Diagnosis text describing what is wrong with ``ex``.
        cfg: Chain configuration; must provide ``template_fix_exercise``,
            ``llm_fix_exercise`` and ``llm_structurize``.

    Returns:
        The repaired ``Exercise``.  If casting the rewrite into the schema
        fails, falls back to the original exercise with only its ``prompt``
        replaced by the raw rewrite text, so one bad LLM response cannot
        crash the whole pipeline.
    """
    tmpl_fix = cfg["template_fix_exercise"]
    llm_fix = cfg["llm_fix_exercise"]
    llm_cast = cfg["llm_structurize"]  # already in chain_configs

    # 1) First call – creative rewrite of the exercise text.
    prompt = await tmpl_fix.aformat_prompt(
        exercise_text=exercise_to_string(ex),
        diagnosis=diag_str,
    )
    raw = (await llm_fix.ainvoke(prompt.to_messages())).content

    # 2) Second call – cast the free-form rewrite into the Exercise schema.
    try:
        ex_fixed = await llm_cast.with_structured_output(Exercise).ainvoke(
            [("user", raw)]  # minimal prompt: just the rewritten text
        )
    except ValidationError:
        # Expected failure mode: the cast LLM produced output that does not
        # match the Exercise schema.  Best-effort fallback.
        return ex.copy(update={"prompt": raw})
    except Exception:
        # NOTE(review): broad catch kept deliberately as a last-resort
        # fallback (network/provider errors, parser errors); consider
        # narrowing and logging once the failure modes are known.
        return ex.copy(update={"prompt": raw})
    return ex_fixed
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 259 |
|
| 260 |
|
| 261 |
|