github-actions[bot] committed on
Commit
f09ce8f
·
1 Parent(s): 2e03e76

Sync from GitHub 1379732458575bf35217d4930a784dcfb30c8bf4

Browse files
app.py CHANGED
@@ -1005,8 +1005,8 @@ async def generate_report_for_notebook(
1005
  topic_focus=payload.topic_focus,
1006
  )
1007
  except Exception as exc:
1008
- crud.update_artifact(db, artifact.id, status="failed", error_message=str(exc))
1009
- raise HTTPException(status_code=500, detail=f"Report generation failed: {exc}") from exc
1010
 
1011
  if "error" in result:
1012
  artifact = crud.update_artifact(db, artifact.id, status="failed", error_message=result["error"])
 
1005
  topic_focus=payload.topic_focus,
1006
  )
1007
  except Exception as exc:
1008
+ artifact = crud.update_artifact(db, artifact.id, status="failed", error_message=str(exc))
1009
+ return _artifact_response(artifact)
1010
 
1011
  if "error" in result:
1012
  artifact = crud.update_artifact(db, artifact.id, status="failed", error_message=result["error"])
frontend/app.py CHANGED
@@ -450,6 +450,37 @@ def choose_workspace_section(notebook_id: int) -> str:
450
  return str(selected)
451
 
452
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
453
  inject_theme()
454
 
455
  if "page" not in st.session_state:
@@ -1113,78 +1144,74 @@ elif page == "Notebooks":
1113
  render_metric_card("Failed", failed_count)
1114
 
1115
  st.dataframe(artifacts, use_container_width=True, hide_index=True)
1116
- artifact_options = {
1117
- build_artifact_option_label(a): a
1118
- for a in artifacts
1119
- if isinstance(a, dict) and "id" in a
1120
- }
1121
- selected_artifact_label = st.selectbox(
1122
- "Select artifact",
1123
- options=list(artifact_options.keys()),
1124
- key="selected_artifact_label",
1125
  )
1126
- selected_artifact = artifact_options[selected_artifact_label]
1127
- artifact_id = int(selected_artifact["id"])
1128
- artifact_type = str(selected_artifact.get("type", ""))
1129
- artifact_status = str(selected_artifact.get("status", ""))
1130
- artifact_content = selected_artifact.get("content")
1131
- artifact_error = selected_artifact.get("error_message")
1132
-
1133
- status_col, type_col = st.columns([1, 4])
1134
- with status_col:
1135
- render_status_pill(artifact_status)
1136
- with type_col:
1137
- st.caption(f"Artifact type: {artifact_type}")
1138
-
1139
- if artifact_error:
1140
- st.error(str(artifact_error))
1141
-
1142
- if artifact_type == "report" and artifact_content:
1143
- st.markdown("### Report Preview")
1144
- st.markdown(str(artifact_content))
1145
- st.download_button(
1146
- "Download report (.md)",
1147
- data=str(artifact_content).encode("utf-8"),
1148
- file_name=f"report_{artifact_id}.md",
1149
- mime="text/markdown",
1150
- )
1151
- elif artifact_type == "quiz" and artifact_content:
1152
- st.markdown("### Quiz Preview")
1153
- st.markdown(str(artifact_content))
1154
- st.download_button(
1155
- "Download quiz (.md)",
1156
- data=str(artifact_content).encode("utf-8"),
1157
- file_name=f"quiz_{artifact_id}.md",
1158
- mime="text/markdown",
1159
- )
1160
- elif artifact_type == "podcast":
1161
- if artifact_content:
1162
- st.markdown("### Transcript")
1163
  st.markdown(str(artifact_content))
1164
  st.download_button(
1165
- "Download transcript (.md)",
1166
  data=str(artifact_content).encode("utf-8"),
1167
- file_name=f"podcast_transcript_{artifact_id}.md",
1168
  mime="text/markdown",
1169
  )
1170
- if artifact_status == "ready":
1171
- ok_audio, audio_result, _ = api_get_bytes(
1172
- f"/notebooks/{selected_notebook_id}/artifacts/{artifact_id}/audio"
 
 
 
 
 
1173
  )
1174
- if ok_audio and isinstance(audio_result, bytes):
1175
- st.audio(audio_result, format="audio/mp3")
 
 
1176
  st.download_button(
1177
- "Download podcast (.mp3)",
1178
- data=audio_result,
1179
- file_name=f"podcast_{artifact_id}.mp3",
1180
- mime="audio/mpeg",
 
 
 
 
1181
  )
 
 
 
 
 
 
 
 
 
 
1182
  else:
1183
- st.error(f"Unable to load audio: {audio_result}")
1184
  else:
1185
- st.info(f"Podcast status: {artifact_status}")
1186
- else:
1187
- st.info("Select an artifact to preview.")
1188
 
1189
  if auto_refresh and in_flight > 0:
1190
  st.caption(
 
450
  return str(selected)
451
 
452
 
453
def choose_artifact_for_notebook(notebook_id: int, artifacts: list[dict[str, Any]]) -> dict[str, Any] | None:
    """Render an artifact selector for a notebook and return the chosen artifact dict.

    Returns None when no artifact in *artifacts* is a dict carrying an "id".
    The last confirmed choice is remembered per notebook in session state.
    """
    usable = [entry for entry in artifacts if isinstance(entry, dict) and "id" in entry]
    if not usable:
        return None

    # Newest first so recently generated items are at the top of the list.
    ordered = sorted(usable, key=lambda entry: int(entry.get("id", 0)), reverse=True)
    by_id = {int(entry["id"]): entry for entry in ordered}
    id_choices = list(by_id.keys())

    state_key = f"selected_artifact_id_{notebook_id}"
    remembered = st.session_state.get(state_key)
    if not isinstance(remembered, int) or remembered not in by_id:
        remembered = id_choices[0]

    chosen = st.selectbox(
        "Select artifact",
        options=id_choices,
        index=id_choices.index(remembered),
        key=f"artifact_selector_{notebook_id}",
        format_func=lambda aid: build_artifact_option_label(by_id[int(aid)]),
    )
    if not isinstance(chosen, int):
        # Widget returned something unexpected; fall back to the remembered pick.
        return by_id[remembered]

    st.session_state[state_key] = chosen
    return by_id[chosen]
482
+
483
+
484
  inject_theme()
485
 
486
  if "page" not in st.session_state:
 
1144
  render_metric_card("Failed", failed_count)
1145
 
1146
  st.dataframe(artifacts, use_container_width=True, hide_index=True)
1147
+ selected_artifact = choose_artifact_for_notebook(
1148
+ int(selected_notebook_id),
1149
+ artifacts,
 
 
 
 
 
 
1150
  )
1151
+ if selected_artifact is None:
1152
+ st.info("Select an artifact to preview.")
1153
+ else:
1154
+ artifact_id = int(selected_artifact["id"])
1155
+ artifact_type = str(selected_artifact.get("type", ""))
1156
+ artifact_status = str(selected_artifact.get("status", ""))
1157
+ artifact_content = selected_artifact.get("content")
1158
+ artifact_error = selected_artifact.get("error_message")
1159
+
1160
+ status_col, type_col = st.columns([1, 4])
1161
+ with status_col:
1162
+ render_status_pill(artifact_status)
1163
+ with type_col:
1164
+ st.caption(f"Artifact type: {artifact_type}")
1165
+
1166
+ if artifact_error:
1167
+ st.error(str(artifact_error))
1168
+
1169
+ if artifact_type == "report" and artifact_content:
1170
+ st.markdown("### Report Preview")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1171
  st.markdown(str(artifact_content))
1172
  st.download_button(
1173
+ "Download report (.md)",
1174
  data=str(artifact_content).encode("utf-8"),
1175
+ file_name=f"report_{artifact_id}.md",
1176
  mime="text/markdown",
1177
  )
1178
+ elif artifact_type == "quiz" and artifact_content:
1179
+ st.markdown("### Quiz Preview")
1180
+ st.markdown(str(artifact_content))
1181
+ st.download_button(
1182
+ "Download quiz (.md)",
1183
+ data=str(artifact_content).encode("utf-8"),
1184
+ file_name=f"quiz_{artifact_id}.md",
1185
+ mime="text/markdown",
1186
  )
1187
+ elif artifact_type == "podcast":
1188
+ if artifact_content:
1189
+ st.markdown("### Transcript")
1190
+ st.markdown(str(artifact_content))
1191
  st.download_button(
1192
+ "Download transcript (.md)",
1193
+ data=str(artifact_content).encode("utf-8"),
1194
+ file_name=f"podcast_transcript_{artifact_id}.md",
1195
+ mime="text/markdown",
1196
+ )
1197
+ if artifact_status == "ready":
1198
+ ok_audio, audio_result, _ = api_get_bytes(
1199
+ f"/notebooks/{selected_notebook_id}/artifacts/{artifact_id}/audio"
1200
  )
1201
+ if ok_audio and isinstance(audio_result, bytes):
1202
+ st.audio(audio_result, format="audio/mp3")
1203
+ st.download_button(
1204
+ "Download podcast (.mp3)",
1205
+ data=audio_result,
1206
+ file_name=f"podcast_{artifact_id}.mp3",
1207
+ mime="audio/mpeg",
1208
+ )
1209
+ else:
1210
+ st.error(f"Unable to load audio: {audio_result}")
1211
  else:
1212
+ st.info(f"Podcast status: {artifact_status}")
1213
  else:
1214
+ st.info("Select an artifact to preview.")
 
 
1215
 
1216
  if auto_refresh and in_flight > 0:
1217
  st.caption(
src/artifacts/quiz_generator.py CHANGED
@@ -312,13 +312,9 @@ class QuizGenerator:
312
  if not prompt:
313
  continue
314
 
315
- options_raw = item.get("options")
316
- options: List[str] = []
317
- if isinstance(options_raw, list):
318
- for opt in options_raw:
319
- text = str(opt).strip()
320
- if text:
321
- options.append(text)
322
 
323
  answer = self._normalize_answer_letter(str(item.get("correct_answer", "")).strip())
324
  explanation = str(item.get("explanation", "")).strip()
@@ -339,8 +335,48 @@ class QuizGenerator:
339
  break
340
  return normalized
341
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
342
  def _normalize_answer_letter(self, value: str) -> str:
343
- match = re.search(r"[A-D]", value.upper())
344
  return match.group(0) if match else ""
345
 
346
  def _build_quiz_prompt(self, context: str, num_questions: int, difficulty: str) -> str:
@@ -412,7 +448,9 @@ Requirements:
412
  lines.append(f"### {idx}. {prompt or 'Question'}")
413
  options = question.get("options", [])
414
  for option in options if isinstance(options, list) else []:
415
- lines.append(f"- {str(option)}")
 
 
416
  lines.append("")
417
 
418
  lines.append("## Answer Key")
 
312
  if not prompt:
313
  continue
314
 
315
+ options = self._normalize_options(item.get("options"))
316
+ if not options:
317
+ continue
 
 
 
 
318
 
319
  answer = self._normalize_answer_letter(str(item.get("correct_answer", "")).strip())
320
  explanation = str(item.get("explanation", "")).strip()
 
335
  break
336
  return normalized
337
 
338
+ def _normalize_options(self, options_raw: Any) -> List[str]:
339
+ """
340
+ Normalize options into 2-6 labeled choices: A) ... B) ... etc.
341
+ Accepts list, dict, or multiline string.
342
+ """
343
+ parsed: List[str] = []
344
+
345
+ if isinstance(options_raw, list):
346
+ parsed = [str(opt).strip() for opt in options_raw if str(opt).strip()]
347
+ elif isinstance(options_raw, dict):
348
+ for key in sorted(options_raw.keys()):
349
+ value = str(options_raw.get(key, "")).strip()
350
+ if not value:
351
+ continue
352
+ parsed.append(f"{str(key).strip().upper()}) {value}")
353
+ elif isinstance(options_raw, str):
354
+ lines = [line.strip() for line in options_raw.replace("\r", "\n").split("\n")]
355
+ parsed = [line for line in lines if line]
356
+
357
+ if not parsed:
358
+ return []
359
+
360
+ cleaned: List[str] = []
361
+ letters = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
362
+ for idx, text in enumerate(parsed):
363
+ body = self._option_text_only(text)
364
+ if not body:
365
+ continue
366
+ label = letters[idx] if idx < len(letters) else str(idx + 1)
367
+ cleaned.append(f"{label}) {body}")
368
+
369
+ # Keep a reasonable range and ensure common quiz shape is preserved.
370
+ return cleaned[:6]
371
+
372
+ def _option_text_only(self, value: str) -> str:
373
+ text = str(value or "").strip()
374
+ # Strip prefixes like "A)", "A.", "(A)", "1)", "1."
375
+ text = re.sub(r"^\(?[A-Z0-9]\)?[\.\):\-]\s*", "", text, flags=re.IGNORECASE)
376
+ return text.strip()
377
+
378
  def _normalize_answer_letter(self, value: str) -> str:
379
+ match = re.search(r"[A-Z]", value.upper())
380
  return match.group(0) if match else ""
381
 
382
  def _build_quiz_prompt(self, context: str, num_questions: int, difficulty: str) -> str:
 
448
  lines.append(f"### {idx}. {prompt or 'Question'}")
449
  options = question.get("options", [])
450
  for option in options if isinstance(options, list) else []:
451
+ option_text = str(option).strip()
452
+ if option_text:
453
+ lines.append(f"- {option_text}")
454
  lines.append("")
455
 
456
  lines.append("## Answer Key")
src/artifacts/report_generator.py CHANGED
@@ -10,17 +10,73 @@ from typing import Optional
10
 
11
  from dotenv import load_dotenv
12
  from openai import OpenAI
 
13
 
14
  from src.ingestion.vectorstore import ChromaAdapter
15
 
16
  load_dotenv()
17
 
 
 
 
 
 
 
 
 
 
 
 
18
 
19
  class ReportGenerator:
20
- def __init__(self, api_key: Optional[str] = None, model: Optional[str] = None):
21
- self.api_key = api_key or os.getenv("OPENAI_API_KEY")
22
- self.model = model or os.getenv("LLM_MODEL", "gpt-4o-mini")
23
- self.client = OpenAI(api_key=self.api_key)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
24
 
25
  def generate_report(
26
  self,
@@ -40,6 +96,8 @@ class ReportGenerator:
40
  return {
41
  "content": report_markdown,
42
  "detail_level": detail_level,
 
 
43
  }
44
 
45
  def _get_report_context(self, user_id: str, notebook_id: str, topic_focus: str | None) -> str:
@@ -101,24 +159,50 @@ Requirements:
101
  """
102
 
103
  try:
104
- response = self.client.chat.completions.create(
 
 
 
 
 
 
 
105
  model=self.model,
106
  messages=[
107
- {
108
- "role": "system",
109
- "content": (
110
- "You write high quality reports grounded only in provided source context. "
111
- "Do not invent facts."
112
- ),
113
- },
114
  {"role": "user", "content": prompt},
115
  ],
116
  temperature=0.4,
117
  )
118
- content = response.choices[0].message.content or ""
119
- return str(content).strip()
120
- except Exception:
121
- return ""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
122
 
123
  def save_report(self, markdown_text: str, user_id: str, notebook_id: str) -> str:
124
  report_dir = Path(f"data/users/{user_id}/notebooks/{notebook_id}/artifacts/reports")
 
10
 
11
  from dotenv import load_dotenv
12
  from openai import OpenAI
13
+ import requests
14
 
15
  from src.ingestion.vectorstore import ChromaAdapter
16
 
17
  load_dotenv()
18
 
19
+ SUPPORTED_REPORT_LLM_PROVIDERS = {"openai", "groq", "ollama"}
20
+ DEFAULT_REPORT_MODELS = {
21
+ "openai": "gpt-4o-mini",
22
+ "groq": "llama-3.1-8b-instant",
23
+ "ollama": "qwen2.5:3b",
24
+ }
25
+ REPORT_SYSTEM_PROMPT = (
26
+ "You write high quality reports grounded only in provided source context. "
27
+ "Do not invent facts."
28
+ )
29
+
30
 
31
  class ReportGenerator:
32
def __init__(
    self,
    api_key: Optional[str] = None,
    model: Optional[str] = None,
    llm_provider: Optional[str] = None,
):
    """
    Configure the report LLM backend.

    Provider resolution order: explicit *llm_provider* argument, then the
    REPORT_LLM_PROVIDER / QUIZ_LLM_PROVIDER / TRANSCRIPT_LLM_PROVIDER
    environment variables, finally "openai".

    Raises:
        ValueError: when the resolved provider is unsupported, or when
            GROQ_API_KEY is missing while the groq provider is selected.
    """
    provider_default = (
        llm_provider
        or os.getenv("REPORT_LLM_PROVIDER", "").strip()
        or os.getenv("QUIZ_LLM_PROVIDER", "").strip()
        or os.getenv("TRANSCRIPT_LLM_PROVIDER", "").strip()
        or "openai"
    )
    self.llm_provider = provider_default.strip().lower()
    if self.llm_provider not in SUPPORTED_REPORT_LLM_PROVIDERS:
        raise ValueError(
            f"Unsupported REPORT_LLM_PROVIDER='{self.llm_provider}'. "
            f"Choose from: {sorted(SUPPORTED_REPORT_LLM_PROVIDERS)}"
        )

    self.model = self._resolve_model_name(model)
    self._openai_client: OpenAI | None = None
    self._groq_client = None
    self._ollama_base_url = os.getenv("OLLAMA_BASE_URL", "http://127.0.0.1:11434").rstrip("/")
    # Always define api_key so attribute access is safe for every provider
    # (previously the groq branch never set it, leaving the attribute missing).
    self.api_key = None

    if self.llm_provider == "openai":
        self.api_key = api_key or os.getenv("OPENAI_API_KEY")
        self._openai_client = OpenAI(api_key=self.api_key)
    elif self.llm_provider == "groq":
        # Imported lazily so the groq dependency is only required when selected.
        from groq import Groq

        groq_api_key = os.getenv("GROQ_API_KEY")
        if not groq_api_key:
            raise ValueError("GROQ_API_KEY is required when REPORT_LLM_PROVIDER=groq")
        self._groq_client = Groq(api_key=groq_api_key)
70
+ def _resolve_model_name(self, explicit_model: Optional[str]) -> str:
71
+ if explicit_model and explicit_model.strip():
72
+ return explicit_model.strip()
73
+ configured = os.getenv("REPORT_LLM_MODEL", "").strip()
74
+ if configured:
75
+ return configured
76
+ legacy = os.getenv("LLM_MODEL", "").strip()
77
+ if legacy:
78
+ return legacy
79
+ return DEFAULT_REPORT_MODELS.get(self.llm_provider, "gpt-4o-mini")
80
 
81
  def generate_report(
82
  self,
 
96
  return {
97
  "content": report_markdown,
98
  "detail_level": detail_level,
99
+ "llm_provider": self.llm_provider,
100
+ "llm_model": self.model,
101
  }
102
 
103
  def _get_report_context(self, user_id: str, notebook_id: str, topic_focus: str | None) -> str:
 
159
  """
160
 
161
  try:
162
+ return self._generate_report_content(prompt)
163
+ except Exception:
164
+ return ""
165
+
166
def _generate_report_content(self, prompt: str) -> str:
    """Dispatch report generation to the configured provider and return the text.

    OpenAI and groq go through their chat-completion clients; any other
    configured provider falls through to the local Ollama HTTP API.
    """
    chat_messages = [
        {"role": "system", "content": REPORT_SYSTEM_PROMPT},
        {"role": "user", "content": prompt},
    ]

    if self.llm_provider == "openai":
        assert self._openai_client is not None
        completion = self._openai_client.chat.completions.create(
            model=self.model,
            messages=chat_messages,
            temperature=0.4,
        )
        return str(completion.choices[0].message.content or "").strip()

    if self.llm_provider == "groq":
        assert self._groq_client is not None
        completion = self._groq_client.chat.completions.create(
            model=self.model,
            messages=chat_messages,
            temperature=0.4,
        )
        return str(completion.choices[0].message.content or "").strip()

    # Fallback: Ollama's /api/generate endpoint (non-streaming).
    ollama_request = {
        "model": self.model,
        "system": REPORT_SYSTEM_PROMPT,
        "prompt": prompt,
        "stream": False,
        "options": {"temperature": 0.4},
    }
    reply = requests.post(
        f"{self._ollama_base_url}/api/generate",
        json=ollama_request,
        timeout=120,
    )
    reply.raise_for_status()
    return str(reply.json().get("response", "")).strip()
206
 
207
  def save_report(self, markdown_text: str, user_id: str, notebook_id: str) -> str:
208
  report_dir = Path(f"data/users/{user_id}/notebooks/{notebook_id}/artifacts/reports")
tests/test_artifact_api.py CHANGED
@@ -208,6 +208,20 @@ class TestReportEndpoint:
208
  )
209
  assert resp.status_code == 400
210
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
211
  def test_generate_report_unknown_notebook_404(self, client):
212
  resp = client.post("/notebooks/9999/artifacts/report", json={"detail_level": "medium"})
213
  assert resp.status_code == 404
 
208
  )
209
  assert resp.status_code == 400
210
 
211
def test_generate_report_provider_config_error_returns_failed_artifact(self, client, notebook):
    """Provider init/runtime errors should not bubble as HTTP 500."""
    # Simulate the generator constructor blowing up (e.g. missing credentials).
    with patch("app.ReportGenerator", side_effect=ValueError("missing provider credentials")):
        response = client.post(
            f"/notebooks/{notebook.id}/artifacts/report",
            json={"detail_level": "medium"},
        )

    assert response.status_code == 200
    payload = response.json()
    assert payload["type"] == "report"
    assert payload["status"] == "failed"
    assert "missing provider credentials" in str(payload.get("error_message"))
224
+
225
  def test_generate_report_unknown_notebook_404(self, client):
226
  resp = client.post("/notebooks/9999/artifacts/report", json={"detail_level": "medium"})
227
  assert resp.status_code == 404
tests/test_artifacts.py CHANGED
@@ -182,6 +182,43 @@ class TestQuizGenerator:
182
  assert "## Answer Key" in saved
183
  assert "1. **B**" in saved
184
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
185
 
186
  # ── PodcastGenerator tests ────────────────────────────────────────────────────
187
 
 
182
  assert "## Answer Key" in saved
183
  assert "1. **B**" in saved
184
 
185
def test_generate_quiz_normalizes_multiline_options(self, tmp_path):
    """Multiline option strings are normalized into labeled bullet options."""
    _chroma_dir(tmp_path)

    store = MagicMock()
    store.query.return_value = MOCK_CHROMA_RESULTS
    llm_payload = {
        "questions": [
            {
                "id": 1,
                "question": "What is the goal?",
                "options": "A) One\nB) Two\nC) Three\nD) Four",
                "correct_answer": "B) Two",
                "explanation": "Two is correct.",
                "topic": "Goals",
            }
        ]
    }
    chat_response = _make_openai_chat_response(llm_payload)

    env = {"STORAGE_BASE_DIR": str(tmp_path / "data"), "OPENAI_API_KEY": "test-key"}
    with patch.dict(os.environ, env), \
            patch("src.artifacts.quiz_generator.ChromaAdapter", return_value=store), \
            patch("src.artifacts.quiz_generator.OpenAI") as openai_cls:
        fake_client = MagicMock()
        fake_client.chat.completions.create.return_value = chat_response
        openai_cls.return_value = fake_client

        gen = QuizGenerator()
        result = gen.generate_quiz(user_id="1", notebook_id="1", num_questions=1)
        markdown = gen.format_quiz_markdown(result, title="Quiz")

    assert "error" not in result
    assert result["questions"][0]["options"] == ["A) One", "B) Two", "C) Three", "D) Four"]
    assert "- A) One" in markdown
    assert "- D) Four" in markdown
222
 
223
  # ── PodcastGenerator tests ────────────────────────────────────────────────────
224
 
tests/test_report_llm_providers.py ADDED
@@ -0,0 +1,78 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Provider-specific tests for report generation.
3
+ """
4
+ from __future__ import annotations
5
+
6
+ import os
7
+ import pathlib
8
+ import sys
9
+ from unittest.mock import MagicMock, patch
10
+
11
+ ROOT = pathlib.Path(__file__).resolve().parents[1]
12
+ sys.path.insert(0, str(ROOT))
13
+
14
+ from src.artifacts.report_generator import ReportGenerator
15
+
16
+
17
+ def _prepare_store(mock_store_cls):
18
+ mock_store = MagicMock()
19
+ mock_store.query.return_value = [
20
+ ("chunk-1", 0.1, {"document": "Context block for report generation.", "metadata": {}})
21
+ ]
22
+ mock_store_cls.return_value = mock_store
23
+
24
+
25
def test_report_generator_ollama_provider_without_openai_key(tmp_path):
    """Ollama-backed report generation must work with an empty OPENAI_API_KEY."""
    env = {
        "STORAGE_BASE_DIR": str(tmp_path / "data"),
        "REPORT_LLM_PROVIDER": "ollama",
        "REPORT_LLM_MODEL": "qwen2.5:3b",
        "OLLAMA_BASE_URL": "http://127.0.0.1:11434",
        "OPENAI_API_KEY": "",
    }
    with patch.dict(os.environ, env, clear=False), \
            patch("src.artifacts.report_generator.Path.exists", return_value=True), \
            patch("src.artifacts.report_generator.ChromaAdapter") as store_cls:
        _prepare_store(store_cls)

        # Canned HTTP reply standing in for the local Ollama server.
        http_reply = MagicMock()
        http_reply.raise_for_status.return_value = None
        http_reply.json.return_value = {"response": "# Report\n\nGenerated from Ollama."}

        with patch("src.artifacts.report_generator.requests.post", return_value=http_reply):
            generator = ReportGenerator(llm_provider="ollama")
            result = generator.generate_report("1", "1")

    assert "error" not in result
    assert "content" in result
    assert "Generated from Ollama." in result["content"]
    assert result["llm_provider"] == "ollama"
50
+
51
+
52
def test_report_generator_groq_provider_without_openai_key(tmp_path):
    """Groq-backed report generation must work with an empty OPENAI_API_KEY."""
    env = {
        "STORAGE_BASE_DIR": str(tmp_path / "data"),
        "REPORT_LLM_PROVIDER": "groq",
        "REPORT_LLM_MODEL": "llama-3.1-8b-instant",
        "GROQ_API_KEY": "gsk-test",
        "OPENAI_API_KEY": "",
    }
    with patch.dict(os.environ, env, clear=False), \
            patch("src.artifacts.report_generator.Path.exists", return_value=True), \
            patch("src.artifacts.report_generator.ChromaAdapter") as store_cls:
        _prepare_store(store_cls)

        with patch("groq.Groq") as groq_cls:
            groq_client = MagicMock()
            groq_client.chat.completions.create.return_value = MagicMock(
                choices=[MagicMock(message=MagicMock(content="# Report\n\nGenerated from Groq."))]
            )
            groq_cls.return_value = groq_client

            generator = ReportGenerator(llm_provider="groq")
            result = generator.generate_report("1", "1")

    assert "error" not in result
    assert "content" in result
    assert "Generated from Groq." in result["content"]
    assert result["llm_provider"] == "groq"