KeenWoo committed on
Commit
870ae73
·
verified ·
1 Parent(s): f5ded32

Upload evaluate.py

Files changed (1)
  1. evaluate.py +438 -0
evaluate.py ADDED
@@ -0,0 +1,438 @@
# evaluate.py

import os
import json
import time
import re
import pandas as pd
from typing import List, Dict, Any
from pathlib import Path

# --- Imports from the main application ---
try:
    from alz_companion.agent import (
        make_rag_chain, route_query_type, detect_tags_from_query,
        answer_query, call_llm, build_or_load_vectorstore
    )
    from alz_companion.prompts import FAITHFULNESS_JUDGE_PROMPT
    from langchain_community.vectorstores import FAISS
    from langchain.schema import Document

except ImportError:
    # --- START: FALLBACK DEFINITIONS ---
    # Minimal stand-ins so this module can still be imported and run without the app.
    class FAISS:
        def __init__(self): self.docstore = type('obj', (object,), {'_dict': {}})()
        def add_documents(self, docs): pass
        def save_local(self, path): pass
        @classmethod
        def from_documents(cls, docs, embeddings=None): return cls()

    class Document:
        def __init__(self, page_content, metadata=None):
            self.page_content = page_content
            self.metadata = metadata or {}

    def make_rag_chain(*args, **kwargs): return lambda q, **k: {"answer": f"(Eval Fallback) You asked: {q}", "sources": []}
    def route_query_type(q, **kwargs): return "general_conversation"
    def detect_tags_from_query(*args, **kwargs): return {}
    def answer_query(chain, q, **kwargs): return chain(q, **kwargs)
    def call_llm(*args, **kwargs): return "{}"

    # Fallback so the temporary personal vectorstore can still be created.
    def build_or_load_vectorstore(docs, index_path, is_personal=False):
        return FAISS()

    FAITHFULNESS_JUDGE_PROMPT = ""
    print("WARNING: Could not import from alz_companion. Evaluation functions will use fallbacks.")
    # --- END: FALLBACK DEFINITIONS ---


# --- LLM-as-a-Judge Prompt for Answer Correctness ---
ANSWER_CORRECTNESS_JUDGE_PROMPT = """You are an expert evaluator. Your task is to assess the factual correctness of a generated answer against a ground truth answer.

- GROUND_TRUTH_ANSWER: This is the gold-standard, correct answer.
- GENERATED_ANSWER: This is the answer produced by the AI model.

Evaluate if the GENERATED_ANSWER is factually aligned with the GROUND_TRUTH_ANSWER. Ignore minor differences in phrasing, tone, or structure. The key is factual accuracy.

Respond with a single JSON object containing a float score from 0.0 to 1.0.
- 1.0: The generated answer is factually correct and aligns perfectly with the ground truth.
- 0.5: The generated answer is partially correct but misses key information or contains minor inaccuracies.
- 0.0: The generated answer is factually incorrect or contradicts the ground truth.

--- DATA TO EVALUATE ---
GROUND_TRUTH_ANSWER:
{ground_truth_answer}

GENERATED_ANSWER:
{generated_answer}
---

Return a single JSON object with your score:
{{
  "correctness_score": <float>
}}
"""

test_fixtures = []

def load_test_fixtures():
    """Loads fixtures into the test_fixtures list."""
    global test_fixtures
    test_fixtures = []
    env_path = os.environ.get("TEST_FIXTURES_PATH", "").strip()

    # Build an absolute path to the default fixture file from this script's
    # location, so loading does not depend on the current working directory.
    script_dir = Path(__file__).parent
    default_fixture_file = script_dir / "small_test_cases_v10.jsonl"

    candidates = [env_path] if env_path else [str(default_fixture_file)]
    # Alternate fixture file: "conversation_test_fixtures_v10.jsonl" (point TEST_FIXTURES_PATH at it to use it).

    path = next((p for p in candidates if p and os.path.exists(p)), None)
    if not path:
        print("Warning: No test fixtures file found for evaluation.")
        return

    # Use the corrected v10 file if available.
    if "small_test_cases_v10.jsonl" in path:
        print(f"Using corrected test fixtures: {path}")

    with open(path, "r", encoding="utf-8") as f:
        for line in f:
            try:
                test_fixtures.append(json.loads(line))
            except json.JSONDecodeError:
                print(f"Skipping malformed JSON line in {path}")
    print(f"Loaded {len(test_fixtures)} fixtures for evaluation from {path}")

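
# Illustration (not from the repository): the loader above expects one JSON object per
# line, and the evaluation loop below reads "test_id", "title", "turns", "test_role",
# and "ground_truth" from each record. A fixture line might look roughly like the
# following; the exact schema of small_test_cases_v10.jsonl is an assumption here.
# {"test_id": "baseline_01", "title": "Example", "test_role": "patient",
#  "turns": [{"role": "user", "text": "Where did I put my keys?"}],
#  "ground_truth": {"expected_route": "factual_question", "expected_tags": {},
#                   "expected_sources": [], "ground_truth_answer": "..."}}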

def evaluate_nlu_tags(expected: Dict[str, Any], actual: Dict[str, Any], tag_key: str, expected_key_override: str = None) -> Dict[str, float]:
    lookup_key = expected_key_override or tag_key
    expected_raw = expected.get(lookup_key, [])
    expected_set = set(expected_raw if isinstance(expected_raw, list) else [expected_raw]) if expected_raw and expected_raw != "None" else set()
    actual_raw = actual.get(tag_key, [])
    actual_set = set(actual_raw if isinstance(actual_raw, list) else [actual_raw]) if actual_raw and actual_raw != "None" else set()
    if not expected_set and not actual_set:
        return {"precision": 1.0, "recall": 1.0, "f1_score": 1.0}
    true_positives = len(expected_set.intersection(actual_set))
    precision = true_positives / len(actual_set) if actual_set else 0.0
    recall = true_positives / len(expected_set) if expected_set else 0.0
    f1_score = 2 * (precision * recall) / (precision + recall) if (precision + recall) > 0 else 0.0
    return {"precision": precision, "recall": recall, "f1_score": f1_score}
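
# Worked example (hypothetical tag values, for illustration only):
#   expected = {"detected_behaviors": ["agitation", "wandering"]}
#   actual   = {"detected_behaviors": ["agitation", "repetition"]}
#   evaluate_nlu_tags(expected, actual, "detected_behaviors")
#   -> one true positive out of two predicted and two expected tags,
#      so {"precision": 0.5, "recall": 0.5, "f1_score": 0.5}.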

def _parse_judge_json(raw_str: str) -> dict | None:
    try:
        start_brace = raw_str.find('{')
        end_brace = raw_str.rfind('}')
        if start_brace != -1 and end_brace > start_brace:
            json_str = raw_str[start_brace : end_brace + 1]
            return json.loads(json_str)
        return None
    except (json.JSONDecodeError, AttributeError):
        return None
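
# Example: judge models often wrap their JSON in prose, so only the span between the
# outermost braces is parsed (the sample reply below is illustrative, not a real output):
#   _parse_judge_json('Sure! Here is my verdict: {"correctness_score": 0.5}')
#   -> {"correctness_score": 0.5}
#   _parse_judge_json("no json here") -> None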

# --- Helpers for categorisation and error-class labelling ---
def _categorize_test(test_id: str) -> str:
    tid = (test_id or "").lower()
    if "synonym" in tid: return "synonym"
    if "multi_fact" in tid or "multi-hop" in tid or "multihop" in tid: return "multi_fact"
    if "omission" in tid: return "omission"
    if "hallucination" in tid: return "hallucination"
    if "time" in tid or "temporal" in tid: return "temporal"
    if "context" in tid: return "context_disambig"
    return "baseline"

def _classify_error(gt: str, gen: str) -> str:
    gt = (gt or "").strip().lower()
    gen = (gen or "").strip().lower()
    if not gen:
        return "empty"
    if not gt:
        return "hallucination" if gen else "empty"
    if gt in gen:
        return "paraphrase"
    gt_tokens = set(t for t in re.split(r'\W+', gt) if t)
    gen_tokens = set(t for t in re.split(r'\W+', gen) if t)
    overlap = len(gt_tokens & gen_tokens) / max(1, len(gt_tokens))
    if overlap >= 0.3:
        return "omission"
    return "contradiction"
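
# Example classifications (illustrative strings, not fixture data), using the 0.3
# token-overlap threshold above:
#   _classify_error("the keys are in the kitchen drawer",
#                   "I believe the keys are in the kitchen drawer.") -> "paraphrase"
#   _classify_error("the keys are in the kitchen drawer", "they are outside") -> "contradiction"
#   _classify_error("the keys are in the kitchen drawer", "") -> "empty"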

def run_comprehensive_evaluation(
    vs_general: FAISS,
    vs_personal: FAISS,
    nlu_vectorstore: FAISS,
    config: Dict[str, Any],
    storage_path: Path
):
    global test_fixtures
    if not test_fixtures:
        # The return signature is always three items: summary text, table rows, headers.
        return "No test fixtures loaded.", [], []

    vs_personal_test = None
    personal_context_docs = []
    personal_context_file = "sample_data/1 Complaints of a Dutiful Daughter.txt"

    if os.path.exists(personal_context_file):
        print(f"Found personal context file for evaluation: '{personal_context_file}'")
        with open(personal_context_file, "r", encoding="utf-8") as f:
            content = f.read()
        doc = Document(page_content=content, metadata={"source": os.path.basename(personal_context_file)})
        personal_context_docs.append(doc)
    else:
        print(f"WARNING: Personal context file not found at '{personal_context_file}'. Factual tests will likely fail.")

    vs_personal_test = build_or_load_vectorstore(
        personal_context_docs,
        index_path="tmp/eval_personal_index",
        is_personal=True
    )
    print(f"Successfully created temporary personal vectorstore with {len(personal_context_docs)} document(s) for this evaluation run.")

    def _norm(label: str) -> str:
        label = (label or "").strip().lower()
        return "factual_question" if "factual" in label else label

    print("Starting comprehensive evaluation...")
    results: List[Dict[str, Any]] = []
    total_fixtures = len(test_fixtures)
    print(f"\n🚀 STARTING EVALUATION on {total_fixtures} test cases...")

    for i, fx in enumerate(test_fixtures):
        test_id = fx.get("test_id", "N/A")
        print(f"--- Processing Test Case {i+1}/{total_fixtures}: ID = {test_id} ---")

        turns = fx.get("turns") or []
        api_chat_history = [{"role": t.get("role"), "content": t.get("text")} for t in turns]
        query = next((t["content"] for t in reversed(api_chat_history) if (t.get("role") or "user").lower() == "user"), "")
        if not query: continue

        print(f'Query: "{query}"')

        ground_truth = fx.get("ground_truth", {})
        expected_route = _norm(ground_truth.get("expected_route", "caregiving_scenario"))
        expected_tags = ground_truth.get("expected_tags", {})
        actual_route = _norm(route_query_type(query))
        route_correct = (actual_route == expected_route)

        actual_tags: Dict[str, Any] = {}
        if "caregiving_scenario" in actual_route:
            actual_tags = detect_tags_from_query(
                query, nlu_vectorstore=nlu_vectorstore,
                behavior_options=config["behavior_tags"], emotion_options=config["emotion_tags"],
                topic_options=config["topic_tags"], context_options=config["context_tags"],
            )

        behavior_metrics = evaluate_nlu_tags(expected_tags, actual_tags, "detected_behaviors")
        emotion_metrics = evaluate_nlu_tags(expected_tags, actual_tags, "detected_emotion")
        topic_metrics = evaluate_nlu_tags(expected_tags, actual_tags, "detected_topics")
        context_metrics = evaluate_nlu_tags(expected_tags, actual_tags, "detected_contexts")

        final_tags = {}
        if "caregiving_scenario" in actual_route:
            final_tags = {
                "scenario_tag": (actual_tags.get("detected_behaviors") or [None])[0],
                "emotion_tag": actual_tags.get("detected_emotion"),
                "topic_tag": (actual_tags.get("detected_topics") or [None])[0],
                "context_tags": actual_tags.get("detected_contexts", [])
            }

        current_test_role = fx.get("test_role", "patient")
        rag_chain = make_rag_chain(
            vs_general,
            vs_personal,
            role=current_test_role,
            for_evaluation=True
        )

        t0 = time.time()
        response = answer_query(rag_chain, query, query_type=actual_route, chat_history=api_chat_history, **final_tags)
        latency_ms = round((time.time() - t0) * 1000.0, 1)
        answer_text = response.get("answer", "ERROR")
        ground_truth_answer = ground_truth.get("ground_truth_answer")

        category = _categorize_test(test_id)
        error_class = _classify_error(ground_truth_answer, answer_text)

        expected_sources_set = set(map(str, ground_truth.get("expected_sources", [])))
        raw_sources = response.get("sources", [])
        actual_sources_set = set(map(str, raw_sources if isinstance(raw_sources, (list, tuple)) else [raw_sources]))

        print("\n" + "-"*20 + " SOURCE EVALUATION " + "-"*20)
        print(f" - Expected: {sorted(list(expected_sources_set))}")
        print(f" - Actual: {sorted(list(actual_sources_set))}")

        true_positives = expected_sources_set.intersection(actual_sources_set)
        false_positives = actual_sources_set - expected_sources_set
        false_negatives = expected_sources_set - actual_sources_set

        if not false_positives and not false_negatives:
            print(" - Result: ✅ Perfect Match!")
        else:
            if false_positives:
                print(f" - 🔻 False Positives (hurts precision): {sorted(list(false_positives))}")
            if false_negatives:
                print(f" - 🔻 False Negatives (hurts recall): {sorted(list(false_negatives))}")
        print("-"*59 + "\n")

        context_precision, context_recall = 0.0, 0.0
        if expected_sources_set or actual_sources_set:
            tp = len(expected_sources_set.intersection(actual_sources_set))
            if len(actual_sources_set) > 0: context_precision = tp / len(actual_sources_set)
            if len(expected_sources_set) > 0: context_recall = tp / len(expected_sources_set)
        elif not expected_sources_set and not actual_sources_set:
            context_precision, context_recall = 1.0, 1.0

        # Debug output for the answer-correctness comparison (left disabled):
        # print("\n" + "-"*20 + " ANSWER & CORRECTNESS EVALUATION " + "-"*20)
        # print(f" - Ground Truth Answer: {ground_truth_answer}")
        # print(f" - Generated Answer: {answer_text}")
        # print("-" * 59)

        answer_correctness_score = None
        if ground_truth_answer and "ERROR" not in answer_text:
            try:
                judge_msg = ANSWER_CORRECTNESS_JUDGE_PROMPT.format(ground_truth_answer=ground_truth_answer, generated_answer=answer_text)
                print(f" - Judge Prompt Sent:\n{judge_msg}")
                raw_correctness = call_llm([{"role": "user", "content": judge_msg}], temperature=0.0)
                print(f" - Judge Raw Response: {raw_correctness}")
                correctness_data = _parse_judge_json(raw_correctness)
                if correctness_data and "correctness_score" in correctness_data:
                    answer_correctness_score = float(correctness_data["correctness_score"])
                    print(f" - Final Score: {answer_correctness_score}")
            except Exception as e:
                print(f"ERROR during answer correctness judging: {e}")

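
        # The faithfulness judge below is expected to return claim counts such as
        # {"supported": 3, "contradicted": 1, "not_enough_info": 0}; with those
        # illustrative numbers, faithfulness would be 3/4 = 0.75 and the
        # hallucination rate 0.25. (Example values only; the actual fields come
        # from FAITHFULNESS_JUDGE_PROMPT in alz_companion.prompts.)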
        faithfulness = None
        hallucination_rate = None
        source_docs = response.get("source_documents", [])
        if source_docs and "ERROR" not in answer_text:
            context_blob = "\n---\n".join([doc.page_content for doc in source_docs])
            judge_msg = FAITHFULNESS_JUDGE_PROMPT.format(query=query, answer=answer_text, sources=context_blob)
            try:
                if context_blob.strip():
                    raw = call_llm([{"role": "user", "content": judge_msg}], temperature=0.0)
                    data = _parse_judge_json(raw)
                    if data:
                        denom = data.get("supported", 0) + data.get("contradicted", 0) + data.get("not_enough_info", 0)
                        if denom > 0:
                            faithfulness = round(data.get("supported", 0) / denom, 3)
                            hallucination_rate = 1.0 - faithfulness
                        elif data.get("ignored", 0) > 0:
                            faithfulness = 1.0
                            hallucination_rate = 0.0
            except Exception as e:
                print(f"ERROR during faithfulness judging: {e}")

        sources_pretty = ", ".join(sorted(actual_sources_set)) if actual_sources_set else ""
        results.append({
            "test_id": fx.get("test_id", "N/A"), "title": fx.get("title", "N/A"),
            "route_correct": "✅" if route_correct else "❌", "expected_route": expected_route, "actual_route": actual_route,
            "behavior_f1": f"{behavior_metrics['f1_score']:.2f}", "emotion_f1": f"{emotion_metrics['f1_score']:.2f}",
            "topic_f1": f"{topic_metrics['f1_score']:.2f}", "context_f1": f"{context_metrics['f1_score']:.2f}",
            "generated_answer": answer_text, "sources": sources_pretty, "source_count": len(actual_sources_set),
            "context_precision": context_precision, "context_recall": context_recall,
            "faithfulness": faithfulness, "hallucination_rate": hallucination_rate,
            "answer_correctness": answer_correctness_score,
            "category": category, "error_class": error_class,
            "latency_ms": latency_ms
        })

    df = pd.DataFrame(results)
    summary_text, table_rows, headers = "No valid test fixtures found to evaluate.", [], []

    if not df.empty:
        # Columns to keep, in display order (includes hallucination_rate so it is not dropped).
        cols = [
            "test_id", "title", "route_correct", "expected_route", "actual_route",
            "behavior_f1", "emotion_f1", "topic_f1", "context_f1",
            "generated_answer", "sources", "source_count",
            "context_precision", "context_recall",
            "faithfulness", "hallucination_rate",
            "answer_correctness",
            "category", "error_class", "latency_ms",
        ]
        df = df[[c for c in cols if c in df.columns]]

        pct = df["route_correct"].value_counts(normalize=True).get("✅", 0) * 100
        to_f = lambda s: pd.to_numeric(s, errors="coerce")

        # Mean NLU F1 scores
        bf1_mean = to_f(df["behavior_f1"]).mean() * 100
        ef1_mean = to_f(df["emotion_f1"]).mean() * 100
        tf1_mean = to_f(df["topic_f1"]).mean() * 100
        cf1_mean = to_f(df["context_f1"]).mean() * 100

        # Mean faithfulness and hallucination rate. The summary reports the
        # hallucination rate; faithfulness remains available in the per-test table.
        faith_mean = to_f(df["faithfulness"]).mean() * 100
        halluc_mean = to_f(df["hallucination_rate"]).mean() * 100

        rag_with_sources_pct = (df["source_count"] > 0).mean() * 100 if "source_count" in df else 0

        summary_text = f"""## Evaluation Summary
- **Routing Accuracy**: {pct:.2f}%
- **Behaviour F1 (avg)**: {bf1_mean:.2f}%
- **Emotion F1 (avg)**: {ef1_mean:.2f}%
- **Topic F1 (avg)**: {tf1_mean:.2f}%
- **Context F1 (avg)**: {cf1_mean:.2f}%
- **RAG: Context Precision**: {(to_f(df["context_precision"]).mean() * 100):.1f}%
- **RAG: Context Recall**: {(to_f(df["context_recall"]).mean() * 100):.1f}%
- **RAG Answers w/ Sources**: {rag_with_sources_pct:.1f}%
- **RAG: Hallucination Rate**: {halluc_mean:.1f}% (Lower is better)
- **RAG: Answer Correctness (LLM-judge)**: {(to_f(df["answer_correctness"]).mean() * 100):.1f}%
- **RAG: Avg Latency (ms)**: {to_f(df["latency_ms"]).mean():.1f}
"""
        print(summary_text)

        df_display = df.rename(columns={"context_precision": "Ctx. Precision", "context_recall": "Ctx. Recall"})
        table_rows = df_display.values.tolist()
        headers = df_display.columns.tolist()

        # Per-category correctness averages
        try:
            cat_means = df.groupby("category")["answer_correctness"].mean().reset_index()
            print("\n📊 Correctness by Category:")
            print(cat_means.to_string(index=False))
        except Exception as e:
            print(f"WARNING: Could not compute category breakdown: {e}")

        # Confusion-style matrix of error classes by category
        try:
            confusion = pd.crosstab(df.get("category", []), df.get("error_class", []),
                                    rownames=["Category"], colnames=["Error Class"], dropna=False)
            print("\n📊 Error Class Distribution by Category:")
            print(confusion.to_string())
        except Exception as e:
            print(f"WARNING: Could not build confusion matrix: {e}")

    else:
        summary_text = "No valid test fixtures found to evaluate."
        table_rows, headers = [], []

    return summary_text, table_rows, headers
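
# Minimal smoke-test sketch (illustrative only). It assumes the alz_companion import
# failed, so the lightweight fallback FAISS/stub functions defined above are in effect;
# in the real application the vectorstores and config are presumably built by the main
# app before calling run_comprehensive_evaluation. The tag lists below are placeholder
# values, not the project's real tag sets.
if __name__ == "__main__":
    load_test_fixtures()
    placeholder_config = {
        "behavior_tags": ["agitation"],       # hypothetical options
        "emotion_tags": ["anxious"],
        "topic_tags": ["daily_routine"],
        "context_tags": ["evening"],
    }
    summary, rows, cols = run_comprehensive_evaluation(
        vs_general=FAISS(),
        vs_personal=FAISS(),
        nlu_vectorstore=FAISS(),
        config=placeholder_config,
        storage_path=Path("tmp"),
    )
    print(summary)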