Rajan Sharma committed on
Commit e8e5773 · verified · 1 parent: 83c1ac8

Create scenario_engine.py

Files changed (1):
  scenario_engine.py (+550, -0)
scenario_engine.py ADDED
@@ -0,0 +1,550 @@
# scenario_engine.py
from __future__ import annotations
from typing import Dict, List, Any, Tuple, Union, Optional
import re
import math
import statistics
import json
import ast

import pandas as pd
import numpy as np

# ----------------------------
# Safe expression evaluation
# ----------------------------
_ALLOWED_FUNCS = {
    "abs": abs,
    "round": round,
    "sqrt": math.sqrt,
    "log": math.log,
    "exp": math.exp,
    "min": np.minimum,  # vectorized
    "max": np.maximum,  # vectorized
    "mean": np.mean,
    "avg": np.mean,
    "median": np.median,
    "sum": np.sum,
    "count": lambda x: np.size(x),
    "p50": lambda x: np.percentile(x, 50),
    "p75": lambda x: np.percentile(x, 75),
    "p90": lambda x: np.percentile(x, 90),
    "p95": lambda x: np.percentile(x, 95),
    "p99": lambda x: np.percentile(x, 99),
    "ceil": np.ceil,
    "floor": np.floor,
}

class _SafeExpr(ast.NodeTransformer):
    """
    Restrict expressions to:
    - Names (columns), numbers, strings, booleans
    - Arithmetic: + - * / // % **, comparisons, and/or/not
    - Calls to allowed functions (above)
    """
    def __init__(self, allowed_names: set):
        self.allowed_names = allowed_names

    def visit_Name(self, node):
        if node.id not in self.allowed_names and node.id not in ("True", "False", "None"):
            raise ValueError(f"Unknown name in expression: {node.id}")
        return node

    def visit_Call(self, node):
        if not isinstance(node.func, ast.Name):
            raise ValueError("Only simple function calls are allowed")
        func = node.func.id
        if func not in _ALLOWED_FUNCS:
            raise ValueError(f"Function not allowed: {func}")
        self.generic_visit(node)
        return node

    def generic_visit(self, node):
        allowed = (
            ast.Expression, ast.BoolOp, ast.BinOp, ast.UnaryOp,
            ast.Compare, ast.Call, ast.Name, ast.Load, ast.Constant,
            ast.And, ast.Or, ast.Not,
            ast.Add, ast.Sub, ast.Mult, ast.Div, ast.Mod, ast.Pow, ast.FloorDiv,
            ast.Eq, ast.NotEq, ast.Lt, ast.LtE, ast.Gt, ast.GtE, ast.In, ast.NotIn,
            ast.USub, ast.UAdd
        )
        if not isinstance(node, allowed):
            raise ValueError(f"Unsupported syntax: {type(node).__name__}")
        return super().generic_visit(node)

def _eval_series_expr(expr: str, df: pd.DataFrame) -> pd.Series:
    # Allow column names plus the whitelisted function names; visit_Name rejects anything else.
    allowed_names = set(df.columns) | set(_ALLOWED_FUNCS) | {"True", "False", "None"}
    tree = ast.parse(expr, mode="eval")
    _SafeExpr(allowed_names).visit(tree)
    code = compile(tree, "<expr>", "eval")
    env = {**{k: df[k] for k in df.columns}, **_ALLOWED_FUNCS}
    return eval(code, {"__builtins__": {}}, env)

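# Illustrative example (hypothetical columns, not part of the engine):
#   frame = pd.DataFrame({"wait_time": [4, 9], "capacity": [10, 10]})
#   _eval_series_expr("wait_time / capacity > 0.5", frame)  # -> boolean Series [False, True]
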
# ----------------------------
# Engine
# ----------------------------

class ScenarioEngine:
    """
    Scenario-first engine:
    - Parse tasks + inline directives from scenario text
    - For each task, execute a pipeline over analysis_results:
      load -> filter -> derive -> groupby/agg -> pivot -> sort/top -> select fields -> render
    - Render formats: table | list | comparison | map | narrative | chart (Vega-Lite spec)
    - Strict: only what is asked for is emitted.
    """

    @staticmethod
    def render(scenario_text: str, analysis_results: Dict[str, Any]) -> str:
        scen = ScenarioEngine._parse_scenario(scenario_text)
        out: List[str] = ["# Scenario Output\n"]
        for task in scen["tasks"]:
            out.append(ScenarioEngine._render_task(task, analysis_results))
        return "\n".join(out).strip()

    # ------------- Parsing -------------
    @staticmethod
    def _parse_scenario(s: str) -> Dict[str, Any]:
        """
        Detect a 'Tasks/Deliverables/Requirements/Your Tasks' block; fall back to any bullet/numbered lines.
        Each task may include inline directives of the form key: value.
        Supported directives (per task):
          format: table|list|comparison|map|narrative|chart
          data_key: <key in analysis_results>
          filter: <expr using columns>, e.g. zone == "North" and wait_time > 5
          derive: <col>=<expr>[, <col2>=<expr2> ...]
          group_by: col1[, col2 ...]
          agg: avg(x), median(y), sum(z), p90(wait), count(*)
          pivot: index=a[,b] columns=c values=v (values must be an aggregated column)
          sort_by: col   sort_dir: asc|desc
          top: N
          fields: col1 col2 col3 (space or comma separated)
          title: Custom name
          chart: bar|line|area|point (Vega-Lite spec emitted)
          x: <field>   y: <field>   color: <field>   column: <facet>
        """
        lines = [ln.rstrip() for ln in s.splitlines()]
        task_hdr = re.compile(r'^\s*(tasks?|deliverables|requirements|your tasks?)\s*$', re.I)
        bullet = re.compile(r'^\s*(?:\d+\.\s+|[-*•]\s+)')
        in_tasks = False
        raw_tasks: List[str] = []

        for ln in lines:
            if task_hdr.match(ln):
                in_tasks = True
                continue
            if in_tasks:
                if bullet.match(ln.strip()):
                    raw_tasks.append(ln.strip())
                elif ln.strip() == "":
                    continue
                else:
                    # stop when we hit a non-task-looking line after capturing some tasks
                    if raw_tasks:
                        in_tasks = False

        if not raw_tasks:
            # fallback: grab any bullet/numbered lines
            raw_tasks = [ln.strip() for ln in lines if bullet.match(ln.strip())]

        tasks: List[Dict[str, Any]] = []
        for raw in raw_tasks:
            directives = ScenarioEngine._extract_directives(raw)
            title = directives.get("title") or ScenarioEngine._strip_bullet(raw)
            tasks.append({"title": title, "raw": raw, "d": directives})
        return {"tasks": tasks}

    @staticmethod
    def _strip_bullet(line: str) -> str:
        return re.sub(r'^\s*(?:\d+\.\s+|[-*•]\s+)', '', line).strip()

    @staticmethod
    def _extract_directives(text: str) -> Dict[str, Any]:
        d: Dict[str, Any] = {}
        # key: value pairs; each value runs until the next '|', ',', ';', or end of line
        for m in re.finditer(r'([a-z_]+)\s*:\s*([^|,\n;]+)', text, re.I):
            k = m.group(1).strip().lower()
            v = m.group(2).strip()
            d[k] = v

        def _split_csv(val: str) -> List[str]:
            return [x.strip() for x in re.split(r'[,\s]+', val) if x.strip()]

        if "fields" in d:
            d["fields"] = _split_csv(d["fields"])
        if "group_by" in d:
            d["group_by"] = _split_csv(d["group_by"])
        if "top" in d:
            try:
                d["top"] = int(re.findall(r'\d+', d["top"])[0])
            except Exception:
                d["top"] = None
        if "sort_dir" in d:
            d["sort_dir"] = "desc" if d["sort_dir"].lower().startswith("d") else "asc"
        if "format" in d:
            d["format"] = d["format"].lower()
        if "chart" in d:
            d["chart"] = d["chart"].lower()
        return d
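
    # Illustrative directive line (hypothetical; '|' separators keep each value from swallowing the next key):
    #   "1. title: Busiest zones | format: table | group_by: zone | agg: p90(wait_time) | sort_by: p90_wait_time | top: 5"
    # parses to {"title": "Busiest zones", "format": "table", "group_by": ["zone"],
    #            "agg": "p90(wait_time)", "sort_by": "p90_wait_time", "top": 5}.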

    # ------------- Rendering -------------
    @staticmethod
    def _render_task(task: Dict[str, Any], analysis_results: Dict[str, Any]) -> str:
        title, d = task["title"], task["d"]
        section: List[str] = [f"## {title}\n"]

        # 1) Resolve data
        df, key_used, why = ScenarioEngine._resolve_df(d, analysis_results)
        if df is None:
            section.append("_No matching data for this task._")
            section.append(f"\n> Resolver note: {why}")
            return "\n".join(section)

        # 2) Filter
        if "filter" in d:
            mask = ScenarioEngine._safe_filter(df, d["filter"])
            df = df.loc[mask].copy()

        # 3) Derive columns
        if "derive" in d:
            df = ScenarioEngine._apply_derive(df, d["derive"])

        # 4) Group & aggregate
        if "group_by" in d or "agg" in d:
            df = ScenarioEngine._group_agg(df, d.get("group_by"), d.get("agg"))

        # 5) Pivot
        if "pivot" in d:
            df = ScenarioEngine._pivot(df, d["pivot"])

        # 6) Sort + Top
        if "sort_by" in d:
            asc = (d.get("sort_dir", "desc") == "asc")
            df = df.sort_values(by=d["sort_by"], ascending=asc)
        if isinstance(d.get("top"), int) and d["top"] > 0:
            df = df.head(d["top"])

        # 7) Fields selection
        if "fields" in d:
            cols = [c for c in d["fields"] if c in df.columns]
            if cols:
                df = df[cols]

        # 8) Render by format
        fmt = d.get("format", "table")
        if fmt == "list":
            section.append(ScenarioEngine._render_list(df))
        elif fmt == "comparison":
            section.append(ScenarioEngine._render_comparison(df))
        elif fmt == "map":
            section.append(ScenarioEngine._render_map(df))
        elif fmt == "narrative":
            section.append(ScenarioEngine._render_narrative(df))
        elif fmt == "chart":
            section.append(ScenarioEngine._render_chart_spec(df, d))
        else:
            section.append(ScenarioEngine._render_table(df))

        # 9) Per-task provenance (kept minimal)
        section.append("\n**Provenance**")
        section.append(f"- Data key: `{key_used}`")
        section.append(f"- Match note: {why}")

        return "\n".join(section)

    # ------------- Data resolution -------------
    @staticmethod
    def _resolve_df(d: Dict[str, Any], analysis_results: Dict[str, Any]) -> Tuple[Optional[pd.DataFrame], Optional[str], str]:
        # explicit key
        if "data_key" in d and d["data_key"] in analysis_results:
            return ScenarioEngine._as_df(analysis_results[d["data_key"]]), d["data_key"], "explicit data_key"

        # Jaccard-style keyword match between result keys and the hinted fields / sort_by
        hints = set()
        for k in ("fields", "sort_by"):
            v = d.get(k)
            if isinstance(v, list):
                hints |= {str(x).lower() for x in v}
            elif isinstance(v, str):
                hints |= set(re.findall(r'[A-Za-z0-9_]+', v.lower()))
        best_key, best_score = None, 0.0
        for k in analysis_results:
            words = set(re.findall(r'[A-Za-z0-9_]+', k.lower()))
            if not words:
                continue
            inter = len(hints & words)
            union = len(hints | words) or 1
            score = inter / union
            if score > best_score:
                best_key, best_score = k, score

        if best_key:
            return ScenarioEngine._as_df(analysis_results[best_key]), best_key, f"keyword match (score={best_score:.2f})"

        # fallback: first list-of-dicts or dict-like
        for k, v in analysis_results.items():
            df = ScenarioEngine._as_df(v)
            if df is not None and not df.empty:
                return df, k, "fallback first structured"

        return None, None, "no suitable dataset found"

    @staticmethod
    def _as_df(v: Any) -> Optional[pd.DataFrame]:
        if isinstance(v, list):
            if not v:
                return pd.DataFrame()
            if isinstance(v[0], dict):
                return pd.DataFrame(v)
            return pd.DataFrame({"value": v})
        if isinstance(v, dict):
            # expand nested dicts into columns where sensible
            flat = {}
            any_scalar = False
            for k, val in v.items():
                if isinstance(val, (int, float, str, bool, type(None))):
                    flat[k] = [val]
                    any_scalar = True
            if any_scalar:
                return pd.DataFrame(flat)
            # complex dict -> try records
            recs = []
            for k, val in v.items():
                if isinstance(val, dict):
                    rec = {"item": k}
                    rec.update({kk: valv for kk, valv in val.items()})
                    recs.append(rec)
            if recs:
                return pd.DataFrame(recs)
        return None

    # ------------- Pipeline ops -------------
    @staticmethod
    def _safe_filter(df: pd.DataFrame, expr: str) -> pd.Series:
        try:
            s = _eval_series_expr(expr, df)
            if not isinstance(s, (pd.Series, np.ndarray)):
                raise ValueError("filter must evaluate to a boolean Series/array")
            if not isinstance(s, pd.Series):
                # align plain arrays with the frame's index before masking
                s = pd.Series(s, index=df.index)
            return s.astype(bool).reindex(df.index, fill_value=False)
        except Exception as e:
            raise ValueError(f"Invalid filter expression: {e}")

    @staticmethod
    def _apply_derive(df: pd.DataFrame, spec: str) -> pd.DataFrame:
        # e.g., "load = patients / capacity, rate = 100*admits/pop"
        parts = re.split(r'[;,]\s*', spec)
        for p in parts:
            if not p.strip():
                continue
            if "=" not in p:
                raise ValueError(f"derive requires assignments: '{p}'")
            col, expr = p.split("=", 1)
            col = col.strip()
            expr = expr.strip()
            df[col] = _eval_series_expr(expr, df)
        return df

    @staticmethod
    def _parse_aggs(spec: Optional[str]) -> List[Tuple[str, str]]:
        """
        Returns a list of (out_col, func_call_string), e.g. [("avg_wait_time", "avg(wait_time)")]
        """
        if not spec:
            return []
        items = [x.strip() for x in spec.split(",") if x.strip()]
        out: List[Tuple[str, str]] = []
        for it in items:
            m = re.match(r'([a-zA-Z_][a-zA-Z0-9_]*)\s*\(([^)]+)\)', it)
            if not m:
                if it.lower() in ("count", "count(*)"):
                    out.append(("count", "count(*)"))
                    continue
                raise ValueError(f"Bad agg item: '{it}' (use avg(x), median(y), p90(z), sum(a), count(*))")
            func = m.group(1)
            arg = m.group(2).strip()
            out_col = f"{func.lower()}_{arg}"
            out.append((out_col, f"{func}({arg})"))
        return out
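
    # Illustrative call (hypothetical column names):
    #   ScenarioEngine._parse_aggs("avg(wait_time), p90(wait_time), count")
    #   -> [("avg_wait_time", "avg(wait_time)"), ("p90_wait_time", "p90(wait_time)"), ("count", "count(*)")]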

    @staticmethod
    def _group_agg(df: pd.DataFrame, group_by: Optional[List[str]], agg_spec: Optional[str]) -> pd.DataFrame:
        aggs = ScenarioEngine._parse_aggs(agg_spec)
        if not aggs and not group_by:
            return df
        if not group_by:
            # reduce to single row with requested aggs
            res = {}
            for out_col, call in aggs:
                val = ScenarioEngine._apply_agg_call(df, call)
                res[out_col] = val
            return pd.DataFrame([res])
        # grouped
        gb = df.groupby(group_by, dropna=False)
        rows = []
        for keys, g in gb:
            if not isinstance(keys, tuple):
                keys = (keys,)
            rec = {group_by[i]: keys[i] for i in range(len(group_by))}
            for out_col, call in aggs:
                rec[out_col] = ScenarioEngine._apply_agg_call(g, call)
            if not aggs:
                # no aggs? carry counts by default
                rec["count"] = len(g)
            rows.append(rec)
        return pd.DataFrame(rows)

    @staticmethod
    def _apply_agg_call(df: pd.DataFrame, call: str) -> Any:
        call = call.strip()
        if call.lower() in ("count", "count(*)"):
            return int(len(df))
        m = re.match(r'([a-zA-Z_][a-zA-Z0-9_]*)\s*\(([^)]+)\)', call)
        if not m:
            raise ValueError(f"Bad agg call: {call}")
        func, arg = m.group(1).lower(), m.group(2).strip()
        if arg not in df.columns:
            raise ValueError(f"Unknown column in agg: {arg}")
        col = df[arg].dropna()
        if func in ("avg", "mean"):
            return float(np.mean(col)) if len(col) else float("nan")
        if func == "median":
            return float(np.median(col)) if len(col) else float("nan")
        if func == "sum":
            return float(np.sum(col)) if len(col) else 0.0
        if func in ("min", "max"):
            f = getattr(np, func)
            return float(f(col)) if len(col) else float("nan")
        if func.startswith("p") and func[1:].isdigit():
            q = int(func[1:])
            return float(np.percentile(col, q)) if len(col) else float("nan")
        raise ValueError(f"Unsupported agg function: {func}")

    @staticmethod
    def _pivot(df: pd.DataFrame, spec: str) -> pd.DataFrame:
        # spec: index=a[,b] columns=c values=v
        # value pattern keeps commas so multi-column index specs ("index=a,b") survive
        parts = dict(re.findall(r'(\w+)\s*=\s*(\S+)', spec))
        idx = parts.get("index")
        cols = parts.get("columns")
        vals = parts.get("values")
        if not (idx and cols and vals):
            raise ValueError("pivot requires 'index=.. columns=.. values=..'")
        idx = [x.strip() for x in idx.split(",")]
        pv = df.pivot_table(index=idx, columns=cols, values=vals, aggfunc="first")
        pv = pv.reset_index()
        # flatten columns if needed
        if isinstance(pv.columns, pd.MultiIndex):
            pv.columns = ["_".join([str(c) for c in tup if c != ""]) for tup in pv.columns]
        return pv
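
    # Illustrative pivot spec (hypothetical columns produced by a prior group/agg step):
    #   "index=zone columns=shift values=avg_wait_time"
    # turns long-format rows into one row per zone with one avg_wait_time column per shift.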

    # ------------- Output renderers -------------
    @staticmethod
    def _render_table(df: pd.DataFrame) -> str:
        if df.empty:
            return "_No rows to display._"
        # convert all to string-friendly
        dff = df.copy()
        for c in dff.columns:
            dff[c] = dff[c].apply(lambda v: ScenarioEngine._fmt_val(v))
        header = "| " + " | ".join(dff.columns) + " |"
        sep = "|" + "|".join(["---"] * len(dff.columns)) + "|"
        rows = ["| " + " | ".join(map(str, r)) + " |" for r in dff.to_numpy().tolist()]
        return "\n".join([header, sep, *rows])

    @staticmethod
    def _render_list(df: pd.DataFrame) -> str:
        if df.empty:
            return "_No items._"
        # pick first column as primary
        primary = df.columns[0]
        lines = []
        for i, row in enumerate(df.itertuples(index=False), 1):
            parts = []
            for c, v in zip(df.columns, row):
                if c == primary:
                    continue
                parts.append(f"{c}: {ScenarioEngine._fmt_val(v)}")
            extra = f" ({', '.join(parts)})" if parts else ""
            lines.append(f"{i}. {ScenarioEngine._fmt_val(getattr(row, primary))}{extra}")
        return "\n".join(lines)

    @staticmethod
    def _render_comparison(df: pd.DataFrame) -> str:
        # look for columns named like current/previous
        cols = {c.lower(): c for c in df.columns}
        cur = cols.get("current") or cols.get("now") or cols.get("value")
        prev = cols.get("previous") or cols.get("prior") or cols.get("past")
        name = cols.get("name") or cols.get("metric") or cols.get("item") or df.columns[0]
        if not (cur and prev):
            return "_Comparison format requires columns 'current' and 'previous' (or aliases)._"
        header = "| Item | Current | Previous | Change |"
        sep = "|---|---:|---:|---:|"
        body = []
        for _, r in df.iterrows():
            c, p = r[cur], r[prev]
            change = (c - p) if isinstance(c, (int, float)) and isinstance(p, (int, float)) else "N/A"
            body.append(f"| {ScenarioEngine._fmt_val(r[name])} | {ScenarioEngine._fmt_val(c)} | {ScenarioEngine._fmt_val(p)} | {ScenarioEngine._fmt_val(change)} |")
        return "\n".join([header, sep, *body])

    @staticmethod
    def _render_map(df: pd.DataFrame) -> str:
        # simple location table
        colmap = {c.lower(): c for c in df.columns}
        name = colmap.get("name") or colmap.get("facility") or colmap.get("title") or df.columns[0]
        zone = colmap.get("zone")
        city = colmap.get("city")
        region = colmap.get("region")
        lat = colmap.get("latitude") or colmap.get("lat")
        lon = colmap.get("longitude") or colmap.get("lon")
        cols = [x for x in [name, city, region, zone, lat, lon] if x]
        if not cols:
            return "_No geographic fields to show._"
        dff = df[cols].copy()
        # build a "lat, lon" string only when both coordinate columns exist
        if lat and lon:
            has_coords = dff[lat].notna() & dff[lon].notna()
            dff["coordinates"] = np.where(has_coords, dff[lat].astype(str) + ", " + dff[lon].astype(str), "N/A")
        else:
            dff["coordinates"] = "N/A"
        show = [name, city or "city", region or "region", zone or "zone", "coordinates"]
        # ensure all exist
        for c in show:
            if c not in dff.columns:
                dff[c] = ""
        dff = dff[show]
        return ScenarioEngine._render_table(dff)

    @staticmethod
    def _render_narrative(df: pd.DataFrame) -> str:
        if df.empty:
            return "_No content._"
        paras = []
        for i, row in enumerate(df.to_dict(orient="records"), 1):
            parts = [f"**{k}**: {ScenarioEngine._fmt_val(v)}" for k, v in row.items()]
            paras.append(f"{i}. " + "; ".join(parts))
        return "\n".join(paras)

    @staticmethod
    def _render_chart_spec(df: pd.DataFrame, d: Dict[str, Any]) -> str:
        """
        Emits a Vega-Lite spec in a fenced code block that downstream renderers can plot exactly.
        Accepts: chart (bar|line|area|point), x, y, color, column (facet)
        """
        mark = d.get("chart", "bar")
        spec = {
            "$schema": "https://vega.github.io/schema/vega-lite/v5.json",
            "description": d.get("title") or "Chart",
            "data": {"values": df.to_dict(orient="records")},
            "mark": mark,
            "encoding": {}
        }
        for enc in ("x", "y", "color", "column"):
            if enc in d and d[enc] in df.columns:
                spec["encoding"][enc] = {"field": d[enc], "type": "quantitative" if pd.api.types.is_numeric_dtype(df[d[enc]]) else "nominal"}
        return "```vega-lite\n" + json.dumps(spec, ensure_ascii=False, indent=2) + "\n```"

    # ------------- Helpers -------------
    @staticmethod
    def _fmt_val(v: Any) -> str:
        if isinstance(v, float):
            if math.isnan(v):
                return "NaN"
            return f"{v:,.4g}"
        if isinstance(v, (int, np.integer)):
            return f"{int(v):,}"
        return str(v)
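
# ---------------------------------------------------------------------------
# Minimal illustrative run (hypothetical scenario text and data, not part of
# the engine API): shows the directive syntax end to end. Run the module
# directly to print the rendered Markdown.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    scenario = """
    Tasks
    1. title: Busiest zones | format: table | group_by: zone | agg: p90(wait_time) | sort_by: p90_wait_time | sort_dir: desc | top: 2
    2. title: Facility list | format: list | fields: facility zone
    """
    analysis_results = {
        "er_visits": [
            {"facility": "North General", "zone": "North", "wait_time": 42},
            {"facility": "North General", "zone": "North", "wait_time": 55},
            {"facility": "South Clinic", "zone": "South", "wait_time": 18},
            {"facility": "South Clinic", "zone": "South", "wait_time": 25},
        ],
    }
    print(ScenarioEngine.render(scenario, analysis_results))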