"""Dev tab — browse and inspect usage logs from HF dataset (local only).""" import json import os import shutil import uuid from datetime import datetime, timezone from pathlib import Path import gradio as gr import numpy as np from config import SEGMENT_AUDIO_DIR, SURAH_INFO_PATH # ── Surah names cache ────────────────────────────────────────────────── _surah_names: dict[int, str] | None = None def _load_surah_names() -> dict[int, str]: global _surah_names if _surah_names is not None: return _surah_names if not SURAH_INFO_PATH.exists(): _surah_names = {} return _surah_names with open(SURAH_INFO_PATH) as f: data = json.load(f) _surah_names = {int(k): v["name_en"] for k, v in data.items()} return _surah_names # ── HF token loading (same pattern as scripts/analyze_logs.py) ───────── def _load_token() -> str | None: token = os.environ.get("HF_TOKEN") if token: return token env_path = Path(__file__).parent.parent.parent / ".env" if env_path.exists(): for line in env_path.read_text().splitlines(): line = line.strip() if line.startswith("HF_TOKEN="): return line.split("=", 1)[1] return None # ── Dataset helpers ──────────────────────────────────────────────────── def _has_valid_segments(segments_str) -> bool: if not segments_str: return False try: runs = json.loads(segments_str) if isinstance(runs, list) and runs: return any(isinstance(run, dict) and run.get("segments") for run in runs) except (json.JSONDecodeError, TypeError): pass return False def _fmt_duration(seconds) -> str: if seconds is None: return "N/A" m, s = divmod(int(seconds), 60) h, m = divmod(m, 60) if h > 0: return f"{h}h {m}m" return f"{m}m {int(s)}s" def _fmt_pct(val) -> str: if val is None: return "N/A" return f"{val * 100:.1f}%" def _fmt_time(val) -> str: if val is None: return "N/A" return f"{val:.1f}s" # ── UI builder ───────────────────────────────────────────────────────── def build_dev_tab_ui(c): """Build the Dev tab UI components and attach them to the namespace.""" with gr.Row(): c.dev_load_btn = gr.Button("Load Logs", variant="primary", size="sm") c.dev_refresh_btn = gr.Button("Refresh", size="sm") c.dev_status = gr.Markdown("Click **Load Logs** to stream metadata from HF dataset.") with gr.Row(): c.dev_filter_device = gr.Dropdown( choices=["All", "GPU", "CPU"], value="All", label="Device", scale=1, ) c.dev_filter_model = gr.Dropdown( choices=["All", "Base", "Large"], value="All", label="Model", scale=1, ) c.dev_filter_status = gr.Dropdown( choices=["All", "All Passed", "Has Failures"], value="All", label="Status", scale=1, ) c.dev_sort = gr.Dropdown( choices=["Newest", "Duration", "Failures"], value="Newest", label="Sort", scale=1, ) c.dev_days_filter = gr.Number( label="Last N Days", value=None, precision=0, minimum=1, scale=1, ) c.dev_table = gr.Dataframe( headers=["#", "Time", "Surah", "Duration", "Segs", "Model", "Device", "Passed", "Failed", "Conf", "T1", "T2", "Audio ID"], datatype=["number", "str", "str", "str", "number", "str", "str", "number", "number", "str", "number", "number", "str"], interactive=False, label="Usage Logs", wrap=True, ) with gr.Row(): c.dev_plots_btn = gr.Button("Show Plots", size="sm") with gr.Row(): c.dev_gpu_plot = gr.Plot(label="GPU: Audio Duration vs Processing Time", visible=False) c.dev_cpu_plot = gr.Plot(label="CPU: Audio Duration vs Processing Time", visible=False) c.dev_detail_html = gr.HTML(value="", label="Log Detail") with gr.Row(): c.dev_compute_ts_btn = gr.Button("Compute Timestamps", variant="secondary", interactive=False, visible=False) c.dev_compute_ts_progress = 
gr.HTML(value="", visible=False) c.dev_animate_all_html = gr.HTML(value="", visible=False) # State c.dev_all_rows = gr.State(value=[]) c.dev_filtered_indices = gr.State(value=[]) c.dev_segment_dir = gr.State(value=None) c.dev_json_output = gr.State(value=None) # ── Row extraction ───────────────────────────────────────────────────── def _row_to_dict(row) -> dict: """Extract the fields we care about from a dataset row.""" return { "audio_id": row.get("audio_id", ""), "timestamp": row.get("timestamp", ""), "surah": row.get("surah"), "audio_duration_s": row.get("audio_duration_s"), "num_segments": row.get("num_segments"), "asr_model": row.get("asr_model", ""), "device": row.get("device", ""), "segments_passed": row.get("segments_passed"), "segments_failed": row.get("segments_failed"), "mean_confidence": row.get("mean_confidence"), "tier1_retries": row.get("tier1_retries", 0) or 0, "tier1_passed": row.get("tier1_passed", 0) or 0, "tier2_retries": row.get("tier2_retries", 0) or 0, "tier2_passed": row.get("tier2_passed", 0) or 0, "reanchors": row.get("reanchors", 0) or 0, "special_merges": row.get("special_merges", 0) or 0, "total_time": row.get("total_time"), "vad_queue_time": row.get("vad_queue_time"), "vad_gpu_time": row.get("vad_gpu_time"), "asr_gpu_time": row.get("asr_gpu_time"), "dp_total_time": row.get("dp_total_time"), "min_silence_ms": row.get("min_silence_ms"), "min_speech_ms": row.get("min_speech_ms"), "pad_ms": row.get("pad_ms"), "segments": row.get("segments"), "word_timestamps": row.get("word_timestamps"), "char_timestamps": row.get("char_timestamps"), "resegmented": row.get("resegmented"), "retranscribed": row.get("retranscribed"), "error": row.get("error"), } # ── Table building ───────────────────────────────────────────────────── def _build_table_row(row_dict, index, surah_names): """Build a single table row list from a row dict.""" ts = row_dict.get("timestamp", "") try: dt = datetime.fromisoformat(ts) time_display = dt.strftime("%m-%d %H:%M") except (ValueError, TypeError): time_display = str(ts)[:16] if ts else "N/A" surah = row_dict.get("surah") name = surah_names.get(surah, "") if surah else "" surah_display = f"{surah} {name}" if name else str(surah or "?") return [ index + 1, time_display, surah_display, _fmt_duration(row_dict.get("audio_duration_s")), row_dict.get("num_segments") or 0, row_dict.get("asr_model", "?"), row_dict.get("device", "?"), row_dict.get("segments_passed") or 0, row_dict.get("segments_failed") or 0, _fmt_pct(row_dict.get("mean_confidence")), row_dict.get("tier1_retries", 0) or 0, row_dict.get("tier2_retries", 0) or 0, row_dict.get("audio_id", ""), ] def _build_table(rows, indices, surah_names): """Build table data from rows and their display indices.""" return [_build_table_row(rows[i], display_idx, surah_names) for display_idx, i in enumerate(indices)] # ── Handlers ─────────────────────────────────────────────────────────── def load_logs_handler(): """Stream dataset (no audio) and return rows + table.""" token = _load_token() if not token: gr.Warning("HF_TOKEN not found in .env or environment.") return [], [], "HF_TOKEN not found.", gr.update() try: from datasets import load_dataset except ImportError: gr.Warning("'datasets' package not installed.") return [], [], "'datasets' package not installed.", gr.update() surah_names = _load_surah_names() try: ds = load_dataset("hetchyy/quran-aligner-logs", token=token, split="train", streaming=True) ds = ds.remove_columns("audio") except Exception as e: gr.Warning(f"Failed to load dataset: {e}") return [], 
[], f"Error: {e}", gr.update() rows = [] total = 0 for row in ds: total += 1 if _has_valid_segments(row.get("segments")): rows.append(_row_to_dict(row)) # Sort newest first rows.sort(key=lambda r: r.get("timestamp") or "", reverse=True) indices = list(range(len(rows))) table_data = _build_table(rows, indices, surah_names) status = f"Loaded {len(rows)} rows with segments (out of {total} total)." return rows, indices, status, table_data def filter_and_sort_handler(all_rows, device, model, status_filter, sort_by, days=None): """Filter and sort cached rows, return new table + index mapping.""" if not all_rows: return [], gr.update() surah_names = _load_surah_names() indices = [] # Compute cutoff for days filter cutoff = None if days is not None and days > 0: from datetime import timedelta cutoff = datetime.now(timezone.utc) - timedelta(days=int(days)) for i, row in enumerate(all_rows): # Days filter if cutoff is not None: ts = row.get("timestamp", "") try: row_dt = datetime.fromisoformat(ts) if row_dt.tzinfo is None: row_dt = row_dt.replace(tzinfo=timezone.utc) if row_dt < cutoff: continue except (ValueError, TypeError): continue # Device filter if device != "All": row_device = (row.get("device") or "").lower() if device == "GPU" and row_device not in ("cuda", "gpu"): continue if device == "CPU" and row_device not in ("cpu",): continue # Model filter if model != "All": row_model = row.get("asr_model", "") if model == "Base" and row_model != "Base": continue if model == "Large" and row_model != "Large": continue # Status filter if status_filter == "All Passed": if (row.get("segments_failed") or 0) > 0: continue elif status_filter == "Has Failures": if (row.get("segments_failed") or 0) == 0: continue indices.append(i) # Sort if sort_by == "Duration": indices.sort(key=lambda i: all_rows[i].get("audio_duration_s") or 0, reverse=True) elif sort_by == "Failures": indices.sort(key=lambda i: all_rows[i].get("segments_failed") or 0, reverse=True) # else "Newest" — already sorted by timestamp from load table_data = _build_table(all_rows, indices, surah_names) return indices, table_data def build_profiling_plots_handler(all_rows, filtered_indices): """Build GPU and CPU linear regression scatter plots from filtered data.""" if not all_rows or not filtered_indices: return gr.update(visible=False), gr.update(visible=False) import matplotlib matplotlib.use("Agg") import matplotlib.pyplot as plt # Collect data points from filtered rows gpu_rows = [] # (audio_dur, vad_gpu, asr_gpu, asr_model) cpu_rows = [] for i in filtered_indices: row = all_rows[i] audio_dur = row.get("audio_duration_s") vad_gpu = row.get("vad_gpu_time") asr_gpu = row.get("asr_gpu_time") device = (row.get("device") or "").lower() asr_model = row.get("asr_model", "") if audio_dur is None or audio_dur <= 0: continue entry = (audio_dur, vad_gpu, asr_gpu, asr_model) if device in ("cuda", "gpu"): gpu_rows.append(entry) elif device == "cpu": cpu_rows.append(entry) def _build_figure(rows, title): """Build a dual y-axis scatter + regression figure for one device type.""" if not rows: return None # Split series vad_x, vad_y = [], [] asr_base_x, asr_base_y = [], [] asr_large_x, asr_large_y = [], [] for audio_dur, vad_t, asr_t, model in rows: if vad_t is not None and vad_t > 0: vad_x.append(audio_dur) vad_y.append(vad_t) if asr_t is not None and asr_t > 0: if model == "Base": asr_base_x.append(audio_dur) asr_base_y.append(asr_t) elif model == "Large": asr_large_x.append(audio_dur) asr_large_y.append(asr_t) if not vad_x and not asr_base_x and not 
def build_profiling_plots_handler(all_rows, filtered_indices):
    """Build GPU and CPU linear regression scatter plots from filtered data."""
    if not all_rows or not filtered_indices:
        return gr.update(visible=False), gr.update(visible=False)
    import matplotlib
    matplotlib.use("Agg")
    import matplotlib.pyplot as plt

    # Collect data points from filtered rows
    gpu_rows = []  # (audio_dur, vad_gpu, asr_gpu, asr_model)
    cpu_rows = []
    for i in filtered_indices:
        row = all_rows[i]
        audio_dur = row.get("audio_duration_s")
        vad_gpu = row.get("vad_gpu_time")
        asr_gpu = row.get("asr_gpu_time")
        device = (row.get("device") or "").lower()
        asr_model = row.get("asr_model", "")
        if audio_dur is None or audio_dur <= 0:
            continue
        entry = (audio_dur, vad_gpu, asr_gpu, asr_model)
        if device in ("cuda", "gpu"):
            gpu_rows.append(entry)
        elif device == "cpu":
            cpu_rows.append(entry)

    def _build_figure(rows, title):
        """Build a dual y-axis scatter + regression figure for one device type."""
        if not rows:
            return None
        # Split series
        vad_x, vad_y = [], []
        asr_base_x, asr_base_y = [], []
        asr_large_x, asr_large_y = [], []
        for audio_dur, vad_t, asr_t, model in rows:
            if vad_t is not None and vad_t > 0:
                vad_x.append(audio_dur)
                vad_y.append(vad_t)
            if asr_t is not None and asr_t > 0:
                if model == "Base":
                    asr_base_x.append(audio_dur)
                    asr_base_y.append(asr_t)
                elif model == "Large":
                    asr_large_x.append(audio_dur)
                    asr_large_y.append(asr_t)
        if not vad_x and not asr_base_x and not asr_large_x:
            return None
        fig, ax_vad = plt.subplots(figsize=(7, 4.5))
        ax_asr = ax_vad.twinx()
        handles, labels = [], []
        # VAD series (left y-axis, blue)
        if vad_x:
            s = ax_vad.scatter(vad_x, vad_y, color="#4a9eff", alpha=0.5, s=20, zorder=3)
            handles.append(s)
            if len(vad_x) >= 2:
                coeffs = np.polyfit(vad_x, vad_y, 1)
                x_line = np.array([min(vad_x), max(vad_x)])
                y_line = np.polyval(coeffs, x_line)
                ax_vad.plot(x_line, y_line, color="#4a9eff", linewidth=1.5, zorder=4)
                labels.append(f"VAD: y={coeffs[0]:.3f}x+{coeffs[1]:.2f}")
            else:
                labels.append("VAD")
        # ASR Base series (right y-axis, orange)
        if asr_base_x:
            s = ax_asr.scatter(asr_base_x, asr_base_y, color="#f0ad4e", alpha=0.5, s=20,
                               marker="^", zorder=3)
            handles.append(s)
            if len(asr_base_x) >= 2:
                coeffs = np.polyfit(asr_base_x, asr_base_y, 1)
                x_line = np.array([min(asr_base_x), max(asr_base_x)])
                y_line = np.polyval(coeffs, x_line)
                ax_asr.plot(x_line, y_line, color="#f0ad4e", linewidth=1.5, zorder=4)
                labels.append(f"ASR Base: y={coeffs[0]:.3f}x+{coeffs[1]:.2f}")
            else:
                labels.append("ASR Base")
        # ASR Large series (right y-axis, red)
        if asr_large_x:
            s = ax_asr.scatter(asr_large_x, asr_large_y, color="#d9534f", alpha=0.5, s=20,
                               marker="s", zorder=3)
            handles.append(s)
            if len(asr_large_x) >= 2:
                coeffs = np.polyfit(asr_large_x, asr_large_y, 1)
                x_line = np.array([min(asr_large_x), max(asr_large_x)])
                y_line = np.polyval(coeffs, x_line)
                ax_asr.plot(x_line, y_line, color="#d9534f", linewidth=1.5, zorder=4)
                labels.append(f"ASR Large: y={coeffs[0]:.3f}x+{coeffs[1]:.2f}")
            else:
                labels.append("ASR Large")
        ax_vad.set_xlabel("Audio Duration (s)")
        ax_vad.set_ylabel("VAD Time (s)", color="#4a9eff")
        ax_asr.set_ylabel("ASR Time (s)", color="#f0ad4e")
        ax_vad.tick_params(axis="y", labelcolor="#4a9eff")
        ax_asr.tick_params(axis="y", labelcolor="#f0ad4e")
        ax_vad.set_title(title)
        if handles:
            fig.legend(handles, labels, loc="upper left", bbox_to_anchor=(0.12, 0.88),
                       fontsize=8, framealpha=0.8)
        fig.tight_layout()
        return fig

    gpu_fig = _build_figure(gpu_rows, "GPU: Audio Duration vs Processing Time")
    cpu_fig = _build_figure(cpu_rows, "CPU: Audio Duration vs Processing Time")
    gpu_update = gr.update(value=gpu_fig, visible=True) if gpu_fig else gr.update(visible=False)
    cpu_update = gr.update(value=cpu_fig, visible=True) if cpu_fig else gr.update(visible=False)
    # Close figures to free memory
    plt.close("all")
    return gpu_update, cpu_update
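# Note on gr.SelectData (based on the normalization already done below; the
# exact shape varies by Gradio version, so treat this as an assumption):
# evt.index may arrive as a plain int or as a [row, col] pair for Dataframe
# clicks, and the handler takes evt.index[0] in the pair case before mapping
# the display row through filtered_indices.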
def select_log_row_handler(all_rows, filtered_indices, evt: gr.SelectData):
    """When a table row is clicked, download audio, render segments, inject
    timestamps if available.

    Returns 6-tuple: (dev_detail_html, dev_json_output, dev_segment_dir,
    dev_compute_ts_btn, dev_animate_all_html, dev_compute_ts_progress)
    """
    _empty = ("", None, None, gr.update(visible=False), gr.update(visible=False),
              gr.update(visible=False))
    if not all_rows or not filtered_indices:
        return _empty
    display_idx = evt.index[0] if isinstance(evt.index, (list, tuple)) else evt.index
    if display_idx < 0 or display_idx >= len(filtered_indices):
        return _empty
    row_idx = filtered_indices[display_idx]
    row = all_rows[row_idx]
    audio_id = row.get("audio_id", "")
    surah_names = _load_surah_names()

    # Build summary HTML
    summary_html = _build_summary_html(row, surah_names)

    # Reconstruct and render segments
    html, json_segments, segment_dir = _build_segments_from_log(row, audio_id)
    html = summary_html + html

    # Check if timestamps exist in the log
    has_ts = bool(row.get("word_timestamps"))
    if has_ts and json_segments:
        try:
            from src.mfa import inject_timestamps_into_html
            results = _log_timestamps_to_mfa_results(
                row.get("word_timestamps"), row.get("char_timestamps")
            )
            seg_to_result_idx = _build_seg_to_result_idx_from_log(json_segments, results)
            enriched_html, enriched_json = inject_timestamps_into_html(
                html, json_segments, results, seg_to_result_idx,
                str(segment_dir) if segment_dir else None,
            )
            animate_btn = ''
            return (
                enriched_html,
                enriched_json,
                str(segment_dir) if segment_dir else None,
                gr.update(visible=False, interactive=False),
                gr.update(value=animate_btn, visible=True),
                gr.update(visible=False),
            )
        except Exception as e:
            print(f"[dev_tools] Timestamp injection from log failed: {e}")
            import traceback
            traceback.print_exc()
            # Fall through to non-timestamp path

    # No timestamps — build basic json_output and show Compute Timestamps button
    json_output = {"segments": json_segments} if json_segments else None
    has_audio = segment_dir is not None
    return (
        html,
        json_output,
        str(segment_dir) if segment_dir else None,
        gr.update(visible=has_audio, interactive=has_audio),
        gr.update(visible=False),
        gr.update(visible=False),
    )
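# Hypothetical wiring sketch (the actual event hookup presumably lives in the
# app builder, not in this module). Output orders match the handler return
# tuples documented above:
#
#   c.dev_load_btn.click(
#       load_logs_handler,
#       outputs=[c.dev_all_rows, c.dev_filtered_indices, c.dev_status, c.dev_table],
#   )
#   c.dev_table.select(
#       select_log_row_handler,
#       inputs=[c.dev_all_rows, c.dev_filtered_indices],
#       outputs=[c.dev_detail_html, c.dev_json_output, c.dev_segment_dir,
#                c.dev_compute_ts_btn, c.dev_animate_all_html,
#                c.dev_compute_ts_progress],
#   )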
# ── Summary HTML builder ───────────────────────────────────────────────

def _build_summary_html(row, surah_names) -> str:
    """Build the 4-section summary HTML for a log row."""
    surah = row.get("surah")
    name = surah_names.get(surah, "") if surah else ""
    surah_display = f"{surah} ({name})" if name else str(surah or "N/A")
    sections = []
    # 1. Summary
    sections.append(f"""
        {row.get('audio_id', 'N/A')}