# NOTE: removed Hugging Face Spaces page residue ("Spaces / Sleeping") that was
# scraped into this file; it is not part of app.py.
# app.py — Thematic Analysis Dashboard
# A specialized Gradio interface for BERTopic-based research.
# Supports light-theme aesthetics and Braun & Clarke (2006) workflow.
| import sys | |
| import gradio as gr | |
| import json | |
| import os | |
| import uuid | |
| import glob | |
| import pandas as pd | |
| import traceback | |
| import datetime | |
| import time | |
| from agent import agent | |
# --- Output Configuration ---
# Ensures emoji and special characters display correctly on all platforms.
try:
    # Re-encode both standard streams; reconfigure() exists on io.TextIOWrapper
    # streams (Python 3.7+).
    for _std_stream in (sys.stdout, sys.stderr):
        _std_stream.reconfigure(encoding="utf-8", errors="replace")
except AttributeError:
    # Stream was replaced by an object without reconfigure() (IDEs, pipes).
    pass
# --- Constants & Data Schemas ---

# Column order for the interactive Review Table (gr.Dataframe).
COLUMNS_FOR_REVIEW = [
    "#", "Topic Label", "Top Evidence Sentence", "Reasoning",
    "Sent.", "Papers", "Approve", "Rename To",
]

# Single placeholder row shown while no analysis artifact exists yet.
TEMPLATE_EMPTY_DF = pd.DataFrame(
    columns=COLUMNS_FOR_REVIEW,
    data=[["", "", "", "", 0, 0, False, ""]],
)

# Report files offered in the download widget when present on disk.
SYSTEM_DOWNLOAD_FILES = [
    "narrative.txt", "comparison.csv", "themes.json",
    "taxonomy_map.json", "labels_abstract.json", "labels_title.json",
]

# Every cached artifact removed by handle_clear_session on restart.
STORAGE_FILES_TO_PURGE = [
    "loaded_data.csv",
    "summaries_abstract.json", "summaries_title.json",
    "emb_abstract.npy", "emb_title.npy",
    "labels_abstract.json", "labels_title.json",
    "themes.json", "themes_abstract.json", "themes_title.json",
    "taxonomy_map.json", "comparison.csv", "narrative.txt",
    "chart_abstract_intertopic.html", "chart_abstract_bars.html",
    "chart_abstract_hierarchy.html", "chart_abstract_heatmap.html",
    "chart_title_intertopic.html", "chart_title_bars.html",
    "chart_title_hierarchy.html", "chart_title_heatmap.html",
]

# (display name, html file) pairs backing the chart dropdown.
# NOTE(review): the "β" glyphs in these labels look like mojibake (likely an
# em-dash or emoji lost in re-encoding) — confirm against the original source.
VISUALIZATION_GALLERY = [
    ("Intertopic Map β Abstract", "chart_abstract_intertopic.html"),
    ("Frequency Bars β Abstract", "chart_abstract_bars.html"),
    ("Hierarchy / Treemap β Abstract", "chart_abstract_hierarchy.html"),
    ("Similarity Heatmap β Abstract", "chart_abstract_heatmap.html"),
    ("Intertopic Map β Title", "chart_title_intertopic.html"),
    ("Frequency Bars β Title", "chart_title_bars.html"),
    ("Hierarchy / Treemap β Title", "chart_title_hierarchy.html"),
    ("Similarity Heatmap β Title", "chart_title_heatmap.html"),
]

# (phase id, progress-pill label) pairs for the Braun & Clarke workflow.
WORKFLOW_STEPS = [
    ("1","β Load"), ("2","β‘ Codes"), ("3","β’ Themes"),
    ("4","β£ Review"), ("5","β€ Names"), ("5.5","β€Β½ PAJAIS"), ("6","β₯ Report"),
]

# Patterns representing potential state corruption.
# Matched against exception text in invoke_analysis_agent; any hit triggers
# migration to a fresh LangGraph thread id.
ERROR_SIGNATURES = [
    "INVALID_CHAT_HISTORY",
    "ToolMessage",
    "tool_calls that do not have a corresponding",
]
# --- Modern Dashboard SaaS Theme (CSS) ---
# Single stylesheet literal for the whole app: header bar, white panels,
# gradient buttons, chat bubbles, and the review table. Intended to be
# passed to Gradio as custom CSS; class names match elem_classes used below.
PREMIUM_SAAS_STYLE = """
@import url('https://fonts.googleapis.com/css2?family=Plus+Jakarta+Sans:wght@400;500;600;700;800&display=swap');
body, .gradio-container {
    background-color: #f3f5f8 !important; /* Soft premium gray-blue background */
    font-family: 'Plus Jakarta Sans', sans-serif !important;
    color: #1a1d20 !important;
}
.gradio-container {
    max-width: 1440px !important;
    margin: 20px auto !important;
    padding: 0 20px !important;
}
.header-bar {
    background: linear-gradient(135deg, #1e293b 0%, #0f172a 100%);
    color: #ffffff !important;
    padding: 24px 32px;
    border-radius: 16px;
    margin-bottom: 24px;
    box-shadow: 0 10px 25px -5px rgba(15, 23, 42, 0.2);
    display: flex;
    justify-content: space-between;
    align-items: center;
}
.header-bar h1 {
    color: #ffffff !important;
    font-size: 1.8rem !important;
    font-weight: 800 !important;
    margin: 0 !important;
    letter-spacing: -0.02em;
}
.header-bar p {
    color: #94a3b8 !important;
    margin: 4px 0 0 0 !important;
    font-size: 0.95rem;
}
.dashboard-panel {
    background: #ffffff;
    border-radius: 16px;
    border: 1px solid #e2e8f0;
    box-shadow: 0 4px 6px -1px rgba(0, 0, 0, 0.05), 0 2px 4px -2px rgba(0, 0, 0, 0.025);
    padding: 24px;
    margin-bottom: 24px;
}
.section-title {
    color: #475569 !important;
    font-weight: 700 !important;
    font-size: 0.75rem !important;
    letter-spacing: 0.1em;
    text-transform: uppercase;
    margin-bottom: 16px;
    border-bottom: 2px solid #f1f5f9;
    padding-bottom: 8px;
}
.action-btn-primary {
    background: linear-gradient(135deg, #3b82f6 0%, #2563eb 100%) !important;
    border: none !important;
    color: white !important;
    font-weight: 600 !important;
    box-shadow: 0 4px 12px rgba(37, 99, 235, 0.3) !important;
    transition: transform 0.2s, box-shadow 0.2s !important;
}
.action-btn-primary:hover {
    transform: translateY(-2px) !important;
    box-shadow: 0 6px 16px rgba(37, 99, 235, 0.4) !important;
}
.action-btn-success {
    background: linear-gradient(135deg, #10b981 0%, #059669 100%) !important;
    border: none !important;
    color: white !important;
    font-weight: 700 !important;
}
/* Chatbot custom styling */
.bubble-wrap { border-radius: 12px !important; }
.message.user { background-color: #f1f5f9 !important; color: #1e293b !important; border-radius: 12px 12px 0 12px !important; }
.message.bot { background-color: #eff6ff !important; border: 1px solid #bfdbfe !important; color: #1e293b !important; border-radius: 12px 12px 12px 0 !important; }
/* Review Table Styling */
.review-table { min-height: 400px !important; }
.review-table table { border-collapse: collapse !important; width: 100% !important; }
.review-table td, .review-table th {
    padding: 12px !important;
    word-wrap: break-word !important;
    word-break: break-word !important;
    white-space: normal !important;
    text-align: left !important;
}
.review-table th { background-color: #f1f5f9 !important; font-weight: 700 !important; color: #1e293b !important; }
.review-table td { border-bottom: 1px solid #e2e8f0 !important; }
footer { display: none !important; }
"""
| # --- Helper Functions --- | |
def create_message_object(role_name: str, text_payload: str) -> dict:
    """Return a Gradio 6+ chat message dict for the given role and text."""
    payload = str(text_payload)
    return {"role": role_name, "content": payload}
def update_exchange_history(logs: list, user_input: str, agent_output: str) -> list:
    """Return a NEW log list with one user/assistant turn appended.

    The input list is not mutated; concatenation builds a fresh list.
    """
    turn = [
        create_message_object("user", user_input),
        create_message_object("assistant", agent_output),
    ]
    return logs + turn
def record_system_failure(error_msg: str, operation_context: str = "") -> None:
    """Append an error entry to error.txt for persistent debugging.

    Each entry records a timestamp, the caller-supplied context tag, the
    message, and the current traceback (``format_exc`` of whatever exception
    is active, if any). Also echoes a short line to the console, best-effort.
    """
    timestamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    with open("error.txt", "a", encoding="utf-8") as storage_file:
        storage_file.write(f"\n{'-'*60}\nTS: {timestamp}\nCTX: {operation_context}\n"
                           f"MSG: {error_msg}\nTRACE:\n{traceback.format_exc()}\n")
    try:
        print(f"β [Error] {operation_context}: {str(error_msg)[:100]}")
    except (UnicodeEncodeError, OSError):
        # Console may not accept the glyph (encoding) or be gone (broken pipe).
        # Was a bare `except:`, which also swallowed KeyboardInterrupt/SystemExit.
        pass
def format_output_safely(raw_component) -> str:
    """Coerce arbitrary agent output (str / list / dict / message object) to a plain string.

    Lists are flattened recursively and joined with newlines; dicts yield their
    "content" value (or their repr); objects exposing a ``content`` attribute
    are unwrapped recursively; anything else is str()'d.
    """
    if raw_component is None:
        return ""
    if isinstance(raw_component, str):
        return raw_component
    if isinstance(raw_component, list):
        flattened = [format_output_safely(entry) for entry in raw_component]
        return "\n".join(flattened)
    if isinstance(raw_component, dict):
        return str(raw_component.get("content", str(raw_component)))
    if hasattr(raw_component, "content"):
        return format_output_safely(raw_component.content)
    return str(raw_component)
| def _enrich_with_ai_council(agent_response: str) -> str: | |
| """Extracts and displays AI Council reasoning from labels JSON files.""" | |
| reasoning_data = [] | |
| # Try to load reasoning from labels files | |
| for scenario in ("abstract", "title"): | |
| label_file = f"labels_{scenario}.json" | |
| if os.path.exists(label_file): | |
| try: | |
| with open(label_file, encoding="utf-8") as f: | |
| labels = json.load(f) | |
| for item in labels[:5]: # Show top 5 topics | |
| if item.get("reasoning"): | |
| reasoning_data.append({ | |
| "label": item.get("label", ""), | |
| "reasoning": item.get("reasoning", ""), | |
| "confidence": item.get("confidence", 0), | |
| }) | |
| except: | |
| pass | |
| if reasoning_data: | |
| reasoning_section = "\n\nπ§ **AI COUNCIL REASONING** (Multi-Perspective Analysis):\n\n" | |
| for idx, item in enumerate(reasoning_data, 1): | |
| reasoning_section += f"**Topic {idx}: {item['label']}** (Confidence: {item['confidence']:.2f})\n" | |
| reasoning_section += f"ββ {item['reasoning']}\n\n" | |
| return agent_response + reasoning_section | |
| return agent_response | |
def check_analysis_milestones() -> dict:
    """Map each workflow phase id to True when its on-disk artifact exists.

    Phases 3-5 all key off themes.json; phase 2 is satisfied by either
    labels file.
    """
    def _present(*names) -> bool:
        return any(os.path.exists(name) for name in names)

    themes_done = _present("themes.json")
    return {
        "1": _present("loaded_data.csv"),
        "2": _present("labels_abstract.json", "labels_title.json"),
        "3": themes_done,
        "4": themes_done,
        "5": themes_done,
        "5.5": _present("taxonomy_map.json"),
        "6": _present("narrative.txt"),
    }
def generate_progress_indicator(state_map: dict) -> str:
    """Render the milestone map as a row of HTML pills (one per workflow step)."""
    pills = []
    for identifier, title in WORKFLOW_STEPS:
        if state_map.get(identifier, False):
            bg_fill, txt_color, border_clr = "#3b82f6", "#ffffff", "#3b82f6"
            shadow = "box-shadow: 0 2px 4px rgba(59,130,246,0.3);"
            check_mark = "β "
        else:
            bg_fill, txt_color, border_clr = "#f8fafc", "#64748b", "#cbd5e1"
            shadow = ""
            check_mark = ""
        pills.append(
            f'<span style="display:inline-block;padding:6px 16px;margin:4px;'
            f'background:{bg_fill};border:1px solid {border_clr};border-radius:8px;'
            f'font-size:0.85rem;font-weight:600;color:{txt_color};{shadow} transition:all 0.3s;"> '
            f'{check_mark}{title}</span>'
        )
    element_html = "".join(pills)
    return (
        f'<div style="background:#ffffff;padding:16px 20px;border-radius:12px;'
        f'border:1px solid #e2e8f0;margin-bottom:24px;box-shadow:0 1px 3px rgba(0,0,0,0.05);">'
        f'<div style="color:#94a3b8;font-size:0.7rem;font-weight:800;letter-spacing:1px;margin-bottom:8px;text-transform:uppercase;">Analysis Progress</div>'
        f'<div style="display:flex;flex-wrap:wrap;">{element_html}</div></div>'
    )
def extract_milestones_from_text(feedback_text, existing_map: dict) -> dict:
    """Merge PHASE_STATUS markers from agent text into a copy of the map.

    Lines of the form ``PHASE_STATUS: 1=β ,2=...`` set phases explicitly;
    afterwards, on-disk artifacts can only turn a phase on, never off.
    """
    tracker = dict(existing_map)
    for line in format_output_safely(feedback_text).splitlines():
        if "PHASE_STATUS:" not in line:
            continue
        payload = line.split("PHASE_STATUS:", 1)[1].strip()
        for chunk in payload.split(","):
            if "=" not in chunk:
                continue
            phase_id, phase_val = chunk.split("=", 1)
            tracker[phase_id.strip()] = "β " in phase_val
    # Filesystem evidence upgrades but never downgrades a phase.
    for phase_id, done in check_analysis_milestones().items():
        tracker[phase_id] = tracker.get(phase_id, False) or done
    return tracker
| # --- Data Loading Logic --- | |
def refresh_review_component() -> pd.DataFrame:
    """Populate the Review Table from the newest available JSON artifact.

    Artifacts are checked in priority order — taxonomy_map.json, then
    themes.json, then labels_{abstract,title}.json — and the first one found
    is rendered into the COLUMNS_FOR_REVIEW schema. Falls back to
    TEMPLATE_EMPTY_DF when nothing exists or the artifact is empty.
    """
    def _load_json(path: str):
        # Context manager so the handle is closed promptly — the previous
        # json.loads(open(...).read()) pattern leaked open file handles.
        with open(path, encoding="utf-8") as fh:
            return json.load(fh)

    def _as_frame(rows: list) -> pd.DataFrame:
        return pd.DataFrame(rows, columns=COLUMNS_FOR_REVIEW) if rows else TEMPLATE_EMPTY_DF

    if os.path.exists("taxonomy_map.json"):
        rows = []
        for i, node in enumerate(_load_json("taxonomy_map.json")):
            note = ("β NOVEL" if node.get("is_novel", False)
                    else f"β PAJAIS: {node.get('pajais_match','')}")
            rows.append({"#": i, "Topic Label": node.get("theme_name", ""),
                         "Top Evidence Sentence": note, "Reasoning": node.get("reasoning", ""),
                         "Sent.": 0, "Papers": 0, "Approve": True, "Rename To": ""})
        return _as_frame(rows)

    if os.path.exists("themes.json"):
        rows = []
        for i, th in enumerate(_load_json("themes.json")):
            count_val = th.get("total_sentences", 0)
            reps = th.get("representative_sentences")
            rows.append({"#": i, "Topic Label": th.get("theme_name", ""),
                         "Top Evidence Sentence": (reps[0][:110] if reps else ""),
                         "Reasoning": th.get("reasoning", ""),
                         "Sent.": count_val, "Papers": max(1, count_val // 10),
                         "Approve": False, "Rename To": ""})
        return _as_frame(rows)

    for scenario in ("abstract", "title"):
        label_file = f"labels_{scenario}.json"
        if os.path.exists(label_file):
            rows = []
            for item in _load_json(label_file):
                sc = item.get("count", 0)
                near = item.get("nearest_sentences")
                rows.append({"#": item.get("topic_id", 0), "Topic Label": item.get("label", "Concept"),
                             "Top Evidence Sentence": (near[0][:110] if near else ""),
                             "Reasoning": item.get("reasoning", ""),
                             "Sent.": sc, "Papers": max(1, sc // 10),
                             "Approve": False, "Rename To": ""})
            return _as_frame(rows)

    return TEMPLATE_EMPTY_DF
def fetch_available_downloads():
    """Return generated report files that exist on disk, or None when there are none.

    None (rather than an empty list) keeps the gr.File download widget blank.
    """
    present = [path for path in SYSTEM_DOWNLOAD_FILES if os.path.exists(path)]
    return present or None
def get_available_charts() -> list:
    """List display names of charts whose HTML files have been generated.

    Returns a one-item placeholder list when no chart file exists yet so the
    dropdown always has at least one choice.
    """
    found = [name for name, path in VISUALIZATION_GALLERY if os.path.exists(path)]
    return found or ["No charts available yet"]
def refresh_charts_display() -> tuple[list, str]:
    """Return (available chart names, embedded HTML for the first available chart).

    When no chart exists the placeholder entry is returned and the HTML shows
    the empty-state message.
    """
    available_charts = get_available_charts()
    chart_file = None
    if available_charts[0] != "No charts available yet":
        chart_file = next(
            (path for name, path in VISUALIZATION_GALLERY if name == available_charts[0]),
            None,
        )
    return available_charts, embed_thematic_chart(chart_file)
def embed_thematic_chart(file_name: str) -> str:
    """Wrap a saved Plotly HTML file in a sandboxed iframe via ``srcdoc``.

    Returns an empty-state div when the file is missing, and a red error div
    if reading fails.
    """
    if not file_name or not os.path.exists(file_name):
        return "<div style='color:#b2bec3;padding:50px;text-align:center;font-weight:bold;'>π No charts generated yet. Complete Phase 2 to unlock visualizations.</div>"
    try:
        with open(file_name, encoding="utf-8") as handle:
            document = handle.read()
        # Escape the characters that would otherwise terminate or corrupt the
        # srcdoc attribute value (& first, so entities aren't double-escaped).
        for raw_char, entity in (("&", "&amp;"), ('"', "&quot;"), ("'", "&#39;")):
            document = document.replace(raw_char, entity)
        return (f'<iframe srcdoc="{document}" style="width:100%;height:600px;border:1px solid #e2e8f0;border-radius:10px;" '
                f'sandbox="allow-scripts allow-same-origin"></iframe>')
    except Exception as e:
        return f"<div style='color:red;padding:20px;'>Error loading chart: {str(e)}</div>"
| # --- Interaction Core --- | |
def invoke_analysis_agent(user_query: str, session_uid: str, retry_limit: int = 3) -> tuple[str, str]:
    """Communication bridge between UI and LangGraph Agent. Enriches output with AI Council reasoning.

    Returns (response_text, session_uid). The returned uid may differ from the
    input when a corrupted thread forced migration to a fresh session id
    (see ERROR_SIGNATURES). Never raises: failures are logged and returned as
    a "Service Error: ..." string.
    """
    working_uid = session_uid
    for attempt in range(retry_limit):
        try:
            # thread_id scopes the agent's conversational memory to this session.
            runtime_config = {"configurable": {"thread_id": working_uid}}
            agent_result = agent.invoke({"messages": [{"role": "user", "content": user_query}]}, config=runtime_config)
            base_response = ""
            # Walk messages newest-first; take the first AI/assistant reply,
            # whether it is a message object (.type == "ai") or a plain dict.
            for response_node in reversed(agent_result.get("messages", [])):
                if hasattr(response_node, "type") and response_node.type == "ai":
                    base_response = format_output_safely(response_node.content)
                    break
                if isinstance(response_node, dict) and response_node.get("role") in ("assistant", "ai"):
                    base_response = format_output_safely(response_node.get("content", ""))
                    break
            # Enrich with AI Council reasoning if available
            enriched_response = _enrich_with_ai_council(base_response)
            return enriched_response if enriched_response else (base_response or "Agent standby."), working_uid
        except Exception as failure:
            trace_str = str(failure)
            # Corrupted-thread signatures: migrate to a new session id and retry.
            if any(sig in trace_str for sig in ERROR_SIGNATURES):
                new_uid = str(uuid.uuid4())
                record_system_failure(trace_str, f"Session Migration [{working_uid[:6]} -> {new_uid[:6]}]")
                working_uid = new_uid
                time.sleep(1)
                continue
            # Rate limiting ("429"/"limit"): linear backoff of 35s, 70s, ... then retry.
            if "429" in trace_str or "limit" in trace_str.lower():
                backoff_time = 35 * (attempt + 1)
                time.sleep(backoff_time)
                continue
            record_system_failure(trace_str, "Agent Link Failure")
            return f"Service Error: {trace_str}", working_uid
    return "Connection timeout.", working_uid
| # --- Event Callback Handlers --- | |
def handle_file_upload(file_data, chat_log, session_id, progress_data):
    """Kick off Phase 1 profiling as soon as a dataset CSV is uploaded.

    Returns the 8-tuple of component updates expected by the .change binding.
    """
    def _unchanged_state():
        # Re-render every widget from the current (unchanged) state.
        return (chat_log, session_id, progress_data,
                generate_progress_indicator(progress_data),
                refresh_review_component(), fetch_available_downloads(),
                gr.update(choices=get_available_charts()), embed_thematic_chart(None))

    if file_data is None:
        return _unchanged_state()
    try:
        local_path = file_data.name if hasattr(file_data, "name") else str(file_data)
        init_query = (f"System: Data source uploaded at {local_path}. "
                      "Execute Phase 1: Summary Statistics and Column Profiling.")
        bot_reply, updated_id = invoke_analysis_agent(init_query, session_id)
        new_history = update_exchange_history(chat_log, "Dataset Uploaded", bot_reply)
        new_progress = extract_milestones_from_text(bot_reply, progress_data)
        chart_choices, chart_html = refresh_charts_display()
        return (new_history, updated_id, new_progress,
                generate_progress_indicator(new_progress),
                refresh_review_component(), fetch_available_downloads(),
                gr.update(choices=chart_choices), chart_html)
    except Exception as err:
        record_system_failure(str(err), "OnUpload")
        return _unchanged_state()
def handle_text_submission(user_text, chat_log, session_id, progress_data):
    """Route a researcher prompt to the agent and refresh every dashboard widget.

    Returns a 9-tuple; the second element clears the input textbox.
    """
    def _unchanged_state():
        chart_choices, chart_html = refresh_charts_display()
        return (chat_log, "", session_id, progress_data,
                generate_progress_indicator(progress_data),
                refresh_review_component(), fetch_available_downloads(),
                gr.update(choices=chart_choices), chart_html)

    if not user_text.strip():
        return _unchanged_state()
    try:
        bot_reply, updated_id = invoke_analysis_agent(user_text, session_id)
        new_history = update_exchange_history(chat_log, user_text, bot_reply)
        new_progress = extract_milestones_from_text(bot_reply, progress_data)
        chart_choices, chart_html = refresh_charts_display()
        return (new_history, "", updated_id, new_progress,
                generate_progress_indicator(new_progress),
                refresh_review_component(), fetch_available_downloads(),
                gr.update(choices=chart_choices), chart_html)
    except Exception as err:
        record_system_failure(str(err), "OnTextSubmit")
        return _unchanged_state()
def handle_table_submission(review_data, chat_log, session_id, progress_data):
    """Forward the researcher's Review-Table approvals and renames to the agent.

    Builds an override map {row id -> new name} from approved rows whose
    "Rename To" cell is non-empty, then instructs the agent to transition to
    the next phase. Returns the 8-tuple of updated component values; on error
    the failure is logged and the unchanged UI state is returned.
    """
    try:
        current_df = review_data if isinstance(review_data, pd.DataFrame) else pd.DataFrame(review_data)
        # Keep only rows the researcher ticked as approved.
        validated_rows = current_df[current_df["Approve"].astype(bool)]
        override_map = {str(r["#"]): r["Rename To"]
                        for _, r in validated_rows.iterrows()
                        if str(r["Rename To"]).strip()}
        # (An unused local summarizing the decision was removed — it was never
        # displayed nor sent anywhere.)
        agent_instruction = (f"The researcher has finalized decisions on the Review Table.\n"
                             f"Manual Overrides: {json.dumps(override_map)}\n"
                             "Transitioning to the next analysis phase.")
        bot_reply, updated_id = invoke_analysis_agent(agent_instruction, session_id)
        new_history = update_exchange_history(chat_log, "[Table Interaction]", bot_reply)
        new_progress = extract_milestones_from_text(bot_reply, progress_data)
        available_charts, chart_html = refresh_charts_display()
        return new_history, updated_id, new_progress, generate_progress_indicator(new_progress), refresh_review_component(), fetch_available_downloads(), gr.update(choices=available_charts), chart_html
    except Exception as err:
        # Boundary handler: log and hand back the unchanged UI state.
        record_system_failure(str(err), "OnTableSubmit")
        return chat_log, session_id, progress_data, generate_progress_indicator(progress_data), refresh_review_component(), fetch_available_downloads(), gr.update(choices=get_available_charts()), embed_thematic_chart(None)
def handle_clear_session(session_id):
    """Purge all cached analysis artifacts and start a fresh session.

    Returns (empty chat log, new session uid, reset progress map, progress HTML).
    The incoming session_id is only received to satisfy the binding; a brand
    new uuid is always issued.
    """
    for artifact in STORAGE_FILES_TO_PURGE:
        if os.path.exists(artifact):
            try:
                os.remove(artifact)
            except OSError:
                # Best-effort cleanup: file may be locked or already gone.
                # (Was a bare `except:`, which also swallowed KeyboardInterrupt.)
                pass
    fresh_id = str(uuid.uuid4())
    default_progress = {k: False for k in ["1", "2", "3", "4", "5", "5.5", "6"]}
    return [], fresh_id, default_progress, generate_progress_indicator(default_progress)
| # --- UI Construction --- | |
# All workflow phases start unfinished.
START_PROGRESS = {k: False for k in ["1", "2", "3", "4", "5", "5.5", "6"]}

# css= and theme= are gr.Blocks() constructor options — Blocks.launch() does
# not accept them — so the premium stylesheet and blue theme are applied here.
with gr.Blocks(
    title="Nexus Workspace",
    css=PREMIUM_SAAS_STYLE,
    theme=gr.themes.Default(primary_hue="blue"),
) as thematic_app:
    # State Holders (per-browser-session values threaded through callbacks).
    # NOTE(review): session_history is read by every handler but never listed
    # as an output, so the stored log is never updated — confirm whether chat
    # history should accumulate across turns.
    current_session_id = gr.State(str(uuid.uuid4()))
    session_history = gr.State([])
    session_progress = gr.State(START_PROGRESS)
    # Header Bar
    with gr.Column(elem_classes="header-bar"):
        gr.Markdown("# π¬ Nexus Research Workspace\nAgentic Analysis & Taxonomy Generation Engine")
    # Progress Indicator (full width)
    stage_bar_component = gr.HTML(value=generate_progress_indicator(START_PROGRESS))
    # Two column layout: Left Sidebar (Controls) | Right Main Panel (Chat & Tables)
    with gr.Row():
        # LEFT SIDEBAR
        with gr.Column(scale=1, min_width=320, elem_classes="dashboard-panel"):
            gr.HTML('<div class="section-title">1. Data Source Config</div>')
            scopus_uploader = gr.File(label="Upload Dataset (.csv)", file_types=[".csv"], height=130)
            gr.Markdown("*Uploading file immediately triggers Data Profiling (Phase 1).*")
            gr.HTML('<div style="margin-top:24px;" class="section-title">Session Management</div>')
            wipe_session_btn = gr.Button("ποΈ Restart Analysis Session", variant="secondary")
            gr.HTML('<div style="margin-top:24px;" class="section-title">System Artifacts</div>')
            download_handler = gr.File(value=fetch_available_downloads(), label="Generated Reports & Export", file_count="multiple", interactive=False, height=180)
        # RIGHT MAIN PANEL
        with gr.Column(scale=3):
            with gr.Tabs():
                with gr.Tab("π¬ AI Workspace & Command Center", elem_classes="dashboard-panel"):
                    chat_display = gr.Chatbot(label="Agent Dialogue", height=450, show_label=False, avatar_images=(None, "https://huggingface.co/front/assets/huggingface_logo-noborder.svg"))
                    with gr.Row():
                        chat_input_box = gr.Textbox(placeholder="Prompt the agent (e.g., 'run abstract', 'continue')...", scale=5, container=False)
                        chat_send_btn = gr.Button("Execute Task π", variant="primary", scale=1, elem_classes="action-btn-primary")
                with gr.Tab("π Data Verification & Results", elem_classes="dashboard-panel"):
                    with gr.Group():
                        gr.Markdown("#### Review & Approve Topics with AI Council Reasoning")
                        interactive_review_table = gr.Dataframe(
                            value=refresh_review_component(),
                            headers=COLUMNS_FOR_REVIEW,
                            datatype=["number", "str", "str", "str", "number", "number", "bool", "str"],
                            interactive=True, wrap=True, row_count=(10, "dynamic"),
                            column_widths=["5%", "15%", "20%", "30%", "8%", "8%", "7%", "15%"],
                            elem_classes="review-table"
                        )
                        table_submit_btn = gr.Button("β Confirm Selections & Proceed", variant="primary", size="lg", elem_classes="action-btn-success")
                with gr.Tab("π Intelligence Visuals", elem_classes="dashboard-panel"):
                    visual_selector = gr.Dropdown(choices=[v[0] for v in VISUALIZATION_GALLERY], label="Select Chart View")
                    visual_frame = gr.HTML("<div style='color:#94a3b8;padding:60px;text-align:center;font-size:1.1rem;'>No visualizations generated yet.<br/>Complete Phase 2 to unlock interactive charts.</div>")
    # --- Communication Links (Logic Binding) ---
    scopus_uploader.change(
        fn=handle_file_upload,
        inputs=[scopus_uploader, session_history, current_session_id, session_progress],
        outputs=[chat_display, current_session_id, session_progress, stage_bar_component, interactive_review_table, download_handler, visual_selector, visual_frame]
    )
    chat_send_btn.click(
        fn=handle_text_submission,
        inputs=[chat_input_box, session_history, current_session_id, session_progress],
        outputs=[chat_display, chat_input_box, current_session_id, session_progress, stage_bar_component, interactive_review_table, download_handler, visual_selector, visual_frame]
    )
    chat_input_box.submit(
        fn=handle_text_submission,
        inputs=[chat_input_box, session_history, current_session_id, session_progress],
        outputs=[chat_display, chat_input_box, current_session_id, session_progress, stage_bar_component, interactive_review_table, download_handler, visual_selector, visual_frame]
    )
    table_submit_btn.click(
        fn=handle_table_submission,
        inputs=[interactive_review_table, session_history, current_session_id, session_progress],
        outputs=[chat_display, current_session_id, session_progress, stage_bar_component, interactive_review_table, download_handler, visual_selector, visual_frame]
    )
    visual_selector.change(
        fn=lambda chart_name: embed_thematic_chart(next((f for n, f in VISUALIZATION_GALLERY if n == chart_name), None)),
        inputs=visual_selector,
        outputs=visual_frame
    )
    wipe_session_btn.click(fn=handle_clear_session, inputs=[current_session_id], outputs=[chat_display, current_session_id, session_progress, stage_bar_component])
# --- Execution ---
if __name__ == "__main__":
    # css= and theme= are gr.Blocks() constructor options, not launch() kwargs;
    # passing them to launch() raises a TypeError at startup, so they are
    # omitted here and must be supplied when the Blocks app is constructed.
    thematic_app.launch(ssr_mode=False, show_error=True)