# app.py
import os, re, json, traceback, pathlib
from functools import lru_cache
from typing import List, Dict, Any, Tuple

import gradio as gr
import torch
import regex as re2  # robust control-char sanitizer

from settings import SNAPSHOT_PATH, PERSIST_CONTENT
from audit_log import log_event, hash_summary
from privacy import redact_text

# ---------- Writable caches (HF Spaces-safe) ----------
HOME = pathlib.Path.home()
HF_HOME = str(HOME / ".cache" / "huggingface")
HF_HUB_CACHE = str(HOME / ".cache" / "huggingface" / "hub")
HF_TRANSFORMERS = str(HOME / ".cache" / "huggingface" / "transformers")
ST_HOME = str(HOME / ".cache" / "sentence-transformers")
GRADIO_TMP = str(HOME / "app" / "gradio")
GRADIO_CACHE = GRADIO_TMP

os.environ.setdefault("HF_HOME", HF_HOME)
os.environ.setdefault("HF_HUB_CACHE", HF_HUB_CACHE)
os.environ.setdefault("TRANSFORMERS_CACHE", HF_TRANSFORMERS)
os.environ.setdefault("SENTENCE_TRANSFORMERS_HOME", ST_HOME)
os.environ.setdefault("GRADIO_TEMP_DIR", GRADIO_TMP)
os.environ.setdefault("GRADIO_CACHE_DIR", GRADIO_CACHE)
os.environ.setdefault("HF_HUB_ENABLE_XET", "0")
os.environ.setdefault("HF_HUB_ENABLE_HF_TRANSFER", "1")

for p in [HF_HOME, HF_HUB_CACHE, HF_TRANSFORMERS, ST_HOME, GRADIO_TMP, GRADIO_CACHE]:
    try:
        os.makedirs(p, exist_ok=True)
    except Exception:
        pass
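# NOTE: Hugging Face Spaces containers are generally only writable under the
# user's home directory and /tmp, so all model and Gradio caches are redirected
# to $HOME-based paths above before any library tries to create its defaults.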
# Optional Cohere
try:
    import cohere
    _HAS_COHERE = True
except Exception:
    _HAS_COHERE = False

from transformers import AutoTokenizer, AutoModelForCausalLM
from huggingface_hub import login

from safety import safety_filter, refusal_reply
from retriever import init_retriever, retrieve_context
from decision_math import compute_operational_numbers
from prompt_templates import build_system_preamble
from upload_ingest import extract_text_from_files
from session_rag import SessionRAG

# NEW: dynamic data analysis framework
from data_registry import DataRegistry
from schema_mapper import map_concepts, build_phase1_questions
from auto_metrics import build_data_findings_markdown
# ---------- Config ----------
MODEL_ID = os.getenv("MODEL_ID", "microsoft/Phi-3-mini-4k-instruct")  # fallback
HF_TOKEN = os.getenv("HUGGINGFACE_HUB_TOKEN") or os.getenv("HF_TOKEN")
COHERE_API_KEY = os.getenv("COHERE_API_KEY")
USE_HOSTED_COHERE = bool(COHERE_API_KEY and _HAS_COHERE)

# Larger output budget for Phase 2
MAX_NEW_TOKENS = int(os.getenv("MAX_NEW_TOKENS", "2048"))

# ---------- Generic System Prompt ----------
SYSTEM_MASTER = """
SYSTEM ROLE
You are an AI analytical system that provides data-driven insights for any scenario.
Absolute rules:
- Use ONLY information provided in this conversation (scenario text + uploaded files + user answers).
- Never invent data. If something required is missing after clarifications, write the literal token: INSUFFICIENT_DATA.
- Provide clear analysis with calculations, evidence, and reasoning.
- Maintain privacy safeguards (aggregate data; suppress small cohorts <10).
- Adapt your analysis approach to the specific scenario and data provided.
Formatting rules for structured analysis:
- Start with the header: "Structured Analysis"
- Organize analysis into logical sections based on the scenario requirements
- End with concrete recommendations and a brief "Provenance" mapping outputs to scenario text, uploaded files, and answers.
""".strip()
# ---------- Helpers ----------
def pick_dtype_and_map():
    if torch.cuda.is_available():
        return torch.float16, "auto"
    if torch.backends.mps.is_available():
        return torch.float16, {"": "mps"}
    return torch.float32, "cpu"

def is_identity_query(message, history):
    patterns = [
        r"\bwho\s+are\s+you\b", r"\bwhat\s+are\s+you\b", r"\bwhat\s+is\s+your\s+name\b",
        r"\bwho\s+is\s+this\b", r"\bidentify\s+yourself\b", r"\btell\s+me\s+about\s+yourself\b",
        r"\bdescribe\s+yourself\b", r"\band\s+you\s*\?\b", r"\byour\s+name\b",
        r"\bwho\s+am\s+i\s+chatting\s+with\b",
    ]
    def match(t):
        return any(re.search(p, (t or "").strip().lower()) for p in patterns)
    if match(message):
        return True
    if history:
        last_user = history[-1][0] if isinstance(history[-1], (list, tuple)) else None
        if match(last_user):
            return True
    return False

def _iter_user_assistant(history):
    for item in (history or []):
        if isinstance(item, (list, tuple)):
            u = item[0] if len(item) > 0 else ""
            a = item[1] if len(item) > 1 else ""
            yield u, a
def _sanitize_text(s: str) -> str:
    if not isinstance(s, str):
        return s
    # Strip Unicode control characters except newline/tab. The "--" set-difference
    # syntax requires the regex module's V1 behaviour, so pass flags=re2.V1.
    return re2.sub(r'[\p{C}--[\n\t]]+', '', s, flags=re2.V1)
def is_scenario_triggered(text: str, uploaded_files_paths) -> bool:
    """Detect if this should be treated as a scenario analysis request."""
    t = (text or "").lower()
    # Scenario keywords
    scenario_keywords = [
        "scenario", "analysis", "analyze", "assess", "evaluate", "recommendation",
        "strategy", "plan", "solution", "decision", "priority", "allocate", "resource",
    ]
    has_keyword = any(keyword in t for keyword in scenario_keywords)
    has_files = bool(uploaded_files_paths)
    # If files are uploaded, assume scenario mode.
    # If certain analytical keywords are present, assume scenario mode.
    return has_files or has_keyword
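# Example (illustrative): is_scenario_triggered("Analyze our Q3 staffing data", []) -> True
# (keyword "analyze"); is_scenario_triggered("hello", []) -> False (no keyword, no files).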
# ---------- Cohere first ----------
def cohere_chat(message, history):
    if not USE_HOSTED_COHERE:
        return None
    try:
        client = cohere.Client(api_key=COHERE_API_KEY)
        parts = []
        for u, a in _iter_user_assistant(history):
            if u:
                parts.append(f"User: {u}")
            if a:
                parts.append(f"Assistant: {a}")
        parts.append(f"User: {message}")
        prompt = "\n".join(parts) + "\nAssistant:"
        resp = client.chat(
            model="command-r7b-12-2024",
            message=prompt,
            temperature=0.3,
            max_tokens=MAX_NEW_TOKENS,
        )
        if hasattr(resp, "text") and resp.text:
            return resp.text.strip()
        if hasattr(resp, "reply") and resp.reply:
            return resp.reply.strip()
        if hasattr(resp, "generations") and resp.generations:
            return resp.generations[0].text.strip()
        return None
    except Exception:
        return None
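# Generation order: when COHERE_API_KEY is set and the cohere package imports,
# hosted Cohere is tried first; any failure or empty reply falls back to the
# local Hugging Face model loaded below.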
# ---------- Local model (HF) ----------
def load_local_model():
    if not HF_TOKEN:
        raise RuntimeError("HUGGINGFACE_HUB_TOKEN is not set.")
    login(token=HF_TOKEN, add_to_git_credential=False)
    dtype, device_map = pick_dtype_and_map()
    tok = AutoTokenizer.from_pretrained(
        MODEL_ID, token=HF_TOKEN, use_fast=True, model_max_length=8192,
        padding_side="left", trust_remote_code=True,
        cache_dir=os.environ.get("TRANSFORMERS_CACHE"),
    )
    try:
        mdl = AutoModelForCausalLM.from_pretrained(
            MODEL_ID, token=HF_TOKEN, device_map=device_map,
            low_cpu_mem_usage=True, torch_dtype=dtype, trust_remote_code=True,
            cache_dir=os.environ.get("TRANSFORMERS_CACHE"),
        )
    except Exception:
        # Fallback: load without a device map, then place the model manually.
        # (When device_map succeeds above, the model is already on the right
        # device, and moving it again would undo MPS placement.)
        mdl = AutoModelForCausalLM.from_pretrained(
            MODEL_ID, token=HF_TOKEN,
            low_cpu_mem_usage=True, torch_dtype=dtype, trust_remote_code=True,
            cache_dir=os.environ.get("TRANSFORMERS_CACHE"),
        )
        mdl.to("cuda" if torch.cuda.is_available() else "cpu")
    if mdl.config.eos_token_id is None and tok.eos_token_id is not None:
        mdl.config.eos_token_id = tok.eos_token_id
    return mdl, tok
def build_inputs(tokenizer, message, history):
    msgs = [{"role": "system", "content": SYSTEM_MASTER}]
    for u, a in _iter_user_assistant(history):
        if u:
            msgs.append({"role": "user", "content": u})
        if a:
            msgs.append({"role": "assistant", "content": a})
    msgs.append({"role": "user", "content": message})
    return tokenizer.apply_chat_template(
        msgs, tokenize=True, add_generation_prompt=True, return_tensors="pt"
    )

def local_generate(model, tokenizer, input_ids, max_new_tokens=MAX_NEW_TOKENS):
    input_ids = input_ids.to(model.device)
    with torch.no_grad():
        out = model.generate(
            input_ids=input_ids, max_new_tokens=max_new_tokens,
            do_sample=True, temperature=0.3, top_p=0.9,
            repetition_penalty=1.15,
            pad_token_id=tokenizer.eos_token_id,
            eos_token_id=tokenizer.eos_token_id,
        )
    gen_only = out[0, input_ids.shape[-1]:]
    return tokenizer.decode(gen_only, skip_special_tokens=True).strip()
# ---------- Snapshot & retrieval ----------
def _load_snapshot(path=SNAPSHOT_PATH):
    """Load the operational snapshot if one is available."""
    try:
        with open(path, "r", encoding="utf-8") as f:
            return json.load(f)
    except Exception:
        return {}  # Return an empty dict if no snapshot is available

init_retriever()
_session_rag = SessionRAG()

# NEW: session-scoped data registry
_data_registry = DataRegistry()

# ---------- Core chat logic (generic scenario handling) ----------
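# Conversation flow: messages that look like scenario-analysis requests go
# through two phases. Phase 1 asks clarifying questions about missing or
# ambiguous data and returns awaiting_answers=True; Phase 2 (the next call,
# with awaiting_answers=True) computes data findings from the registry and
# generates the structured analysis. Everything else is handled as normal chat.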
def clarityops_reply(user_msg, history, tz, uploaded_files_paths, awaiting_answers=False):
    try:
        log_event("user_message", None, {"sizes": {"chars": len(user_msg or "")}})

        safe_in, blocked_in, reason_in = safety_filter(user_msg, mode="input")
        if blocked_in:
            ans = refusal_reply(reason_in)
            return history + [(user_msg, ans)], awaiting_answers

        if is_identity_query(safe_in, history):
            ans = "I am an AI analytical system designed to help you analyze scenarios and make data-driven decisions."
            return history + [(user_msg, ans)], awaiting_answers

        # 1) Ingest uploads into RAG AND DataRegistry
        artifacts = []
        if uploaded_files_paths:
            ing = extract_text_from_files(uploaded_files_paths)
            chunks = ing.get("chunks", []) if isinstance(ing, dict) else (ing or [])
            artifacts = ing.get("artifacts", []) if isinstance(ing, dict) else []
            if chunks:
                _session_rag.add_docs(chunks)
            if artifacts:
                _session_rag.register_artifacts(artifacts)
            # register parsable tables into DataRegistry
            for p in uploaded_files_paths:
                _data_registry.add_path(p)
            log_event("uploads_added", None, {
                "chunks": len(chunks), "artifacts": len(artifacts), "tables": len(_data_registry.names())
            })

        # Quick helper for column inspection
        if re.search(r"\b(columns?|headers?)\b", (safe_in or "").lower()):
            cols = _session_rag.get_latest_csv_columns()
            if cols:
                return history + [(user_msg, "Here are the column names from your most recent CSV upload:\n\n- " + "\n- ".join(cols))], awaiting_answers

        # 2) Decide mode
        scenario_mode = is_scenario_triggered(safe_in, uploaded_files_paths)

        if not scenario_mode:
            # ---------- Normal conversational chat ----------
            out = cohere_chat(safe_in, history) if USE_HOSTED_COHERE else None
            if not out:
                model, tokenizer = load_local_model()
                tiny = [{"role": "system", "content": "You are a helpful assistant."}]
                for u, a in _iter_user_assistant(history):
                    if u:
                        tiny.append({"role": "user", "content": u})
                    if a:
                        tiny.append({"role": "assistant", "content": a})
                tiny.append({"role": "user", "content": safe_in})
                inputs = tokenizer.apply_chat_template(tiny, tokenize=True, add_generation_prompt=True, return_tensors="pt")
                out = local_generate(model, tokenizer, inputs, max_new_tokens=MAX_NEW_TOKENS)
            out = _sanitize_text(out or "")
            safe_out, blocked_out, reason_out = safety_filter(out, mode="output")
            if blocked_out:
                safe_out = refusal_reply(reason_out)
            log_event("assistant_reply", None, {
                **hash_summary("prompt", safe_in if not PERSIST_CONTENT else ""),
                **hash_summary("reply", safe_out if not PERSIST_CONTENT else ""),
                "mode": "normal_chat",
            })
            return history + [(user_msg, safe_out)], awaiting_answers

        # ---------- Generic Scenario Analysis Mode ----------
        # 3) Build dynamic concept mapping from scenario + data
        mapping = map_concepts(safe_in, _data_registry)

        if not awaiting_answers:
            # PHASE 1: ask for missing/ambiguous information
            phase1 = build_phase1_questions(scenario_text=safe_in, registry=_data_registry, mapping=mapping)
            phase1 = _sanitize_text(phase1)
            log_event("assistant_reply", None, {
                **hash_summary("prompt", safe_in if not PERSIST_CONTENT else ""),
                **hash_summary("reply", phase1 if not PERSIST_CONTENT else ""),
                "mode": "scenario_phase1",
                "awaiting_next_phase": True
            })
            return history + [(user_msg, phase1)], True

        # PHASE 2: compute data analysis and generate structured response
        data_findings_md, missing_keys = build_data_findings_markdown(_data_registry, mapping)

        # Build context for analysis
        insufficient_data_note = ""
        if missing_keys:
            insufficient_data_note = (
                "\n\nData limitations: Missing or uncomputable: "
                + ", ".join(sorted(set(missing_keys)))
                + ". Where these are essential to analysis, write INSUFFICIENT_DATA."
            )

        # Get relevant context from uploaded documents.
        # Extract key terms from the scenario to improve retrieval.
        scenario_terms = _extract_key_terms_from_scenario(safe_in)
        session_snips = "\n---\n".join(_session_rag.retrieve(scenario_terms, k=6))

        # Load any available operational data
        snapshot = _load_snapshot()
        computed_numbers = compute_operational_numbers(snapshot) if snapshot else {}

        # Get general policy/context if available
        policy_context = retrieve_context(scenario_terms)

        # Build comprehensive data summary for analysis
        registry_summary = _data_registry.summarize_for_prompt()
        artifact_block = "Uploaded Data Files:\n" + registry_summary if registry_summary else "No data files uploaded."
        scenario_block = safe_in if len((safe_in or "")) > 0 else ""

        system_preamble = build_system_preamble(
            snapshot=snapshot,
            policy_context=policy_context,
            computed_numbers=computed_numbers,
            scenario_text=scenario_block + f"\n\n{artifact_block}\n\n{data_findings_md}" + insufficient_data_note,
            session_snips=session_snips
        )

        directive = (
            "\n\n[ANALYSIS INSTRUCTION]\n"
            "Provide a structured analysis appropriate to this scenario. Begin with 'Structured Analysis' and "
            "organize your response into logical sections based on what the scenario requires. Use the data "
            "provided as ground truth. When information is missing, write INSUFFICIENT_DATA. Show your reasoning "
            "and calculations. End with concrete recommendations and a brief Provenance section.\n"
        )

        augmented_user = SYSTEM_MASTER + "\n\n" + system_preamble + "\n\nScenario and context:\n" + safe_in + directive

        out = cohere_chat(augmented_user, history)
        if not out:
            model, tokenizer = load_local_model()
            inputs = build_inputs(tokenizer, augmented_user, history)
            out = local_generate(model, tokenizer, inputs, max_new_tokens=MAX_NEW_TOKENS)

        if isinstance(out, str):
            for tag in ("Assistant:", "System:", "User:"):
                if out.startswith(tag):
                    out = out[len(tag):].strip()
        out = _sanitize_text(out or "")

        safe_out, blocked_out, reason_out = safety_filter(out, mode="output")
        if blocked_out:
            safe_out = refusal_reply(reason_out)

        log_event("assistant_reply", None, {
            **hash_summary("prompt", augmented_user if not PERSIST_CONTENT else ""),
            **hash_summary("reply", safe_out if not PERSIST_CONTENT else ""),
            "mode": "scenario_phase2",
            "awaiting_next_phase": False
        })
        return history + [(user_msg, safe_out)], False

    except Exception as e:
        err = f"Error: {e}"
        try:
            traceback.print_exc()
        except Exception:
            pass
        return history + [(user_msg, err)], awaiting_answers
def _extract_key_terms_from_scenario(scenario_text: str) -> str:
    """Extract key terms from scenario text for better context retrieval."""
    if not scenario_text:
        return ""
    # Simple extraction of important words (remove common stop words)
    stop_words = {
        'the', 'and', 'or', 'but', 'in', 'on', 'at', 'to', 'for', 'of', 'with', 'by',
        'is', 'are', 'was', 'were', 'be', 'been', 'have', 'has', 'had', 'do', 'does', 'did',
        'a', 'an', 'this', 'that', 'these', 'those', 'i', 'you', 'he', 'she', 'it', 'we', 'they'
    }
    words = re.findall(r'\b[a-zA-Z]{3,}\b', scenario_text.lower())
    key_terms = [word for word in words if word not in stop_words]
    # Return the first 15 key terms
    return ' '.join(key_terms[:15])
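# Example (illustrative): "Analyze the quarterly sales for the northeast region"
# yields "analyze quarterly sales northeast region" as the retrieval query.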
# ---------- Theme & CSS ----------
theme = gr.themes.Soft(primary_hue="teal", neutral_hue="slate", radius_size=gr.themes.sizes.radius_lg)

custom_css = """
:root { --brand-bg: #0f172a; --brand-accent: #0d9488; --brand-text: #0f172a; --brand-text-light: #ffffff; }
html, body, .gradio-container { height: 100vh; }
.gradio-container { background: var(--brand-bg); display: flex; flex-direction: column; }

/* HERO (landing) */
#hero-wrap { height: 70vh; display: grid; place-items: center; }
#hero { text-align: center; }
#hero h2 { color: #0f172a; font-weight: 800; font-size: 32px; margin-bottom: 22px; }
#hero .search-row { width: min(860px, 92vw); margin: 0 auto; display: flex; gap: 8px; align-items: stretch; }
#hero .search-row .hero-box { flex: 1 1 auto; }
#hero .search-row .hero-box textarea { height: 52px !important; }
#hero-send > button { height: 52px !important; padding: 0 18px !important; border-radius: 12px !important; }
#hero .hint { color: #334155; margin-top: 10px; font-size: 13px; opacity: 0.9; }

/* CHAT */
#chat-container { position: relative; }
.chatbot header, .chatbot .label, .chatbot .label-wrap { display: none !important; }
.message.user, .message.bot { background: var(--brand-accent) !important; color: var(--brand-text-light) !important; border-radius: 12px !important; padding: 8px 12px !important; }
textarea, input, .gr-input { border-radius: 12px !important; }

/* Chat input row equal heights */
#chat-input-row { align-items: stretch; }
#chat-msg textarea { height: 52px !important; }
#chat-send > button, #chat-clear > button { height: 52px !important; padding: 0 18px !important; border-radius: 12px !important; }
"""
# ---------- UI ----------
with gr.Blocks(theme=theme, css=custom_css, analytics_enabled=False) as demo:
    # --- HERO (initial screen) ---
    with gr.Column(elem_id="hero-wrap", visible=True) as hero_wrap:
        with gr.Column(elem_id="hero"):
            gr.HTML("<h2>What scenario can I help you analyze?</h2>")
            with gr.Row(elem_classes="search-row"):
                hero_msg = gr.Textbox(
                    placeholder="Describe your scenario or ask any question (upload files for data analysis)…",
                    show_label=False,
                    lines=1,
                    elem_classes="hero-box"
                )
                hero_send = gr.Button("➤", scale=0, elem_id="hero-send")
            gr.Markdown('<div class="hint">Upload files and describe your scenario for comprehensive analysis. The system will ask clarifying questions, then provide structured insights.</div>')

    # --- MAIN APP (hidden until first message) ---
    with gr.Column(elem_id="chat-container", visible=False) as app_wrap:
        chat = gr.Chatbot(label="", show_label=False, height="80vh")
        with gr.Row():
            uploads = gr.Files(
                label="Upload data files (PDF, DOCX, CSV, PNG, JPG)",
                file_types=["file"], file_count="multiple", height=68
            )
        with gr.Row(elem_id="chat-input-row"):
            msg = gr.Textbox(
                label="",
                show_label=False,
                placeholder="Continue the conversation. Provide additional details or answer clarifying questions.",
                scale=10,
                elem_id="chat-msg",
                lines=1,
            )
            send = gr.Button("Send", scale=1, elem_id="chat-send")
            clear = gr.Button("Clear chat", scale=1, elem_id="chat-clear")

    # ---- State
    state_history = gr.State(value=[])
    state_uploaded = gr.State(value=[])
    state_awaiting = gr.State(value=False)
    # ---- Uploads
    def _store_uploads(files, current):
        paths = []
        for f in (files or []):
            paths.append(getattr(f, "name", None) or f)
        return (current or []) + paths

    uploads.change(fn=_store_uploads, inputs=[uploads, state_uploaded], outputs=state_uploaded)
    # ---- Core send (used by both hero input and chat input)
    def _on_send(user_msg, history, up_paths, awaiting):
        try:
            if not user_msg or not user_msg.strip():
                return history, "", history, awaiting
            new_history, new_awaiting = clarityops_reply(
                user_msg.strip(), history or [], None, up_paths or [], awaiting_answers=awaiting
            )
            return new_history, "", new_history, new_awaiting
        except Exception as e:
            err = f"Error: {e}"
            try:
                traceback.print_exc()
            except Exception:
                pass
            new_hist = (history or []) + [(user_msg or "", err)]
            return new_hist, "", new_hist, awaiting
    # ---- Hero -> App transition + first send
    def _hero_start(user_msg, history, up_paths, awaiting):
        chat_o, msg_o, hist_o, await_o = _on_send(user_msg, history, up_paths, awaiting)
        return (
            chat_o, msg_o, hist_o, await_o,
            gr.update(visible=False),
            gr.update(visible=True),
            ""
        )

    hero_send.click(
        _hero_start,
        inputs=[hero_msg, state_history, state_uploaded, state_awaiting],
        outputs=[chat, msg, state_history, state_awaiting, hero_wrap, app_wrap, hero_msg],
        concurrency_limit=2, queue=True
    )
    hero_msg.submit(
        _hero_start,
        inputs=[hero_msg, state_history, state_uploaded, state_awaiting],
        outputs=[chat, msg, state_history, state_awaiting, hero_wrap, app_wrap, hero_msg],
        concurrency_limit=2, queue=True
    )

    # ---- Normal chat interactions after hero is gone
    send.click(
        _on_send,
        inputs=[msg, state_history, state_uploaded, state_awaiting],
        outputs=[chat, msg, state_history, state_awaiting],
        concurrency_limit=2, queue=True
    )
    msg.submit(
        _on_send,
        inputs=[msg, state_history, state_uploaded, state_awaiting],
        outputs=[chat, msg, state_history, state_awaiting],
        concurrency_limit=2, queue=True
    )
    def _on_clear():
        # Clear the in-memory data registry and session RAG for a fresh scenario
        _data_registry.clear()
        _session_rag.clear()
        return (
            [], "", [], False,
            gr.update(visible=True),
            gr.update(visible=False),
            ""
        )

    clear.click(_on_clear, None, [chat, msg, state_history, state_awaiting, hero_wrap, app_wrap, hero_msg])
if __name__ == "__main__":
    port = int(os.environ.get("PORT", "7860"))
    demo.launch(server_name="0.0.0.0", server_port=port, show_api=False, max_threads=40)