st192011 committed on
Commit
4c3fbce
·
verified ·
1 Parent(s): 41e455c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +104 -219
app.py CHANGED
@@ -1,166 +1,141 @@
1
  import gradio as gr
2
- import torch
 
3
  import json
4
  import os
5
- from transformers import AutoModelForCausalLM, AutoTokenizer
6
- from peft import PeftModel
7
- from huggingface_hub import InferenceClient
8
 
9
  # ==============================================================================
10
  # 1. CONFIGURATION
11
  # ==============================================================================
12
- # NOTE: You must set 'HF_TOKEN' in your Hugging Face Space Secrets!
13
- HF_TOKEN = os.getenv("HF_TOKEN")
14
 
15
- PROJECT_TITLE = "The Janus Interface: Semantic Decoupling Architecture"
16
-
17
- # Models
18
- # We use the official Microsoft repo for CPU compatibility
19
- BASE_MODEL_ID = "microsoft/Phi-3.5-mini-instruct"
20
- ADAPTER_ID = "st192011/janus-gold-lora"
21
- CLOUD_MODEL_ID = "meta-llama/Meta-Llama-3-8B-Instruct"
22
 
23
  # ==============================================================================
24
- # 2. ENGINE INITIALIZATION (CPU Optimized)
25
  # ==============================================================================
26
- print("⏳ Initializing Neural Backbone (CPU Mode)...")
 
 
 
 
 
 
 
 
 
 
27
 
 
28
  try:
29
- # Load Tokenizer
30
- tokenizer = AutoTokenizer.from_pretrained(BASE_MODEL_ID)
31
-
32
- # Load Base Model (bfloat16 saves RAM on Free Tier Spaces)
33
- base_model = AutoModelForCausalLM.from_pretrained(
34
- BASE_MODEL_ID,
35
- torch_dtype=torch.bfloat16,
36
- device_map="cpu",
37
- trust_remote_code=True
38
  )
39
-
40
- # Load Adapter
41
- print(f"⏳ Mounting Janus Adapter ({ADAPTER_ID})...")
42
- model = PeftModel.from_pretrained(base_model, ADAPTER_ID)
43
- model.eval() # Set to inference mode
44
  print("✅ System Online.")
45
-
46
  except Exception as e:
47
- print(f"❌ Error loading model: {e}")
48
  raise e
49
 
50
- # Cloud Client
51
- hf_client = InferenceClient(model=CLOUD_MODEL_ID, token=HF_TOKEN)
52
-
53
  # ==============================================================================
54
  # 3. KERNEL LOGIC
55
  # ==============================================================================
56
 
57
  def clean_output(text):
58
  """Sanitizes output to prevent chain-reaction failures."""
59
- # Remove special tokens
60
  clean = text.replace("<|end|>", "").replace("<|endoftext|>", "")
61
-
62
- # Remove conversational filler lines
63
  if "Output:" in clean: clean = clean.split("Output:")[-1]
64
 
65
  lines = clean.split('\n')
66
- # Keep lines that look like protocol code or normal text, remove "Here is..."
67
  valid_lines = [line for line in lines if "Note" not in line and "Here is" not in line]
68
  return " ".join(valid_lines).strip()
69
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
70
  def kernel_scout(raw_input):
71
- """Mode A: Local Logic Extraction"""
72
  try:
73
- prompt = f"""<|system|>
74
- SYSTEM_ROLE: Janus Extractor.
75
  TASK: Refactor clinical notes into JanusScript Logic.
76
  SYNTAX: Object.action(params) -> Result.
77
  OBJECTS: Hx, Sx, Dx, Tx, Lab, Crs, Plan.
78
- CONSTRAINTS: No PII. Use relative time (Day1, Day2).
79
- <|end|>
80
- <|user|>
81
- RAW NOTE:
82
- {raw_input}<|end|>
83
- <|assistant|>"""
84
-
85
- inputs = tokenizer(prompt, return_tensors="pt")
86
 
87
- with torch.no_grad():
88
- outputs = model.generate(
89
- **inputs,
90
- max_new_tokens=256,
91
- temperature=0.1,
92
- do_sample=True,
93
- use_cache=False
94
- )
95
 
96
- text = tokenizer.batch_decode(outputs)[0]
97
- raw_output = text.split("<|assistant|>")[-1]
98
  return clean_output(raw_output)
99
- except Exception as e: return f"Error: {str(e)}"
100
-
101
- def kernel_cloud_expert(scenario_prompt):
102
- """Mode B: Cloud Bridge"""
103
- try:
104
- sys_prompt = """You are a Clinical Logic Engine.
105
- Task: Convert the scenario into 'JanusScript' code.
106
- Syntax: Object.action(parameter);
107
- Objects: Dx, Sx, Tx, Lab, Plan.
108
- Rules: No PII. Use PascalCase.
109
-
110
- Example:
111
- Input: Pt has pneumonia. Given antibiotics.
112
- Output: Dx(Pneumonia); Sx(Fever+Cough); Tx(Meds).action(Antibiotics); Plan(Discharge.Home);"""
113
-
114
- messages = [
115
- {"role": "system", "content": sys_prompt},
116
- {"role": "user", "content": f"Input: {scenario_prompt}"}
117
- ]
118
-
119
- response = hf_client.chat_completion(messages, max_tokens=512, temperature=0.1)
120
- return clean_output(response.choices[0].message.content)
121
- except Exception as e: return f"API Error: {str(e)}"
122
 
123
  def kernel_vault(protocol, secure_json):
124
- """Shared Terminal: Reconstruction"""
125
  try:
126
  try: db_str = json.dumps(json.loads(secure_json), ensure_ascii=False)
127
  except: return "❌ Error: Invalid JSON."
128
 
129
- prompt = f"""<|system|>
130
- SYSTEM_ROLE: Janus Constructor.
131
  TASK: Interpret JanusScript and PrivateDB to write Discharge Summary.
132
- TEMPLATE: Header -> Dates -> History -> Hospital Course -> Plan.
133
- <|end|>
134
- <|user|>
135
- PROTOCOL:
136
- {protocol}
137
-
138
- PRIVATE_DB:
139
- {db_str}<|end|>
140
- <|assistant|>"""
141
-
142
- inputs = tokenizer(prompt, return_tensors="pt")
143
 
144
- with torch.no_grad():
145
- outputs = model.generate(
146
- **inputs,
147
- max_new_tokens=1024,
148
- temperature=0.1,
149
- repetition_penalty=1.05,
150
- do_sample=True,
151
- use_cache=False
152
- )
153
 
154
- text = tokenizer.batch_decode(outputs)[0]
155
- doc = text.split("<|assistant|>")[-1].replace("<|end|>", "").replace("<|endoftext|>", "").strip()
156
- return doc
157
- except Exception as e: return f"Error: {str(e)}"
158
 
159
  # ==============================================================================
160
- # 4. DEMO SAMPLES
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
161
  # ==============================================================================
162
 
163
- # Case 1: Appendicitis (Local)
164
  sample_note = """Pt ID 8899-A.
165
  History: 28yo male presented with RLQ pain & fever.
166
  Workup: CT scan confirmed acute appy.
@@ -178,133 +153,43 @@ sample_db = """{
178
  "prov_specialty": "General Surgery"
179
  }"""
180
 
181
- # Case 2: Sepsis (Cloud)
182
- sample_scenario = """Patient admitted for Urosepsis.
183
- Culture: E. coli resistant to Cipro.
184
- Treatment: Started on Zosyn IV. Transferred to ICU for one day for hypotension.
185
- Transition: Switched to oral Augmentin.
186
- Outcome: Stable, Afebrile. Discharge to finish 14 day course."""
187
-
188
- sample_db_cloud = """{
189
- "pt_name": "Sarah Connor",
190
- "pt_mrn": "SKY-NET",
191
- "pt_dob": "1965-05-10",
192
- "pt_sex": "F",
193
- "adm_date": "2025-12-01",
194
- "dis_date": "2025-12-05",
195
- "prov_attending": "Dr. Silberman",
196
- "prov_specialty": "Internal Medicine"
197
- }"""
198
-
199
  # ==============================================================================
200
- # 5. TECHNICAL REPORT
201
  # ==============================================================================
202
- report_md = """
203
- # 🏛️ The Janus Interface: Research & Technical Analysis
204
- **Project Status:** Research Prototype v2.0 (Gold Standard)
205
-
206
- ---
207
-
208
- ### 1. Research Motivation: The Privacy-Utility Paradox
209
- In regulated domains (Healthcare, Legal, Finance), Generative AI adoption is stalled by a fundamental conflict:
210
- * **Utility:** Large Cloud Models (GPT-4, Claude) offer superior reasoning but require sending data off-premise.
211
- * **Privacy:** Local Small Models (SLMs) ensure data sovereignty but often lack deep domain knowledge.
212
- * **The Solution:** **Semantic Decoupling**. We propose separating the **"Logic"** of a case from the **"Identity"** of the subject.
213
-
214
- ### 2. Architectural Design: The Twin-Protocol
215
- The system utilizes a **Multi-Task Adapter** trained to switch between two distinct cognitive modes based on the System Prompt.
216
-
217
- #### **Mode A: The Scout (Logic Extractor)**
218
- * **Function:** Reads raw, messy clinical notes.
219
- * **Constraint:** Trained via Loss Masking to extract *only* clinical entities (`Dx`, `Tx`, `Plan`) into a sanitized code string called **JanusScript**.
220
- * **Security:** It treats names, dates, and locations as noise to be discarded.
221
-
222
- #### **Mode B: The Cloud Bridge (Knowledge Injection)**
223
- * **Function:** Allows an external Cloud LLM to reason about a generic, anonymized scenario.
224
- * **Innovation:** The Cloud Model generates the **JanusScript** code. This code acts as a firewall—no PII ever leaves the local environment, but the *intelligence* of the cloud is captured in the script.
225
-
226
- #### **The Vault (Reconstructor)**
227
- * **Function:** A secure, offline engine that accepts the JanusScript and a Local SQL Database record.
228
- * **Output:** It merges the abstract logic with the concrete identity to generate the final, human-readable document.
229
-
230
- ---
231
-
232
- ### 3. Data Engineering: The "Gold Standard" Pipeline
233
- To achieve high fidelity without using private patient data, we developed a **Synthesized Data Pipeline**:
234
-
235
- 1. **Synthesis:** We generated **306 high-quality clinical scenarios** using Large Language Models (LLMs).
236
- 2. **Alignment:** Unlike previous iterations where headers were random, this dataset ensured strict mathematical alignment between the Identity Header (Age/Sex) and the Clinical Narrative.
237
- 3. **Result:** This eliminated the "hallucination" issues seen in earlier tests where the model would confuse patient gender or age due to conflicting training signals.
238
-
239
- ### 4. Training Methodology
240
- * **Base Model:** Microsoft Phi-3.5-mini-instruct (3.8B Parameters).
241
- * **Framework:** **Unsloth** (Optimized QLoRA).
242
- * **Technique:** **DoRA (Weight-Decomposed Low-Rank Adaptation)**.
243
- * *Why DoRA?* Standard LoRA struggles with strict syntax/coding tasks. DoRA updates both magnitude and direction vectors, allowing the model to learn the strict `JanusScript` grammar effectively.
244
- * **Loss Masking:** We used `train_on_responses_only`. The model was **never** trained on the input text, only on the output. This prevents the model from memorizing patient PII from the training set.
245
- * **Hyperparameters:** Rank 16, Alpha 16, Learning Rate 2e-4, **2 Epochs** (approx 78 steps used for final checkpoint).
246
-
247
- ### 5. Results & Conclusion
248
- * **Zero-Trust Validation:** The "Vault" successfully reconstructs documents using *only* the database for identity.
249
- * **Semantic Expansion:** The model demonstrates the ability to take a concise code (`Dx(Pneumonia)`) and expand it into fluent medical narrative ("Patient presented with symptoms consistent with Pneumonia...").
250
- """
251
-
252
- # ==============================================================================
253
- # 6. LAUNCHER
254
- # ==============================================================================
255
- with gr.Blocks(theme=gr.themes.Soft(primary_hue="emerald"), title=PROJECT_TITLE) as demo:
256
  gr.Markdown(f"# 🏛️ {PROJECT_TITLE}")
257
 
258
  with gr.Tabs():
259
 
260
- # --- TAB 1 ---
261
- with gr.TabItem("🛡️ Mode A: Local Air-Gap"):
262
- with gr.Row():
263
- with gr.Column(scale=1):
264
- inp_a = gr.Textbox(label="Raw Sensitive Note", lines=12, value=sample_note)
265
- btn_a = gr.Button("Execute Scout (Local) ➔", variant="primary")
266
-
267
- with gr.Column(scale=1):
268
- out_proto_a = gr.Textbox(label="JanusScript Protocol", lines=6, interactive=True)
269
-
270
- gr.Markdown("---")
271
 
272
  with gr.Row():
273
  with gr.Column(scale=1):
274
- inp_db_a = gr.Textbox(label="Secure Identity Record", lines=12, value=sample_db)
275
- btn_final_a = gr.Button("Execute Vault (Local) ➔", variant="secondary")
276
 
277
  with gr.Column(scale=1):
278
- out_final_a = gr.Textbox(label="Output: Reconstructed Document", lines=25)
279
-
280
- btn_a.click(kernel_scout, inputs=inp_a, outputs=out_proto_a)
281
- btn_final_a.click(kernel_vault, inputs=[out_proto_a, inp_db_a], outputs=out_final_a)
282
-
283
- # --- TAB 2 ---
284
- with gr.TabItem("🧠 Mode B: Cloud Bridge"):
285
- with gr.Row():
286
- with gr.Column(scale=1):
287
- inp_b = gr.Textbox(label="Clinical Scenario (Anonymized)", lines=12, value=sample_scenario)
288
- btn_b = gr.Button("Execute Cloud API (Llama-3) ➔", variant="primary")
289
-
290
- with gr.Column(scale=1):
291
- out_proto_b = gr.Textbox(label="JanusScript Protocol", lines=6, interactive=True)
292
 
293
  gr.Markdown("---")
294
 
295
  with gr.Row():
296
  with gr.Column(scale=1):
297
- inp_db_b = gr.Textbox(label="Secure Identity Record", lines=12, value=sample_db_cloud)
298
- btn_final_b = gr.Button("Execute Vault (Local) ➔", variant="secondary")
299
 
300
  with gr.Column(scale=1):
301
- out_final_b = gr.Textbox(label="Output: Reconstructed Document", lines=25)
302
-
303
- btn_b.click(kernel_cloud_expert, inputs=inp_b, outputs=out_proto_b)
304
- btn_final_b.click(kernel_vault, inputs=[out_proto_b, inp_db_b], outputs=out_final_b)
 
305
 
306
- # --- TAB 3 ---
307
- with gr.TabItem("📄 Technical Report"):
308
  gr.Markdown(report_md)
309
 
310
  demo.launch()
 
1
  import gradio as gr
2
+ from llama_cpp import Llama
3
+ from huggingface_hub import hf_hub_download
4
  import json
5
  import os
 
 
 
6
 
7
  # ==============================================================================
8
  # 1. CONFIGURATION
9
  # ==============================================================================
10
HF_TOKEN = os.getenv("HF_TOKEN")
PROJECT_TITLE = "The Janus Interface: Local Air-Gap Architecture"

# GGUF Model (The Unified Adapter you converted)
REPO_ID = "st192011/janus-unified-gguf"
FILENAME = "janus_unified_q4_k_m.gguf"

# ==============================================================================
# 2. ENGINE INITIALIZATION
# ==============================================================================
print("⏳ Downloading GGUF Model...")
try:
    # Fetch the quantized weights from the Hub; HF_TOKEN is needed for
    # private repos and must be set in the Space secrets.
    model_path = hf_hub_download(
        repo_id=REPO_ID,
        filename=FILENAME,
        token=HF_TOKEN
    )
    print(f"✅ Model downloaded to: {model_path}")
except Exception as e:
    print(f"❌ Error downloading model: {e}")
    # Bare raise re-raises the active exception without rewriting its
    # traceback (idiomatic; `raise e` added a redundant frame).
    raise

print("⏳ Loading Llama.cpp Engine...")
try:
    # n_ctx=4096 matches training context
    llm = Llama(
        model_path=model_path,
        n_ctx=4096,
        n_threads=2,  # Optimized for Free Tier
        verbose=False
    )
    print("✅ System Online.")
except Exception as e:
    print(f"❌ Error initializing Llama.cpp: {e}")
    raise
45
 
 
 
 
46
  # ==============================================================================
47
  # 3. KERNEL LOGIC
48
  # ==============================================================================
49
 
50
def clean_output(text):
    """Sanitizes output to prevent chain-reaction failures."""
    # Strip leftover Phi-3 special tokens from the generation.
    sanitized = text.replace("<|end|>", "").replace("<|endoftext|>", "")

    # If the model echoed an "Output:" label, keep only what follows it.
    if "Output:" in sanitized:
        sanitized = sanitized.split("Output:")[-1]

    # Drop conversational filler lines ("Note ...", "Here is ...").
    kept = [
        ln for ln in sanitized.split('\n')
        if "Note" not in ln and "Here is" not in ln
    ]
    return " ".join(kept).strip()
58
 
59
def generate_response(system_prompt, user_input, max_tokens=512):
    """Helper for GGUF Generation: wrap the Phi-3 chat template and run llama.cpp."""
    # Phi-3 instruct format: <|system|> ... <|end|> <|user|> ... <|assistant|>
    prompt = (
        f"<|system|>\n{system_prompt}<|end|>\n"
        f"<|user|>\n{user_input}<|end|>\n"
        f"<|assistant|>\n"
    )

    completion = llm(
        prompt,
        max_tokens=max_tokens,
        temperature=0.1,
        top_p=0.95,
        stop=["<|end|>", "<|endoftext|>"],  # halt at end-of-turn tokens
        echo=False,  # return only the completion, not the prompt
    )
    return completion['choices'][0]['text']
72
+
73
def kernel_scout(raw_input):
    """Role: The Scout (Sanitizer): turn a raw clinical note into anonymous JanusScript."""
    try:
        # SYSTEM_ROLE selects the "Extractor" behavior of the multi-task adapter.
        sys_prompt = """SYSTEM_ROLE: Janus Extractor.
TASK: Refactor clinical notes into JanusScript Logic.
SYNTAX: Object.action(params) -> Result.
OBJECTS: Hx, Sx, Dx, Tx, Lab, Crs, Plan.
CONSTRAINTS: No PII. Use relative time (Day1, Day2)."""

        raw_output = generate_response(
            sys_prompt,
            f"RAW NOTE:\n{raw_input}",
            max_tokens=256,
        )
        return clean_output(raw_output)
    except Exception as e:
        return f"Scout Error: {str(e)}"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
87
 
88
def kernel_vault(protocol, secure_json):
    """Role: The Vault (Reconstructor): merge the anonymous protocol with local identity data.

    Args:
        protocol: JanusScript string produced by the Scout.
        secure_json: JSON string holding the private identity record.

    Returns:
        The reconstructed discharge summary, or a human-readable error string.
    """
    try:
        # Validate/normalize the private DB before spending tokens on generation.
        # Narrow except: the old bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt. TypeError covers non-string input to json.loads.
        try:
            db_str = json.dumps(json.loads(secure_json), ensure_ascii=False)
        except (json.JSONDecodeError, TypeError):
            return "❌ Error: Invalid JSON."

        sys_prompt = """SYSTEM_ROLE: Janus Constructor.
TASK: Interpret JanusScript and PrivateDB to write Discharge Summary.
TEMPLATE: Header -> Dates -> History -> Hospital Course -> Plan."""

        user_input = f"PROTOCOL:\n{protocol}\n\nPRIVATE_DB:\n{db_str}"

        raw_output = generate_response(sys_prompt, user_input, max_tokens=1024)
        return clean_output(raw_output)
    except Exception as e:
        return f"Vault Error: {str(e)}"
 
103
 
104
  # ==============================================================================
105
+ # 4. TECHNICAL REPORT (Corrected)
106
+ # ==============================================================================
107
# Markdown body rendered in the "Technical Analysis" tab.
report_md = """
# 🏛️ The Janus Interface: Technical Analysis
**Research Prototype v2.0**

### 1. Architectural Goal: Zero-Trust Processing
The objective is to process sensitive clinical data using Generative AI without exposing **Private Identity Information (PII)** to the reasoning engine. We achieve this via **Semantic Decoupling**:
1. **The Scout (Logic Layer):** Extracts clinical entities (`DX`, `PROC`) into an anonymous Protocol.
2. **The Air Gap:** Only the Protocol string crosses the boundary.
3. **The Vault (Context Layer):** Reconstructs the document by merging the Protocol with a local SQL Database.

### 2. Training Methodology
* **Model:** Microsoft Phi-3.5-mini (3.8B Parameters).
* **Technique:** Multi-Task Instruction Tuning using Unsloth (QLoRA + DoRA).
* **Dataset:** 306 Fully Synthesized Clinical Scenarios.
* **Strategy:** The model was trained to switch behaviors based on the `SYSTEM_ROLE` prompt ("Extractor" vs "Constructor").

### 3. Data Engineering: Synthetic Alignment Pipeline
To eliminate the hallucinations observed in earlier iterations (where the model confused patient gender or age), we utilized a **Fully Synthetic Data Pipeline**:
1. **Generation:** We used Large Language Models (LLMs) to generate 306 unique clinical scenarios.
2. **Alignment:** Unlike real-world datasets where headers might be missing or mismatched, this synthetic dataset ensured strict mathematical consistency between the Identity Header (Age/Sex) and the Clinical Narrative.
3. **Result:** This alignment forced the model to respect the Database variables during inference, successfully resolving the "Gender Mismatch" and "Age Hallucination" issues.

### 4. Limitations & Constraints
* **Vocabulary:** The model is trained on a specific subset of medical cases (Surgery, Internal Medicine). It may struggle with out-of-distribution specialties (e.g., Oncology, Psychiatry) not present in the 306 samples.
* **Compression Loss:** The "Scout" layer intentionally compresses text. Nuanced emotional context or very specific non-medical details in the raw note may be lost during the conversion to JanusScript.
"""
133
+
134
+ # ==============================================================================
135
+ # 5. DEMO SAMPLES
136
  # ==============================================================================
137
 
138
+ # Case: Appendicitis (Matches Training Distribution)
139
  sample_note = """Pt ID 8899-A.
140
  History: 28yo male presented with RLQ pain & fever.
141
  Workup: CT scan confirmed acute appy.
 
153
  "prov_specialty": "General Surgery"
154
  }"""
155
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
156
  # ==============================================================================
157
+ # 6. GRADIO LAUNCHER
158
  # ==============================================================================
159
with gr.Blocks(theme=gr.themes.Soft(primary_hue="zinc"), title=PROJECT_TITLE) as demo:
    gr.Markdown(f"# 🏛️ {PROJECT_TITLE}")

    with gr.Tabs():

        # --- TAB 1: DEMO ---
        with gr.TabItem("🛡️ Live Demo"):
            gr.Markdown("### Zero-Trust Clinical Documentation")

            # Step 1/2: raw note in, anonymous protocol out (the Scout).
            with gr.Row():
                with gr.Column(scale=1):
                    inp_scout = gr.Textbox(label="1. Raw Sensitive Note (Unsecured)", lines=12, value=sample_note)
                    btn_scout = gr.Button("Execute Scout (Extract Logic) ➔", variant="primary")

                with gr.Column(scale=1):
                    out_proto = gr.Textbox(label="2. Intermediate Protocol (The Air-Gap)", lines=6, interactive=True,
                                           info="This code string contains NO identity data.")

            gr.Markdown("---")

            # Step 3/4: protocol + local identity record in, final document out (the Vault).
            with gr.Row():
                with gr.Column(scale=1):
                    inp_db = gr.Textbox(label="3. Secure Identity Record (Local DB)", lines=12, value=sample_db)
                    btn_vault = gr.Button("Execute Vault (Reconstruct) ➔", variant="secondary")

                with gr.Column(scale=1):
                    out_final = gr.Textbox(label="4. Final Document", lines=25)

            # Wiring
            btn_scout.click(kernel_scout, inputs=inp_scout, outputs=out_proto)
            btn_vault.click(kernel_vault, inputs=[out_proto, inp_db], outputs=out_final)

        # --- TAB 2: REPORT ---
        with gr.TabItem("📄 Technical Analysis"):
            gr.Markdown(report_md)

demo.launch()