st192011 committed on
Commit
3ab903a
·
verified ·
1 Parent(s): 4c3fbce

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +217 -104
app.py CHANGED
@@ -1,141 +1,164 @@
1
  import gradio as gr
2
- from llama_cpp import Llama
3
- from huggingface_hub import hf_hub_download
4
  import json
5
  import os
 
 
 
6
 
7
  # ==============================================================================
8
  # 1. CONFIGURATION
9
  # ==============================================================================
10
HF_TOKEN = os.getenv("HF_TOKEN")  # access token from Space secrets (may be None if unset)
PROJECT_TITLE = "The Janus Interface: Local Air-Gap Architecture"

# GGUF Model (The Unified Adapter you converted)
REPO_ID = "st192011/janus-unified-gguf"  # Hub repo hosting the quantized model
FILENAME = "janus_unified_q4_k_m.gguf"   # Q4_K_M quantized weights file
 
 
 
 
16
 
17
  # ==============================================================================
18
- # 2. ENGINE INITIALIZATION
19
  # ==============================================================================
20
print("⏳ Downloading GGUF Model...")
try:
    # Fetch the quantized weights from the Hub (cached after the first run).
    model_path = hf_hub_download(
        repo_id=REPO_ID,
        filename=FILENAME,
        token=HF_TOKEN
    )
    print(f"✅ Model downloaded to: {model_path}")
except Exception as e:
    # Fail fast: the app is useless without the weights.
    print(f"❌ Error downloading model: {e}")
    raise e

print("⏳ Loading Llama.cpp Engine...")
try:
    # n_ctx=4096 matches training context
    llm = Llama(
        model_path=model_path,
        n_ctx=4096,
        n_threads=2,  # Optimized for Free Tier
        verbose=False
    )
    print("✅ System Online.")
except Exception as e:
    print(f"❌ Error initializing Llama.cpp: {e}")
    raise e
45
 
 
 
 
46
  # ==============================================================================
47
  # 3. KERNEL LOGIC
48
  # ==============================================================================
49
 
50
def clean_output(text):
    """Sanitizes output to prevent chain-reaction failures."""
    # Strip chat-template terminator tokens.
    sanitized = text.replace("<|end|>", "").replace("<|endoftext|>", "")

    # Drop any conversational preamble before the last "Output:" marker.
    if "Output:" in sanitized:
        _, _, sanitized = sanitized.rpartition("Output:")

    # Discard filler lines ("Note ...", "Here is ...") and flatten the rest.
    kept = [
        line
        for line in sanitized.split("\n")
        if "Note" not in line and "Here is" not in line
    ]
    return " ".join(kept).strip()
58
 
59
def generate_response(system_prompt, user_input, max_tokens=512):
    """Run one Phi-style chat turn through the GGUF engine.

    Builds a <|system|>/<|user|>/<|assistant|> prompt, samples greedily-ish
    (low temperature) and returns the raw completion text.
    """
    chat_prompt = (
        f"<|system|>\n{system_prompt}<|end|>\n"
        f"<|user|>\n{user_input}<|end|>\n"
        f"<|assistant|>\n"
    )

    completion = llm(
        chat_prompt,
        max_tokens=max_tokens,
        temperature=0.1,
        top_p=0.95,
        stop=["<|end|>", "<|endoftext|>"],
        echo=False,
    )
    return completion["choices"][0]["text"]
72
-
73
def kernel_scout(raw_input):
    """Role: The Scout (Sanitizer).

    Compresses a raw clinical note into an anonymized JanusScript protocol
    via the local engine; returns an error string instead of raising so the
    UI stays responsive.
    """
    try:
        sys_prompt = """SYSTEM_ROLE: Janus Extractor.
TASK: Refactor clinical notes into JanusScript Logic.
SYNTAX: Object.action(params) -> Result.
OBJECTS: Hx, Sx, Dx, Tx, Lab, Crs, Plan.
CONSTRAINTS: No PII. Use relative time (Day1, Day2)."""

        framed_note = f"RAW NOTE:\n{raw_input}"
        completion = generate_response(sys_prompt, framed_note, max_tokens=256)
        return clean_output(completion)
    except Exception as exc:
        return f"Scout Error: {str(exc)}"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
87
 
88
def kernel_vault(protocol, secure_json):
    """Role: The Vault (Reconstructor).

    Merges an anonymized JanusScript protocol with a local identity record
    and asks the local engine to write the final discharge summary.

    Args:
        protocol: JanusScript string produced by the Scout.
        secure_json: JSON text holding the private identity fields.

    Returns:
        The reconstructed document, or a human-readable error string.
    """
    try:
        # Normalize/validate the identity record before it enters the prompt.
        # json.JSONDecodeError is a ValueError; the previous bare `except:`
        # also swallowed KeyboardInterrupt/SystemExit, so narrow the catch.
        try:
            db_str = json.dumps(json.loads(secure_json), ensure_ascii=False)
        except (ValueError, TypeError):
            return "❌ Error: Invalid JSON."

        sys_prompt = """SYSTEM_ROLE: Janus Constructor.
TASK: Interpret JanusScript and PrivateDB to write Discharge Summary.
TEMPLATE: Header -> Dates -> History -> Hospital Course -> Plan."""

        user_input = f"PROTOCOL:\n{protocol}\n\nPRIVATE_DB:\n{db_str}"

        raw_output = generate_response(sys_prompt, user_input, max_tokens=1024)
        return clean_output(raw_output)
    except Exception as e:
        return f"Vault Error: {str(e)}"
 
103
 
104
  # ==============================================================================
105
- # 4. TECHNICAL REPORT (Corrected)
106
- # ==============================================================================
107
- report_md = """
108
- # 🏛️ The Janus Interface: Technical Analysis
109
- **Research Prototype v2.0**
110
-
111
- ### 1. Architectural Goal: Zero-Trust Processing
112
- The objective is to process sensitive clinical data using Generative AI without exposing **Private Identity Information (PII)** to the reasoning engine. We achieve this via **Semantic Decoupling**:
113
- 1. **The Scout (Logic Layer):** Extracts clinical entities (`DX`, `PROC`) into an anonymous Protocol.
114
- 2. **The Air Gap:** Only the Protocol string crosses the boundary.
115
- 3. **The Vault (Context Layer):** Reconstructs the document by merging the Protocol with a local SQL Database.
116
-
117
- ### 2. Training Methodology
118
- * **Model:** Microsoft Phi-3.5-mini (3.8B Parameters).
119
- * **Technique:** Multi-Task Instruction Tuning using Unsloth (QLoRA + DoRA).
120
- * **Dataset:** 306 Fully Synthesized Clinical Scenarios.
121
- * **Strategy:** The model was trained to switch behaviors based on the `SYSTEM_ROLE` prompt ("Extractor" vs "Constructor").
122
-
123
- ### 3. Data Engineering: Synthetic Alignment Pipeline
124
- To eliminate the hallucinations observed in earlier iterations (where the model confused patient gender or age), we utilized a **Fully Synthetic Data Pipeline**:
125
- 1. **Generation:** We used Large Language Models (LLMs) to generate 306 unique clinical scenarios.
126
- 2. **Alignment:** Unlike real-world datasets where headers might be missing or mismatched, this synthetic dataset ensured strict mathematical consistency between the Identity Header (Age/Sex) and the Clinical Narrative.
127
- 3. **Result:** This alignment forced the model to respect the Database variables during inference, successfully resolving the "Gender Mismatch" and "Age Hallucination" issues.
128
-
129
- ### 4. Limitations & Constraints
130
- * **Vocabulary:** The model is trained on a specific subset of medical cases (Surgery, Internal Medicine). It may struggle with out-of-distribution specialties (e.g., Oncology, Psychiatry) not present in the 306 samples.
131
- * **Compression Loss:** The "Scout" layer intentionally compresses text. Nuanced emotional context or very specific non-medical details in the raw note may be lost during the conversion to JanusScript.
132
- """
133
-
134
- # ==============================================================================
135
- # 5. DEMO SAMPLES
136
  # ==============================================================================
137
 
138
- # Case: Appendicitis (Matches Training Distribution)
139
  sample_note = """Pt ID 8899-A.
140
  History: 28yo male presented with RLQ pain & fever.
141
  Workup: CT scan confirmed acute appy.
@@ -153,43 +176,133 @@ sample_db = """{
153
  "prov_specialty": "General Surgery"
154
  }"""
155
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
156
  # ==============================================================================
157
- # 6. GRADIO LAUNCHER
158
  # ==============================================================================
159
- with gr.Blocks(theme=gr.themes.Soft(primary_hue="zinc"), title=PROJECT_TITLE) as demo:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
160
  gr.Markdown(f"# 🏛️ {PROJECT_TITLE}")
161
 
162
  with gr.Tabs():
163
 
164
- # --- TAB 1: DEMO ---
165
- with gr.TabItem("🛡️ Live Demo"):
166
- gr.Markdown("### Zero-Trust Clinical Documentation")
167
-
168
  with gr.Row():
169
  with gr.Column(scale=1):
170
- inp_scout = gr.Textbox(label="1. Raw Sensitive Note (Unsecured)", lines=12, value=sample_note)
171
- btn_scout = gr.Button("Execute Scout (Extract Logic) ➔", variant="primary")
172
 
173
  with gr.Column(scale=1):
174
- out_proto = gr.Textbox(label="2. Intermediate Protocol (The Air-Gap)", lines=6, interactive=True,
175
- info="This code string contains NO identity data.")
176
 
177
  gr.Markdown("---")
178
 
179
  with gr.Row():
180
  with gr.Column(scale=1):
181
- inp_db = gr.Textbox(label="3. Secure Identity Record (Local DB)", lines=12, value=sample_db)
182
- btn_vault = gr.Button("Execute Vault (Reconstruct) ➔", variant="secondary")
 
 
 
 
 
 
 
 
 
 
 
 
 
183
 
184
  with gr.Column(scale=1):
185
- out_final = gr.Textbox(label="4. Final Document", lines=25)
186
 
187
- # Wiring
188
- btn_scout.click(kernel_scout, inputs=inp_scout, outputs=out_proto)
189
- btn_vault.click(kernel_vault, inputs=[out_proto, inp_db], outputs=out_final)
 
 
 
 
 
 
 
 
 
190
 
191
- # --- TAB 2: REPORT ---
192
- with gr.TabItem("📄 Technical Analysis"):
193
  gr.Markdown(report_md)
194
 
195
  demo.launch()
 
1
  import gradio as gr
2
+ import torch
 
3
  import json
4
  import os
5
+ from transformers import AutoModelForCausalLM, AutoTokenizer
6
+ from peft import PeftModel
7
+ from huggingface_hub import InferenceClient
8
 
9
# ==============================================================================
# 1. CONFIGURATION
# ==============================================================================
# NOTE: You must set 'HF_TOKEN' in your Hugging Face Space Secrets!
HF_TOKEN = os.getenv("HF_TOKEN")  # may be None if the secret is missing

PROJECT_TITLE = "The Janus Interface: Semantic Decoupling Architecture"

# Models
# We use the official Microsoft repo for CPU compatibility
BASE_MODEL_ID = "microsoft/Phi-3.5-mini-instruct"       # local 3.8B backbone
ADAPTER_ID = "st192011/janus-gold-lora"                 # LoRA adapter mounted on the backbone
CLOUD_MODEL_ID = "meta-llama/Meta-Llama-3-8B-Instruct"  # remote model for the Cloud Bridge tab
22
 
23
# ==============================================================================
# 2. ENGINE INITIALIZATION (CPU Optimized)
# ==============================================================================
print("⏳ Initializing Neural Backbone (CPU Mode)...")

try:
    # Load Tokenizer
    tokenizer = AutoTokenizer.from_pretrained(BASE_MODEL_ID)

    # Load Base Model (bfloat16 saves RAM on Free Tier Spaces)
    # NOTE(review): bfloat16 on CPU can be slow for some ops — confirm this
    # trade-off is intentional for the target Space hardware.
    base_model = AutoModelForCausalLM.from_pretrained(
        BASE_MODEL_ID,
        torch_dtype=torch.bfloat16,
        device_map="cpu",
        trust_remote_code=True
    )

    # Load Adapter
    print(f"⏳ Mounting Janus Adapter ({ADAPTER_ID})...")
    model = PeftModel.from_pretrained(base_model, ADAPTER_ID)
    model.eval()  # Set to inference mode (disables dropout)
    print("✅ System Online.")

except Exception as e:
    # Fail fast — the UI cannot function without the local model.
    print(f"❌ Error loading model: {e}")
    raise e

# Cloud Client (remote Inference API; used only by the Cloud Bridge tab)
hf_client = InferenceClient(model=CLOUD_MODEL_ID, token=HF_TOKEN)
52
+
53
  # ==============================================================================
54
  # 3. KERNEL LOGIC
55
  # ==============================================================================
56
 
57
def clean_output(text):
    """Sanitizes output to prevent chain-reaction failures."""
    # Strip chat-template terminator tokens.
    sanitized = text.replace("<|end|>", "").replace("<|endoftext|>", "")

    # Drop any conversational preamble before the last "Output:" marker.
    if "Output:" in sanitized:
        _, _, sanitized = sanitized.rpartition("Output:")

    # Discard filler lines ("Note ...", "Here is ...") and flatten the rest.
    kept = [
        line
        for line in sanitized.split("\n")
        if "Note" not in line and "Here is" not in line
    ]
    return " ".join(kept).strip()
69
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
70
def kernel_scout(raw_input):
    """Mode A: Local Logic Extraction.

    Runs the locally mounted Janus adapter to refactor a raw clinical note
    into an anonymized JanusScript protocol. The note (which may contain PII)
    never leaves this process.

    Args:
        raw_input: Free-text clinical note.

    Returns:
        The sanitized JanusScript string, or an "Error: ..." message so the
        Gradio UI degrades gracefully instead of raising.
    """
    try:
        prompt = f"""<|system|>
SYSTEM_ROLE: Janus Extractor.
TASK: Refactor clinical notes into JanusScript Logic.
SYNTAX: Object.action(params) -> Result.
OBJECTS: Hx, Sx, Dx, Tx, Lab, Crs, Plan.
CONSTRAINTS: No PII. Use relative time (Day1, Day2).
<|end|>
<|user|>
RAW NOTE:
{raw_input}<|end|>
<|assistant|>"""

        inputs = tokenizer(prompt, return_tensors="pt")

        with torch.no_grad():
            outputs = model.generate(
                **inputs,
                max_new_tokens=256,
                temperature=0.1,
                do_sample=True,
            )

        # Decode only the newly generated tokens. Slicing by prompt length is
        # sturdier than splitting the full decode on "<|assistant|>", which
        # breaks if the tokenizer renders specials differently or the note
        # itself contains the marker.
        prompt_len = inputs["input_ids"].shape[1]
        raw_output = tokenizer.decode(outputs[0][prompt_len:], skip_special_tokens=True)
        return clean_output(raw_output)
    except Exception as e:
        return f"Error: {str(e)}"
99
+
100
def kernel_cloud_expert(scenario_prompt):
    """Mode B: Cloud Bridge.

    Sends an (already anonymized) scenario to the remote chat model and
    returns the JanusScript it produces; failures come back as strings so
    the UI never crashes.
    """
    try:
        sys_prompt = """You are a Clinical Logic Engine.
Task: Convert the scenario into 'JanusScript' code.
Syntax: Object.action(parameter);
Objects: Dx, Sx, Tx, Lab, Plan.
Rules: No PII. Use PascalCase.

Example:
Input: Pt has pneumonia. Given antibiotics.
Output: Dx(Pneumonia); Sx(Fever+Cough); Tx(Meds).action(Antibiotics); Plan(Discharge.Home);"""

        conversation = [
            {"role": "system", "content": sys_prompt},
            {"role": "user", "content": f"Input: {scenario_prompt}"},
        ]

        reply = hf_client.chat_completion(conversation, max_tokens=512, temperature=0.1)
        return clean_output(reply.choices[0].message.content)
    except Exception as exc:
        return f"API Error: {str(exc)}"
121
 
122
def kernel_vault(protocol, secure_json):
    """Shared Terminal: Reconstruction.

    Merges the anonymized JanusScript `protocol` with the locally supplied
    identity record `secure_json` and generates the final discharge summary
    on the local model — PII never leaves this process.

    Returns:
        The document text, or a human-readable error string.
    """
    try:
        # Validate/normalize the identity record before it enters the prompt.
        # json.JSONDecodeError is a ValueError; the previous bare `except:`
        # also swallowed KeyboardInterrupt/SystemExit, so narrow the catch.
        try:
            db_str = json.dumps(json.loads(secure_json), ensure_ascii=False)
        except (ValueError, TypeError):
            return "❌ Error: Invalid JSON."

        prompt = f"""<|system|>
SYSTEM_ROLE: Janus Constructor.
TASK: Interpret JanusScript and PrivateDB to write Discharge Summary.
TEMPLATE: Header -> Dates -> History -> Hospital Course -> Plan.
<|end|>
<|user|>
PROTOCOL:
{protocol}

PRIVATE_DB:
{db_str}<|end|>
<|assistant|>"""

        inputs = tokenizer(prompt, return_tensors="pt")

        with torch.no_grad():
            outputs = model.generate(
                **inputs,
                max_new_tokens=1024,
                temperature=0.1,
                repetition_penalty=1.05,
                do_sample=True,
            )

        # Decode only the newly generated tokens; skip_special_tokens replaces
        # the manual "<|end|>"/"<|endoftext|>" stripping and is sturdier than
        # splitting the full decode on the "<|assistant|>" marker.
        prompt_len = inputs["input_ids"].shape[1]
        doc = tokenizer.decode(outputs[0][prompt_len:], skip_special_tokens=True).strip()
        return doc
    except Exception as e:
        return f"Error: {str(e)}"
156
 
157
  # ==============================================================================
158
+ # 4. DEMO SAMPLES
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
159
  # ==============================================================================
160
 
161
+ # Case 1: Appendicitis (Local)
162
  sample_note = """Pt ID 8899-A.
163
  History: 28yo male presented with RLQ pain & fever.
164
  Workup: CT scan confirmed acute appy.
 
176
  "prov_specialty": "General Surgery"
177
  }"""
178
 
179
# Case 2: Sepsis (Cloud)
# Anonymized clinical scenario for the Cloud Bridge tab — deliberately
# contains no PII, so it is safe to send to the remote model.
sample_scenario = """Patient admitted for Urosepsis.
Culture: E. coli resistant to Cipro.
Treatment: Started on Zosyn IV. Transferred to ICU for one day for hypotension.
Transition: Switched to oral Augmentin.
Outcome: Stable, Afebrile. Discharge to finish 14 day course."""

# Matching local identity record; merged back in only by the (local) Vault.
sample_db_cloud = """{
"pt_name": "Sarah Connor",
"pt_mrn": "SKY-NET",
"pt_dob": "1965-05-10",
"pt_sex": "F",
"adm_date": "2025-12-01",
"dis_date": "2025-12-05",
"prov_attending": "Dr. Silberman",
"prov_specialty": "Internal Medicine"
}"""
196
+
197
# ==============================================================================
# 5. TECHNICAL REPORT
# ==============================================================================
# Static markdown rendered verbatim in the "Technical Report" tab.
report_md = """
# 🏛️ The Janus Interface: Research & Technical Analysis
**Project Status:** Research Prototype v2.0 (Gold Standard)

---

### 1. Research Motivation: The Privacy-Utility Paradox
In regulated domains (Healthcare, Legal, Finance), Generative AI adoption is stalled by a fundamental conflict:
* **Utility:** Large Cloud Models (GPT-4, Claude) offer superior reasoning but require sending data off-premise.
* **Privacy:** Local Small Models (SLMs) ensure data sovereignty but often lack deep domain knowledge.
* **The Solution:** **Semantic Decoupling**. We propose separating the **"Logic"** of a case from the **"Identity"** of the subject.

### 2. Architectural Design: The Twin-Protocol
The system utilizes a **Multi-Task Adapter** trained to switch between two distinct cognitive modes based on the System Prompt.

#### **Mode A: The Scout (Logic Extractor)**
* **Function:** Reads raw, messy clinical notes.
* **Constraint:** Trained via Loss Masking to extract *only* clinical entities (`Dx`, `Tx`, `Plan`) into a sanitized code string called **JanusScript**.
* **Security:** It treats names, dates, and locations as noise to be discarded.

#### **Mode B: The Cloud Bridge (Knowledge Injection)**
* **Function:** Allows an external Cloud LLM to reason about a generic, anonymized scenario.
* **Innovation:** The Cloud Model generates the **JanusScript** code. This code acts as a firewall—no PII ever leaves the local environment, but the *intelligence* of the cloud is captured in the script.

#### **The Vault (Reconstructor)**
* **Function:** A secure, offline engine that accepts the JanusScript and a Local SQL Database record.
* **Output:** It merges the abstract logic with the concrete identity to generate the final, human-readable document.

---

### 3. Data Engineering: The "Gold Standard" Pipeline
To achieve high fidelity without using private patient data, we developed a **Synthesized Data Pipeline**:

1. **Synthesis:** We generated **306 high-quality clinical scenarios** using Large Language Models (LLMs).
2. **Alignment:** Unlike previous iterations where headers were random, this dataset ensured strict mathematical alignment between the Identity Header (Age/Sex) and the Clinical Narrative.
3. **Result:** This eliminated the "hallucination" issues seen in earlier tests where the model would confuse patient gender or age due to conflicting training signals.

### 4. Training Methodology
* **Base Model:** Microsoft Phi-3.5-mini-instruct (3.8B Parameters).
* **Framework:** **Unsloth** (Optimized QLoRA).
* **Technique:** **DoRA (Weight-Decomposed Low-Rank Adaptation)**.
    * *Why DoRA?* Standard LoRA struggles with strict syntax/coding tasks. DoRA updates both magnitude and direction vectors, allowing the model to learn the strict `JanusScript` grammar effectively.
* **Loss Masking:** We used `train_on_responses_only`. The model was **never** trained on the input text, only on the output. This prevents the model from memorizing patient PII from the training set.
* **Hyperparameters:** Rank 16, Alpha 16, Learning Rate 2e-4, **2 Epochs** (approx 78 steps used for final checkpoint).

### 5. Results & Conclusion
* **Zero-Trust Validation:** The "Vault" successfully reconstructs documents using *only* the database for identity.
* **Semantic Expansion:** The model demonstrates the ability to take a concise code (`Dx(Pneumonia)`) and expand it into fluent medical narrative ("Patient presented with symptoms consistent with Pneumonia...").
"""
249
+
250
# ==============================================================================
# 6. LAUNCHER
# ==============================================================================

def _build_pipeline_tab(extract_fn, input_label, button_label, note_sample, db_sample):
    """Lay out one extract -> vault pipeline (two rows of widgets) and wire
    its callbacks. Tabs A and B previously duplicated this exact structure;
    only the extraction kernel, labels and sample payloads differ."""
    with gr.Row():
        with gr.Column(scale=1):
            inp_note = gr.Textbox(label=input_label, lines=12, value=note_sample)
            btn_extract = gr.Button(button_label, variant="primary")

        with gr.Column(scale=1):
            out_proto = gr.Textbox(label="JanusScript Protocol", lines=6, interactive=True)

    gr.Markdown("---")

    with gr.Row():
        with gr.Column(scale=1):
            inp_db = gr.Textbox(label="Secure Identity Record", lines=12, value=db_sample)
            btn_vault = gr.Button("Execute Vault (Local) ➔", variant="secondary")

        with gr.Column(scale=1):
            out_final = gr.Textbox(label="Output: Reconstructed Document", lines=25)

    # Step 1: note/scenario -> protocol; Step 2: protocol + local DB -> document.
    btn_extract.click(extract_fn, inputs=inp_note, outputs=out_proto)
    btn_vault.click(kernel_vault, inputs=[out_proto, inp_db], outputs=out_final)


with gr.Blocks(theme=gr.themes.Soft(primary_hue="emerald"), title=PROJECT_TITLE) as demo:
    gr.Markdown(f"# 🏛️ {PROJECT_TITLE}")

    with gr.Tabs():

        # --- TAB 1: fully local, air-gapped pipeline ---
        with gr.TabItem("🛡️ Mode A: Local Air-Gap"):
            _build_pipeline_tab(
                kernel_scout,
                "Raw Sensitive Note",
                "Execute Scout (Local) ➔",
                sample_note,
                sample_db,
            )

        # --- TAB 2: cloud model writes the protocol, vault stays local ---
        with gr.TabItem("🧠 Mode B: Cloud Bridge"):
            _build_pipeline_tab(
                kernel_cloud_expert,
                "Clinical Scenario (Anonymized)",
                "Execute Cloud API (Llama-3) ➔",
                sample_scenario,
                sample_db_cloud,
            )

        # --- TAB 3: static write-up ---
        with gr.TabItem("📄 Technical Report"):
            gr.Markdown(report_md)

demo.launch()