raselmeya94 commited on
Commit
4d92a77
·
1 Parent(s): a2161ee
Files changed (1) hide show
  1. app.py +651 -121
app.py CHANGED
@@ -1,21 +1,180 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  # # ============================================================
2
  # # 🧠 Medical Dialogue → SOAP Note Generator (Fine-tuned Phi-3)
3
  # # ============================================================
4
 
5
  # import gradio as gr
6
  # import torch
7
- # import transformers
8
- # from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
9
  # from peft import PeftModel, PeftConfig
10
  # import os
11
  # import gc
12
- # if hasattr(transformers, "DynamicCache"):
13
- # transformers.DynamicCache.seen_tokens = property(lambda self: None)
14
 
15
  # # ------------------------------------------------------------
16
  # # ⚙️ Environment setup for memory management
17
  # # ------------------------------------------------------------
18
- # os.environ["PYTORCH_ALLOC_CONF"] = "expandable_segments:True"
19
  # os.environ["TOKENIZERS_PARALLELISM"] = "false"
20
 
21
  # # ------------------------------------------------------------
@@ -25,26 +184,15 @@
25
  # fine_tuned_model_name = "raselmeya2194/med_dialogue2soap"
26
 
27
  # # ------------------------------------------------------------
28
- # # 🧮 Quantization (4-bit for memory efficiency if GPU is present)
29
- # # ------------------------------------------------------------
30
- # bnb_config = BitsAndBytesConfig(
31
- # load_in_4bit=True,
32
- # bnb_4bit_use_double_quant=True,
33
- # bnb_4bit_quant_type="nf4",
34
- # bnb_4bit_compute_dtype=torch.bfloat16
35
- # ) if torch.cuda.is_available() else None
36
-
37
  # # ------------------------------------------------------------
38
- # # 🧠 Load Base Model (with automatic device mapping)
39
- # # ------------------------------------------------------------
40
- # print("🔹 Loading base model...")
41
  # try:
42
  # fine_tuned_model = AutoModelForCausalLM.from_pretrained(
43
  # base_model_name,
44
- # quantization_config=bnb_config,
45
- # device_map="auto" if torch.cuda.is_available() else None,
46
  # trust_remote_code=True,
47
- # torch_dtype=torch.bfloat16 if torch.cuda.is_available() else torch.float32
48
  # )
49
  # print("✅ Base model loaded successfully.")
50
  # except Exception as e:
@@ -68,15 +216,11 @@
68
  # # 🧩 Load LoRA Adapter (PEFT fine-tuned weights)
69
  # # ------------------------------------------------------------
70
  # print("🔹 Loading LoRA adapter...")
71
- # offload_dir = "./offload_folder"
72
- # os.makedirs(offload_dir, exist_ok=True)
73
-
74
  # try:
75
  # peft_config = PeftConfig.from_pretrained(fine_tuned_model_name)
76
  # fine_tuned_model = PeftModel.from_pretrained(
77
  # fine_tuned_model,
78
  # fine_tuned_model_name,
79
- # offload_folder=offload_dir,
80
  # config=peft_config
81
  # )
82
  # print("✅ LoRA adapter loaded successfully.")
@@ -88,26 +232,21 @@
88
  # # 🚀 Prepare model for inference
89
  # # ------------------------------------------------------------
90
  # fine_tuned_model.eval()
91
- # device = "cuda" if torch.cuda.is_available() else "cpu"
92
  # fine_tuned_model.to(device)
93
 
94
- # if torch.cuda.device_count() > 1:
95
- # print(f"🚀 Using {torch.cuda.device_count()} GPUs via DataParallel!")
96
- # fine_tuned_model = torch.nn.DataParallel(fine_tuned_model)
97
-
98
- # if torch.cuda.is_available():
99
- # torch.cuda.empty_cache()
100
  # gc.collect()
 
101
 
102
  # # ------------------------------------------------------------
103
  # # 🧾 SOAP Note Generation Function
104
  # # ------------------------------------------------------------
105
  # def generate_soap(input_text: str, temperature: float = 0.7, max_new_tokens: int = 512):
106
  # """
107
- # Generates a SOAP note from a doctor-patient dialogue using the fine-tuned model.
108
  # """
109
  # try:
110
- # # 🧩 Format input
111
  # prompt = (
112
  # f"<|user|>\n"
113
  # f"Generate a structured SOAP note based on the following doctor-patient dialogue:\n\n"
@@ -115,40 +254,36 @@
115
  # f"<|end|>\n<|assistant|>SOAP Notes:\n"
116
  # )
117
 
118
- # # 🔠 Tokenize input
119
  # inputs = tokenizer(
120
  # prompt,
121
  # return_tensors="pt",
122
  # truncation=True,
123
- # max_length=4096,
124
  # padding=True
125
  # ).to(device)
126
 
127
- # autocast_device = "cuda" if torch.cuda.is_available() else "cpu"
128
-
129
- # # ⚡ Inference
130
  # with torch.no_grad():
131
- # with torch.amp.autocast(autocast_device):
132
- # model = fine_tuned_model.module if isinstance(fine_tuned_model, torch.nn.DataParallel) else fine_tuned_model
133
-
134
- # outputs = model.generate(
135
- # input_ids=inputs["input_ids"],
136
- # attention_mask=inputs.get("attention_mask"),
137
- # max_new_tokens=max_new_tokens,
138
- # temperature=temperature,
139
- # top_p=0.9,
140
- # top_k=50,
141
- # pad_token_id=tokenizer.pad_token_id,
142
- # eos_token_id=tokenizer.eos_token_id,
143
- # do_sample=True,
144
- # no_repeat_ngram_size=3
145
- # )
146
-
147
- # # 🔎 Extract and decode generated tokens
148
  # generated_ids = outputs[0][inputs["input_ids"].shape[-1]:]
149
  # generated_text = tokenizer.decode(generated_ids, skip_special_tokens=True).strip()
150
 
151
- # # 🧼 Clean output
152
  # if "SOAP Notes:" in generated_text:
153
  # generated_text = generated_text.split("SOAP Notes:")[-1].strip()
154
 
@@ -157,10 +292,381 @@
157
  # except Exception as e:
158
  # return f"❌ Error generating SOAP note: {str(e)}"
159
 
 
 
160
  # # ------------------------------------------------------------
161
  # # 🎨 Gradio Interface
162
  # # ------------------------------------------------------------
163
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
164
  # ============================================================
165
  # 🧠 Medical Dialogue → SOAP Note Generator (Fine-tuned Phi-3)
166
  # ============================================================
@@ -293,11 +799,9 @@ def generate_soap(input_text: str, temperature: float = 0.7, max_new_tokens: int
293
  return f"❌ Error generating SOAP note: {str(e)}"
294
 
295
 
296
-
297
  # ------------------------------------------------------------
298
- # 🎨 Gradio Interface
299
  # ------------------------------------------------------------
300
-
301
  custom_css = """
302
  /* Import Google Fonts */
303
  @import url('https://fonts.googleapis.com/css2?family=Inter:wght@300;400;500;600;700&display=swap');
@@ -458,7 +962,6 @@ button {
458
  font-size: 1.1rem !important;
459
  font-weight: 600 !important;
460
  margin-top: 1.5rem !important;
461
- width: 100% !important;
462
  transition: all 0.3s ease !important;
463
  box-shadow: 0 4px 15px rgba(102, 126, 234, 0.4) !important;
464
  letter-spacing: 0.5px;
@@ -573,57 +1076,80 @@ button:active {
573
  }
574
  """
575
 
576
- # Gradio interface with Generate button
577
- iface = gr.Interface(
578
- fn=generate_soap,
579
- title="🩺 SOAP Note Generator",
580
- description=(
581
- "Transform doctor-patient dialogues into professional, structured SOAP notes instantly. "
582
- "Powered by advanced AI to ensure accuracy and medical formatting standards."
 
583
  ),
584
- inputs=[
585
- gr.Textbox(
586
- label="📝 Doctor-Patient Dialogue",
587
- placeholder="Paste the complete conversation between doctor and patient here...\n\nExample:\nDoctor: Hello, what brings you in today?\nPatient: I've been having chest pain for the past week...",
588
- lines=10,
589
- max_lines=20,
590
- show_label=True,
591
- interactive=True,
592
- elem_classes="input-group"
593
- ),
594
- gr.Slider(
595
- minimum=0,
596
- maximum=1,
597
- step=0.05,
598
- value=0.7,
599
- label="🎨 Temperature (Creativity Level)",
600
- info="Lower values = More focused and consistent | Higher values = More creative and varied output"
601
- ),
602
- gr.Slider(
603
- minimum=128,
604
- maximum=4096,
605
- step=128,
606
- value=512,
607
- label="📏 Max Length (Tokens)",
608
- info="Controls the maximum length of the generated SOAP note (1 token ≈ 0.75 words)"
609
- ),
610
- ],
611
- outputs=[
612
- gr.Textbox(
613
- label="📋 Generated SOAP Note",
614
- placeholder="Your professionally formatted SOAP note will appear here...\n\n✓ Subjective findings\n✓ Objective observations\n✓ Assessment\n✓ Plan of care",
615
- lines=18,
616
- max_lines=25,
617
- interactive=False,
618
- show_label=True,
619
- show_copy_button=True
620
- )
621
- ],
622
- allow_flagging="never",
623
- live=False,
624
- cache_examples=False,
625
- examples=[
626
- ["""Doctor: Hello, can you please tell me about your past medical history?
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
627
  Patient: Hi, I don't have any past medical history.
628
  Doctor: Okay. What brings you in today?
629
  Patient: I've been experiencing painless blurry vision in my right eye for a week now. I've also had intermittent fevers, headache, body aches, and a nonpruritic maculopapular rash on my lower legs for the past 6 months.
@@ -638,25 +1164,29 @@ Patient: Okay. What does that mean?
638
  Doctor: These results could indicate an underlying inflammatory or infectious process. We also performed a lumbar puncture, which showed clear and colorless fluid, 2 red blood cells per microliter, and 56 white blood cells per microliter.
639
  Patient: So, what's the next step?
640
  Doctor: We need to investigate further to determine the cause of your symptoms. We'll run additional tests and consult with a specialist to get a clearer understanding of your condition. In the meantime, we'll monitor your symptoms and provide supportive care. We'll keep you informed about any new findings and discuss the best course of treatment.
641
- Patient: Alright, thank you, Doctor.""",
642
- 0.7, 512]
643
- ],
644
- theme=gr.themes.Soft(
645
- primary_hue="indigo",
646
- secondary_hue="purple",
647
- neutral_hue="slate",
648
- ),
649
- css=custom_css
650
- )
 
 
 
651
 
652
  # Print message to confirm interface launch
653
  print("🌐 Launching Enhanced Gradio Interface...")
654
  print("✨ New features: Modern gradient design, smooth animations, better UX")
 
655
 
656
  # Launch the Gradio interface
657
  if __name__ == "__main__":
658
  try:
659
- iface.launch(
660
  server_name="0.0.0.0",
661
  server_port=7860,
662
  debug=True
 
1
+ # # # ============================================================
2
+ # # # 🧠 Medical Dialogue → SOAP Note Generator (Fine-tuned Phi-3)
3
+ # # # ============================================================
4
+
5
+ # # import gradio as gr
6
+ # # import torch
7
+ # # import transformers
8
+ # # from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
9
+ # # from peft import PeftModel, PeftConfig
10
+ # # import os
11
+ # # import gc
12
+ # # if hasattr(transformers, "DynamicCache"):
13
+ # # transformers.DynamicCache.seen_tokens = property(lambda self: None)
14
+
15
+ # # # ------------------------------------------------------------
16
+ # # # ⚙️ Environment setup for memory management
17
+ # # # ------------------------------------------------------------
18
+ # # os.environ["PYTORCH_ALLOC_CONF"] = "expandable_segments:True"
19
+ # # os.environ["TOKENIZERS_PARALLELISM"] = "false"
20
+
21
+ # # # ------------------------------------------------------------
22
+ # # # 🏗️ Model identifiers
23
+ # # # ------------------------------------------------------------
24
+ # # base_model_name = "microsoft/phi-3-mini-4k-instruct"
25
+ # # fine_tuned_model_name = "raselmeya2194/med_dialogue2soap"
26
+
27
+ # # # ------------------------------------------------------------
28
+ # # # 🧮 Quantization (4-bit for memory efficiency if GPU is present)
29
+ # # # ------------------------------------------------------------
30
+ # # bnb_config = BitsAndBytesConfig(
31
+ # # load_in_4bit=True,
32
+ # # bnb_4bit_use_double_quant=True,
33
+ # # bnb_4bit_quant_type="nf4",
34
+ # # bnb_4bit_compute_dtype=torch.bfloat16
35
+ # # ) if torch.cuda.is_available() else None
36
+
37
+ # # # ------------------------------------------------------------
38
+ # # # 🧠 Load Base Model (with automatic device mapping)
39
+ # # # ------------------------------------------------------------
40
+ # # print("🔹 Loading base model...")
41
+ # # try:
42
+ # # fine_tuned_model = AutoModelForCausalLM.from_pretrained(
43
+ # # base_model_name,
44
+ # # quantization_config=bnb_config,
45
+ # # device_map="auto" if torch.cuda.is_available() else None,
46
+ # # trust_remote_code=True,
47
+ # # torch_dtype=torch.bfloat16 if torch.cuda.is_available() else torch.float32
48
+ # # )
49
+ # # print("✅ Base model loaded successfully.")
50
+ # # except Exception as e:
51
+ # # print(f"❌ Error loading base model: {e}")
52
+ # # raise
53
+
54
+ # # # ------------------------------------------------------------
55
+ # # # 🔤 Load Tokenizer
56
+ # # # ------------------------------------------------------------
57
+ # # print("🔹 Loading tokenizer...")
58
+ # # try:
59
+ # # tokenizer = AutoTokenizer.from_pretrained(base_model_name, trust_remote_code=True)
60
+ # # if tokenizer.pad_token is None:
61
+ # # tokenizer.pad_token = tokenizer.eos_token
62
+ # # print("✅ Tokenizer loaded successfully.")
63
+ # # except Exception as e:
64
+ # # print(f"❌ Error loading tokenizer: {e}")
65
+ # # raise
66
+
67
+ # # # ------------------------------------------------------------
68
+ # # # 🧩 Load LoRA Adapter (PEFT fine-tuned weights)
69
+ # # # ------------------------------------------------------------
70
+ # # print("🔹 Loading LoRA adapter...")
71
+ # # offload_dir = "./offload_folder"
72
+ # # os.makedirs(offload_dir, exist_ok=True)
73
+
74
+ # # try:
75
+ # # peft_config = PeftConfig.from_pretrained(fine_tuned_model_name)
76
+ # # fine_tuned_model = PeftModel.from_pretrained(
77
+ # # fine_tuned_model,
78
+ # # fine_tuned_model_name,
79
+ # # offload_folder=offload_dir,
80
+ # # config=peft_config
81
+ # # )
82
+ # # print("✅ LoRA adapter loaded successfully.")
83
+ # # except Exception as e:
84
+ # # print(f"❌ Error loading LoRA adapter: {e}")
85
+ # # raise
86
+
87
+ # # # ------------------------------------------------------------
88
+ # # # 🚀 Prepare model for inference
89
+ # # # ------------------------------------------------------------
90
+ # # fine_tuned_model.eval()
91
+ # # device = "cuda" if torch.cuda.is_available() else "cpu"
92
+ # # fine_tuned_model.to(device)
93
+
94
+ # # if torch.cuda.device_count() > 1:
95
+ # # print(f"🚀 Using {torch.cuda.device_count()} GPUs via DataParallel!")
96
+ # # fine_tuned_model = torch.nn.DataParallel(fine_tuned_model)
97
+
98
+ # # if torch.cuda.is_available():
99
+ # # torch.cuda.empty_cache()
100
+ # # gc.collect()
101
+
102
+ # # # ------------------------------------------------------------
103
+ # # # 🧾 SOAP Note Generation Function
104
+ # # # ------------------------------------------------------------
105
+ # # def generate_soap(input_text: str, temperature: float = 0.7, max_new_tokens: int = 512):
106
+ # # """
107
+ # # Generates a SOAP note from a doctor-patient dialogue using the fine-tuned model.
108
+ # # """
109
+ # # try:
110
+ # # # 🧩 Format input
111
+ # # prompt = (
112
+ # # f"<|user|>\n"
113
+ # # f"Generate a structured SOAP note based on the following doctor-patient dialogue:\n\n"
114
+ # # f"{input_text.strip()}\n"
115
+ # # f"<|end|>\n<|assistant|>SOAP Notes:\n"
116
+ # # )
117
+
118
+ # # # 🔠 Tokenize input
119
+ # # inputs = tokenizer(
120
+ # # prompt,
121
+ # # return_tensors="pt",
122
+ # # truncation=True,
123
+ # # max_length=4096,
124
+ # # padding=True
125
+ # # ).to(device)
126
+
127
+ # # autocast_device = "cuda" if torch.cuda.is_available() else "cpu"
128
+
129
+ # # # ⚡ Inference
130
+ # # with torch.no_grad():
131
+ # # with torch.amp.autocast(autocast_device):
132
+ # # model = fine_tuned_model.module if isinstance(fine_tuned_model, torch.nn.DataParallel) else fine_tuned_model
133
+
134
+ # # outputs = model.generate(
135
+ # # input_ids=inputs["input_ids"],
136
+ # # attention_mask=inputs.get("attention_mask"),
137
+ # # max_new_tokens=max_new_tokens,
138
+ # # temperature=temperature,
139
+ # # top_p=0.9,
140
+ # # top_k=50,
141
+ # # pad_token_id=tokenizer.pad_token_id,
142
+ # # eos_token_id=tokenizer.eos_token_id,
143
+ # # do_sample=True,
144
+ # # no_repeat_ngram_size=3
145
+ # # )
146
+
147
+ # # # 🔎 Extract and decode generated tokens
148
+ # # generated_ids = outputs[0][inputs["input_ids"].shape[-1]:]
149
+ # # generated_text = tokenizer.decode(generated_ids, skip_special_tokens=True).strip()
150
+
151
+ # # # 🧼 Clean output
152
+ # # if "SOAP Notes:" in generated_text:
153
+ # # generated_text = generated_text.split("SOAP Notes:")[-1].strip()
154
+
155
+ # # return generated_text
156
+
157
+ # # except Exception as e:
158
+ # # return f"❌ Error generating SOAP note: {str(e)}"
159
+
160
+ # # # ------------------------------------------------------------
161
+ # # # 🎨 Gradio Interface
162
+ # # # ------------------------------------------------------------
163
+
164
  # # ============================================================
165
  # # 🧠 Medical Dialogue → SOAP Note Generator (Fine-tuned Phi-3)
166
  # # ============================================================
167
 
168
  # import gradio as gr
169
  # import torch
170
+ # from transformers import AutoModelForCausalLM, AutoTokenizer
 
171
  # from peft import PeftModel, PeftConfig
172
  # import os
173
  # import gc
 
 
174
 
175
  # # ------------------------------------------------------------
176
  # # ⚙️ Environment setup for memory management
177
  # # ------------------------------------------------------------
 
178
  # os.environ["TOKENIZERS_PARALLELISM"] = "false"
179
 
180
  # # ------------------------------------------------------------
 
184
  # fine_tuned_model_name = "raselmeya2194/med_dialogue2soap"
185
 
186
  # # ------------------------------------------------------------
187
+ # # 🧠 Load Base Model (CPU mode)
 
 
 
 
 
 
 
 
188
  # # ------------------------------------------------------------
189
+ # print("🔹 Loading base model on CPU...")
 
 
190
  # try:
191
  # fine_tuned_model = AutoModelForCausalLM.from_pretrained(
192
  # base_model_name,
193
+ # device_map=None, # Force CPU
 
194
  # trust_remote_code=True,
195
+ # torch_dtype=torch.float32
196
  # )
197
  # print("✅ Base model loaded successfully.")
198
  # except Exception as e:
 
216
  # # 🧩 Load LoRA Adapter (PEFT fine-tuned weights)
217
  # # ------------------------------------------------------------
218
  # print("🔹 Loading LoRA adapter...")
 
 
 
219
  # try:
220
  # peft_config = PeftConfig.from_pretrained(fine_tuned_model_name)
221
  # fine_tuned_model = PeftModel.from_pretrained(
222
  # fine_tuned_model,
223
  # fine_tuned_model_name,
 
224
  # config=peft_config
225
  # )
226
  # print("✅ LoRA adapter loaded successfully.")
 
232
  # # 🚀 Prepare model for inference
233
  # # ------------------------------------------------------------
234
  # fine_tuned_model.eval()
235
+ # device = torch.device("cpu")
236
  # fine_tuned_model.to(device)
237
 
 
 
 
 
 
 
238
  # gc.collect()
239
+ # torch.cuda.empty_cache()
240
 
241
  # # ------------------------------------------------------------
242
  # # 🧾 SOAP Note Generation Function
243
  # # ------------------------------------------------------------
244
  # def generate_soap(input_text: str, temperature: float = 0.7, max_new_tokens: int = 512):
245
  # """
246
+ # Generates a SOAP note from a doctor-patient dialogue using the fine-tuned model (CPU).
247
  # """
248
  # try:
249
+ # # Format input
250
  # prompt = (
251
  # f"<|user|>\n"
252
  # f"Generate a structured SOAP note based on the following doctor-patient dialogue:\n\n"
 
254
  # f"<|end|>\n<|assistant|>SOAP Notes:\n"
255
  # )
256
 
257
+ # # Tokenize input
258
  # inputs = tokenizer(
259
  # prompt,
260
  # return_tensors="pt",
261
  # truncation=True,
262
+ # max_length=2048,
263
  # padding=True
264
  # ).to(device)
265
 
266
+ # # Run model (no autocast or GPU)
 
 
267
  # with torch.no_grad():
268
+ # outputs = fine_tuned_model.generate(
269
+ # input_ids=inputs["input_ids"],
270
+ # attention_mask=inputs.get("attention_mask"),
271
+ # max_new_tokens=max_new_tokens,
272
+ # temperature=temperature,
273
+ # top_p=0.9,
274
+ # top_k=50,
275
+ # pad_token_id=tokenizer.pad_token_id,
276
+ # eos_token_id=tokenizer.eos_token_id,
277
+ # do_sample=True,
278
+ # no_repeat_ngram_size=3,
279
+ # use_cache=False
280
+ # )
281
+
282
+ # # Extract and decode generated tokens
 
 
283
  # generated_ids = outputs[0][inputs["input_ids"].shape[-1]:]
284
  # generated_text = tokenizer.decode(generated_ids, skip_special_tokens=True).strip()
285
 
286
+ # # Clean up final output
287
  # if "SOAP Notes:" in generated_text:
288
  # generated_text = generated_text.split("SOAP Notes:")[-1].strip()
289
 
 
292
  # except Exception as e:
293
  # return f"❌ Error generating SOAP note: {str(e)}"
294
 
295
+
296
+
297
  # # ------------------------------------------------------------
298
  # # 🎨 Gradio Interface
299
  # # ------------------------------------------------------------
300
 
301
+ # custom_css = """
302
+ # /* Import Google Fonts */
303
+ # @import url('https://fonts.googleapis.com/css2?family=Inter:wght@300;400;500;600;700&display=swap');
304
+
305
+ # /* Global Styles */
306
+ # body {
307
+ # background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
308
+ # font-family: 'Inter', sans-serif;
309
+ # margin: 0;
310
+ # padding: 0;
311
+ # min-height: 100vh;
312
+ # }
313
+
314
+ # .gradio-container {
315
+ # max-width: 1200px !important;
316
+ # margin: 0 auto !important;
317
+ # padding: 2rem !important;
318
+ # }
319
+
320
+ # /* Main Container Styling */
321
+ # .contain {
322
+ # background: rgba(255, 255, 255, 0.95) !important;
323
+ # backdrop-filter: blur(10px);
324
+ # border-radius: 24px !important;
325
+ # box-shadow: 0 20px 60px rgba(0, 0, 0, 0.3) !important;
326
+ # padding: 3rem !important;
327
+ # animation: fadeIn 0.6s ease-in-out;
328
+ # }
329
+
330
+ # @keyframes fadeIn {
331
+ # from {
332
+ # opacity: 0;
333
+ # transform: translateY(20px);
334
+ # }
335
+ # to {
336
+ # opacity: 1;
337
+ # transform: translateY(0);
338
+ # }
339
+ # }
340
+
341
+ # /* Title Styles */
342
+ # h1 {
343
+ # background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
344
+ # -webkit-background-clip: text;
345
+ # -webkit-text-fill-color: transparent;
346
+ # background-clip: text;
347
+ # font-size: 3rem !important;
348
+ # font-weight: 700 !important;
349
+ # text-align: center !important;
350
+ # margin-bottom: 1rem !important;
351
+ # letter-spacing: -0.5px;
352
+ # }
353
+
354
+ # /* Description Styles */
355
+ # .description {
356
+ # background: linear-gradient(135deg, #f5f7fa 0%, #c3cfe2 100%);
357
+ # color: #2c3e50 !important;
358
+ # font-size: 1.1rem !important;
359
+ # padding: 1.5rem 2rem !important;
360
+ # border-radius: 16px !important;
361
+ # text-align: center !important;
362
+ # margin: 0 auto 2.5rem !important;
363
+ # border-left: 4px solid #667eea;
364
+ # box-shadow: 0 4px 6px rgba(0, 0, 0, 0.07);
365
+ # line-height: 1.6;
366
+ # }
367
+
368
+ # /* Label Styles */
369
+ # label {
370
+ # font-weight: 600 !important;
371
+ # color: #2c3e50 !important;
372
+ # font-size: 0.95rem !important;
373
+ # margin-bottom: 0.5rem !important;
374
+ # display: block !important;
375
+ # }
376
+
377
+ # /* Input/Textarea Styles */
378
+ # textarea, input[type="text"] {
379
+ # background: #ffffff !important;
380
+ # border: 2px solid #e0e7ff !important;
381
+ # border-radius: 12px !important;
382
+ # padding: 1rem !important;
383
+ # font-size: 1rem !important;
384
+ # font-family: 'Inter', sans-serif !important;
385
+ # transition: all 0.3s ease !important;
386
+ # color: #2c3e50 !important;
387
+ # text-align: left !important;
388
+ # }
389
+
390
+ # textarea:focus, input[type="text"]:focus {
391
+ # border-color: #667eea !important;
392
+ # outline: none !important;
393
+ # box-shadow: 0 0 0 3px rgba(102, 126, 234, 0.1) !important;
394
+ # transform: translateY(-2px);
395
+ # }
396
+
397
+ # textarea::placeholder, input::placeholder {
398
+ # color: #94a3b8 !important;
399
+ # font-style: italic;
400
+ # }
401
+
402
+ # /* Slider Container */
403
+ # .slider-container {
404
+ # margin: 1.5rem 0 !important;
405
+ # }
406
+
407
+ # /* Slider Styles */
408
+ # input[type="range"] {
409
+ # width: 100% !important;
410
+ # height: 8px !important;
411
+ # border-radius: 5px !important;
412
+ # background: linear-gradient(to right, #667eea 0%, #764ba2 100%) !important;
413
+ # outline: none !important;
414
+ # opacity: 0.9 !important;
415
+ # transition: opacity 0.2s !important;
416
+ # }
417
+
418
+ # input[type="range"]:hover {
419
+ # opacity: 1 !important;
420
+ # }
421
+
422
+ # input[type="range"]::-webkit-slider-thumb {
423
+ # width: 20px !important;
424
+ # height: 20px !important;
425
+ # border-radius: 50% !important;
426
+ # background: #ffffff !important;
427
+ # cursor: pointer !important;
428
+ # box-shadow: 0 2px 6px rgba(102, 126, 234, 0.4) !important;
429
+ # border: 3px solid #667eea !important;
430
+ # }
431
+
432
+ # input[type="range"]::-moz-range-thumb {
433
+ # width: 20px !important;
434
+ # height: 20px !important;
435
+ # border-radius: 50% !important;
436
+ # background: #ffffff !important;
437
+ # cursor: pointer !important;
438
+ # box-shadow: 0 2px 6px rgba(102, 126, 234, 0.4) !important;
439
+ # border: 3px solid #667eea !important;
440
+ # }
441
+
442
+ # /* Info Text for Sliders */
443
+ # .info {
444
+ # color: #64748b !important;
445
+ # font-size: 0.875rem !important;
446
+ # margin-top: 0.5rem !important;
447
+ # font-style: italic;
448
+ # }
449
+
450
+ # /* Button Styles */
451
+ # button {
452
+ # background: linear-gradient(135deg, #667eea 0%, #764ba2 100%) !important;
453
+ # color: white !important;
454
+ # border: none !important;
455
+ # padding: 1rem 2.5rem !important;
456
+ # border-radius: 12px !important;
457
+ # cursor: pointer !important;
458
+ # font-size: 1.1rem !important;
459
+ # font-weight: 600 !important;
460
+ # margin-top: 1.5rem !important;
461
+ # width: 100% !important;
462
+ # transition: all 0.3s ease !important;
463
+ # box-shadow: 0 4px 15px rgba(102, 126, 234, 0.4) !important;
464
+ # letter-spacing: 0.5px;
465
+ # }
466
+
467
+ # button:hover {
468
+ # transform: translateY(-2px) !important;
469
+ # box-shadow: 0 6px 20px rgba(102, 126, 234, 0.6) !important;
470
+ # }
471
+
472
+ # button:active {
473
+ # transform: translateY(0) !important;
474
+ # }
475
+
476
+ # /* Output Container */
477
+ # .output-textbox {
478
+ # background: #f8fafc !important;
479
+ # border: 2px solid #e0e7ff !important;
480
+ # border-radius: 12px !important;
481
+ # padding: 1.5rem !important;
482
+ # font-size: 1rem !important;
483
+ # margin-top: 1.5rem !important;
484
+ # line-height: 1.8 !important;
485
+ # box-shadow: inset 0 2px 4px rgba(0, 0, 0, 0.06) !important;
486
+ # text-align: left !important;
487
+ # }
488
+
489
+ # /* Example Section Styling */
490
+ # .examples {
491
+ # background: #f8fafc !important;
492
+ # border-radius: 16px !important;
493
+ # padding: 1.5rem !important;
494
+ # margin-top: 2rem !important;
495
+ # border: 2px dashed #cbd5e1 !important;
496
+ # }
497
+
498
+ # .examples h4 {
499
+ # color: #475569 !important;
500
+ # font-weight: 600 !important;
501
+ # margin-bottom: 1rem !important;
502
+ # }
503
+
504
+ # /* Loading Animation */
505
+ # .loading {
506
+ # border: 3px solid #f3f4f6;
507
+ # border-top: 3px solid #667eea;
508
+ # border-radius: 50%;
509
+ # width: 40px;
510
+ # height: 40px;
511
+ # animation: spin 1s linear infinite;
512
+ # margin: 2rem auto;
513
+ # }
514
+
515
+ # @keyframes spin {
516
+ # 0% { transform: rotate(0deg); }
517
+ # 100% { transform: rotate(360deg); }
518
+ # }
519
+
520
+ # /* Card-like sections */
521
+ # .input-group {
522
+ # background: #ffffff;
523
+ # padding: 1.5rem;
524
+ # border-radius: 12px;
525
+ # margin-bottom: 1.5rem;
526
+ # box-shadow: 0 2px 8px rgba(0, 0, 0, 0.05);
527
+ # border: 1px solid #f1f5f9;
528
+ # }
529
+
530
+ # /* Responsive Design */
531
+ # @media screen and (max-width: 768px) {
532
+ # h1 {
533
+ # font-size: 2rem !important;
534
+ # }
535
+
536
+ # .description {
537
+ # font-size: 1rem !important;
538
+ # padding: 1.25rem !important;
539
+ # }
540
+
541
+ # .contain {
542
+ # padding: 1.5rem !important;
543
+ # }
544
+
545
+ # button {
546
+ # font-size: 1rem !important;
547
+ # padding: 0.875rem 2rem !important;
548
+ # }
549
+ # }
550
+
551
+ # /* Smooth transitions for all interactive elements */
552
+ # * {
553
+ # transition: all 0.2s ease;
554
+ # }
555
+
556
+ # /* Custom scrollbar */
557
+ # ::-webkit-scrollbar {
558
+ # width: 10px;
559
+ # }
560
+
561
+ # ::-webkit-scrollbar-track {
562
+ # background: #f1f5f9;
563
+ # border-radius: 10px;
564
+ # }
565
+
566
+ # ::-webkit-scrollbar-thumb {
567
+ # background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
568
+ # border-radius: 10px;
569
+ # }
570
+
571
+ # ::-webkit-scrollbar-thumb:hover {
572
+ # background: linear-gradient(135deg, #764ba2 0%, #667eea 100%);
573
+ # }
574
+ # """
575
+
576
+ # # Gradio interface with Generate button
577
+ # iface = gr.Interface(
578
+ # fn=generate_soap,
579
+ # title="🩺 SOAP Note Generator",
580
+ # description=(
581
+ # "Transform doctor-patient dialogues into professional, structured SOAP notes instantly. "
582
+ # "Powered by advanced AI to ensure accuracy and medical formatting standards."
583
+ # ),
584
+ # inputs=[
585
+ # gr.Textbox(
586
+ # label="📝 Doctor-Patient Dialogue",
587
+ # placeholder="Paste the complete conversation between doctor and patient here...\n\nExample:\nDoctor: Hello, what brings you in today?\nPatient: I've been having chest pain for the past week...",
588
+ # lines=10,
589
+ # max_lines=20,
590
+ # show_label=True,
591
+ # interactive=True,
592
+ # elem_classes="input-group"
593
+ # ),
594
+ # gr.Slider(
595
+ # minimum=0,
596
+ # maximum=1,
597
+ # step=0.05,
598
+ # value=0.7,
599
+ # label="🎨 Temperature (Creativity Level)",
600
+ # info="Lower values = More focused and consistent | Higher values = More creative and varied output"
601
+ # ),
602
+ # gr.Slider(
603
+ # minimum=128,
604
+ # maximum=4096,
605
+ # step=128,
606
+ # value=512,
607
+ # label="📏 Max Length (Tokens)",
608
+ # info="Controls the maximum length of the generated SOAP note (1 token ≈ 0.75 words)"
609
+ # ),
610
+ # ],
611
+ # outputs=[
612
+ # gr.Textbox(
613
+ # label="📋 Generated SOAP Note",
614
+ # placeholder="Your professionally formatted SOAP note will appear here...\n\n✓ Subjective findings\n✓ Objective observations\n✓ Assessment\n✓ Plan of care",
615
+ # lines=18,
616
+ # max_lines=25,
617
+ # interactive=False,
618
+ # show_label=True,
619
+ # show_copy_button=True
620
+ # )
621
+ # ],
622
+ # allow_flagging="never",
623
+ # live=False,
624
+ # cache_examples=False,
625
+ # examples=[
626
+ # ["""Doctor: Hello, can you please tell me about your past medical history?
627
+ # Patient: Hi, I don't have any past medical history.
628
+ # Doctor: Okay. What brings you in today?
629
+ # Patient: I've been experiencing painless blurry vision in my right eye for a week now. I've also had intermittent fevers, headache, body aches, and a nonpruritic maculopapular rash on my lower legs for the past 6 months.
630
+ # Doctor: Thank you for sharing that. Have you had any other symptoms such as neck stiffness, nausea, vomiting, Raynaud's phenomenon, oral ulcerations, chest pain, shortness of breath, abdominal pain, or photosensitivity?
631
+ # Patient: No, only an isolated episode of left knee swelling and testicular swelling in the past.
632
+ # Doctor: Do you work with any toxic substances or have any habits like smoking, drinking, or illicit drug use?
633
+ # Patient: No, I work as a flooring installer and I don't have any toxic habits.
634
+ # Doctor: Alright. We checked your vital signs and they were normal. During the physical exam, we found bilateral papilledema and optic nerve erythema in your right eye, which was greater than in your left eye. You also have a right inferior nasal quadrant visual field defect and a right afferent pupillary defect. Your muscle strength and reflexes were normal, and your sensation to light touch, pinprick, vibration, and proprioception was intact. We also noticed the maculopapular rash on your bilateral lower extremities.
635
+ # Patient: Oh, I see.
636
+ # Doctor: Your admitting labs showed some abnormal results. You have microcytic anemia with a hemoglobin of 11.6 gm/dL, hematocrit of 35.3%, and mean corpuscular volume of 76.9 fL. You also have hyponatremia with a sodium level of 133 mmol/L. Your erythrocyte sedimentation rate (ESR) is elevated at 33 mm/hr, and your C-reactive protein (CRP) is also elevated at 13.3 mg/L. Your urinalysis did not show any protein or blood.
637
+ # Patient: Okay. What does that mean?
638
+ # Doctor: These results could indicate an underlying inflammatory or infectious process. We also performed a lumbar puncture, which showed clear and colorless fluid, 2 red blood cells per microliter, and 56 white blood cells per microliter.
639
+ # Patient: So, what's the next step?
640
+ # Doctor: We need to investigate further to determine the cause of your symptoms. We'll run additional tests and consult with a specialist to get a clearer understanding of your condition. In the meantime, we'll monitor your symptoms and provide supportive care. We'll keep you informed about any new findings and discuss the best course of treatment.
641
+ # Patient: Alright, thank you, Doctor.""",
642
+ # 0.7, 512]
643
+ # ],
644
+ # theme=gr.themes.Soft(
645
+ # primary_hue="indigo",
646
+ # secondary_hue="purple",
647
+ # neutral_hue="slate",
648
+ # ),
649
+ # css=custom_css
650
+ # )
651
+
652
+ # # Print message to confirm interface launch
653
+ # print("🌐 Launching Enhanced Gradio Interface...")
654
+ # print("✨ New features: Modern gradient design, smooth animations, better UX")
655
+
656
+ # # Launch the Gradio interface
657
+ # if __name__ == "__main__":
658
+ # try:
659
+ # iface.launch(
660
+ # server_name="0.0.0.0",
661
+ # server_port=7860,
662
+ # debug=True
663
+ # )
664
+ # except Exception as e:
665
+ # print(f"❌ Error launching Gradio: {e}")
666
+ # raise
667
+
668
+
669
+
670
  # ============================================================
671
  # 🧠 Medical Dialogue → SOAP Note Generator (Fine-tuned Phi-3)
672
  # ============================================================
 
799
  return f"❌ Error generating SOAP note: {str(e)}"
800
 
801
 
 
802
  # ------------------------------------------------------------
803
+ # 🎨 Custom CSS
804
  # ------------------------------------------------------------
 
805
  custom_css = """
806
  /* Import Google Fonts */
807
  @import url('https://fonts.googleapis.com/css2?family=Inter:wght@300;400;500;600;700&display=swap');
 
962
  font-size: 1.1rem !important;
963
  font-weight: 600 !important;
964
  margin-top: 1.5rem !important;
 
965
  transition: all 0.3s ease !important;
966
  box-shadow: 0 4px 15px rgba(102, 126, 234, 0.4) !important;
967
  letter-spacing: 0.5px;
 
1076
  }
1077
  """
1078
 
1079
# ------------------------------------------------------------
# 🎨 Gradio Blocks Interface (Better Control)
# ------------------------------------------------------------
# Builds the app UI. Generation is wired ONLY to the button's click event
# (live=... does not exist in Blocks), so the model never runs on keystrokes.
with gr.Blocks(
    theme=gr.themes.Soft(
        primary_hue="indigo",
        secondary_hue="purple",
        neutral_hue="slate",
    ),
    css=custom_css,
    title="🩺 SOAP Note Generator"
) as demo:

    # Header
    gr.Markdown("# 🩺 SOAP Note Generator")
    gr.Markdown(
        """<div class='description'>
Transform doctor-patient dialogues into professional, structured SOAP notes instantly.
Powered by advanced AI to ensure accuracy and medical formatting standards.
</div>""",
        elem_classes="description"
    )

    # Input Section: raw dialogue plus the two generation knobs passed to
    # generate_soap(dialogue, temperature, max_length).
    with gr.Row():
        with gr.Column():
            input_dialogue = gr.Textbox(
                label="📝 Doctor-Patient Dialogue",
                placeholder="Paste the complete conversation between doctor and patient here...\n\nExample:\nDoctor: Hello, what brings you in today?\nPatient: I've been having chest pain for the past week...",
                lines=10,
                max_lines=20,
                show_label=True,
                interactive=True,
                elem_classes="input-group"
            )

            with gr.Row():
                # NOTE(review): minimum=0 permits temperature == 0, which some
                # sampling-based generate() configs reject — confirm that
                # generate_soap clamps or handles a zero temperature.
                temperature_slider = gr.Slider(
                    minimum=0,
                    maximum=1,
                    step=0.05,
                    value=0.7,
                    label="🎨 Temperature (Creativity Level)",
                    info="Lower values = More focused and consistent | Higher values = More creative and varied output"
                )

                max_length_slider = gr.Slider(
                    minimum=128,
                    maximum=4096,
                    step=128,
                    value=512,
                    label="📏 Max Length (Tokens)",
                    info="Controls the maximum length of the generated SOAP note (1 token ≈ 0.75 words)"
                )

            generate_btn = gr.Button("🚀 Generate SOAP Note", variant="primary", size="lg")

    # Output Section: read-only textbox with a copy button for the note.
    with gr.Row():
        with gr.Column():
            output_soap = gr.Textbox(
                label="📋 Generated SOAP Note",
                placeholder="Your professionally formatted SOAP note will appear here...\n\n✓ Subjective findings\n✓ Objective observations\n✓ Assessment\n✓ Plan of care",
                lines=18,
                max_lines=25,
                interactive=False,
                show_label=True,
                show_copy_button=True
            )

    # Examples Section. cache_examples=False is set explicitly so Gradio never
    # pre-runs the model at startup to cache example outputs (matches the
    # behavior of the previous Interface-based version of this app).
    gr.Examples(
        examples=[
            ["""Doctor: Hello, can you please tell me about your past medical history?
Patient: Hi, I don't have any past medical history.
Doctor: Okay. What brings you in today?
Patient: I've been experiencing painless blurry vision in my right eye for a week now. I've also had intermittent fevers, headache, body aches, and a nonpruritic maculopapular rash on my lower legs for the past 6 months.
Doctor: Thank you for sharing that. Have you had any other symptoms such as neck stiffness, nausea, vomiting, Raynaud's phenomenon, oral ulcerations, chest pain, shortness of breath, abdominal pain, or photosensitivity?
Patient: No, only an isolated episode of left knee swelling and testicular swelling in the past.
Doctor: Do you work with any toxic substances or have any habits like smoking, drinking, or illicit drug use?
Patient: No, I work as a flooring installer and I don't have any toxic habits.
Doctor: Alright. We checked your vital signs and they were normal. During the physical exam, we found bilateral papilledema and optic nerve erythema in your right eye, which was greater than in your left eye. You also have a right inferior nasal quadrant visual field defect and a right afferent pupillary defect. Your muscle strength and reflexes were normal, and your sensation to light touch, pinprick, vibration, and proprioception was intact. We also noticed the maculopapular rash on your bilateral lower extremities.
Patient: Oh, I see.
Doctor: Your admitting labs showed some abnormal results. You have microcytic anemia with a hemoglobin of 11.6 gm/dL, hematocrit of 35.3%, and mean corpuscular volume of 76.9 fL. You also have hyponatremia with a sodium level of 133 mmol/L. Your erythrocyte sedimentation rate (ESR) is elevated at 33 mm/hr, and your C-reactive protein (CRP) is also elevated at 13.3 mg/L. Your urinalysis did not show any protein or blood.
Patient: Okay. What does that mean?
Doctor: These results could indicate an underlying inflammatory or infectious process. We also performed a lumbar puncture, which showed clear and colorless fluid, 2 red blood cells per microliter, and 56 white blood cells per microliter.
Patient: So, what's the next step?
Doctor: We need to investigate further to determine the cause of your symptoms. We'll run additional tests and consult with a specialist to get a clearer understanding of your condition. In the meantime, we'll monitor your symptoms and provide supportive care. We'll keep you informed about any new findings and discuss the best course of treatment.
Patient: Alright, thank you, Doctor.""", 0.7, 512]
        ],
        inputs=[input_dialogue, temperature_slider, max_length_slider],
        label="💡 Try this example",
        examples_per_page=1,
        cache_examples=False
    )

    # Button click event - ONLY triggers on button click
    generate_btn.click(
        fn=generate_soap,
        inputs=[input_dialogue, temperature_slider, max_length_slider],
        outputs=output_soap
    )
1180
 
1181
  # Print message to confirm interface launch
1182
  print("🌐 Launching Enhanced Gradio Interface...")
1183
  print("✨ New features: Modern gradient design, smooth animations, better UX")
1184
+ print("🔒 Manual trigger: Generation only starts when you click the button")
1185
 
1186
  # Launch the Gradio interface
1187
  if __name__ == "__main__":
1188
  try:
1189
+ demo.launch(
1190
  server_name="0.0.0.0",
1191
  server_port=7860,
1192
  debug=True