sehatguard committed on
Commit
3d66dd9
·
verified ·
1 Parent(s): 258ed91

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +19 -12
app.py CHANGED
@@ -11,7 +11,7 @@ client = OpenAI(
11
  api_key=os.environ.get("NVIDIA_API_KEY"),
12
  )
13
 
14
- MODEL = "openai/gpt-oss-20b" # GPT-4o via NVIDIA Build
15
 
16
  # ── System prompts ────────────────────────────────────────────────────────────
17
  SYSTEM_PROMPTS = {
@@ -81,8 +81,10 @@ def pdf_to_base64_images(pdf_path: str) -> list:
81
  def build_messages(system, history, user_text, image_b64_list=None):
82
  messages = [{"role": "system", "content": system}]
83
  for human, assistant in history:
84
- messages.append({"role": "user", "content": human})
85
- messages.append({"role": "assistant", "content": assistant})
 
 
86
 
87
  if image_b64_list:
88
  content = []
@@ -119,22 +121,27 @@ def chat(message, history, mode, file_upload, age, gender, weight, conditions, a
119
  display_msg = message.strip() or "πŸ“„ File uploaded β€” please analyze"
120
 
121
  try:
122
- image_b64_list = None
123
-
124
  if file_upload is not None:
125
  ext = os.path.splitext(file_upload)[1].lower()
126
  if ext == ".pdf":
127
- image_b64_list = pdf_to_base64_images(file_upload)[:6]
128
- display_msg = f"πŸ“„ PDF uploaded" + (f" β€” {message.strip()}" if message.strip() else "")
 
 
 
 
 
 
129
  elif ext in [".jpg", ".jpeg", ".png", ".webp", ".bmp"]:
130
- image_b64_list = [encode_image_to_base64(file_upload)]
131
- display_msg = f"πŸ–ΌοΈ Image uploaded" + (f" β€” {message.strip()}" if message.strip() else "")
132
  else:
133
- history.append((display_msg, "❌ Unsupported file type. Please upload PDF, JPG, or PNG."))
134
  yield "", history
135
  return
136
 
137
- messages = build_messages(system, history, user_text, image_b64_list)
138
 
139
  # ── Streaming response ──
140
  stream = client.chat.completions.create(
@@ -283,4 +290,4 @@ with gr.Blocks(css=CSS, title="Sehat Guard β€” AI Medical Assistant") as demo:
283
  send_btn.click(fn=chat, inputs=inputs, outputs=outputs)
284
  user_input.submit(fn=chat, inputs=inputs, outputs=outputs)
285
 
286
- demo.launch()
 
11
  api_key=os.environ.get("NVIDIA_API_KEY"),
12
  )
13
 
14
+ MODEL = "openai/gpt-4o-mini" # GPT via NVIDIA Build β€” change this to match your NVIDIA model name
15
 
16
  # ── System prompts ────────────────────────────────────────────────────────────
17
  SYSTEM_PROMPTS = {
 
81
  def build_messages(system, history, user_text, image_b64_list=None):
82
  messages = [{"role": "system", "content": system}]
83
  for human, assistant in history:
84
+ if human is not None:
85
+ messages.append({"role": "user", "content": str(human)})
86
+ if assistant is not None:
87
+ messages.append({"role": "assistant", "content": str(assistant)})
88
 
89
  if image_b64_list:
90
  content = []
 
121
  display_msg = message.strip() or "πŸ“„ File uploaded β€” please analyze"
122
 
123
  try:
124
+ # NVIDIA Build does not support vision β€” text only
 
125
  if file_upload is not None:
126
  ext = os.path.splitext(file_upload)[1].lower()
127
  if ext == ".pdf":
128
+ # Extract text from PDF instead
129
+ doc = fitz.open(file_upload)
130
+ pdf_text = ""
131
+ for page in doc:
132
+ pdf_text += page.get_text()
133
+ doc.close()
134
+ user_text = f"The patient has uploaded a lab report. Here is the extracted text:\n\n{pdf_text[:4000]}\n\n{user_text}"
135
+ display_msg = "πŸ“„ PDF uploaded" + (f" β€” {message.strip()}" if message.strip() else "")
136
  elif ext in [".jpg", ".jpeg", ".png", ".webp", ".bmp"]:
137
+ user_text = "The patient has uploaded an image of a medical document (lab report/prescription/X-ray). Unfortunately I cannot read images directly β€” please type out the key values from the report and I will analyze them for you.\n\n" + user_text
138
+ display_msg = "πŸ–ΌοΈ Image uploaded" + (f" β€” {message.strip()}" if message.strip() else "")
139
  else:
140
+ history.append((message or "File upload", "❌ Unsupported file type. Please upload PDF, JPG, or PNG."))
141
  yield "", history
142
  return
143
 
144
+ messages = build_messages(system, history, user_text)
145
 
146
  # ── Streaming response ──
147
  stream = client.chat.completions.create(
 
290
  send_btn.click(fn=chat, inputs=inputs, outputs=outputs)
291
  user_input.submit(fn=chat, inputs=inputs, outputs=outputs)
292
 
293
+ demo.launch()