Muhammadidrees committed on
Commit
dbaef5a
·
verified ·
1 Parent(s): 27ee687

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +99 -31
app.py CHANGED
@@ -2,7 +2,7 @@ import os
2
  import gradio as gr
3
  from openai import OpenAI
4
 
5
- # --- Initialize client securely ---
6
  HF_TOKEN = os.getenv("HF_TOKEN")
7
 
8
  if not HF_TOKEN:
@@ -14,31 +14,56 @@ client = OpenAI(
14
  )
15
 
16
  # --- Chat handler ---
17
- def chat_with_model(message, history):
18
- # Build messages list safely
19
- messages = []
20
-
21
- if history:
22
- for msg in history:
23
- # Handle both dict and tuple formats
24
- if isinstance(msg, dict):
25
- # Keep only allowed keys
26
- messages.append({
27
- "role": msg.get("role", "user"),
28
- "content": msg.get("content", "")
29
- })
30
- elif isinstance(msg, (list, tuple)) and len(msg) == 2:
31
- messages.append({"role": "user", "content": msg[0]})
32
- messages.append({"role": "assistant", "content": msg[1]})
33
-
34
- # Add latest user message
35
- messages.append({"role": "user", "content": message})
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
36
 
37
  try:
 
38
  response = client.chat.completions.create(
39
  model="openai/gpt-oss-120b:fireworks-ai",
40
- messages=messages,
 
 
 
41
  temperature=0.7,
 
42
  )
43
  reply = response.choices[0].message.content
44
  except Exception as e:
@@ -46,14 +71,57 @@ def chat_with_model(message, history):
46
 
47
  return reply
48
 
49
- # --- Gradio Interface ---
50
- chatbot_ui = gr.ChatInterface(
51
- fn=chat_with_model,
52
- title="🧠 GPT-OSS 120B (Fireworks)",
53
- description="Chat with the OSS 120B model hosted via Hugging Face router.",
54
- examples=["Hello!", "Tell me a joke.", "Explain AI in simple terms."],
55
- type="messages", # required
56
- )
57
 
58
- if __name__ == "__main__":
59
- chatbot_ui.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2
  import gradio as gr
3
  from openai import OpenAI
4
 
5
+ # --- Initialize Hugging Face router client ---
6
  HF_TOKEN = os.getenv("HF_TOKEN")
7
 
8
  if not HF_TOKEN:
 
14
  )
15
 
16
  # --- Chat handler ---
17
def generate_report(age, gender, height, weight, albumin, creatinine, glucose, crp, mcv, rdw, alp, wbc, lymphocytes, hb, pv):
    """Generate a structured medical insight report from patient biomarkers.

    Builds a system prompt describing the required report format, formats the
    patient data into a user message, and asks the chat model for the report.

    Args:
        age, gender, height, weight: Basic patient demographics.
        albumin, creatinine, glucose, crp, mcv, rdw, alp, wbc,
        lymphocytes, hb, pv: Individual lab biomarker values (units are
            stated in the prompt text below).

    Returns:
        The model-generated report text, or an error message string if the
        API call fails (the function never raises to the Gradio caller).
    """
    # --- System Prompt ---
    system = """You are an advanced Medical Insight Generation AI trained to analyze clinical biomarkers, urine analysis, and lab test results.
Your goal is to generate a medically accurate, empathetic, and client-friendly health report in the following structured format:

1. Executive Summary
2. System-Specific Analysis
3. Personalized Action Plan
4. Interaction Alerts
5. Longevity Metrics
6. Tabular Mapping
7. Enhanced AI Insight
8. AI Insights & Longitudinal Risk Assessment
9. Predictive Longevity Risk Profile
10. Actionable Next Steps

Maintain a professional, compassionate tone and explain medical reasoning in accessible language.
"""

    # --- User message: all patient inputs serialized as labelled lines ---
    user_message = (
        f"Patient Info:\n"
        f"- Age: {age}\n"
        f"- Gender: {gender}\n"
        f"- Height: {height} cm\n"
        f"- Weight: {weight} kg\n\n"
        f"Biomarkers:\n"
        f"- Albumin: {albumin} g/dL\n"
        f"- Creatinine: {creatinine} mg/dL\n"
        f"- Glucose: {glucose} mg/dL\n"
        f"- CRP: {crp} mg/L\n"
        f"- MCV: {mcv} fL\n"
        f"- RDW: {rdw} %\n"
        f"- ALP: {alp} U/L\n"
        f"- WBC: {wbc} x10^3/μL\n"
        f"- Lymphocytes: {lymphocytes} %\n"
        f"- Hemoglobin: {hb} g/dL\n"
        f"- Plasma (PV): {pv} mL\n"
    )

    try:
        # --- Send request to model via the module-level router client ---
        response = client.chat.completions.create(
            model="openai/gpt-oss-120b:fireworks-ai",
            messages=[
                {"role": "system", "content": system},
                {"role": "user", "content": user_message},
            ],
            temperature=0.7,
            max_tokens=2000,
        )
        reply = response.choices[0].message.content
    except Exception as e:
        # Ensure `reply` is always bound: surface the failure as text in the
        # UI instead of crashing the Gradio handler with an unhandled error.
        reply = f"⚠️ Error generating report: {e}"

    return reply
73
 
 
 
 
 
 
 
 
 
74
 
75
# --- Gradio UI ---
with gr.Blocks(title="🧬 Biomarker Medical Insight Chatbot") as demo:
    gr.Markdown(
        """
        ## 🧠 AI-Powered Biomarker Report Generator
        Enter the patient details and biomarkers below.
        The AI will generate a **comprehensive medical report** with insights, risk assessment, and personalized recommendations.
        """
    )

    # Patient demographics
    with gr.Row():
        age = gr.Number(label="Age", value=45)
        gender = gr.Radio(["Male", "Female"], label="Gender", value="Male")

    with gr.Row():
        height = gr.Number(label="Height (cm)", value=175)
        weight = gr.Number(label="Weight (kg)", value=72)

    gr.Markdown("### 🧫 Biomarker Inputs (Demo Values Pre-filled)")

    # Lab biomarkers, grouped three per row for a compact layout
    with gr.Row():
        albumin = gr.Number(label="Albumin (g/dL)", value=4.2)
        creatinine = gr.Number(label="Creatinine (mg/dL)", value=1.1)
        glucose = gr.Number(label="Glucose (mg/dL)", value=98)

    with gr.Row():
        crp = gr.Number(label="CRP (mg/L)", value=2.5)
        mcv = gr.Number(label="MCV (fL)", value=90.5)
        rdw = gr.Number(label="RDW (%)", value=13.2)

    with gr.Row():
        alp = gr.Number(label="ALP (U/L)", value=110)
        wbc = gr.Number(label="WBC (x10^3/μL)", value=6.8)
        lymphocytes = gr.Number(label="Lymphocytes (%)", value=35)

    with gr.Row():
        hb = gr.Number(label="Hemoglobin (g/dL)", value=14.5)
        pv = gr.Number(label="Plasma (PV) (mL)", value=3000)

    submit_btn = gr.Button("📤 Generate Medical Report")
    output_box = gr.Textbox(label="AI-Generated Medical Report", lines=25)

    # Wire the button: input order must match generate_report's signature
    submit_btn.click(
        generate_report,
        inputs=[
            age, gender, height, weight,
            albumin, creatinine, glucose, crp, mcv,
            rdw, alp, wbc, lymphocytes, hb, pv
        ],
        outputs=output_box
    )

# Guard the launch so importing this module (e.g. in tests or by a hosting
# runner) does not start the server as a side effect.
if __name__ == "__main__":
    demo.launch()