Srikar00007 committed on
Commit
9682ca1
·
verified ·
1 Parent(s): ed1cb89

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +25 -23
app.py CHANGED
@@ -79,11 +79,11 @@ import torch
79
  # -----------------------------
80
  # 1. Load models
81
  # -----------------------------
82
- yolo_model = YOLO("best.pt") # put your YOLO model in root directory
83
  chatbot = pipeline("text-generation", model="microsoft/BioGPT-Large")
84
 
85
  # -----------------------------
86
- # 2. Define the logic
87
  # -----------------------------
88
  def medical_chat(image, question):
89
  if image is None:
@@ -94,38 +94,40 @@ def medical_chat(image, question):
94
  predicted_class = results[0].names[int(results[0].probs.top1)]
95
  yolo_output = f"The YOLO model classified the image as: **{predicted_class}**."
96
 
97
- # Direct YOLO query
98
  if "output of yolo" in question.lower():
99
  return yolo_output
100
 
101
- # Explanation via LLM
102
  prompt = f"The YOLO model detected {predicted_class}. {question}"
103
  response = chatbot(prompt, max_new_tokens=200)
104
  answer = response[0]['generated_text']
105
  return f"{yolo_output}\n\n**Explanation:** {answer}"
106
 
107
  # -----------------------------
108
- # 3. Build a Gradio Interface (not ChatInterface)
109
  # -----------------------------
110
- demo = gr.Interface(
111
- fn=medical_chat,
112
- inputs=[
113
- gr.Image(type="pil", label="🩻 Upload Medical Image"),
114
- gr.Textbox(label="💬 Ask your question", placeholder="e.g., What is the output of YOLO? or Explain this disease.")
115
- ],
116
- outputs="text",
117
- title="🧠 YOLO + Medical Chatbot",
118
- description="Upload a medical image → YOLO detects the disease → Chatbot explains or answers questions.",
119
- examples=[
120
- ["sample_xray.jpg", "What is the output of YOLO?"],
121
- ["sample_xray.jpg", "Explain this disease."]
122
- ]
123
- )
 
 
 
 
124
 
125
  # -----------------------------
126
- # 4. Launch
127
  # -----------------------------
128
  if __name__ == "__main__":
129
- demo.launch()
130
-
131
-
 
79
  # -----------------------------
80
  # 1. Load models
81
  # -----------------------------
82
+ yolo_model = YOLO("best.pt") # make sure 'best.pt' is uploaded to root
83
  chatbot = pipeline("text-generation", model="microsoft/BioGPT-Large")
84
 
85
  # -----------------------------
86
+ # 2. Define main logic
87
  # -----------------------------
88
  def medical_chat(image, question):
89
  if image is None:
 
94
  predicted_class = results[0].names[int(results[0].probs.top1)]
95
  yolo_output = f"The YOLO model classified the image as: **{predicted_class}**."
96
 
97
+ # Direct YOLO question
98
  if "output of yolo" in question.lower():
99
  return yolo_output
100
 
101
+ # Explanation using LLM
102
  prompt = f"The YOLO model detected {predicted_class}. {question}"
103
  response = chatbot(prompt, max_new_tokens=200)
104
  answer = response[0]['generated_text']
105
  return f"{yolo_output}\n\n**Explanation:** {answer}"
106
 
107
  # -----------------------------
108
+ # 3. Build custom UI using Blocks (forces correct layout)
109
  # -----------------------------
110
+ with gr.Blocks(title="🧠 YOLO + Medical Chatbot") as demo:
111
+ gr.Markdown("## 🩺 Medical Image Analyzer & Chatbot")
112
+ gr.Markdown("Upload an image → YOLO detects disease → Chatbot explains it.")
113
+
114
+ with gr.Row():
115
+ with gr.Column():
116
+ image_input = gr.Image(type="pil", label="🩻 Upload Medical Image")
117
+ text_input = gr.Textbox(
118
+ label="💬 Ask your question",
119
+ placeholder="e.g., What is the output of YOLO? or Explain this disease."
120
+ )
121
+ submit_btn = gr.Button("Analyze & Ask")
122
+
123
+ with gr.Column():
124
+ output_text = gr.Textbox(label="🧠 Chatbot Response", lines=10)
125
+
126
+ # When user clicks button
127
+ submit_btn.click(fn=medical_chat, inputs=[image_input, text_input], outputs=output_text)
128
 
129
  # -----------------------------
130
+ # 4. Launch the app
131
  # -----------------------------
132
  if __name__ == "__main__":
133
+ demo.launch()