PRUTHVIn committed on
Commit
660ccf8
·
verified ·
1 Parent(s): c4f6e45

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +65 -0
app.py ADDED
@@ -0,0 +1,65 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from main import final_pipeline # Changed from utils to main
3
+ from PIL import Image
4
+ import torch
5
+
6
# Custom CSS injected into the Gradio app: widen the default page container.
CSS = """
.gradio-container {
max-width: 1200px !important;
}
"""
11
+
12
def predict(image, question):
    """Run the medical VQA pipeline on an uploaded image and question.

    Args:
        image: PIL image from the Gradio image input, or None when nothing
            has been uploaded.
        question: free-text question in any supported language; may be
            None or empty when the textbox is blank.

    Returns:
        The pipeline's answer string, or a user-facing validation/error
        message when input is missing or the pipeline raises.
    """
    if image is None:
        return "Please upload an image! 📸"
    # Gradio can hand over None (not just "") for an empty textbox; the
    # original `question.strip()` would raise AttributeError on None.
    if not question or not question.strip():
        return "Please enter a question! ❓"

    try:
        # final_pipeline is defined in main.py (project module).
        result = final_pipeline(image, question)
        return result
    except Exception as e:
        # UI boundary: surface the failure to the user instead of crashing
        # the Gradio worker.
        return f"❌ Error: {str(e)}"
23
+
24
# Build the Gradio UI: image + question on the left, markdown answer on the
# right, with multilingual example questions below.
with gr.Blocks(css=CSS, title="🩻 Medical VQA - MultiLanguage") as demo:
    gr.Markdown("""
    # 🩻 **Medical VQA - MultiLanguage**
    Upload X-ray, MRI, CT scans and ask questions in **ANY language**!

    **Supports:** English, Hindi, Tamil, Telugu, Kannada, Malayalam
    """)

    with gr.Row():
        # Left column: user inputs and the trigger button.
        with gr.Column(scale=1):
            img_in = gr.Image(type="pil", label="Upload Medical Image")
            q_in = gr.Textbox(
                label="Question (Any Language)",
                placeholder="What is shown in this X-ray? / இதில் என்ன உள்ளது?",
                lines=2,
            )
            analyze_btn = gr.Button("🔍 Analyze Image", variant="primary", size="lg")

        # Right column: the model's answer rendered as markdown.
        with gr.Column(scale=1):
            answer_md = gr.Markdown(label="🤖 AI Answer")

    analyze_btn.click(predict, [img_in, q_in], answer_md)

    # Example questions in several languages; images are left for the user
    # to supply (None placeholders).
    sample_questions = [
        ["What is this fracture?", None],
        ["இதில் எந்த உடல்நலக் குறைபாடு உள்ளது?", None],
        ["ఈ చిత్రంలో ఏముంది?", None],
        ["Describe this X-ray", None],
    ]
    gr.Examples(
        examples=sample_questions,
        inputs=[q_in, img_in],
        outputs=answer_md,
    )

    gr.Markdown("---")
    gr.Markdown("""
    **Powered by:** Custom VQA (ResNet18+LSTM) + BLIP2 + NLLB-200
    **Dataset:** VQA-RAD (Medical Images)
    """)

if __name__ == "__main__":
    demo.launch()