iffazainab committed on
Commit a28fd95 · verified · 1 Parent(s): b4d7757

Update app.py

Files changed (1)
  1. app.py +86 -0
app.py CHANGED
@@ -0,0 +1,86 @@
+ import os
+ from typing import List, Tuple
+ import torch
+ import gradio as gr
+ from PIL import Image
+ from transformers import AutoProcessor, AutoModelForVision2Seq
+
+ # ---------- Config ----------
+ MODEL_NAME = os.getenv("MODEL_NAME", "Qwen/Qwen2-VL-2B-Instruct")  # Smaller CPU-friendly model
+ MAX_NEW_TOKENS = int(os.getenv("MAX_NEW_TOKENS", "512"))
+
+ DISCLAIMER = (
+     "⚠️ **Disclaimer:** This tool provides general information and is **not** a substitute for "
+     "official emergency guidance. In an emergency, follow directions from local authorities."
+ )
+
+ # ---------- Load model ----------
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+ dtype = torch.float16 if device == "cuda" else torch.float32
+
+ print(f"Loading model: {MODEL_NAME} on {device} ({dtype})")
+
+ processor = AutoProcessor.from_pretrained(MODEL_NAME)
+ model = AutoModelForVision2Seq.from_pretrained(
+     MODEL_NAME,
+     torch_dtype=dtype
+ ).to(device)
+
+ # ---------- Core logic ----------
+ DEFAULT_QUESTION = (
+     "Identify the type of natural disaster in this image and give immediate and long-term "
+     "precautionary / preparedness measures, with a short disclaimer."
+ )
+
+ def analyze(image: Image.Image, question: str, history: List[Tuple[str, str]]):
+     if image is None:
+         return history, history, "❌ Please upload an image first."
+     if not question.strip():
+         question = DEFAULT_QUESTION
+
+     # Prepare input for Qwen2-VL: the chat template inserts the image placeholder
+     # tokens the model expects before the processor builds the final prompt.
+     messages = [
+         {
+             "role": "user",
+             "content": [
+                 {"type": "image"},
+                 {"type": "text", "text": question},
+             ],
+         }
+     ]
+     prompt = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
+     inputs = processor(text=[prompt], images=[image], return_tensors="pt").to(device)
+
+     output_ids = model.generate(**inputs, max_new_tokens=MAX_NEW_TOKENS)
+     # Decode only the newly generated tokens, not the echoed prompt.
+     generated_ids = output_ids[:, inputs["input_ids"].shape[1]:]
+     answer = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
+
+     answer = answer.strip() + "\n\n" + DISCLAIMER
+     history.append((question, answer))
+     return history, history, ""
+
+ def clear():
+     # Reset both the visible chat and the stored history state.
+     return [], [], ""
+
+ # ---------- Gradio UI ----------
+ with gr.Blocks(title="Disaster Precaution Chatbot (Qwen2-VL + Gradio)") as demo:
+     gr.Markdown("# 🌪️ Disaster Precaution Chatbot")
+     gr.Markdown(
+         "Upload an image that shows a natural disaster (or its aftermath) and ask for "
+         "precautionary / preparedness measures."
+     )
+     gr.Markdown(DISCLAIMER)
+
+     state = gr.State([])
+
+     with gr.Row():
+         with gr.Column(scale=1):
+             img_input = gr.Image(type="pil", label="Upload disaster image")
+             txt_input = gr.Textbox(
+                 label="Your question (optional)",
+                 placeholder="e.g., What should I do to prepare for this?"
+             )
+             analyze_btn = gr.Button("Analyze Image", variant="primary")
+             clear_btn = gr.Button("Clear Chat", variant="secondary")
+
+         with gr.Column(scale=1):
+             chatbot = gr.Chatbot(label="Chatbot", height=480, show_copy_button=True)
+
+     analyze_btn.click(
+         fn=analyze,
+         inputs=[img_input, txt_input, state],
+         outputs=[chatbot, state, txt_input]
+     )
+
+     clear_btn.click(fn=clear, inputs=[], outputs=[chatbot, state, txt_input])
+
+ if __name__ == "__main__":
+     demo.launch()
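
For a quick local check of the same Qwen2-VL inference path used in analyze(), a minimal sketch run outside the Gradio UI is shown below; the gray placeholder image and the test prompt are illustrative only and not part of this commit.

import torch
from PIL import Image
from transformers import AutoProcessor, AutoModelForVision2Seq

model_name = "Qwen/Qwen2-VL-2B-Instruct"
device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = torch.float16 if device == "cuda" else torch.float32

processor = AutoProcessor.from_pretrained(model_name)
model = AutoModelForVision2Seq.from_pretrained(model_name, torch_dtype=dtype).to(device)

# Placeholder image; swap in a real disaster photo to get a meaningful answer.
image = Image.new("RGB", (448, 448), color="gray")
messages = [
    {
        "role": "user",
        "content": [
            {"type": "image"},
            {"type": "text", "text": "Identify the disaster in this image and list precautions."},
        ],
    }
]
prompt = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = processor(text=[prompt], images=[image], return_tensors="pt").to(device)
output_ids = model.generate(**inputs, max_new_tokens=64)
# Print only the newly generated tokens.
print(processor.batch_decode(output_ids[:, inputs["input_ids"].shape[1]:], skip_special_tokens=True)[0])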