APIMONSTER committed on
Commit
045cedb
·
verified ·
1 Parent(s): 8869ee3

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +103 -99
app.py CHANGED
@@ -1,83 +1,105 @@
1
- import gradio as gr
2
- import cv2
3
  import time
4
- from ultralytics import YOLO
5
  import numpy as np
6
-
7
- # Model yükle
8
- model = YOLO("best.pt")
9
- device = 0 # GPU cihazı (0 = ilk GPU)
10
-
11
- def infer_fire(image, conf_thresh):
12
- # RGB→BGR çevir, infer
13
- img_bgr = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
14
- start = time.time()
15
- res = model.predict(
16
- source=img_bgr,
17
- device=device,
18
- imgsz=640,
19
- conf=conf_thresh
20
- )[0]
21
- elapsed = (time.time() - start) * 1000 # ms cinsinden
22
-
23
- annotated = img_bgr.copy()
24
- h, w = annotated.shape[:2]
25
- boxes_np = (
26
- res.boxes.data.cpu().numpy()
27
- if hasattr(res.boxes, "data")
28
- else np.empty((0, 6))
29
  )
 
 
30
 
31
- for x1, y1, x2, y2, conf, cls in boxes_np:
32
- x1, y1, x2, y2 = map(int, (x1, y1, x2, y2))
33
- # 1) Box'u resim sınırları içinde tut
34
- x1 = max(0, x1); y1 = max(0, y1)
35
- x2 = min(w-1, x2); y2 = min(h-1, y2)
36
 
37
- label = f"{model.names[int(cls)]} {conf:.2f}"
 
 
 
 
38
 
39
- # 2) Kalın box çiz
40
- cv2.rectangle(annotated, (x1, y1), (x2, y2), (0, 255, 0), 4)
41
-
42
- # 3) Yazı boyutunu al
43
- (text_w, text_h), baseline = cv2.getTextSize(
44
- label, cv2.FONT_HERSHEY_SIMPLEX, 0.7, 3
45
- )
46
-
47
- # 4) Etiket pozisyonunu ayarla
48
- text_x = x1
49
- text_y = y1 - 5
50
- # Üst sınırı aşmasın
51
- if text_y - text_h - baseline < 0:
52
- text_y = y1 + text_h + 5
53
- # Sağ sınırı aşmasın
54
- if text_x + text_w > w:
55
- text_x = w - text_w - 5
56
-
57
- # 5) Dolgu arkaplanlı dikdörtgen
58
- cv2.rectangle(
59
- annotated,
60
- (text_x, text_y - text_h - baseline),
61
- (text_x + text_w, text_y + baseline),
62
- (0, 255, 0),
63
- cv2.FILLED
64
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
65
 
66
- # 6) Siyah renkli yazı
67
- cv2.putText(
68
- annotated,
69
- label,
70
- (text_x, text_y),
71
- cv2.FONT_HERSHEY_SIMPLEX,
72
- 0.7,
73
- (0, 0, 0),
74
- 3
75
- )
76
 
77
- out_img = cv2.cvtColor(annotated, cv2.COLOR_BGR2RGB)
78
- return out_img, f"Inference time: {elapsed:.1f} ms"
 
79
 
80
- # Gradio arayüzü
 
 
81
  examples = [
82
  ["datasets/test/images/WEB10432.jpg", 0.25],
83
  ["datasets/test/images/WEB11791.jpg", 0.25],
@@ -86,49 +108,31 @@ examples = [
86
 
87
  with gr.Blocks() as demo:
88
  gr.Markdown("## 🔥 Wildfire Smoke & Fire Detector")
89
- gr.Markdown(
90
- "Upload an image below, adjust the confidence threshold, "
91
- "and the model will highlight any smoke or fire regions."
92
- )
93
 
94
  with gr.Row():
95
  with gr.Column(scale=1, min_width=200):
96
- input_img = gr.Image(type="numpy", label="Input Image")
97
- conf_slider = gr.Slider(0.0, 1.0, 0.25, 0.01, label="Confidence Threshold")
98
- run_btn = gr.Button("Detect 🔍", variant="primary")
99
  with gr.Column(scale=5, min_width=1000):
100
- output_img = gr.Image(
101
- type="numpy",
102
- label="Annotated Output",
103
- height=800,
104
- width=1000
105
- )
106
- time_txt = gr.Textbox(label="Performance", interactive=False)
107
 
108
- run_btn.click(
109
- fn=infer_fire,
110
- inputs=[input_img, conf_slider],
111
- outputs=[output_img, time_txt]
112
- )
113
 
114
  gr.Examples(
115
  examples=examples,
116
- inputs=[input_img, conf_slider],
117
- outputs=[output_img, time_txt],
118
  fn=infer_fire,
119
  cache_examples=False
120
  )
121
 
122
  gr.Markdown(
123
- "---\n"
124
- "Model trained on a custom wildfire dataset using YOLOv8. "
125
- "Adjust the threshold to trade off between false positives and false negatives."
126
  )
127
 
128
  if __name__ == "__main__":
129
  demo.launch()
130
- # server_name="0.0.0.0",
131
- # server_port=7861,
132
- # share=False,
133
- # inbrowser=True
134
-
 
1
import os
import time
import cv2
import numpy as np
import gradio as gr
import torch
from ultralytics import YOLO
from huggingface_hub import hf_hub_download

# ——————————————
# 1) DOWNLOAD / LOCATE THE MODEL WEIGHTS
# ——————————————
MODEL_FILENAME = "best.pt"
if os.path.isfile(MODEL_FILENAME):
    # Weights already sit next to the script — use them directly.
    MODEL_PATH = MODEL_FILENAME
else:
    # Otherwise fetch the checkpoint from the Hugging Face Space repo.
    MODEL_PATH = hf_hub_download(
        repo_id="APIMONSTER/ADA447",  # put your own Space id here
        filename=MODEL_FILENAME,
        repo_type="space"
    )

# ——————————————
# 2) PICK THE INFERENCE DEVICE
# ——————————————
# First CUDA GPU when available, plain CPU otherwise.
device = 0 if torch.cuda.is_available() else "cpu"

# ——————————————
# 3) LOAD THE MODEL
# ——————————————
model = YOLO(MODEL_PATH)  # ultralytics detects the device itself,
# but we still pass `device` explicitly inside predict()
 
34
def infer_fire(image, conf_thresh):
    """Run YOLO smoke/fire detection on an RGB image and draw the results.

    Args:
        image: H×W×3 RGB numpy array from ``gr.Image`` (or ``None``).
        conf_thresh: confidence threshold forwarded to ``model.predict``.

    Returns:
        Tuple of (annotated RGB image — or the untouched input on failure,
        status string with the inference time or an error message).
    """
    if image is None:
        return None, "⚠️ Please upload an image."

    try:
        # Work in BGR, the color order OpenCV's drawing primitives expect.
        frame = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)

        t0 = time.time()
        res = model.predict(
            source=frame,
            device=device,
            imgsz=640,
            conf=conf_thresh
        )[0]
        elapsed = (time.time() - t0) * 1000  # predict() time only, in ms

        canvas = frame.copy()
        h, w = canvas.shape[:2]
        if hasattr(res.boxes, "data"):
            dets = res.boxes.data.cpu().numpy()
        else:
            dets = np.empty((0, 6))  # no detections → empty (x1,y1,x2,y2,conf,cls)

        for x1, y1, x2, y2, conf, cls in dets:
            # Integer coords, clamped so boxes never leave the image.
            x1, y1 = max(0, int(x1)), max(0, int(y1))
            x2, y2 = min(w - 1, int(x2)), min(h - 1, int(y2))

            label = f"{model.names[int(cls)]} {conf:.2f}"
            # Thick green detection box.
            cv2.rectangle(canvas, (x1, y1), (x2, y2), (0, 255, 0), 4)

            # Measure the caption, then nudge it so it stays inside the frame.
            (tw, th), bl = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.7, 3)
            tx, ty = x1, y1 - 5
            if ty - th - bl < 0:   # would run off the top → drop below the edge
                ty = y1 + th + 5
            if tx + tw > w:        # would run off the right → pull left
                tx = w - tw - 5

            # Filled green background behind the caption...
            cv2.rectangle(
                canvas,
                (tx, ty - th - bl),
                (tx + tw, ty + bl),
                (0, 255, 0),
                cv2.FILLED
            )
            # ...then black text on top of it.
            cv2.putText(
                canvas,
                label,
                (tx, ty),
                cv2.FONT_HERSHEY_SIMPLEX,
                0.7,
                (0, 0, 0),
                3
            )

        out_img = cv2.cvtColor(canvas, cv2.COLOR_BGR2RGB)
        return out_img, f"✅ Inference: {elapsed:.1f} ms"

    except Exception as e:
        # Surface the failure in the UI instead of crashing the app.
        return image, f"❌ Error: {str(e)}"
99
 
100
+ # ——————————————
101
+ # 4) GRADIO ARAYÜZÜ
102
+ # ——————————————
103
  examples = [
104
  ["datasets/test/images/WEB10432.jpg", 0.25],
105
  ["datasets/test/images/WEB11791.jpg", 0.25],
 
108
 
109
with gr.Blocks() as demo:
    gr.Markdown("## 🔥 Wildfire Smoke & Fire Detector")
    gr.Markdown("Upload an image, adjust confidence threshold, and detect 🔥/🚬 regions.")

    with gr.Row():
        # Left column: image input, threshold slider, trigger button.
        with gr.Column(scale=1, min_width=200):
            image_in = gr.Image(type="numpy", label="Input Image")
            threshold = gr.Slider(0, 1, 0.25, 0.01, label="Confidence Threshold")
            detect_btn = gr.Button("Detect 🔍", variant="primary")
        # Right column: annotated output plus the status / timing line.
        with gr.Column(scale=5, min_width=1000):
            image_out = gr.Image(type="numpy", label="Annotated Output", height=800, width=1000)
            status_out = gr.Textbox(label="Status / Performance", interactive=False)

    detect_btn.click(
        fn=infer_fire,
        inputs=[image_in, threshold],
        outputs=[image_out, status_out]
    )

    gr.Examples(
        examples=examples,
        inputs=[image_in, threshold],
        outputs=[image_out, status_out],
        fn=infer_fire,
        cache_examples=False
    )

    gr.Markdown(
        "---\nModel trained on a custom wildfire dataset using YOLOv8. "
        "CPU’da çalışıyorsanız `device='cpu'`, GPU varsa `device=0` seçildi."
    )

if __name__ == "__main__":
    demo.launch()