File size: 4,423 Bytes
045cedb
ddca105
045cedb
ddca105
045cedb
 
 
 
 
 
 
 
 
 
 
 
 
 
ddca105
045cedb
 
ddca105
045cedb
 
 
 
ddca105
045cedb
 
 
 
 
ddca105
045cedb
 
 
 
 
 
 
 
 
 
 
 
 
 
 
ddca105
045cedb
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
ddca105
045cedb
 
ddca105
045cedb
 
 
ddca105
045cedb
 
 
ddca105
 
 
 
 
 
 
 
045cedb
ddca105
 
 
045cedb
 
 
ddca105
045cedb
 
ddca105
045cedb
ddca105
 
 
045cedb
 
ddca105
 
 
 
 
045cedb
 
ddca105
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
import os
import time
import cv2
import numpy as np
import gradio as gr
import torch
from ultralytics import YOLO
from huggingface_hub import hf_hub_download

# ——————————————
# 1) DOWNLOAD / LOCATE MODEL WEIGHTS
# ——————————————
# Prefer a local best.pt; otherwise pull the weights file from the
# Hugging Face Space repo at startup.
MODEL_FILENAME = "best.pt"
if not os.path.isfile(MODEL_FILENAME):
    MODEL_PATH = hf_hub_download(
        repo_id="APIMONSTER/ADA447",   # replace with your own Space id
        filename=MODEL_FILENAME,
        repo_type="space"
    )
else:
    MODEL_PATH = MODEL_FILENAME

# ——————————————
# 2) PICK THE DEVICE
# ——————————————
# Ultralytics accepts a CUDA device index (int) or the string "cpu".
device = 0 if torch.cuda.is_available() else "cpu"

# ——————————————
# 3) LOAD THE MODEL
# ——————————————
model = YOLO(MODEL_PATH)  # ultralytics auto-detects the device internally
# but we still pass the device argument explicitly in predict()

def infer_fire(image, conf_thresh):
    """Detect fire/smoke in an RGB image and return an annotated copy.

    Args:
        image: HxWx3 RGB uint8 numpy array from the Gradio Image input,
            or None when nothing was uploaded.
        conf_thresh: confidence threshold in [0, 1] forwarded to
            ``model.predict``.

    Returns:
        Tuple of (annotated RGB numpy image or None, status string).
        On any exception the untouched input image is returned together
        with the error text so the UI never crashes.
    """
    if image is None:
        return None, "⚠️ Please upload an image."

    try:
        # OpenCV drawing functions expect BGR channel order.
        img_bgr = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        start = time.time()

        # Run inference; `device` is the module-level CUDA index or "cpu".
        results = model.predict(
            source=img_bgr,
            device=device,
            imgsz=640,
            conf=conf_thresh
        )
        res = results[0]
        elapsed = (time.time() - start) * 1000  # wall-clock ms

        # Draw detections on a copy so the raw frame stays untouched.
        annotated = img_bgr.copy()
        h, w = annotated.shape[:2]
        # Each row is (x1, y1, x2, y2, conf, cls); empty (0,6) when no hits.
        boxes = res.boxes.data.cpu().numpy() if hasattr(res.boxes, "data") else np.empty((0, 6))

        for x1, y1, x2, y2, conf, cls in boxes:
            x1, y1, x2, y2 = map(int, (x1, y1, x2, y2))
            # Clamp coordinates that spill outside the image bounds.
            x1, y1 = max(0, x1), max(0, y1)
            x2, y2 = min(w - 1, x2), min(h - 1, y2)

            label = f"{model.names[int(cls)]} {conf:.2f}"
            # Thick green box.
            cv2.rectangle(annotated, (x1, y1), (x2, y2), (0, 255, 0), 4)

            # Label placement: above the box by default, below it when the
            # box touches the top edge, shifted left when it would run off
            # the right edge.
            (tw, th), bl = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.7, 3)
            tx, ty = x1, y1 - 5
            if ty - th - bl < 0:
                ty = y1 + th + 5
            if tx + tw > w:
                tx = w - tw - 5
            # FIX: a label wider than the frame made `w - tw - 5` negative,
            # pushing the text off-canvas to the left; clamp to the edge.
            tx = max(0, tx)

            # Filled background so the text stays readable on any scene;
            # clamp the top edge so the rectangle never starts off-canvas.
            cv2.rectangle(
                annotated,
                (tx, max(0, ty - th - bl)),
                (tx + tw, ty + bl),
                (0, 255, 0),
                cv2.FILLED
            )
            cv2.putText(
                annotated,
                label,
                (tx, ty),
                cv2.FONT_HERSHEY_SIMPLEX,
                0.7,
                (0, 0, 0),
                3
            )

        # Convert back to RGB for display in Gradio.
        out_img = cv2.cvtColor(annotated, cv2.COLOR_BGR2RGB)
        return out_img, f"✅ Inference: {elapsed:.1f} ms"

    except Exception as e:
        # Surface the error in the status box instead of crashing the UI.
        return image, f"❌ Error: {str(e)}"

# ——————————————
# 4) GRADIO UI
# ——————————————
# Sample inputs shipped with the repo; each entry is
# [image path, default confidence threshold] matching infer_fire's inputs.
examples = [
    ["datasets/test/images/WEB10432.jpg", 0.25],
    ["datasets/test/images/WEB11791.jpg", 0.25],
    ["datasets/test/images/WEB11706.jpg", 0.25],
]

with gr.Blocks() as demo:
    # Page header and short usage hint.
    gr.Markdown("## 🔥 Wildfire Smoke & Fire Detector")
    gr.Markdown("Upload an image, adjust confidence threshold, and detect 🔥/🚬 regions.")

    with gr.Row():
        # Left column: inputs plus the trigger button.
        with gr.Column(scale=1, min_width=200):
            input_image = gr.Image(type="numpy", label="Input Image")
            conf_slider = gr.Slider(
                minimum=0,
                maximum=1,
                value=0.25,
                step=0.01,
                label="Confidence Threshold",
            )
            detect_btn = gr.Button("Detect 🔍", variant="primary")
        # Right column: annotated result and the status/performance line.
        with gr.Column(scale=5, min_width=1000):
            annotated_view = gr.Image(type="numpy", label="Annotated Output", height=800, width=1000)
            status_box = gr.Textbox(label="Status / Performance", interactive=False)

    # Wire the button to the inference function.
    detect_btn.click(infer_fire, [input_image, conf_slider], [annotated_view, status_box])

    # Clickable example rows; not cached so they run live.
    gr.Examples(
        examples=examples,
        inputs=[input_image, conf_slider],
        outputs=[annotated_view, status_box],
        fn=infer_fire,
        cache_examples=False
    )

    gr.Markdown(
        "---\nModel trained on a custom wildfire dataset using YOLOv8. "
        "CPU’da çalışıyorsanız `device='cpu'`, GPU varsa `device=0` seçildi."
    )

# Launch the Gradio app only when executed as a script (not on import).
if __name__ == "__main__":
    demo.launch()