"""Gradio app: wildfire detection from uploaded images or a map snapshot.

Loads an image-classification model once at import time and exposes two
tabs: manual image upload, and a (placeholder) OSM map tab that feeds a
base64-encoded snapshot through a hidden textbox.
"""

import base64
import io

import gradio as gr
import torch
from PIL import Image
from transformers import (
    pipeline,
    AutoModelForImageClassification,
    AutoImageProcessor,
)

MODEL_ID = "cagrigungor/fire-prediction"

# Load model + processor once at import time; device=-1 pins inference to CPU.
model = AutoModelForImageClassification.from_pretrained(MODEL_ID)
processor = AutoImageProcessor.from_pretrained(MODEL_ID)
pipe = pipeline(
    task="image-classification",
    model=model,
    image_processor=processor,
    device=-1,
)


# -------------------------
# Inference
# -------------------------
def predict_from_image(image):
    """Classify a PIL image; return {label: score} or None for empty input.

    The dict form is what ``gr.Label`` expects for its confidence display.
    """
    if image is None:
        return None
    image = image.convert("RGB")
    results = pipe(image)
    return {r["label"]: float(r["score"]) for r in results}


# -------------------------
# Base64 (input coming from the map)
# -------------------------
def predict_from_base64(base64_str):
    """Decode a base64 image string (optionally a data URL) and classify it.

    Accepts both ``data:image/...;base64,<payload>`` strings and bare
    base64 payloads. Returns None on empty or malformed input instead of
    raising, matching the None-input contract of ``predict_from_image``.
    """
    # Truthiness check also rejects the empty string, not just None.
    if not base64_str:
        return None
    # rpartition keeps the full string when no comma prefix is present,
    # fixing the IndexError the old split(",")[1] raised on bare payloads.
    _, _, payload = base64_str.rpartition(",")
    try:
        image_bytes = base64.b64decode(payload)
        image = Image.open(io.BytesIO(image_bytes)).convert("RGB")
    except Exception:
        # Malformed data from the map widget: report "no result" rather
        # than crashing the UI callback.
        return None
    return predict_from_image(image)


# -------------------------
# UI
# -------------------------
with gr.Blocks(title="🔥 Wildfire Detection with Map") as app:
    gr.Markdown("## 🔥 Wildfire Detection (OSM + Image Upload)")

    with gr.Tabs():
        # -------------------------
        # TAB 1: Manual Upload
        # -------------------------
        with gr.Tab("📤 Resim Yükle"):
            img_input = gr.Image(type="pil", label="Görüntü Yükle")
            btn1 = gr.Button("Tahmin Et")
            out1 = gr.Label(num_top_classes=2)

            btn1.click(
                fn=predict_from_image,
                inputs=img_input,
                outputs=out1,
            )

        # -------------------------
        # TAB 2: Map
        # -------------------------
        with gr.Tab("🗺️ Harita (OSM)"):
            # NOTE(review): the map HTML/JS is missing — without it the
            # hidden textbox below is never populated. TODO: embed the
            # OSM/Leaflet snapshot widget that writes a base64 data URL
            # into `base64_input`.
            gr.HTML("""
            """)
            base64_input = gr.Textbox(visible=False)
            btn2 = gr.Button("Haritadan Tahmin Et")
            out2 = gr.Label(num_top_classes=2)

            btn2.click(
                fn=predict_from_base64,
                inputs=base64_input,
                outputs=out2,
            )

# Guard so importing this module (e.g. for testing) does not start a server.
if __name__ == "__main__":
    app.launch()