File size: 3,927 Bytes
b555b7e
 
a89e608
 
b555b7e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
a89e608
b555b7e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
71cc10e
b555b7e
 
 
 
 
 
 
 
 
 
 
 
71cc10e
 
 
b555b7e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
71cc10e
b555b7e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
import io

from fastapi import FastAPI, File, UploadFile, HTTPException
from fastapi.middleware.cors import CORSMiddleware
import numpy as np
from PIL import Image
from PIL import UnidentifiedImageError
import torch
from transformers import AutoImageProcessor, AutoModelForImageClassification

app = FastAPI()

# CORS settings - allow requests from every origin.
# NOTE(review): allow_origins=["*"] together with allow_credentials=True is
# maximally permissive; acceptable for a public demo Space, but should be
# narrowed to the known client origin(s) in production.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Load the model and image processor once at import time so the first request
# does not pay the (large) download/initialization cost.
MODEL_NAME = "Pavarissy/ConvNextV2-large-DogBreed"
print(f"🔄 Loading model: {MODEL_NAME}")

try:
    processor = AutoImageProcessor.from_pretrained(MODEL_NAME)
    model = AutoModelForImageClassification.from_pretrained(MODEL_NAME)
    model.eval()  # inference mode: disables dropout / batch-norm updates
    print("✅ Model loaded successfully!")
except Exception as e:
    print(f"❌ Error loading model: {e}")
    # Bare `raise` re-raises the active exception with its original traceback
    # intact (`raise e` would restart the traceback at this line). The app
    # cannot serve requests without the model, so failing hard is correct.
    raise

@app.get("/")
async def root():
    """Root endpoint - reports API status and the available routes."""
    available_endpoints = {
        "predict": "/predict_pet (POST)",
        "health": "/health (GET)",
    }
    return {
        "message": "Dog Breed Classification API",
        "model": MODEL_NAME,
        "status": "ready",
        "endpoints": available_endpoints,
    }

@app.post("/predict_pet")
async def predict_pet(image: UploadFile = File(...)):
    """Predict the dog breed shown in the uploaded image.

    Response format (the shape the React Native client expects):

    {
        "predicted_label": "n02085620-Chihuahua",
        "confidence": 0.95,
        "detection": {
            "box": {"x": 50, "y": 50, "width": 400, "height": 400}
        },
        "imageDimensions": {"width": 500, "height": 500}
    }

    Raises:
        HTTPException 400: the uploaded file cannot be decoded as an image.
        HTTPException 500: any unexpected failure during inference.
    """
    try:
        image_bytes = await image.read()

        # Decode in its own try so a bad upload is reported as a client
        # error (400) rather than a generic server error (500).
        try:
            img = Image.open(io.BytesIO(image_bytes)).convert('RGB')
        except (UnidentifiedImageError, OSError) as e:
            raise HTTPException(status_code=400, detail=f"Invalid image file: {e}")

        # Original image dimensions; reused for the synthesized box below.
        width, height = img.size
        print(f"📸 Image received: {width}x{height}")

        # Preprocess into the tensor layout the model expects.
        inputs = processor(images=img, return_tensors="pt")

        # Run inference without tracking gradients.
        with torch.no_grad():
            outputs = model(**inputs)
            logits = outputs.logits

        # Softmax over class logits -> probabilities; take the top class.
        probabilities = torch.nn.functional.softmax(logits, dim=-1)
        confidence, predicted_idx = torch.max(probabilities, dim=-1)

        predicted_label = model.config.id2label[predicted_idx.item()]
        confidence_score = confidence.item()

        # The classifier has no localization head, so synthesize a detection
        # box covering the central 80% of the image (10% margin per side).
        box_margin = 0.1
        detection_box = {
            "x": float(width * box_margin),
            "y": float(height * box_margin),
            "width": float(width * (1 - 2 * box_margin)),
            "height": float(height * (1 - 2 * box_margin))
        }

        # Assemble the response in the format the React Native app expects.
        response = {
            "predicted_label": predicted_label,
            "confidence": float(confidence_score),
            "detection": {
                "box": detection_box
            },
            "imageDimensions": {
                "width": width,
                "height": height
            }
        }

        print(f"✅ Prediction: {predicted_label} (confidence: {confidence_score:.4f})")
        return response

    except HTTPException:
        # Propagate the 400 from the decode step unchanged instead of
        # letting the generic handler re-wrap it as a 500.
        raise
    except Exception as e:
        print(f"❌ Error during prediction: {str(e)}")
        raise HTTPException(status_code=500, detail=f"Prediction error: {str(e)}")

@app.get("/health")
async def health_check():
    """Health-check endpoint for monitors and orchestrators."""
    is_loaded = model is not None
    return {
        "status": "healthy",
        "model_loaded": is_loaded,
        "model_name": MODEL_NAME,
    }

# On a Hugging Face Space uvicorn is launched by the platform; this guard
# only matters for local `python <file>.py` runs.
if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=7860)