"""
Clean Pet Backend for Hugging Face Spaces
Using ConvNextV2-large-DogBreed model
"""
from flask import Flask, request, jsonify
from flask_cors import CORS
from PIL import Image
import io
import os
# Set cache directories FIRST
# Spaces containers are read-only outside /tmp, so every HF/torch download
# must be redirected there BEFORE transformers/torch are imported.
os.environ['HF_HOME'] = '/tmp/.cache'
# NOTE(review): TRANSFORMERS_CACHE is a deprecated alias of HF_HOME in recent
# transformers releases; kept for compatibility with older versions.
os.environ['TRANSFORMERS_CACHE'] = '/tmp/.cache'
os.environ['TORCH_HOME'] = '/tmp/.cache'
app = Flask(__name__)
CORS(app)  # allow cross-origin requests from the browser frontend
# Global model variables
# Both are populated lazily by load_model() on the first prediction request.
model = None
processor = None
def load_model():
    """Lazily initialize the global model/processor pair (no-op once loaded)."""
    global model, processor
    if model is not None:
        return

    print("🔄 Loading ConvNextV2-large-DogBreed model...")
    # Imported here so the heavy dependencies are paid for on first use only.
    from transformers import AutoImageProcessor, AutoModelForImageClassification
    import torch

    checkpoint = "Pavarissy/ConvNextV2-large-DogBreed"
    processor = AutoImageProcessor.from_pretrained(checkpoint)
    model = AutoModelForImageClassification.from_pretrained(checkpoint)

    # Prefer GPU when the container provides one; otherwise stay on CPU.
    target = "cuda" if torch.cuda.is_available() else "cpu"
    model = model.to(target)
    model.eval()
    print(f"✅ Model loaded on {target}")
@app.route('/', methods=['GET'])
def health():
    """Liveness probe: report service status and the model being served."""
    payload = {
        'status': 'healthy',
        'model': 'ConvNextV2-large-DogBreed',
        'accuracy': '91.39%'
    }
    return jsonify(payload)
@app.route('/predict_pet', methods=['POST'])
def predict_pet():
    """Classify an uploaded dog photo by breed.

    Expects a multipart/form-data POST with an 'image' file field.
    Returns JSON with the predicted breed and confidence on success,
    400 for a missing/empty upload, and 500 for any processing failure.
    """
    try:
        # Lazy-load the model on the first request.
        load_model()

        # Get image
        if 'image' not in request.files:
            return jsonify({'error': 'No image provided'}), 400
        file = request.files['image']
        # Browsers submit an empty file part when no file was selected;
        # reject it with a 400 instead of letting PIL raise and cause a 500.
        data = file.read()
        if not data:
            return jsonify({'error': 'No image provided'}), 400
        image = Image.open(io.BytesIO(data)).convert('RGB')

        # Process and predict
        import torch
        inputs = processor(image, return_tensors="pt")
        # Keep the input tensors on whatever device the model lives on.
        device = next(model.parameters()).device
        inputs = {k: v.to(device) for k, v in inputs.items()}

        with torch.no_grad():
            outputs = model(**inputs)
            logits = outputs.logits

        # Softmax over class logits, then take the single best class.
        probs = torch.nn.functional.softmax(logits, dim=-1)[0]
        top_prob, top_idx = torch.max(probs, dim=0)
        predicted_breed = model.config.id2label[top_idx.item()]
        confidence = float(top_prob.item())
        print(f"✅ Predicted: {predicted_breed} ({confidence:.2%})")

        # Return in format expected by frontend: detection box and gender are
        # placeholders — this model does classification only.
        return jsonify({
            'predicted_label': predicted_breed,
            'breed': predicted_breed,
            'confidence': confidence,
            'detection': {'box': {'x': 0, 'y': 0, 'width': 0, 'height': 0}},
            'gender': 'Unknown'
        })
    except Exception as e:
        # Top-level request boundary: log the full traceback server-side,
        # surface only the message to the client.
        print(f"❌ Error: {str(e)}")
        import traceback
        traceback.print_exc()
        return jsonify({'error': str(e)}), 500
if __name__ == '__main__':
    # PORT is injected by the hosting environment; 7860 is the Spaces default.
    listen_port = int(os.environ.get('PORT', 7860))
    print(f"🚀 Starting server on port {listen_port}")
    app.run(host='0.0.0.0', port=listen_port, debug=False)