Spaces:
Sleeping
Sleeping
"""
Clean Pet Backend for Hugging Face Spaces
Using ConvNextV2-large-DogBreed model
"""
from flask import Flask, request, jsonify
from flask_cors import CORS
from PIL import Image
import io
import os

# Set cache directories FIRST -- before transformers/torch are imported,
# since both read these variables at import time. /tmp is presumably the
# writable path on the Space -- confirm against the container config.
os.environ['HF_HOME'] = '/tmp/.cache'
os.environ['TRANSFORMERS_CACHE'] = '/tmp/.cache'  # legacy alias of HF_HOME
os.environ['TORCH_HOME'] = '/tmp/.cache'

app = Flask(__name__)
CORS(app)  # allow cross-origin requests (frontend is served elsewhere)

# Global model variables -- populated lazily by load_model() on first request.
model = None
processor = None
def load_model():
    """Initialize the global model and processor exactly once.

    Subsequent calls return immediately; the heavy imports are kept
    inside the function so startup stays fast until the first request.
    """
    global model, processor
    if model is not None:  # already initialized
        return

    print("🔄 Loading ConvNextV2-large-DogBreed model...")
    import torch
    from transformers import AutoImageProcessor, AutoModelForImageClassification

    repo_id = "Pavarissy/ConvNextV2-large-DogBreed"
    processor = AutoImageProcessor.from_pretrained(repo_id)

    target = "cuda" if torch.cuda.is_available() else "cpu"
    model = AutoModelForImageClassification.from_pretrained(repo_id).to(target)
    model.eval()  # inference mode: disables dropout/batchnorm updates
    print(f"✅ Model loaded on {target}")
# NOTE(review): no route decorator was present in the retrieved source, so this
# handler was never registered and the function was dead code. '/health' is the
# conventional path -- confirm it matches the URL the frontend/probe calls.
@app.route('/health', methods=['GET'])
def health():
    """Health-check endpoint: report service status and the model in use."""
    return jsonify({
        'status': 'healthy',
        'model': 'ConvNextV2-large-DogBreed',
        'accuracy': '91.39%'
    })
# NOTE(review): no route decorator was present in the retrieved source, so this
# handler was unreachable. '/predict' + POST matches the multipart upload the
# body expects -- confirm the path against the frontend's fetch URL.
@app.route('/predict', methods=['POST'])
def predict_pet():
    """Classify an uploaded pet image.

    Expects a multipart/form-data POST with the file under the 'image' key.
    Returns JSON with the predicted breed, its softmax confidence, and
    placeholder detection/gender fields in the shape the frontend expects.
    Responds 400 when no image is supplied, 500 on any processing error.
    """
    try:
        # Lazy one-time initialization of the global model/processor
        load_model()

        # Get image from the multipart form
        if 'image' not in request.files:
            return jsonify({'error': 'No image provided'}), 400
        file = request.files['image']
        # Force RGB so grayscale/RGBA/palette uploads don't break the processor
        image = Image.open(io.BytesIO(file.read())).convert('RGB')

        # Preprocess and run the forward pass on whichever device holds the model
        import torch
        inputs = processor(image, return_tensors="pt")
        device = next(model.parameters()).device
        inputs = {k: v.to(device) for k, v in inputs.items()}

        with torch.no_grad():  # inference only -- no autograd bookkeeping
            outputs = model(**inputs)
            logits = outputs.logits

        # Top-1 prediction from the softmax over the single-image batch
        probs = torch.nn.functional.softmax(logits, dim=-1)[0]
        top_prob, top_idx = torch.max(probs, dim=0)

        predicted_breed = model.config.id2label[top_idx.item()]
        confidence = float(top_prob.item())
        print(f"✅ Predicted: {predicted_breed} ({confidence:.2%})")

        # Return in the format expected by the frontend; this backend has no
        # detector or gender model, so those fields are fixed placeholders.
        return jsonify({
            'predicted_label': predicted_breed,
            'breed': predicted_breed,
            'confidence': confidence,
            'detection': {'box': {'x': 0, 'y': 0, 'width': 0, 'height': 0}},
            'gender': 'Unknown'
        })
    except Exception as e:
        # Boundary handler: log the full traceback server-side, surface only
        # the message to the client as a 500.
        print(f"❌ Error: {str(e)}")
        import traceback
        traceback.print_exc()
        return jsonify({'error': str(e)}), 500
if __name__ == '__main__':
    # PORT is injected by the hosting environment; 7860 is the HF Spaces default.
    port = int(os.environ.get('PORT', 7860))
    print(f"🚀 Starting server on port {port}")
    # 0.0.0.0 so the container's mapped port is reachable; debug off in production.
    app.run(host='0.0.0.0', port=port, debug=False)