import io
import os

# HF_HOME must be set BEFORE importing transformers: the library resolves its
# cache directory at import time, so setting it afterwards (as the original
# code did) can be silently ignored. /tmp is typically the only writable path
# in locked-down container runtimes (e.g. HF Spaces with a read-only rootfs).
os.environ["HF_HOME"] = "/tmp/huggingface"

import torch
from flask import Flask, jsonify, request
from PIL import Image, UnidentifiedImageError
from transformers import AutoModelForImageClassification, AutoProcessor

app = Flask(__name__)

MODEL_ID = "shahad-alh/arabichar-finetuned-v2"
CACHE_DIR = os.environ["HF_HOME"]

# Load model and processor once at startup; cache_dir keeps all downloads
# inside the writable /tmp cache.
model = AutoModelForImageClassification.from_pretrained(
    MODEL_ID,
    trust_remote_code=True,
    cache_dir=CACHE_DIR,
)
model.eval()  # disable dropout / batch-norm updates for deterministic inference

processor = AutoProcessor.from_pretrained(MODEL_ID, cache_dir=CACHE_DIR)


@app.route('/classify', methods=['POST'])
def classify():
    """Classify an uploaded image and return the predicted class index.

    Expects a multipart/form-data POST with the image under the 'file' key.

    Returns:
        JSON ``{"prediction": <int>}`` on success, or
        ``{"error": <message>}`` with HTTP 400 on bad input.
    """
    if 'file' not in request.files:
        # Bug fix: the original returned this error with HTTP 200.
        return jsonify({"error": "No file uploaded"}), 400

    file = request.files['file']
    try:
        # Convert to RGB: uploads may be grayscale, palette, or RGBA, and the
        # processor expects 3-channel input. Image.open on arbitrary bytes can
        # raise — surface that as a 400 instead of an unhandled 500.
        image = Image.open(file.stream).convert("RGB")
    except (UnidentifiedImageError, OSError):
        return jsonify({"error": "Invalid image file"}), 400

    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():  # inference only — skip autograd graph construction
        outputs = model(**inputs)
    predicted_class = torch.argmax(outputs.logits, dim=-1).item()
    return jsonify({"prediction": predicted_class})


if __name__ == '__main__':
    # Bind to all interfaces on the conventional HF Spaces port.
    app.run(host="0.0.0.0", port=7860)