Spaces:
Sleeping
Sleeping
File size: 2,920 Bytes
9e16ada 0049671 9c09d00 0049671 cdde43b 9e16ada 9572d72 9e16ada 0049671 266b6f7 9572d72 266b6f7 9572d72 266b6f7 9572d72 6c90697 9572d72 266b6f7 9c09d00 266b6f7 9572d72 266b6f7 9572d72 8512af3 9572d72 8512af3 9572d72 0049671 266b6f7 cdde43b 97891e0 266b6f7 97891e0 9572d72 266b6f7 97891e0 266b6f7 97891e0 266b6f7 97891e0 266b6f7 0049671 266b6f7 a446f94 266b6f7 73ace6e 6c90697 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 |
import os
import gradio as gr
import torch
from PIL import Image
from torchvision import transforms
from transformers import AutoConfig, AutoModelForImageClassification
from huggingface_hub import snapshot_download
# =========================
# Environment setup to reduce warnings
# =========================
os.environ["TRANSFORMERS_NO_ADVISORY_WARNINGS"] = "1"
os.environ["TOKENIZERS_PARALLELISM"] = "false"
# =========================
# Constrain CPU usage (single-threaded torch; typical for free CPU Spaces)
# =========================
torch.set_num_threads(1)
torch.set_num_interop_threads(1)
# =========================
# Preprocessing pipeline: resize to 32x32, force 3-channel grayscale,
# convert to a float tensor in [0, 1]
# =========================
transform = transforms.Compose([
transforms.Resize((32, 32)),
transforms.Grayscale(num_output_channels=3),
transforms.ToTensor()
])
# =========================
# Global model state — populated lazily by load_model() on first request
# =========================
model = None
config = None
device = torch.device("cpu")
# =========================
# دالة تحميل النموذج عند الطلب
# =========================
def load_model():
    """Lazily download and initialize the classifier on first use.

    Populates the module-level ``model`` and ``config`` globals from the
    Hugging Face Hub repo ``shahad-alh/arabichar-finetuned-v2``. Subsequent
    calls return immediately without re-downloading.
    """
    global model, config
    # Already initialized — nothing to do.
    if model is not None and config is not None:
        return
    print("🚀 تحميل النموذج لأول مرة من Hugging Face Hub...")
    snapshot_dir = snapshot_download(
        repo_id="shahad-alh/arabichar-finetuned-v2",
        local_dir="./model"
    )
    # The repo ships custom modeling code, hence trust_remote_code=True.
    config = AutoConfig.from_pretrained(snapshot_dir, trust_remote_code=True)
    model = AutoModelForImageClassification.from_pretrained(
        snapshot_dir,
        config=config,
        trust_remote_code=True
    ).to(device)
    model.eval()
    print("✅ تم تحميل النموذج بنجاح")
# =========================
# دالة التنبؤ
# =========================
def predict(image: Image.Image):
    """Classify an uploaded Arabic character image.

    Parameters:
        image: the PIL image supplied by Gradio, or None if nothing
            was uploaded.

    Returns:
        The predicted class label string, or an Arabic error message
        when input is missing or inference fails.
    """
    try:
        if image is None:
            return "لا توجد صورة"
        # Load the model lazily on the first request.
        load_model()
        # Normalize to RGB first: Gradio may hand over RGBA or palette-mode
        # images (e.g. PNGs with alpha), which would crash
        # Grayscale(num_output_channels=3) — it expects 1 or 3 channels.
        tensor = transform(image.convert("RGB")).unsqueeze(0).to(device)
        with torch.no_grad():
            outputs = model(tensor)
            logits = outputs.logits
            predicted = torch.argmax(logits, dim=1).item()
        label = config.id2label[predicted]
        return label
    except Exception as e:
        # UI boundary: log and return a friendly message rather than
        # letting the exception surface in Gradio.
        print("❌ Prediction error:", e)
        return "خطأ في التنبؤ"
# =========================
# واجهة Gradio
# =========================
# =========================
# Gradio UI: single-image input, text label output
# =========================
demo = gr.Interface(
    fn=predict,
    inputs=gr.Image(type="pil", label="ارفع صورة حرف عربي"),
    outputs=gr.Textbox(label="الحرف المتوقع"),
    title="Arabic Character Recognition",
    description="نموذج للتعرف على الحروف العربية من الصور",
    flagging_mode="never"
)
demo.queue().launch()