# AI_Detector / app.py
import os
import re
import torch
import logging
import gc
import sys
import pwd # Added for monkey patch
from fastapi import FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
from typing import Dict, List, Optional
from transformers import AutoTokenizer, AutoModelForSequenceClassification
from tokenizers.normalizers import Sequence, Replace, Strip
from tokenizers import Regex
from huggingface_hub import hf_hub_download # Added for reliable HF downloads
# =====================================================
# 🛠️ Monkey Patch for Docker/Container UID Issue
# =====================================================
# Fix for 'getpwuid(): uid not found: 1000' in containerized environments
def patched_getpwuid(uid_num):
    try:
        return original_getpwuid(uid_num)
    except KeyError:
        if uid_num == os.getuid():
            # Synthesize a passwd entry for the current UID.
            # struct_passwd is built from a 7-tuple:
            # (pw_name, pw_passwd, pw_uid, pw_gid, pw_gecos, pw_dir, pw_shell)
            return pwd.struct_passwd((
                'dockeruser', 'x', uid_num, os.getgid(),
                'Docker User', '/tmp', '/bin/sh'
            ))
        raise

original_getpwuid = pwd.getpwuid
pwd.getpwuid = patched_getpwuid
# Set fallback env vars to avoid user-dependent paths
os.environ.setdefault('HOME', '/tmp')
os.environ.setdefault('USER', 'dockeruser')
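# Quick sanity check of the patch (a doctest-style sketch, illustrative only,
# not executed at import time). When the UID has no /etc/passwd entry, the
# synthetic record above is returned:
#
#     >>> import os, pwd
#     >>> pwd.getpwuid(os.getuid()).pw_dir   # '/tmp' if the fake entry is used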
# =====================================================
# 🔧 Environment configuration and settings
# =====================================================
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)
# Memory and cache settings
CACHE_DIR = "/tmp/huggingface_cache"
os.makedirs(CACHE_DIR, exist_ok=True)
# Hugging Face environment variables
os.environ.update({
"HF_HOME": CACHE_DIR,
"TRANSFORMERS_CACHE": CACHE_DIR,
"HF_DATASETS_CACHE": CACHE_DIR,
"HUGGINGFACE_HUB_CACHE": CACHE_DIR,
"TORCH_HOME": CACHE_DIR,
"TOKENIZERS_PARALLELISM": "false", # منع مشاكل threading
"TRANSFORMERS_OFFLINE": "0", # السماح بالتحميل من الإنترنت
})
# PyTorch memory settings
if torch.cuda.is_available():
os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'max_split_size_mb:128'
torch.backends.cudnn.benchmark = True
# =====================================================
# 🚀 Device selection (GPU or CPU)
# =====================================================
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
logger.info(f"🖥️ Using device: {device}")
if torch.cuda.is_available():
logger.info(f"🎮 CUDA Device: {torch.cuda.get_device_name(0)}")
logger.info(f"💾 CUDA Memory: {torch.cuda.get_device_properties(0).total_memory / 1024**3:.2f} GB")
# =====================================================
# 📊 Label map (model families)
# =====================================================
label_mapping = {
0: '13B', 1: '30B', 2: '65B', 3: '7B', 4: 'GLM130B', 5: 'bloom_7b',
6: 'bloomz', 7: 'cohere', 8: 'davinci', 9: 'dolly', 10: 'dolly-v2-12b',
11: 'flan_t5_base', 12: 'flan_t5_large', 13: 'flan_t5_small',
14: 'flan_t5_xl', 15: 'flan_t5_xxl', 16: 'gemma-7b-it', 17: 'gemma2-9b-it',
18: 'gpt-3.5-turbo', 19: 'gpt-35', 20: 'gpt4', 21: 'gpt4o',
22: 'gpt_j', 23: 'gpt_neox', 24: 'human', 25: 'llama3-70b', 26: 'llama3-8b',
27: 'mixtral-8x7b', 28: 'opt_1.3b', 29: 'opt_125m', 30: 'opt_13b',
31: 'opt_2.7b', 32: 'opt_30b', 33: 'opt_350m', 34: 'opt_6.7b',
35: 'opt_iml_30b', 36: 'opt_iml_max_1.3b', 37: 't0_11b', 38: 't0_3b',
39: 'text-davinci-002', 40: 'text-davinci-003'
}
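# Index 24 ('human') is special-cased by classify_text() below; every other
# index names a specific generator family. Illustrative lookups:
#
#     >>> label_mapping[24]
#     'human'
#     >>> label_mapping[21]
#     'gpt4o'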
# =====================================================
# 🤖 Model Manager
# =====================================================
class ModelManager:
def __init__(self):
self.tokenizer = None
self.models = []
self.models_loaded = False
self.model_urls = [
"https://huggingface.co/mihalykiss/modernbert_2/resolve/main/Model_groups_3class_seed12",
"https://huggingface.co/mihalykiss/modernbert_2/resolve/main/Model_groups_3class_seed22"
]
self.base_model_id = "answerdotai/ModernBERT-base" # Primary
self.fallback_model_id = "bert-base-uncased" # Fallback if ModernBERT fails
self.using_fallback = False
def load_tokenizer(self):
"""تحميل الـ Tokenizer مع fallback"""
try:
logger.info(f"📝 Loading tokenizer from {self.base_model_id}...")
self.tokenizer = AutoTokenizer.from_pretrained(
self.base_model_id,
cache_dir=CACHE_DIR,
use_fast=True,
trust_remote_code=False
)
logger.info("✅ Primary tokenizer loaded successfully")
except Exception as e:
logger.warning(f"⚠️ Failed to load primary tokenizer: {e}")
try:
logger.info(f"🔄 Falling back to {self.fallback_model_id}...")
self.tokenizer = AutoTokenizer.from_pretrained(
self.fallback_model_id,
cache_dir=CACHE_DIR,
use_fast=True,
trust_remote_code=False
)
self.using_fallback = True
logger.info("✅ Fallback tokenizer loaded successfully")
except Exception as fallback_e:
logger.error(f"❌ Failed to load fallback tokenizer: {fallback_e}")
return False
        # Configure the text normalizer
try:
            newline_to_space = Replace(Regex(r'\s*\n\s*'), " ")
            # Re-join words hyphenated across a line break (ASCII hyphen,
            # non-breaking hyphen, en dash, em dash).
            join_hyphen_break = Replace(Regex(r'(\w+)[-‐‑–—]\s*\n\s*(\w+)'), r"\1\2")
self.tokenizer.backend_tokenizer.normalizer = Sequence([
self.tokenizer.backend_tokenizer.normalizer,
join_hyphen_break,
newline_to_space,
Strip()
])
except Exception as e:
logger.warning(f"⚠️ Could not set custom normalizer: {e}")
return True
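    # Illustrative effect of the normalizer chain installed above (assumes the
    # fast tokenizer exposes backend_tokenizer; slow tokenizers do not, hence
    # the try/except):
    #
    #     "docu-\nment text\n"  ->  "document text"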
def load_single_model(self, model_url=None, model_path=None, model_name="Model"):
"""تحميل موديل واحد مع fallback ومعالجة شاملة للأخطاء"""
base_model = None
try:
logger.info(f"🤖 Loading base {model_name} from {self.base_model_id}...")
            # Try the primary base model first
base_model = AutoModelForSequenceClassification.from_pretrained(
self.base_model_id,
num_labels=41,
cache_dir=CACHE_DIR,
torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
low_cpu_mem_usage=True,
trust_remote_code=False
)
logger.info("✅ Primary base model loaded")
except Exception as e:
logger.warning(f"⚠️ Failed to load primary base model: {e}")
try:
logger.info(f"🔄 Falling back to {self.fallback_model_id}...")
base_model = AutoModelForSequenceClassification.from_pretrained(
self.fallback_model_id,
num_labels=41,
cache_dir=CACHE_DIR,
torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
low_cpu_mem_usage=True,
trust_remote_code=False
)
self.using_fallback = True
logger.info("✅ Fallback base model loaded (note: weights may not be compatible)")
except Exception as fallback_e:
logger.error(f"❌ Failed to load fallback base model: {fallback_e}")
return None
        # Try to load the fine-tuned weights (only when not in fallback mode, or when compatible)
try:
if model_path and os.path.exists(model_path):
logger.info(f"📁 Loading from local file: {model_path}")
state_dict = torch.load(model_path, map_location=device, weights_only=True)
base_model.load_state_dict(state_dict, strict=False)
elif model_url:
                # Use hf_hub_download instead of torch.hub for HF repos
logger.info(f"🌐 Downloading weights from HF repo...")
repo_id = "mihalykiss/modernbert_2"
filename = model_url.split('/')[-1] # Extract filename like "Model_groups_3class_seed12"
                pt_file = hf_hub_download(
                    repo_id=repo_id,
                    filename=filename,
                    cache_dir=CACHE_DIR
                )
state_dict = torch.load(pt_file, map_location=device, weights_only=True)
                # Only load the weights when not in fallback mode (ModernBERT weights may not fit standard BERT)
if not self.using_fallback:
base_model.load_state_dict(state_dict, strict=False)
logger.info("✅ Weights loaded successfully")
else:
logger.warning("⚠️ Skipping weight load in fallback mode (incompatible architecture)")
else:
logger.info("📊 Using model with random initialization")
except Exception as weight_error:
logger.warning(f"⚠️ Could not load weights: {weight_error}")
logger.info("📊 Continuing with base model (random or pre-trained init)")
        # Move the model to the target device
model = base_model.to(device)
model.eval()
        # Free memory
if 'state_dict' in locals():
del state_dict
gc.collect()
if torch.cuda.is_available():
torch.cuda.empty_cache()
logger.info(f"✅ {model_name} loaded successfully (fallback: {self.using_fallback})")
return model
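    # Hypothetical standalone use (names follow this file; the weights are
    # downloaded on first call):
    #
    #     mm = ModelManager()
    #     mm.load_tokenizer()
    #     model = mm.load_single_model(model_url=mm.model_urls[0],
    #                                  model_name="Seed12")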
def load_models(self, max_models=2):
"""تحميل الموديلات بحد أقصى للذاكرة"""
if self.models_loaded:
logger.info("✨ Models already loaded")
return True
        # Load the tokenizer first
if not self.load_tokenizer():
logger.error("❌ Tokenizer load failed - cannot proceed")
return False
        # Load the models
logger.info(f"🚀 Loading up to {max_models} models...")
        # Try a local checkpoint first
local_model_path = "modernbert.bin"
if os.path.exists(local_model_path):
model = self.load_single_model(
model_path=local_model_path,
model_name="Model 1 (Local)"
)
if model is not None:
self.models.append(model)
        # Load models from URLs (filenames are extracted in load_single_model)
for i, full_url in enumerate(self.model_urls[:max_models - len(self.models)]):
if len(self.models) >= max_models:
break
            # Pass full_url as-is; load_single_model extracts the filename
model = self.load_single_model(
model_url=full_url,
model_name=f"Model {len(self.models) + 1}"
)
if model is not None:
self.models.append(model)
            # Check available memory
if torch.cuda.is_available():
mem_allocated = torch.cuda.memory_allocated() / 1024**3
mem_reserved = torch.cuda.memory_reserved() / 1024**3
logger.info(f"💾 GPU Memory: {mem_allocated:.2f}GB allocated, {mem_reserved:.2f}GB reserved")
                # Stop loading if memory is nearly full
                if mem_allocated > 6:  # 6 GB budget
logger.warning("⚠️ Memory limit reached, stopping model loading")
break
        # Verify that loading succeeded
if len(self.models) > 0:
self.models_loaded = True
logger.info(f"✅ Successfully loaded {len(self.models)} models (using fallback: {self.using_fallback})")
return True
else:
logger.error("❌ No models could be loaded")
return False
def classify_text(self, text: str) -> Dict:
"""تحليل النص باستخدام الموديلات المحملة"""
if not self.models_loaded or len(self.models) == 0:
raise ValueError("No models loaded")
        # Clean the text
cleaned_text = clean_text(text)
if not cleaned_text.strip():
raise ValueError("Empty text after cleaning")
        # Tokenization (512 is BERT's hard limit; the same cap is applied to ModernBERT)
        max_len = 512
try:
inputs = self.tokenizer(
cleaned_text,
return_tensors="pt",
truncation=True,
max_length=max_len,
padding=True
).to(device)
except Exception as e:
logger.error(f"Tokenization error: {e}")
raise ValueError(f"Failed to tokenize text: {e}")
        # Collect predictions
all_probabilities = []
with torch.no_grad():
for i, model in enumerate(self.models):
try:
logits = model(**inputs).logits
probs = torch.softmax(logits, dim=1)
all_probabilities.append(probs)
except Exception as e:
logger.warning(f"Model {i+1} prediction failed: {e}")
continue
if not all_probabilities:
raise ValueError("All models failed to make predictions")
        # Average the probabilities (soft voting)
averaged_probs = torch.mean(torch.stack(all_probabilities), dim=0)
probabilities = averaged_probs[0]
        # Human vs AI percentages (index 24 = 'human' in label_mapping)
        human_prob = probabilities[24].item()
        ai_probs = probabilities.clone()
        ai_probs[24] = 0  # zero out the human class
        ai_total_prob = ai_probs.sum().item()
        # Normalize
total = human_prob + ai_total_prob
if total > 0:
human_percentage = (human_prob / total) * 100
ai_percentage = (ai_total_prob / total) * 100
else:
human_percentage = 50
ai_percentage = 50
        # Most likely generator model
ai_model_idx = torch.argmax(ai_probs).item()
predicted_model = label_mapping.get(ai_model_idx, "Unknown")
        # Top-5 predictions
top_5_probs, top_5_indices = torch.topk(probabilities, 5)
top_5_results = []
for prob, idx in zip(top_5_probs, top_5_indices):
top_5_results.append({
"model": label_mapping.get(idx.item(), "Unknown"),
"probability": round(prob.item() * 100, 2)
})
return {
"human_percentage": round(human_percentage, 2),
"ai_percentage": round(ai_percentage, 2),
"predicted_model": predicted_model,
"top_5_predictions": top_5_results,
"is_human": human_percentage > ai_percentage,
"models_used": len(all_probabilities),
"using_fallback": self.using_fallback
}
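    # Shape of the dict classify_text returns (all numbers illustrative):
    #
    #     {"human_percentage": 12.5, "ai_percentage": 87.5,
    #      "predicted_model": "gpt4o",
    #      "top_5_predictions": [{"model": "gpt4o", "probability": 61.2}, ...],
    #      "is_human": False, "models_used": 2, "using_fallback": False}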
# =====================================================
# 🧹 Text cleaning and processing helpers
# =====================================================
def clean_text(text: str) -> str:
"""تنظيف النص من المسافات الزائدة"""
text = re.sub(r'\s{2,}', ' ', text)
text = re.sub(r'\s+([,.;:?!])', r'\1', text)
return text.strip()
def split_into_paragraphs(text: str) -> List[str]:
"""تقسيم النص إلى فقرات"""
paragraphs = re.split(r'\n\s*\n', text.strip())
return [p.strip() for p in paragraphs if p.strip()]
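# Doctest-style examples for the helpers above (illustrative):
#
#     >>> clean_text("Hello ,   world !")
#     'Hello, world!'
#     >>> split_into_paragraphs("First.\n\nSecond.\n\n\nThird.")
#     ['First.', 'Second.', 'Third.']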
# =====================================================
# 🌐 FastAPI Application
# =====================================================
app = FastAPI(
title="ModernBERT AI Text Detector",
description="كشف النصوص المكتوبة بواسطة الذكاء الاصطناعي",
version="2.2.0" # Updated version with UID fix
)
# Add CORS so the API can be called from a browser
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
# Create the model manager
model_manager = ModelManager()
# =====================================================
# 📝 Data models (Pydantic)
# =====================================================
class TextInput(BaseModel):
text: str
analyze_paragraphs: Optional[bool] = False
class SimpleTextInput(BaseModel):
text: str
class DetectionResult(BaseModel):
success: bool
code: int
message: str
data: Dict
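# Example payloads the request models accept (illustrative):
#
#     TextInput:       {"text": "Para one.\n\nPara two.", "analyze_paragraphs": true}
#     SimpleTextInput: {"text": "A short sample."}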
# =====================================================
# 🎯 API Endpoints
# =====================================================
@app.on_event("startup")
async def startup_event():
"""تحميل الموديلات عند بداية التشغيل"""
logger.info("=" * 50)
logger.info("🚀 Starting ModernBERT AI Detector...")
logger.info(f"🐍 Python version: {sys.version}")
logger.info(f"🔥 PyTorch version: {torch.__version__}")
import transformers
logger.info(f"🔧 Transformers version: {transformers.__version__}")
logger.info("🛡️ UID Monkey Patch Applied (for Docker/Container)")
logger.info("=" * 50)
    # Try to load the models
max_models = int(os.environ.get("MAX_MODELS", "2"))
success = model_manager.load_models(max_models=max_models)
if success:
logger.info("✅ Application ready! (Fallback mode: %s)", model_manager.using_fallback)
else:
logger.error("⚠️ Failed to load models - API will return errors")
logger.info("💡 Tip: Ensure 'transformers>=4.45.0' and 'huggingface_hub' are installed. Run: pip install --upgrade transformers huggingface_hub")
@app.get("/")
async def root():
"""الصفحة الرئيسية"""
return {
"message": "ModernBERT AI Text Detector API",
"status": "online" if model_manager.models_loaded else "initializing",
"models_loaded": len(model_manager.models),
"using_fallback": model_manager.using_fallback,
"device": str(device),
"endpoints": {
"analyze": "/analyze",
"simple": "/analyze-simple",
"health": "/health",
"docs": "/docs"
}
}
@app.get("/health")
async def health_check():
"""فحص صحة الخدمة"""
memory_info = {}
if torch.cuda.is_available():
memory_info = {
"gpu_allocated_gb": round(torch.cuda.memory_allocated() / 1024**3, 2),
"gpu_reserved_gb": round(torch.cuda.memory_reserved() / 1024**3, 2)
}
return {
"status": "healthy" if model_manager.models_loaded else "unhealthy",
"models_loaded": len(model_manager.models),
"using_fallback": model_manager.using_fallback,
"device": str(device),
"cuda_available": torch.cuda.is_available(),
"memory_info": memory_info
}
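# Example probes (assuming the service listens on localhost:8000):
#
#     curl http://localhost:8000/            # status + endpoint map
#     curl http://localhost:8000/health      # models loaded, device, GPU memory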
@app.post("/analyze", response_model=DetectionResult)
async def analyze_text(data: TextInput):
"""
تحليل النص للكشف عن AI
يحاكي نفس وظيفة Gradio classify_text
"""
try:
        # Validate the input
text = data.text.strip()
if not text:
return DetectionResult(
success=False,
code=400,
message="Empty input text",
data={}
)
        # Make sure the models are loaded (lazy-load on first request if needed)
        if not model_manager.models_loaded:
            if not model_manager.load_models():
if not model_manager.load_models():
return DetectionResult(
success=False,
code=503,
message="Models not available. Check logs for details.",
data={}
)
        # Word count
total_words = len(text.split())
        # Core analysis
result = model_manager.classify_text(text)
        # Headline results
ai_percentage = result["ai_percentage"]
human_percentage = result["human_percentage"]
ai_words = int(total_words * (ai_percentage / 100))
        # Per-paragraph analysis, if requested
paragraphs_analysis = []
if data.analyze_paragraphs and ai_percentage > 50:
paragraphs = split_into_paragraphs(text)
recalc_ai_words = 0
recalc_total_words = 0
            for para in paragraphs[:10]:  # cap at 10 paragraphs
if para.strip():
try:
para_result = model_manager.classify_text(para)
para_words = len(para.split())
recalc_total_words += para_words
recalc_ai_words += para_words * (para_result["ai_percentage"] / 100)
paragraphs_analysis.append({
"paragraph": para[:200] + "..." if len(para) > 200 else para,
"ai_generated_score": para_result["ai_percentage"] / 100,
"human_written_score": para_result["human_percentage"] / 100,
"predicted_model": para_result["predicted_model"]
})
except Exception as e:
logger.warning(f"Failed to analyze paragraph: {e}")
            # Recompute the percentages from the paragraph results
if recalc_total_words > 0:
ai_percentage = round((recalc_ai_words / recalc_total_words) * 100, 2)
human_percentage = round(100 - ai_percentage, 2)
ai_words = int(recalc_ai_words)
        # Build the feedback message
if ai_percentage > 50:
feedback = "Most of Your Text is AI/GPT Generated"
else:
feedback = "Most of Your Text Appears Human-Written"
        # Return results in the same format as the original code
return DetectionResult(
success=True,
code=200,
message="analysis completed",
data={
"fakePercentage": ai_percentage,
"isHuman": human_percentage,
"textWords": total_words,
"aiWords": ai_words,
"paragraphs": paragraphs_analysis,
"predicted_model": result["predicted_model"],
"feedback": feedback,
"input_text": text[:500] + "..." if len(text) > 500 else text,
"detected_language": "en",
"top_5_predictions": result.get("top_5_predictions", []),
"models_used": result.get("models_used", 1),
"using_fallback": result.get("using_fallback", False)
}
)
except Exception as e:
logger.error(f"Analysis error: {e}", exc_info=True)
return DetectionResult(
success=False,
code=500,
message=f"Analysis failed: {str(e)}",
data={}
)
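# Example request (assuming localhost:8000; the text is illustrative):
#
#     curl -X POST http://localhost:8000/analyze \
#          -H "Content-Type: application/json" \
#          -d '{"text": "Paragraph to check.", "analyze_paragraphs": true}'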
@app.post("/analyze-simple")
async def analyze_simple(data: SimpleTextInput):
"""
تحليل مبسط - يرجع النتائج الأساسية فقط
"""
try:
text = data.text.strip()
if not text:
raise HTTPException(status_code=400, detail="Empty text")
if not model_manager.models_loaded:
if not model_manager.load_models():
raise HTTPException(status_code=503, detail="Models not available")
result = model_manager.classify_text(text)
return {
"is_ai": result["ai_percentage"] > 50,
"ai_score": result["ai_percentage"],
"human_score": result["human_percentage"],
"detected_model": result["predicted_model"] if result["ai_percentage"] > 50 else None,
"confidence": max(result["ai_percentage"], result["human_percentage"]),
"using_fallback": result.get("using_fallback", False)
}
except HTTPException:
raise
except Exception as e:
logger.error(f"Simple analysis error: {e}")
raise HTTPException(status_code=500, detail=str(e))
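# Example request (assuming localhost:8000):
#
#     curl -X POST http://localhost:8000/analyze-simple \
#          -H "Content-Type: application/json" \
#          -d '{"text": "A short sample."}'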
# =====================================================
# 🏃 Application entry point
# =====================================================
if __name__ == "__main__":
import uvicorn
    # Read settings from the environment
port = int(os.environ.get("PORT", 8000))
host = os.environ.get("HOST", "0.0.0.0")
workers = int(os.environ.get("WORKERS", 1))
logger.info("=" * 50)
logger.info(f"🌐 Starting server on {host}:{port}")
logger.info(f"👷 Workers: {workers}")
logger.info(f"📚 Documentation: http://{host}:{port}/docs")
logger.info("=" * 50)
    uvicorn.run(
        "app:app",  # this file is app.py
        host=host,
        port=port,
        workers=workers,
        reload=False  # set to True for development
    )
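# Example launches (assumed typical usage; PORT/HOST/WORKERS are read above):
#
#     python app.py                        # defaults: 0.0.0.0:8000, 1 worker
#     PORT=7860 WORKERS=2 python app.py    # e.g. a Hugging Face Spaces port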