Update app.py
app.py CHANGED
@@ -10,7 +10,7 @@ import re
 import os
 import gc
 import threading
-import
+import torch  # added for dtype handling
 
 logging.basicConfig(level=logging.INFO)
 logger = logging.getLogger(__name__)
@@ -26,36 +26,39 @@ app.add_middleware(
     allow_headers=["*"],
 )
 
-# 🔥 Smart model management system -
+# 🔥 Smart model management system - fix for the Half precision problem
 class SmartModelManager:
     def __init__(self):
         self.loaded_models = {}
         self.model_lock = threading.Lock()
 
-        # Small Hugging Face models
+        # Small Hugging Face models with CPU-compatible settings
         self.models_config = {
             "arabic_general": {
                 "name": "microsoft/DialoGPT-small",
                 "type": "text-generation",
                 "description": "النموذج للفهم العام والردود",
-                "max_length":
+                "max_length": 300,
+                "torch_dtype": torch.float32  # use float32 instead of "auto"
             },
             "coding_expert": {
                 "name": "microsoft/DialoGPT-small",
                 "type": "text-generation",
                 "description": "نموذج البرمجة والأمن السيبراني",
-                "max_length":
+                "max_length": 300,
+                "torch_dtype": torch.float32
             },
             "cyber_security": {
                 "name": "google/flan-t5-small",
                 "type": "text2text-generation",
                 "description": "نموذج الأمن السيبراني المتخصص",
-                "max_length":
+                "max_length": 400,
+                "torch_dtype": torch.float32
             }
         }
 
     def load_model(self, model_type: str):
-        """Load a model only when it is needed"""
+        """Load a model only when it is needed, with the Half precision fix"""
         with self.model_lock:
             if model_type in self.loaded_models:
                 return self.loaded_models[model_type]
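This hunk carries the actual fix: the old config left the dtype to "auto" (see the next hunk, where torch_dtype="auto" is removed), which resolves to whatever precision the checkpoint was saved in and can hand a float16 model to a CPU-only Space, where many kernels have no half-precision implementation. Pinning torch.float32 sidesteps that. A minimal standalone sketch of the CPU-safe load, reusing the model name from models_config above:

import torch
from transformers import AutoModelForCausalLM

# torch_dtype="auto" takes the checkpoint's saved precision;
# an explicit float32, as in models_config above, is always safe on CPU.
model = AutoModelForCausalLM.from_pretrained(
    "microsoft/DialoGPT-small",
    torch_dtype=torch.float32,
)
print(model.dtype)  # torch.float32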
@@ -66,32 +69,49 @@ class SmartModelManager:
             model_config = self.models_config[model_type]
 
             if model_config["type"] == "text-generation":
-                from transformers import pipeline
-                model=
+                from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
+
+                # Load the components manually to control the dtype settings
+                model = AutoModelForCausalLM.from_pretrained(
+                    model_config["name"],
+                    torch_dtype=model_config["torch_dtype"],
+                    low_cpu_mem_usage=True
+                )
+                tokenizer = AutoTokenizer.from_pretrained(model_config["name"])
+
+                # Build the pipeline with the corrected settings
+                model_pipeline = pipeline(
                     "text-generation",
+                    model=model,
+                    tokenizer=tokenizer,
                     device=-1,  # run on the CPU
                     max_length=model_config["max_length"],
                     do_sample=True,
                     temperature=0.7,
+                    pad_token_id=tokenizer.eos_token_id  # added as part of the fix
                 )
 
             elif model_config["type"] == "text2text-generation":
-                from transformers import pipeline
-                model =
-                    model=
-                    device=-1
-                    torch_dtype="auto"
+                from transformers import pipeline, AutoModelForSeq2SeqLM, AutoTokenizer
+
+                model = AutoModelForSeq2SeqLM.from_pretrained(
+                    model_config["name"],
+                    torch_dtype=model_config["torch_dtype"],
+                    low_cpu_mem_usage=True
+                )
+                tokenizer = AutoTokenizer.from_pretrained(model_config["name"])
+
+                model_pipeline = pipeline(
                     "text2text-generation",
+                    model=model,
+                    tokenizer=tokenizer,
                     max_length=model_config["max_length"],
+                    device=-1
                 )
 
-                self.loaded_models[model_type] =
+                self.loaded_models[model_type] = model_pipeline
                 logger.info(f"✅ Model {model_type} loaded successfully")
-                return
+                return model_pipeline
 
         except Exception as e:
             logger.error(f"❌ Failed to load model {model_type}: {e}")
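Loading the model and tokenizer by hand before building the pipeline is what lets the manager control the dtype; the added pad_token_id=tokenizer.eos_token_id matters because GPT-2-family checkpoints such as DialoGPT ship without a pad token of their own. A hypothetical smoke test of the class above (it assumes load_model() returns the pipeline as shown, and downloads the small checkpoints on first use):

manager = SmartModelManager()

pipe = manager.load_model("arabic_general")   # float32 pipeline on CPU
out = pipe("Hello!", num_return_sequences=1)
print(out[0]["generated_text"])

manager.unload_model("arabic_general")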
@@ -103,6 +123,8 @@ class SmartModelManager:
         if model_type in self.loaded_models:
             del self.loaded_models[model_type]
             gc.collect()
+            if torch.cuda.is_available():
+                torch.cuda.empty_cache()
             logger.info(f"🗑️ Unloaded model {model_type}")
 
     def unload_all_models(self):
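One subtlety in the unload path: del self.loaded_models[model_type] only drops the manager's own reference, and gc.collect() can reclaim the weights only once no caller still holds the pipeline; the new CUDA branch is a no-op on CPU-only Spaces but releases cached allocator blocks when a GPU is present. A short sketch of the caller-side discipline this implies (manager as in the sketch above):

pipe = manager.load_model("cyber_security")
result = pipe("What is a firewall?")
manager.unload_model("cyber_security")

# The weights stay resident until the last outside reference dies too:
pipe = None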
@@ -217,55 +239,68 @@ def generate_with_model(model_type: str, prompt: str) -> str:
         # Load the model
         model = model_manager.load_model(model_type)
 
-        # Generate the text
+        # Generate the text with improved error handling
+        try:
+            if model_type in ["arabic_general", "coding_expert"]:
+                result = model(
+                    prompt,
+                    max_length=300,
+                    temperature=0.7,
+                    do_sample=True,
+                    num_return_sequences=1,
+                    pad_token_id=model.tokenizer.eos_token_id
+                )
+                response = result[0]['generated_text']
+            else:  # cyber_security
+                result = model(prompt, max_length=400)
+                response = result[0]['generated_text']
+
+            # Clean up the response
+            response = response.replace(prompt, "").strip()
+            if not response:
+                response = "أفهم طلبك، وأحتاج إلى مزيد من التفاصيل لتقديم المساعدة المثلى."
+
+            return response
+
+        except Exception as generation_error:
+            logger.error(f"❌ Generation error in model {model_type}: {generation_error}")
+            raise Exception(f"فشل في توليد الرد: {generation_error}")
 
     except Exception as e:
         logger.error(f"❌ Text generation error: {e}")
         raise Exception(f"فشل في معالجة الطلب: {e}")
     finally:
         # 🔥 Unload the model immediately after use
         model_manager.unload_model(model_type)
 
 def build_smart_prompt(user_message: str, model_type: str) -> str:
     """Build a smart prompt based on the model type"""
 
     if model_type == "arabic_general":
         prompt = f"""
-أنت مساعد ذكي
+أنت مساعد ذكي يتحدث العربية. قم بالرد بطريقة مفيدة واحترافية.
 
+السؤال: {user_message}
 
-الرد:
+الرد بالعربية:
 """
 
     elif model_type == "coding_expert":
         prompt = f"""
-You are
-Respond in Arabic with technical advice:
+You are a technical AI assistant. Respond in Arabic with helpful programming advice.
 
-User
+User: {user_message}
 
+Arabic response:
 """
 
     else:  # cyber_security
         prompt = f"""
-You are a cybersecurity expert. Respond in Arabic with professional security advice
+You are a cybersecurity expert. Respond in Arabic with professional security advice.
 
+Question: {user_message}
 
+Arabic response:
 """
 
     return prompt
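Note that the response.replace(prompt, "") cleanup in generate_with_model is really only needed for the two causal models: a text-generation pipeline echoes the prompt at the start of generated_text, while text2text-generation (flan-t5) returns only the newly generated text, so the replace is a harmless no-op there. An end-to-end sketch of these two functions together, with an arbitrary question:

question = "كيف أحمي خادمي من هجمات حجب الخدمة؟"  # arbitrary example
prompt = build_smart_prompt(question, "cyber_security")
answer = generate_with_model("cyber_security", prompt)  # loads, generates, unloads
print(answer)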
@@ -276,12 +311,15 @@ def get_intelligent_response(user_message: str, client_id: str) -> Dict[str, Any]:
     try:
         # 🔥 Advanced analysis of the request
        analysis = thinker.analyze_request(user_message)
+        logger.info(f"🔍 Request analysis: {analysis}")
 
         # 🔥 Build a smart prompt
         prompt = build_smart_prompt(user_message, analysis["required_model"])
+        logger.info(f"📝 Prompt built for model {analysis['required_model']}")
 
         # 🔥 Get the response from the appropriate model
         ai_response = generate_with_model(analysis["required_model"], prompt)
+        logger.info("✅ Response generated successfully")
 
         return {
             "response": ai_response,
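The diff never shows thinker.analyze_request() itself; all this hunk relies on is that it returns a dict whose "required_model" key names one of the three models_config entries. A hypothetical minimal router honoring that contract (the keyword lists are illustrative, not from the source):

def analyze_request(user_message: str) -> dict:
    """Hypothetical stand-in for thinker.analyze_request()."""
    msg = user_message.lower()
    if any(k in msg for k in ("security", "attack", "أمن", "اختراق")):
        return {"required_model": "cyber_security"}
    if any(k in msg for k in ("code", "python", "برمجة", "كود")):
        return {"required_model": "coding_expert"}
    return {"required_model": "arabic_general"}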
@@ -308,6 +346,7 @@ async def chat_with_ai(request: ChatRequest):
             actions=ai_result["actions"]
         )
 
+        logger.info("✅ Response sent successfully")
         return response
 
     except Exception as e:
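For completeness, a hypothetical client call against the chat endpoint; the route path, port, and the ChatRequest/ChatResponse field names are not visible in this diff, so "/chat", 7860 (the Spaces default), and "message"/"client_id"/"response" are assumptions:

import requests

r = requests.post(
    "http://localhost:7860/chat",  # assumed route and port
    json={"message": "ما هو حقن SQL؟", "client_id": "demo"},  # assumed fields
)
print(r.json()["response"])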