ibrahimlasfar committed
Commit abbf146 · verified · 1 Parent(s): 50c2461

Update app.py

Files changed (1):
  1. app.py +178 -73
app.py CHANGED
@@ -8,7 +8,7 @@ from datetime import datetime
 from typing import List, Dict, Optional, Union
 from concurrent.futures import ThreadPoolExecutor
 
-from fastapi import FastAPI, HTTPException, Request, UploadFile, File, WebSocket, WebSocketDisconnect
+from fastapi import FastAPI, HTTPException, Request, UploadFile, File, WebSocket, WebSocketDisconnect, Depends
 from fastapi.responses import StreamingResponse, HTMLResponse, FileResponse, JSONResponse
 from fastapi.staticfiles import StaticFiles
 from fastapi.templating import Jinja2Templates
@@ -29,13 +29,25 @@ from diffusers import StableDiffusionPipeline, EulerDiscreteScheduler
 from tensorflow.keras.models import load_model
 from sklearn.feature_extraction.text import TfidfVectorizer
 from sklearn.metrics.pairwise import cosine_similarity
+from transformers import BitsAndBytesConfig
 
 # Basic initialization
 DetectorFactory.seed = 0
 logging.basicConfig(level=logging.INFO)
 logger = logging.getLogger("MarkAI")
 
-app = FastAPI(title="MarkAI - Integrated Artificial Intelligence", version="2.0")
+app = FastAPI(
+    title="MarkAI - Integrated Artificial Intelligence",
+    version="2.0",
+    description="An integrated AI platform supporting text, code, image, and video generation with an advanced memory system",
+    contact={
+        "name": "Ibrahim Lasfar",
+        "email": "ibrahim@markai.com"
+    },
+    license_info={
+        "name": "MIT License",
+    }
+)
 
 # CORS settings
 app.add_middleware(
@@ -53,8 +65,9 @@ os.makedirs("memory/projects", exist_ok=True)
 os.makedirs("memory/code", exist_ok=True)
 os.makedirs("memory/backups", exist_ok=True)
 
-# 1. Supported language models (updated)
+# 1. Supported language models (updated with large models)
 LANGUAGE_MODELS = {
+    # Small models (default)
     "en": "gpt2-medium",
     "ar": "arbml/gpt2-arabic-poetry",
     "zh": "bert-base-chinese",
@@ -63,7 +76,12 @@ LANGUAGE_MODELS = {
     "de": "dbmdz/gpt2-german",
     "it": "LorenzoDeMattei/GePpeTto",
     "hi": "surajpai/GPT2-Hindi",
-    "code": "codeparrot/codeparrot-small"
+    "code": "codeparrot/codeparrot-small",
+
+    # Large models
+    "en-large": "EleutherAI/gpt-j-6B",
+    "ar-large": "bigscience/bloom-7b1",
+    "code-large": "tiiuae/falcon-7b"
 }
 
 # 2. Security and API keys
@@ -74,7 +92,7 @@ def load_api_keys():
         with open("memory/api_keys.json", "r") as f:
             return json.load(f)
     except:
-        return {}
+        return {"demo_key": "demo123"}  # Default demo key
 
 def save_api_keys(keys):
     with open("memory/api_keys.json", "w") as f:
@@ -94,22 +112,25 @@ class AIMemory:
 
     def load_all_data(self):
        """Load all data from files"""
-        # Load conversations
-        for conv_file in os.listdir("memory/conversations"):
-            if conv_file.endswith(".json"):
-                conv_id = conv_file.split(".")[0]
-                with open(f"memory/conversations/{conv_file}", "r", encoding="utf-8") as f:
-                    self.conversations[conv_id] = json.load(f)
-
-        # Load projects
-        if os.path.exists("memory/projects/projects.json"):
-            with open("memory/projects/projects.json", "r", encoding="utf-8") as f:
-                self.projects = json.load(f)
-
-        # Load the code repository
-        if os.path.exists("memory/code/code_repository.json"):
-            with open("memory/code/code_repository.json", "r", encoding="utf-8") as f:
-                self.code_repository = json.load(f)
+        try:
+            # Load conversations
+            for conv_file in os.listdir("memory/conversations"):
+                if conv_file.endswith(".json"):
+                    conv_id = conv_file.split(".")[0]
+                    with open(f"memory/conversations/{conv_file}", "r", encoding="utf-8") as f:
+                        self.conversations[conv_id] = json.load(f)
+
+            # Load projects
+            if os.path.exists("memory/projects/projects.json"):
+                with open("memory/projects/projects.json", "r", encoding="utf-8") as f:
+                    self.projects = json.load(f)
+
+            # Load the code repository
+            if os.path.exists("memory/code/code_repository.json"):
+                with open("memory/code/code_repository.json", "r", encoding="utf-8") as f:
+                    self.code_repository = json.load(f)
+        except Exception as e:
+            logger.error(f"Error loading data: {str(e)}")
 
     def create_conversation(self, initial_prompt: str) -> str:
         """Create a new conversation with automatic naming"""
@@ -133,8 +154,11 @@ class AIMemory:
     def save_conversation(self, conv_id: str):
         """Save a specific conversation"""
         if conv_id in self.conversations:
-            with open(f"memory/conversations/{conv_id}.json", "w", encoding="utf-8") as f:
-                json.dump(self.conversations[conv_id], f, ensure_ascii=False, indent=2)
+            try:
+                with open(f"memory/conversations/{conv_id}.json", "w", encoding="utf-8") as f:
+                    json.dump(self.conversations[conv_id], f, ensure_ascii=False, indent=2)
+            except Exception as e:
+                logger.error(f"Error saving conversation {conv_id}: {str(e)}")
 
     def add_message(self, conv_id: str, role: str, content: str, metadata: dict = {}):
         """Add a message to the conversation"""
@@ -181,8 +205,11 @@ class AIMemory:
 
     def save_projects(self):
         """Save all projects"""
-        with open("memory/projects/projects.json", "w", encoding="utf-8") as f:
-            json.dump(self.projects, f, ensure_ascii=False, indent=2)
+        try:
+            with open("memory/projects/projects.json", "w", encoding="utf-8") as f:
+                json.dump(self.projects, f, ensure_ascii=False, indent=2)
+        except Exception as e:
+            logger.error(f"Error saving projects: {str(e)}")
 
     def save_code_snippet(self, code: str, language: str, purpose: str, metadata: dict = {}):
         """Save a code snippet to the repository"""
@@ -204,8 +231,11 @@ class AIMemory:
 
     def save_code_repository(self):
         """Save the code repository"""
-        with open("memory/code/code_repository.json", "w", encoding="utf-8") as f:
-            json.dump(self.code_repository, f, ensure_ascii=False, indent=2)
+        try:
+            with open("memory/code/code_repository.json", "w", encoding="utf-8") as f:
+                json.dump(self.code_repository, f, ensure_ascii=False, indent=2)
+        except Exception as e:
+            logger.error(f"Error saving code repository: {str(e)}")
 
     def backup_data(self):
         """Create a backup of all data"""
@@ -213,21 +243,25 @@ class AIMemory:
         backup_dir = f"memory/backups/{timestamp}"
         os.makedirs(backup_dir, exist_ok=True)
 
-        # Back up conversations
-        os.makedirs(f"{backup_dir}/conversations", exist_ok=True)
-        for conv_id, conv_data in self.conversations.items():
-            with open(f"{backup_dir}/conversations/{conv_id}.json", "w", encoding="utf-8") as f:
-                json.dump(conv_data, f, ensure_ascii=False, indent=2)
-
-        # Back up projects
-        with open(f"{backup_dir}/projects.json", "w", encoding="utf-8") as f:
-            json.dump(self.projects, f, ensure_ascii=False, indent=2)
-
-        # Back up code snippets
-        with open(f"{backup_dir}/code_repository.json", "w", encoding="utf-8") as f:
-            json.dump(self.code_repository, f, ensure_ascii=False, indent=2)
-
-        return backup_dir
+        try:
+            # Back up conversations
+            os.makedirs(f"{backup_dir}/conversations", exist_ok=True)
+            for conv_id, conv_data in self.conversations.items():
+                with open(f"{backup_dir}/conversations/{conv_id}.json", "w", encoding="utf-8") as f:
+                    json.dump(conv_data, f, ensure_ascii=False, indent=2)
+
+            # Back up projects
+            with open(f"{backup_dir}/projects.json", "w", encoding="utf-8") as f:
+                json.dump(self.projects, f, ensure_ascii=False, indent=2)
+
+            # Back up code snippets
+            with open(f"{backup_dir}/code_repository.json", "w", encoding="utf-8") as f:
+                json.dump(self.code_repository, f, ensure_ascii=False, indent=2)
+
+            return backup_dir
+        except Exception as e:
+            logger.error(f"Error during backup: {str(e)}")
+            return None
 
 memory = AIMemory()
 
@@ -247,7 +281,8 @@ class AnalyticsEngine:
                 "positive": result["label"] == "POSITIVE",
                 "negative": result["label"] == "NEGATIVE"
             }
-        except:
+        except Exception as e:
+            logger.warning(f"Sentiment analysis failed, using fallback: {str(e)}")
             # Fallback basic sentiment analysis
             positive_words = ["good", "great", "excellent", "happy", "جيد", "رائع", "ممتاز", "سعيد"]
             negative_words = ["bad", "terrible", "awful", "sad", "سيء", "فظيع", "مزعج", "حزين"]
@@ -275,7 +310,8 @@ class AnalyticsEngine:
         try:
             vectors = self.tfidf.fit_transform([prompt, response])
             relevance_score = cosine_similarity(vectors[0:1], vectors[1:2])[0][0]
-        except:
+        except Exception as e:
+            logger.warning(f"TF-IDF analysis failed: {str(e)}")
             relevance_score = 0.7  # Default value on error
 
         # Sentiment analysis
@@ -297,16 +333,33 @@ class AIEngine:
         self.executor = ThreadPoolExecutor(max_workers=8)
         self.models = {}
         self.device = "cuda" if torch.cuda.is_available() else "cpu"
+        self.quantization_config = BitsAndBytesConfig(
+            load_in_4bit=True,
+            bnb_4bit_compute_dtype=torch.float16,
+            bnb_4bit_quant_type="nf4"
+        )
 
-    async def load_model(self, model_type: str, model_name: str = None):
+    async def load_model(self, model_type: str, model_name: str = None, use_large: bool = False):
         """Load a specific model"""
-        if model_type not in self.models:
+        model_key = f"{model_type}-large" if use_large else model_type
+
+        if model_key not in self.models:
             try:
                 if model_type == "text":
-                    model_name = model_name or LANGUAGE_MODELS.get("en")
+                    model_name = model_name or (LANGUAGE_MODELS.get(f"{model_type}-large") if use_large else LANGUAGE_MODELS.get("en"))
+
+                    if use_large:
+                        model = AutoModelForCausalLM.from_pretrained(
+                            model_name,
+                            quantization_config=self.quantization_config,
+                            device_map="auto",
+                            torch_dtype=torch.float16
+                        )
+                    else:
+                        model = AutoModelForCausalLM.from_pretrained(model_name).to(self.device)
+
                     tokenizer = AutoTokenizer.from_pretrained(model_name)
-                    model = AutoModelForCausalLM.from_pretrained(model_name).to(self.device)
-                    self.models[model_type] = {"tokenizer": tokenizer, "model": model}
+                    self.models[model_key] = {"tokenizer": tokenizer, "model": model}
 
                 elif model_type == "image":
                     scheduler = EulerDiscreteScheduler.from_pretrained("stabilityai/stable-diffusion-2", subfolder="scheduler")
@@ -315,36 +368,50 @@ class AIEngine:
                         scheduler=scheduler,
                         torch_dtype=torch.float16
                     ).to(self.device)
-                    self.models[model_type] = model
+                    self.models[model_key] = model
 
                 elif model_type == "code":
-                    tokenizer = AutoTokenizer.from_pretrained(LANGUAGE_MODELS["code"])
-                    model = AutoModelForCausalLM.from_pretrained(LANGUAGE_MODELS["code"]).to(self.device)
-                    self.models[model_type] = {"tokenizer": tokenizer, "model": model}
+                    if use_large:
+                        model = AutoModelForCausalLM.from_pretrained(
+                            LANGUAGE_MODELS["code-large"],
+                            quantization_config=self.quantization_config,
+                            device_map="auto",
+                            torch_dtype=torch.float16
+                        )
+                    else:
+                        model = AutoModelForCausalLM.from_pretrained(LANGUAGE_MODELS["code"]).to(self.device)
+
+                    tokenizer = AutoTokenizer.from_pretrained(LANGUAGE_MODELS["code-large"] if use_large else LANGUAGE_MODELS["code"])
+                    self.models[model_key] = {"tokenizer": tokenizer, "model": model}
 
-                logger.info(f"Model loaded successfully: {model_type}")
+                logger.info(f"Model loaded successfully: {model_key}")
             except Exception as e:
                 logger.error(f"Error loading model: {str(e)}")
                 raise
 
-        return self.models[model_type]
+        return self.models[model_key]
 
-    async def generate_text(self, prompt: str, lang: str = None, max_length: int = 300) -> str:
+    async def generate_text(self, prompt: str, lang: str = None, max_length: int = 300, use_large: bool = False) -> str:
         """Generate text from the prompt"""
         if not lang:
-            lang = detect(prompt)
+            try:
+                lang = detect(prompt)
+            except:
+                lang = "en"
+
+        model_name = LANGUAGE_MODELS.get(f"{lang}-large" if use_large else lang,
+                                         LANGUAGE_MODELS.get("en-large" if use_large else "en"))
 
-        model_name = LANGUAGE_MODELS.get(lang, LANGUAGE_MODELS["en"])
-        model = await self.load_model("text", model_name)
+        model = await self.load_model("text", model_name, use_large)
 
         inputs = model["tokenizer"](prompt, return_tensors="pt").to(self.device)
         outputs = model["model"].generate(**inputs, max_length=max_length, do_sample=True, top_k=50, top_p=0.95)
 
         return model["tokenizer"].decode(outputs[0], skip_special_tokens=True)
 
-    async def generate_code(self, prompt: str, language: str = "python", max_length: int = 500) -> str:
+    async def generate_code(self, prompt: str, language: str = "python", max_length: int = 500, use_large: bool = False) -> str:
         """Generate source code"""
-        model = await self.load_model("code")
+        model = await self.load_model("code", use_large=use_large)
 
         prompt = f"# Language: {language}\n# Description: {prompt}\n# Code:\n"
         inputs = model["tokenizer"](prompt, return_tensors="pt").to(self.device)
@@ -357,7 +424,10 @@ class AIEngine:
             code=generated_code,
             language=language,
             purpose=prompt[:100],
-            metadata={"generated_at": str(datetime.now())}
+            metadata={
+                "generated_at": str(datetime.now()),
+                "model_used": "large" if use_large else "base"
+            }
         )
 
         return generated_code
@@ -378,8 +448,8 @@ class AIEngine:
         """Generate a video from text (simulated)"""
         save_path = f"uploads/generated_video_{int(time.time())}.mp4"
 
-        # Create a video with overlaid text
-        clip = mp.VideoFileClip("assets/blank_video.mp4").set_duration(duration)
+        # Create a video with overlaid text (black background)
+        clip = mp.ColorClip(size=(640, 480), color=(0, 0, 0), duration=duration)
         txt_clip = mp.TextClip(prompt, fontsize=24, color='white', size=clip.size).set_position('center').set_duration(duration)
         video = mp.CompositeVideoClip([clip, txt_clip])
         video.write_videofile(save_path, fps=fps)
@@ -512,7 +582,11 @@ class ThinkingEngine:
 
     async def generate_plan(self, prompt: str, task_type: str = "text") -> dict:
         """Create an execution plan for the task"""
-        lang = detect(prompt)
+        try:
+            lang = detect(prompt)
+        except:
+            lang = "en"
+
         steps = self.get_thinking_steps(task_type, lang)
 
         plan = {
@@ -536,10 +610,12 @@ class GenerationRequest(BaseModel):
     language: Optional[str] = None
     conversation_id: Optional[str] = None
     improvements: Optional[List[str]] = None
+    use_large_model: bool = False  # Option to use the large models
 
 class ConversationRequest(BaseModel):
     initial_prompt: str
     project_id: Optional[str] = None
+    use_large_model: bool = False
 
 class ProjectRequest(BaseModel):
     name: str
@@ -550,6 +626,7 @@ class CodeImprovementRequest(BaseModel):
     code: str
     language: str
     improvements: List[str] = Field(..., example=["add_comments", "optimize", "add_error_handling"])
+    use_large_model: bool = False
 
 # 8. WebSocket conversation management
 class ConnectionManager:
@@ -585,7 +662,11 @@ async def start_conversation(request: ConversationRequest):
         conv_id=conv_id,
         role="user",
         content=request.initial_prompt,
-        metadata={"type": "text", "project_id": request.project_id}
+        metadata={
+            "type": "text",
+            "project_id": request.project_id,
+            "use_large_model": request.use_large_model
+        }
     )
 
     return {"conversation_id": conv_id, "name": memory.conversations[conv_id]["name"]}
@@ -606,7 +687,10 @@ async def websocket_conversation(websocket: WebSocket, conversation_id: str):
                 conv_id=conversation_id,
                 role="user",
                 content=message["content"],
-                metadata={"type": message.get("content_type", "text")}
+                metadata={
+                    "type": message.get("content_type", "text"),
+                    "use_large_model": message.get("use_large_model", False)
+                }
             )
 
             # Create a plan for the response
@@ -622,10 +706,19 @@ async def websocket_conversation(websocket: WebSocket, conversation_id: str):
             await asyncio.sleep(1)
 
             # Generate the response
+            use_large = message.get("use_large_model", False)
+
             if content_type == "text":
-                response = await engine.generate_text(message["content"])
+                response = await engine.generate_text(
+                    message["content"],
+                    use_large=use_large
+                )
             elif content_type == "code":
-                response = await engine.generate_code(message["content"], message.get("language", "python"))
+                response = await engine.generate_code(
+                    message["content"],
+                    message.get("language", "python"),
+                    use_large=use_large
+                )
             elif content_type == "image":
                 image_path = await engine.generate_image(message["content"])
                 response = f"IMAGE_GENERATED:{image_path}"
@@ -646,7 +739,8 @@ async def websocket_conversation(websocket: WebSocket, conversation_id: str):
                 metadata={
                     "type": content_type,
                     "evaluation": evaluation,
-                    "plan": plan
+                    "plan": plan,
+                    "model_used": "large" if use_large else "base"
                 }
             )
 
@@ -698,7 +792,8 @@ async def improve_code(request: CodeImprovementRequest):
         metadata={
             "original_code": request.code,
             "improvements": request.improvements,
-            "analyzed_at": str(datetime.now())
+            "analyzed_at": str(datetime.now()),
+            "model_used": "large" if request.use_large_model else "base"
         }
     )
 
@@ -739,7 +834,8 @@ async def backup_scheduler():
         await asyncio.sleep(3600)  # every hour
         try:
             backup_dir = memory.backup_data()
-            logger.info(f"Backup created at: {backup_dir}")
+            if backup_dir:
+                logger.info(f"Backup created at: {backup_dir}")
         except Exception as e:
             logger.error(f"Backup failed: {str(e)}")
 
@@ -773,7 +869,16 @@ async def startup_event():
 
     logger.info("MarkAI started successfully")
 
-# 13. Run the application
+# 13. Additional files for Hugging Face Spaces setup
+@app.get("/app")
+async def serve_app():
+    return FileResponse("static/index.html")
+
+@app.get("/favicon.ico")
+async def favicon():
+    return FileResponse("static/favicon.ico")
+
+# 14. Run the application
 if __name__ == "__main__":
     import uvicorn
     uvicorn.run(app, host="0.0.0.0", port=7860, reload=True)
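
The headline change in this commit is the optional quantized "large model" path. Below is a minimal, self-contained sketch of that loading pattern, assuming a CUDA machine with the bitsandbytes and accelerate packages installed; the model name is simply one of the new LANGUAGE_MODELS entries, not a recommendation.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

# Same 4-bit NF4 setup the commit adds to AIEngine.__init__
quantization_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.float16,
    bnb_4bit_quant_type="nf4"
)

model_name = "EleutherAI/gpt-j-6B"  # LANGUAGE_MODELS["en-large"]
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    quantization_config=quantization_config,
    device_map="auto"  # let accelerate place the quantized layers
)

inputs = tokenizer("Hello", return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=40, do_sample=True, top_k=50, top_p=0.95)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))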
 
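A hypothetical client for the updated WebSocket flow, showing where the new use_large_model flag rides in the message payload. The /ws/{conversation_id} path is an assumption, since the route decorator sits outside the hunks shown above.

import asyncio
import json
import websockets  # pip install websockets

async def ask(conversation_id: str):
    # Path is assumed; substitute the route actually registered in app.py
    uri = f"ws://localhost:7860/ws/{conversation_id}"
    async with websockets.connect(uri) as ws:
        await ws.send(json.dumps({
            "content": "Write a FastAPI hello-world endpoint",
            "content_type": "code",      # "text", "code", or "image"
            "language": "python",
            "use_large_model": True      # routes generation to the "code-large" path
        }))
        print(await ws.recv())

asyncio.run(ask("my-conversation-id"))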