ibrahimlasfar commited on
Commit
50c2461
·
verified ·
1 Parent(s): 959cb3a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +689 -322
app.py CHANGED
@@ -2,41 +2,58 @@ import os
2
  import json
3
  import time
4
  import asyncio
5
- import threading
6
  import logging
7
- import numpy as np
8
- import pandas as pd
9
- import cv2
10
- import nltk
11
  from datetime import datetime
 
12
  from concurrent.futures import ThreadPoolExecutor
13
- from fastapi import FastAPI, HTTPException, Request, UploadFile, File
14
- from fastapi.responses import StreamingResponse, HTMLResponse, FileResponse
 
15
  from fastapi.staticfiles import StaticFiles
16
  from fastapi.templating import Jinja2Templates
17
- from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
18
- from diffusers import StableDiffusionPipeline
 
19
  from langdetect import detect, DetectorFactory
 
 
 
20
  import torch
21
- from tensorflow.keras.models import Sequential
22
- from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D
23
- from sklearn.ensemble import RandomForestClassifier
24
- from sklearn.metrics import accuracy_score
25
  import moviepy.editor as mp
26
- from PIL import Image, ImageDraw, ImageFont
27
- from pydantic import BaseModel
28
- from typing import List, Dict, Optional
 
 
 
 
29
 
30
  # التهيئة الأساسية
31
  DetectorFactory.seed = 0
32
- app = FastAPI()
 
 
 
 
 
 
 
 
 
 
 
 
33
 
34
  # تهيئة مجلدات التخزين
35
  os.makedirs("uploads", exist_ok=True)
36
- os.makedirs("memory", exist_ok=True)
37
- os.makedirs("projects", exist_ok=True)
 
 
38
 
39
- # 1. نماذج اللغات المدعومة (خفيفة الوزن)
40
  LANGUAGE_MODELS = {
41
  "en": "gpt2-medium",
42
  "ar": "arbml/gpt2-arabic-poetry",
@@ -45,314 +62,686 @@ LANGUAGE_MODELS = {
45
  "fr": "dbmdz/gpt2-french",
46
  "de": "dbmdz/gpt2-german",
47
  "it": "LorenzoDeMattei/GePpeTto",
48
- "hi": "surajpai/GPT2-Hindi"
 
49
  }
50
 
51
- # 2. نظام الذاكرة والتعلم التلقائي
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
52
  class AIMemory:
53
  def __init__(self):
54
- self.memory_file = "memory/interactions.json"
55
- self.projects_file = "memory/projects.json"
56
- self.code_snippets_file = "memory/code_snippets.json"
57
- self.interactions = self._load_data(self.memory_file, [])
58
- self.projects = self._load_data(self.projects_file, [])
59
- self.code_snippets = self._load_data(self.code_snippets_file, [])
60
-
61
- def _load_data(self, file_path, default):
62
- if os.path.exists(file_path):
63
- with open(file_path, "r", encoding="utf-8") as f:
64
- return json.load(f)
65
- return default
66
-
67
- def save_interaction(self, interaction_type: str, data: dict):
68
- if interaction_type == "chat":
69
- self.interactions.append(data)
70
- self._save_data(self.memory_file, self.interactions)
71
- elif interaction_type == "project":
72
- self.projects.append(data)
73
- self._save_data(self.projects_file, self.projects)
74
- elif interaction_type == "code":
75
- self.code_snippets.append(data)
76
- self._save_data(self.code_snippets_file, self.code_snippets)
77
-
78
- def _save_data(self, file_path, data):
79
- with open(file_path, "w", encoding="utf-8") as f:
80
- json.dump(data, f, ensure_ascii=False, indent=2)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
81
 
82
  memory = AIMemory()
83
 
84
- # 3. نظام التقييم الذاتي
85
- def evaluate_response(prompt: str, response: str) -> float:
86
- """يقيم الجودة بين 0-1 بناءً على عدة معايير"""
87
- length_score = min(len(response) / 100, 1.0)
88
- unique_words = len(set(response.split()))
89
- diversity_score = min(unique_words / 20, 1.0)
90
- return (length_score + diversity_score) / 2
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
91
 
92
- # 4. المحرك الأساسي
93
  class AIEngine:
94
  def __init__(self):
95
- self.executor = ThreadPoolExecutor(max_workers=4)
96
- self.text_models = {}
97
- self.image_model = None
98
- self.code_model = None
99
- self.video_model = None
100
-
101
- async def load_text_model(self, model_name: str):
102
- if model_name not in self.text_models:
103
  try:
104
- tokenizer = AutoTokenizer.from_pretrained(model_name)
105
- model = AutoModelForCausalLM.from_pretrained(model_name)
106
- self.text_models[model_name] = {"tokenizer": tokenizer, "model": model}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
107
  except Exception as e:
108
- logging.error(f"Failed to load model {model_name}: {str(e)}")
109
  raise
110
- return self.text_models[model_name]
 
111
 
112
- async def load_image_model(self):
113
- if not self.image_model:
114
- try:
115
- self.image_model = StableDiffusionPipeline.from_pretrained(
116
- "stabilityai/stable-diffusion-2-base",
117
- torch_dtype=torch.float16
118
- )
119
- except Exception as e:
120
- logging.error(f"Failed to load image model: {str(e)}")
121
- raise
122
- return self.image_model
 
123
 
124
- async def generate_code(self, prompt: str, language: str):
125
  """توليد كود برمجي"""
126
- model = await self.load_text_model("codeparrot/codeparrot-small")
127
- inputs = model["tokenizer"](f"# {language}\n# {prompt}\n", return_tensors="pt")
128
- outputs = model["model"].generate(**inputs, max_length=200)
129
- return model["tokenizer"].decode(outputs[0], skip_special_tokens=True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
130
 
131
- async def generate_video(self, prompt: str):
132
- """توليد فيديو من نص"""
133
- # محاكاة لتوليد الفيديو (في الإصدار الحقيقي سيتم استخدام مكتبة مثل moviepy)
134
- video_path = f"uploads/generated_video_{int(time.time())}.mp4"
135
- clip = mp.VideoFileClip("assets/blank_video.mp4").set_duration(5)
136
- txt_clip = mp.TextClip(prompt, fontsize=24, color='white').set_position('center').set_duration(5)
 
137
  video = mp.CompositeVideoClip([clip, txt_clip])
138
- video.write_videofile(video_path, fps=24)
139
- return video_path
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
140
 
141
  engine = AIEngine()
142
 
143
- # 5. نظام التفكير الظاهر
144
- def thinking_process(lang: str, task_type: str):
145
- """يعرض خطوات التفكير حسب اللغة ونوع المهمة"""
146
- steps = {
147
- "text": {
148
- "ar": [
149
- "🔍 جاري تحليل طلبك...",
150
- "🧠 جاري معالجة النص...",
151
- "📚 جاري البحث في المعرفة...",
152
- "✨ جاري توليد الإجابة..."
153
- ],
154
- "en": [
155
- "🔍 Analyzing your request...",
156
- "🧠 Processing text...",
157
- "📚 Searching knowledge...",
158
- "✨ Generating answer..."
159
- ]
160
- },
161
- "code": {
162
- "ar": [
163
- "🔍 تحليل متطلبات الكود...",
164
- "🧠 جاري كتابة الخوارزمية...",
165
- "📚 جاري تحسين الكود...",
166
- "✨ جاري توليد الكود النهائي..."
167
- ],
168
- "en": [
169
- "🔍 Analyzing code requirements...",
170
- "🧠 Writing algorithm...",
171
- "📚 Optimizing code...",
172
- "✨ Generating final code..."
173
- ]
174
- },
175
- "image": {
176
- "ar": [
177
- "🔍 تحليل وصف الصورة...",
178
- "🧠 جاري تكوين المفاهيم...",
179
- "🎨 جاري رسم الصورة...",
180
- "✨ جاري تنقيح التفاصيل..."
181
- ],
182
- "en": [
183
- "🔍 Analyzing image description...",
184
- "🧠 Composing concepts...",
185
- "🎨 Drawing image...",
186
- "✨ Refining details..."
187
- ]
188
- },
189
- "video": {
190
- "ar": [
191
- "🔍 تحليل سيناريو الفيديو...",
192
- "🎬 جاري إعداد المشاهد...",
193
- "🎞️ جاري تركيب الفيديو...",
194
- "✨ جاري إضافة المؤثرات..."
195
- ],
196
- "en": [
197
- "🔍 Analyzing video scenario...",
198
- "🎬 Preparing scenes...",
199
- "🎞️ Composing video...",
200
- "✨ Adding effects..."
201
- ]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
202
  }
203
- }
204
- return steps.get(task_type, steps["text"]).get(lang, steps["text"]["en"])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
205
 
206
- # 6. نماذج طلبات API
207
  class GenerationRequest(BaseModel):
208
  prompt: str
209
- content_type: str = "text" # text, code, image, video
210
- language: Optional[str] = None # للكود البرمجي
 
 
 
 
 
 
211
 
212
  class ProjectRequest(BaseModel):
213
  name: str
214
  description: str
215
- project_type: str # web, mobile, desktop, ai
216
 
217
  class CodeImprovementRequest(BaseModel):
218
  code: str
219
  language: str
220
- improvements: List[str]
221
 
222
- # 7. نقاط النهاية الأساسية
223
- @app.post("/api/generate")
224
- async def generate(request: GenerationRequest):
225
- def stream_response():
226
- try:
227
- # اكتشاف اللغة
228
- lang = detect(request.prompt)
229
- model_name = LANGUAGE_MODELS.get(lang, "gpt2-medium")
230
-
231
- # عرض عملية التفكير
232
- steps = thinking_process(lang, request.content_type)
233
- for step in steps:
234
- yield f"data: {step}\n\n"
235
- time.sleep(1)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
236
 
237
- # توليد المحتوى حسب النوع
238
- if request.content_type == "text":
239
- model = await engine.load_text_model(model_name)
240
- inputs = model["tokenizer"](request.prompt, return_tensors="pt")
241
- outputs = model["model"].generate(**inputs, max_length=300)
242
- response = model["tokenizer"].decode(outputs[0], skip_special_tokens=True)
243
- score = evaluate_response(request.prompt, response)
 
244
 
245
- elif request.content_type == "code":
246
- response = await engine.generate_code(request.prompt, request.language or "python")
247
- score = 0.9 # درجة ثقة عالية للكود
248
 
249
- elif request.content_type == "image":
250
- pipe = await engine.load_image_model()
251
- image = pipe(request.prompt).images[0]
252
- image_path = f"uploads/generated_image_{int(time.time())}.png"
253
- image.save(image_path)
254
- response = f"IMAGE_GENERATED:{image_path}"
255
- score = 0.85
256
 
257
- elif request.content_type == "video":
258
- video_path = await engine.generate_video(request.prompt)
259
- response = f"VIDEO_GENERATED:{video_path}"
260
- score = 0.8
 
 
 
 
 
 
 
 
 
261
 
262
- else:
263
- raise HTTPException(status_code=400, detail="نوع المحتوى غير مدعوم")
264
-
265
- # حفظ التفاعل
266
- memory.save_interaction("chat", {
267
- "prompt": request.prompt,
268
- "response": response,
269
- "type": request.content_type,
270
- "language": lang,
271
- "timestamp": str(datetime.now()),
272
- "confidence": score
273
- })
274
-
275
- yield f"data: FINAL_RESPONSE:{response}:{score}\n\n"
276
-
277
- except Exception as e:
278
- logging.error(f"Error in generation: {str(e)}")
279
- yield f"data: ERROR:{str(e)}\n\n"
280
-
281
- return StreamingResponse(stream_response(), media_type="text/event-stream")
282
-
283
- # 8. إدارة المشاريع
284
- @app.post("/api/create_project")
285
- async def create_project(request: ProjectRequest):
286
- try:
287
- project_dir = f"projects/{request.name.replace(' ', '_')}"
288
- os.makedirs(project_dir, exist_ok=True)
289
-
290
- # إنشاء ملفات المشروع الأساسية
291
- with open(f"{project_dir}/README.md", "w") as f:
292
- f.write(f"# {request.name}\n\n{request.description}")
293
-
294
- memory.save_interaction("project", {
295
- "name": request.name,
296
- "type": request.project_type,
297
- "path": project_dir,
298
- "created_at": str(datetime.now()),
299
- "status": "active"
300
- })
301
-
302
- return {"status": "success", "project_path": project_dir}
303
  except Exception as e:
304
- raise HTTPException(status_code=500, detail=str(e))
 
 
 
 
305
 
306
- # 9. تحسين الكود
307
- @app.post("/api/improve_code")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
308
  async def improve_code(request: CodeImprovementRequest):
309
- try:
310
- improved_code = f"""{request.code}
311
-
312
- # التحسينات المطبقة:
313
- # {' | '.join(request.improvements)}
314
- # تم تحسين الكود بواسطة MarkAI في {datetime.now()}
315
- """
316
-
317
- memory.save_interaction("code", {
 
318
  "original_code": request.code,
319
- "improved_code": improved_code,
320
- "language": request.language,
321
  "improvements": request.improvements,
322
- "timestamp": str(datetime.now())
323
- })
324
-
325
- return {"status": "success", "improved_code": improved_code}
326
- except Exception as e:
327
- raise HTTPException(status_code=500, detail=str(e))
328
-
329
- # 10. تحليل المشاعر
330
- @app.post("/api/analyze_sentiment")
331
- async def analyze_sentiment(text: str):
332
- try:
333
- # محاكاة لتحليل المشاعر (في الإصدار الحقيقي سيتم استخدام نموذج متخصص)
334
- positive_words = ["جيد", "رائع", "ممتاز", "سعيد"]
335
- negative_words = ["سيء", "مزعج", "حزين", "غاضب"]
336
-
337
- positive_count = sum(text.count(word) for word in positive_words)
338
- negative_count = sum(text.count(word) for word in negative_words)
339
-
340
- sentiment = "neutral"
341
- if positive_count > negative_count:
342
- sentiment = "positive"
343
- elif negative_count > positive_count:
344
- sentiment = "negative"
345
-
346
- score = (positive_count - negative_count) / len(text.split())
347
-
348
- return {
349
- "sentiment": sentiment,
350
- "score": score,
351
- "positive_words": positive_count,
352
- "negative_words": negative_count
353
  }
354
- except Exception as e:
355
- raise HTTPException(status_code=500, detail=str(e))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
356
 
357
  # 11. واجهة المستخدم
358
  app.mount("/static", StaticFiles(directory="static"), name="static")
@@ -363,50 +752,28 @@ templates = Jinja2Templates(directory="templates")
363
  async def read_root(request: Request):
364
  return templates.TemplateResponse("index.html", {"request": request})
365
 
366
- # 12. نقاط نهاية إضافية
367
- @app.get("/api/memory")
368
- async def get_memory():
369
- return {
370
- "chat_history": memory.interactions[-10:],
371
- "projects": memory.projects,
372
- "code_snippets": memory.code_snippets
373
- }
374
-
375
- @app.get("/api/download/{file_type}/{filename}")
376
- async def download_file(file_type: str, filename: str):
377
- file_path = f"uploads/{filename}"
378
- if os.path.exists(file_path):
379
- return FileResponse(file_path)
380
- raise HTTPException(status_code=404, detail="File not found")
381
-
382
- # 13. نظام النسخ الاحتياطي التلقائي
383
- def backup_data():
384
- try:
385
- timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
386
- for data_type, data in [("chat", memory.interactions),
387
- ("projects", memory.projects),
388
- ("code", memory.code_snippets)]:
389
- backup_path = f"memory/{data_type}_backup_{timestamp}.json"
390
- with open(backup_path, "w", encoding="utf-8") as f:
391
- json.dump(data, f, ensure_ascii=False, indent=2)
392
- except Exception as e:
393
- logging.error(f"Backup failed: {str(e)}")
394
-
395
- # تشغيل النسخ الاحتياطي كل ساعة
396
- async def backup_scheduler():
397
- while True:
398
- await asyncio.sleep(3600)
399
- backup_data()
400
 
401
- # بدء المهمة الجانبية
402
  @app.on_event("startup")
403
  async def startup_event():
404
  asyncio.create_task(backup_scheduler())
 
405
  # تحميل النماذج الأساسية مسبقاً
406
- await engine.load_text_model("gpt2-medium")
407
- await engine.load_image_model()
 
 
408
 
409
- # 14. تشغيل التطبيق
410
  if __name__ == "__main__":
411
  import uvicorn
412
- uvicorn.run(app, host="0.0.0.0", port=7860)
 
2
  import json
3
  import time
4
  import asyncio
 
5
  import logging
6
+ import hashlib
 
 
 
7
  from datetime import datetime
8
+ from typing import List, Dict, Optional, Union
9
  from concurrent.futures import ThreadPoolExecutor
10
+
11
+ from fastapi import FastAPI, HTTPException, Request, UploadFile, File, WebSocket, WebSocketDisconnect
12
+ from fastapi.responses import StreamingResponse, HTMLResponse, FileResponse, JSONResponse
13
  from fastapi.staticfiles import StaticFiles
14
  from fastapi.templating import Jinja2Templates
15
+ from fastapi.security import APIKeyHeader
16
+ from fastapi.middleware.cors import CORSMiddleware
17
+ from pydantic import BaseModel, Field
18
  from langdetect import detect, DetectorFactory
19
+ import numpy as np
20
+ import pandas as pd
21
+ import cv2
22
  import torch
23
+ from PIL import Image
 
 
 
24
  import moviepy.editor as mp
25
+
26
+ # تحميل نماذج الذكاء الاصطناعي
27
+ from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
28
+ from diffusers import StableDiffusionPipeline, EulerDiscreteScheduler
29
+ from tensorflow.keras.models import load_model
30
+ from sklearn.feature_extraction.text import TfidfVectorizer
31
+ from sklearn.metrics.pairwise import cosine_similarity
32
 
33
  # التهيئة الأساسية
34
  DetectorFactory.seed = 0
35
+ logging.basicConfig(level=logging.INFO)
36
+ logger = logging.getLogger("MarkAI")
37
+
38
+ app = FastAPI(title="MarkAI - الذكاء الاصطناعي المتكامل", version="2.0")
39
+
40
+ # إعدادات CORS
41
+ app.add_middleware(
42
+ CORSMiddleware,
43
+ allow_origins=["*"],
44
+ allow_credentials=True,
45
+ allow_methods=["*"],
46
+ allow_headers=["*"],
47
+ )
48
 
49
  # تهيئة مجلدات التخزين
50
  os.makedirs("uploads", exist_ok=True)
51
+ os.makedirs("memory/conversations", exist_ok=True)
52
+ os.makedirs("memory/projects", exist_ok=True)
53
+ os.makedirs("memory/code", exist_ok=True)
54
+ os.makedirs("memory/backups", exist_ok=True)
55
 
56
+ # 1. نماذج اللغات المدعومة (محدثة)
57
  LANGUAGE_MODELS = {
58
  "en": "gpt2-medium",
59
  "ar": "arbml/gpt2-arabic-poetry",
 
62
  "fr": "dbmdz/gpt2-french",
63
  "de": "dbmdz/gpt2-german",
64
  "it": "LorenzoDeMattei/GePpeTto",
65
+ "hi": "surajpai/GPT2-Hindi",
66
+ "code": "codeparrot/codeparrot-small"
67
  }
68
 
69
+ # 2. نظام الأمان والمفاتيح
70
+ API_KEY_HEADER = APIKeyHeader(name="X-API-KEY")
71
+
72
# Location of the persisted API-key mapping.
API_KEYS_FILE = "memory/api_keys.json"


def load_api_keys() -> dict:
    """Load the API-key mapping; a missing or corrupt file yields an empty dict."""
    try:
        with open(API_KEYS_FILE, "r", encoding="utf-8") as f:
            return json.load(f)
    except (OSError, json.JSONDecodeError):
        # Was a bare ``except:`` — narrowed so unrelated errors (e.g.
        # KeyboardInterrupt) are not silently swallowed.
        return {}


def save_api_keys(keys: dict) -> None:
    """Persist the API-key mapping to disk as JSON."""
    with open(API_KEYS_FILE, "w", encoding="utf-8") as f:
        json.dump(keys, f)


def authenticate(api_key: str) -> bool:
    """Return True when *api_key* matches one of the registered key values."""
    return api_key in load_api_keys().values()
86
+
87
+ # 3. نظام الذاكرة المتقدم
88
class AIMemory:
    """Persistent store for conversations, projects and code snippets.

    Every mutation is mirrored to JSON files under ``memory/`` so state
    survives process restarts.
    """

    def __init__(self):
        self.conversations: Dict[str, dict] = {}
        self.projects: Dict[str, dict] = {}
        self.code_repository: Dict[str, dict] = {}
        self.load_all_data()

    def load_all_data(self):
        """Load all persisted data; missing files or directories are ignored."""
        # Conversations: one JSON file per conversation id.
        conv_dir = "memory/conversations"
        if os.path.isdir(conv_dir):  # guard: dir may not exist on first run
            for conv_file in os.listdir(conv_dir):
                if conv_file.endswith(".json"):
                    conv_id = conv_file.split(".")[0]
                    with open(os.path.join(conv_dir, conv_file), "r", encoding="utf-8") as f:
                        self.conversations[conv_id] = json.load(f)

        # Projects: one aggregate index file.
        if os.path.exists("memory/projects/projects.json"):
            with open("memory/projects/projects.json", "r", encoding="utf-8") as f:
                self.projects = json.load(f)

        # Code repository: one aggregate index file.
        if os.path.exists("memory/code/code_repository.json"):
            with open("memory/code/code_repository.json", "r", encoding="utf-8") as f:
                self.code_repository = json.load(f)

    def create_conversation(self, initial_prompt: str) -> str:
        """Create a new conversation auto-named from the prompt; return its id."""
        conv_id = hashlib.md5(f"{initial_prompt}{datetime.now()}".encode()).hexdigest()[:10]
        conv_name = initial_prompt[:30] + "..." if len(initial_prompt) > 30 else initial_prompt

        self.conversations[conv_id] = {
            "id": conv_id,
            "name": conv_name,
            "created_at": str(datetime.now()),
            "updated_at": str(datetime.now()),
            "messages": [],
            "context": [],
            "status": "active",
        }
        self.save_conversation(conv_id)
        return conv_id

    def save_conversation(self, conv_id: str):
        """Persist one conversation to its own JSON file."""
        if conv_id in self.conversations:
            with open(f"memory/conversations/{conv_id}.json", "w", encoding="utf-8") as f:
                json.dump(self.conversations[conv_id], f, ensure_ascii=False, indent=2)

    def add_message(self, conv_id: str, role: str, content: str, metadata: Optional[dict] = None):
        """Append a message to a conversation and persist it.

        Raises:
            ValueError: if *conv_id* is unknown.
        """
        if conv_id not in self.conversations:
            raise ValueError("المحادثة غير موجودة")

        message = {
            "role": role,
            "content": content,
            "timestamp": str(datetime.now()),
            # Fresh dict per call: the previous ``metadata: dict = {}`` default
            # was shared across calls (mutable-default-argument pitfall).
            "metadata": metadata if metadata is not None else {},
        }

        self.conversations[conv_id]["messages"].append(message)
        self.conversations[conv_id]["updated_at"] = str(datetime.now())
        self.save_conversation(conv_id)

    def get_conversation_context(self, conv_id: str, max_messages: int = 10) -> List[dict]:
        """Return the last *max_messages* messages ([] for unknown ids)."""
        if conv_id not in self.conversations:
            return []
        return self.conversations[conv_id]["messages"][-max_messages:]

    def create_project(self, name: str, description: str, project_type: str) -> str:
        """Register a new project and persist the project index; return its id."""
        project_id = hashlib.md5(f"{name}{datetime.now()}".encode()).hexdigest()[:8]

        self.projects[project_id] = {
            "id": project_id,
            "name": name,
            "description": description,
            "type": project_type,
            "created_at": str(datetime.now()),
            "updated_at": str(datetime.now()),
            "status": "active",
            "files": [],
            "conversations": [],
        }
        self.save_projects()
        return project_id

    def save_projects(self):
        """Persist the whole project index."""
        with open("memory/projects/projects.json", "w", encoding="utf-8") as f:
            json.dump(self.projects, f, ensure_ascii=False, indent=2)

    def save_code_snippet(self, code: str, language: str, purpose: str, metadata: Optional[dict] = None):
        """Store a code snippet in the repository and persist it; return its id."""
        code_id = hashlib.md5(f"{code}{datetime.now()}".encode()).hexdigest()[:8]

        self.code_repository[code_id] = {
            "id": code_id,
            "code": code,
            "language": language,
            "purpose": purpose,
            # Fresh dict per call — avoids the mutable-default pitfall.
            "metadata": metadata if metadata is not None else {},
            "created_at": str(datetime.now()),
            "usage_count": 0,
        }
        self.save_code_repository()
        return code_id

    def save_code_repository(self):
        """Persist the whole code repository."""
        with open("memory/code/code_repository.json", "w", encoding="utf-8") as f:
            json.dump(self.code_repository, f, ensure_ascii=False, indent=2)

    def backup_data(self):
        """Write a timestamped full backup under memory/backups; return its path."""
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        backup_dir = f"memory/backups/{timestamp}"
        os.makedirs(backup_dir, exist_ok=True)

        # Conversations are backed up one file per id, mirroring the live layout.
        os.makedirs(f"{backup_dir}/conversations", exist_ok=True)
        for conv_id, conv_data in self.conversations.items():
            with open(f"{backup_dir}/conversations/{conv_id}.json", "w", encoding="utf-8") as f:
                json.dump(conv_data, f, ensure_ascii=False, indent=2)

        with open(f"{backup_dir}/projects.json", "w", encoding="utf-8") as f:
            json.dump(self.projects, f, ensure_ascii=False, indent=2)

        with open(f"{backup_dir}/code_repository.json", "w", encoding="utf-8") as f:
            json.dump(self.code_repository, f, ensure_ascii=False, indent=2)

        return backup_dir
231
 
232
  memory = AIMemory()
233
 
234
+ # 4. نظام التقييم والتحليل
235
class AnalyticsEngine:
    """Sentiment analysis and response-quality scoring."""

    def __init__(self):
        # Heavyweight HF model loaded once; TF-IDF is reused for relevance scoring.
        self.sentiment_model = pipeline("sentiment-analysis")
        self.tfidf = TfidfVectorizer()

    def analyze_sentiment(self, text: str) -> dict:
        """Classify *text* sentiment; fall back to keyword counting if the model fails."""
        try:
            result = self.sentiment_model(text)[0]
            return {
                "sentiment": result["label"],
                "score": result["score"],
                "positive": result["label"] == "POSITIVE",
                "negative": result["label"] == "NEGATIVE",
            }
        except Exception:
            # Was a bare ``except:`` — narrowed so SystemExit/KeyboardInterrupt
            # still propagate. Fallback: simple bilingual keyword counts.
            positive_words = ["good", "great", "excellent", "happy", "جيد", "رائع", "ممتاز", "سعيد"]
            negative_words = ["bad", "terrible", "awful", "sad", "سيء", "فظيع", "مزعج", "حزين"]

            positive_count = sum(text.lower().count(word) for word in positive_words)
            negative_count = sum(text.lower().count(word) for word in negative_words)

            if positive_count > negative_count:
                return {"sentiment": "POSITIVE", "score": positive_count / (positive_count + negative_count + 1)}
            if negative_count > positive_count:
                return {"sentiment": "NEGATIVE", "score": negative_count / (positive_count + negative_count + 1)}
            return {"sentiment": "NEUTRAL", "score": 0.5}

    def evaluate_response(self, prompt: str, response: str) -> dict:
        """Score a response on length, diversity, relevance and sentiment (0-1 each)."""
        # Length: saturates at 100 words.
        length_score = min(len(response.split()) / 100, 1.0)

        # Vocabulary diversity: saturates at 50 unique words.
        unique_words = len(set(response.split()))
        diversity_score = min(unique_words / 50, 1.0)

        # Topical relevance via TF-IDF cosine similarity.
        try:
            vectors = self.tfidf.fit_transform([prompt, response])
            relevance_score = cosine_similarity(vectors[0:1], vectors[1:2])[0][0]
        except Exception:
            relevance_score = 0.7  # default when TF-IDF cannot be computed (e.g. empty input)

        sentiment = self.analyze_sentiment(response)

        return {
            "length_score": length_score,
            "diversity_score": diversity_score,
            "relevance_score": relevance_score,
            "sentiment": sentiment,
            "overall_score": (length_score + diversity_score + relevance_score + sentiment["score"]) / 4,
        }
291
+
292
+ analytics = AnalyticsEngine()
293
 
294
+ # 5. المحرك الأساسي للذكاء الاصطناعي
295
class AIEngine:
    """Core engine: lazily loads and runs text / image / code generation models."""

    def __init__(self):
        self.executor = ThreadPoolExecutor(max_workers=8)
        # Cache of loaded model bundles, keyed by an internal cache key.
        self.models = {}
        self.device = "cuda" if torch.cuda.is_available() else "cpu"

    async def load_model(self, model_type: str, model_name: str = None):
        """Load (at most once) and return the model bundle for *model_type*.

        For ``model_type == "text"`` the cache is keyed per model name: the
        previous code cached only under ``"text"``, so the first language's
        model was wrongly reused for every other language.
        """
        if model_type == "text":
            model_name = model_name or LANGUAGE_MODELS.get("en")
            cache_key = f"text:{model_name}"
        else:
            cache_key = model_type

        if cache_key not in self.models:
            try:
                if model_type == "text":
                    tokenizer = AutoTokenizer.from_pretrained(model_name)
                    model = AutoModelForCausalLM.from_pretrained(model_name).to(self.device)
                    self.models[cache_key] = {"tokenizer": tokenizer, "model": model}

                elif model_type == "image":
                    scheduler = EulerDiscreteScheduler.from_pretrained(
                        "stabilityai/stable-diffusion-2", subfolder="scheduler"
                    )
                    model = StableDiffusionPipeline.from_pretrained(
                        "stabilityai/stable-diffusion-2",
                        scheduler=scheduler,
                        torch_dtype=torch.float16,
                    ).to(self.device)
                    self.models[cache_key] = model

                elif model_type == "code":
                    tokenizer = AutoTokenizer.from_pretrained(LANGUAGE_MODELS["code"])
                    model = AutoModelForCausalLM.from_pretrained(LANGUAGE_MODELS["code"]).to(self.device)
                    self.models[cache_key] = {"tokenizer": tokenizer, "model": model}

                logger.info(f"تم تحميل النموذج بنجاح: {model_type}")
            except Exception as e:
                logger.error(f"خطأ في تحميل النموذج: {str(e)}")
                raise

        return self.models[cache_key]

    async def generate_text(self, prompt: str, lang: str = None, max_length: int = 300) -> str:
        """Generate text for *prompt*; language auto-detected unless given."""
        if not lang:
            lang = detect(prompt)

        model_name = LANGUAGE_MODELS.get(lang, LANGUAGE_MODELS["en"])
        model = await self.load_model("text", model_name)

        inputs = model["tokenizer"](prompt, return_tensors="pt").to(self.device)
        outputs = model["model"].generate(**inputs, max_length=max_length, do_sample=True, top_k=50, top_p=0.95)

        return model["tokenizer"].decode(outputs[0], skip_special_tokens=True)

    async def generate_code(self, prompt: str, language: str = "python", max_length: int = 500) -> str:
        """Generate source code and archive it in the shared code repository."""
        model = await self.load_model("code")

        prompt = f"# Language: {language}\n# Description: {prompt}\n# Code:\n"
        inputs = model["tokenizer"](prompt, return_tensors="pt").to(self.device)
        outputs = model["model"].generate(**inputs, max_length=max_length, do_sample=True, top_k=50, top_p=0.95)

        generated_code = model["tokenizer"].decode(outputs[0], skip_special_tokens=True)

        # Archive the snippet in the shared memory store.
        memory.save_code_snippet(
            code=generated_code,
            language=language,
            purpose=prompt[:100],
            metadata={"generated_at": str(datetime.now())},
        )

        return generated_code

    async def generate_image(self, prompt: str, save_path: str = None) -> str:
        """Render an image from *prompt* and return the saved file path."""
        model = await self.load_model("image")

        if not save_path:
            save_path = f"uploads/generated_image_{int(time.time())}.png"

        image = model(prompt).images[0]
        image.save(save_path)

        return save_path

    async def generate_video(self, prompt: str, duration: int = 5, fps: int = 24) -> str:
        """Compose a simple text-over-clip video (simulation) and return its path."""
        save_path = f"uploads/generated_video_{int(time.time())}.mp4"

        # Overlay the prompt text on a blank base clip.
        clip = mp.VideoFileClip("assets/blank_video.mp4").set_duration(duration)
        txt_clip = mp.TextClip(prompt, fontsize=24, color='white', size=clip.size).set_position('center').set_duration(duration)
        video = mp.CompositeVideoClip([clip, txt_clip])
        video.write_videofile(save_path, fps=fps)

        return save_path

    async def analyze_code(self, code: str, language: str = "python") -> dict:
        """Run a lightweight heuristic review of *code*; returns an analysis dict."""
        analysis = {
            "length": len(code.split("\n")),
            "complexity": "low",
            "quality": "medium",
            "issues": [],
            "suggestions": [],
        }

        # Heuristic: >50 lines is flagged as complex.
        if len(code.split("\n")) > 50:
            analysis["complexity"] = "high"
            analysis["suggestions"].append("Consider breaking this into smaller functions/modules")

        if "TODO" in code or "FIXME" in code:
            analysis["issues"].append("Contains unfinished tasks (TODO/FIXME)")
            analysis["quality"] = "low"

        if language == "python" and "print(" in code:
            analysis["suggestions"].append("Consider using logging instead of print statements for production code")

        return analysis

    async def improve_code(self, code: str, language: str, improvements: List[str]) -> str:
        """Apply the requested textual improvements to *code* and return the result."""
        improved_code = code

        if "add_comments" in improvements:
            improved_code = f"# Improved by MarkAI at {datetime.now()}\n# Original code with enhancements\n\n{improved_code}"

        if "optimize" in improvements:
            # The previous blind replacements ("for i in range(len(" -> "for item in ",
            # ".append(" -> " += [") produced syntactically invalid code; record the
            # request as an annotation instead of corrupting the source.
            improved_code += "\n# TODO: apply automated optimizations (manual review required)"

        if "add_error_handling" in improvements and language == "python":
            # Built with plain concatenation: the previous f-string contained a
            # backslash inside the expression (SyntaxError on Python < 3.12) and an
            # unescaped {e} that was evaluated at build time (NameError).
            indented = improved_code.replace("\n", "\n    ")
            improved_code = (
                "try:\n    " + indented
                + "\nexcept Exception as e:\n    print(f\"An error occurred: {e}\")"
            )

        return improved_code
430
 
431
  engine = AIEngine()
432
 
433
+ # 6. نظام التفكير والتخطيط
434
class ThinkingEngine:
    """Produces user-visible "thinking" step lists and execution plans.

    Steps are bilingual (Arabic/English) templates keyed by task type;
    unknown task types or languages fall back to the English text steps.
    """

    def __init__(self):
        # Each entry maps a language code to its four display steps.
        text_steps = {
            "ar": [
                "🔍 تحليل الطلب والمتطلبات...",
                "🧠 معالجة البيانات والبحث...",
                "📚 استرجاع المعلومات ذات الصلة...",
                "✨ توليد الإجابة المثلى..."
            ],
            "en": [
                "🔍 Analyzing request and requirements...",
                "🧠 Processing data and researching...",
                "📚 Retrieving relevant information...",
                "✨ Generating optimal response..."
            ],
        }
        code_steps = {
            "ar": [
                "🔍 تحليل متطلبات الكود...",
                "🧠 تصميم الخوارزمية...",
                "📚 البحث عن الحلول المثلى...",
                "✨ كتابة وتوليد الكود..."
            ],
            "en": [
                "🔍 Analyzing code requirements...",
                "🧠 Designing algorithm...",
                "📚 Researching optimal solutions...",
                "✨ Writing and generating code..."
            ],
        }
        image_steps = {
            "ar": [
                "🔍 تحليل وصف الصورة...",
                "🧠 تكوين المفاهيم الفنية...",
                "🎨 رسم العناصر الأساسية...",
                "✨ إضافة اللمسات النهائية..."
            ],
            "en": [
                "🔍 Analyzing image description...",
                "🧠 Composing artistic concepts...",
                "🎨 Sketching basic elements...",
                "✨ Adding final touches..."
            ],
        }
        video_steps = {
            "ar": [
                "🔍 تحليل السيناريو...",
                "🎬 إعداد القصة والمشاهد...",
                "🎞️ تركيب العناصر المرئية...",
                "✨ إضافة المؤثرات والصوت..."
            ],
            "en": [
                "🔍 Analyzing scenario...",
                "🎬 Preparing storyboard and scenes...",
                "🎞️ Composing visual elements...",
                "✨ Adding effects and sound..."
            ],
        }
        project_steps = {
            "ar": [
                "🔍 تحليل متطلبات المشروع...",
                "📝 تحديد الهيكل الأساسي...",
                "🛠️ إعداد الملفات والموارد...",
                "✨ إنشاء المشروع الجديد..."
            ],
            "en": [
                "🔍 Analyzing project requirements...",
                "📝 Defining basic structure...",
                "🛠️ Preparing files and resources...",
                "✨ Creating new project..."
            ],
        }

        self.planning_steps = {
            "text": text_steps,
            "code": code_steps,
            "image": image_steps,
            "video": video_steps,
            "project": project_steps,
        }

    def get_thinking_steps(self, task_type: str, lang: str = "en") -> List[str]:
        """Return the step list for *task_type*/*lang* (fallback: text / English)."""
        by_language = self.planning_steps.get(task_type, self.planning_steps["text"])
        return by_language.get(lang, self.planning_steps["text"]["en"])

    async def generate_plan(self, prompt: str, task_type: str = "text") -> dict:
        """Build an execution plan for *prompt*; the language is auto-detected."""
        lang = detect(prompt)
        needs_gpu = task_type in ["image", "video"]

        return {
            "task": prompt,
            "type": task_type,
            "language": lang,
            "steps": self.get_thinking_steps(task_type, lang),
            "estimated_time": "30 seconds",  # static estimate; could be refined
            "required_resources": ["CPU", "GPU"] if needs_gpu else ["CPU"],
            "created_at": str(datetime.now()),
        }
529
+
530
+ thinker = ThinkingEngine()
531
 
532
# 7. API request models
class GenerationRequest(BaseModel):
    """Request body for content generation."""
    prompt: str
    # Supported values: text, code, image, video, project
    content_type: str = "text"
    language: Optional[str] = None
    conversation_id: Optional[str] = None
    improvements: Optional[List[str]] = None

class ConversationRequest(BaseModel):
    """Request body for starting a new conversation."""
    initial_prompt: str
    # When set, the new conversation is linked to this project.
    project_id: Optional[str] = None
543
 
544
class ProjectRequest(BaseModel):
    """Request body for creating a new project."""
    name: str
    description: str
    project_type: str  # web, mobile, desktop, ai, other
548
 
549
class CodeImprovementRequest(BaseModel):
    """Request body for the code improvement endpoint."""
    code: str
    language: str
    # Improvement names understood by AIEngine.improve_code.
    improvements: List[str] = Field(..., example=["add_comments", "optimize", "add_error_handling"])
553
 
554
# 8. WebSocket conversation management
class ConnectionManager:
    """Tracks one active WebSocket per conversation id."""

    def __init__(self):
        # conversation_id -> connected WebSocket (forward ref: fastapi type)
        self.active_connections: Dict[str, "WebSocket"] = {}

    async def connect(self, conversation_id: str, websocket: "WebSocket"):
        """Accept the socket and register it under the conversation id."""
        await websocket.accept()
        self.active_connections[conversation_id] = websocket

    def disconnect(self, conversation_id: str):
        """Forget the connection; safe to call for an unknown id."""
        self.active_connections.pop(conversation_id, None)

    async def send_message(self, conversation_id: str, message: str):
        """Send text to the conversation's socket, evicting dead connections.

        Previously a send to a closed socket raised and left the stale entry
        registered; now the failure is swallowed and the entry is dropped.
        """
        websocket = self.active_connections.get(conversation_id)
        if websocket is None:
            return
        try:
            await websocket.send_text(message)
        except Exception:
            # Client went away mid-send; remove the stale registration.
            self.disconnect(conversation_id)

manager = ConnectionManager()
572
+
573
# 9. Core API endpoints
@app.post("/api/conversation/start")
async def start_conversation(request: ConversationRequest):
    """Create a new conversation, optionally attached to an existing project."""
    conv_id = memory.create_conversation(request.initial_prompt)

    # Link the conversation to its parent project when one is given.
    project_id = request.project_id
    if project_id and project_id in memory.projects:
        memory.projects[project_id]["conversations"].append(conv_id)
        memory.save_projects()

    # Record the opening user message.
    memory.add_message(
        conv_id=conv_id,
        role="user",
        content=request.initial_prompt,
        metadata={"type": "text", "project_id": project_id},
    )

    return {
        "conversation_id": conv_id,
        "name": memory.conversations[conv_id]["name"],
    }
592
+
593
@app.websocket("/api/conversation/ws/{conversation_id}")
async def websocket_conversation(websocket: WebSocket, conversation_id: str):
    """Real-time conversation over a WebSocket connection.

    Receives JSON messages of the form {"type": "user_message", "content": ...,
    "content_type": ...}, streams "thinking" steps back, then sends the
    generated reply. Image/video replies are sent as path markers
    ("IMAGE_GENERATED:<path>" / "VIDEO_GENERATED:<path>").
    """
    await manager.connect(conversation_id, websocket)

    try:
        while True:
            data = await websocket.receive_text()
            message = json.loads(data)

            if message["type"] == "user_message":
                # Persist the user's message
                memory.add_message(
                    conv_id=conversation_id,
                    role="user",
                    content=message["content"],
                    metadata={"type": message.get("content_type", "text")}
                )

                # Build an execution plan for the reply
                content_type = message.get("content_type", "text")
                plan = await thinker.generate_plan(message["content"], content_type)

                # Stream the "thinking" steps to the client, one per second
                for step in plan["steps"]:
                    await manager.send_message(conversation_id, json.dumps({
                        "type": "thinking",
                        "content": step
                    }))
                    await asyncio.sleep(1)

                # Generate the reply for the requested content type
                if content_type == "text":
                    response = await engine.generate_text(message["content"])
                elif content_type == "code":
                    response = await engine.generate_code(message["content"], message.get("language", "python"))
                elif content_type == "image":
                    image_path = await engine.generate_image(message["content"])
                    response = f"IMAGE_GENERATED:{image_path}"
                elif content_type == "video":
                    video_path = await engine.generate_video(message["content"])
                    response = f"VIDEO_GENERATED:{video_path}"
                else:
                    response = "نوع المحتوى غير مدعوم"

                # Score the reply
                evaluation = analytics.evaluate_response(message["content"], response)

                # Persist the assistant's reply together with its plan/score
                memory.add_message(
                    conv_id=conversation_id,
                    role="assistant",
                    content=response,
                    metadata={
                        "type": content_type,
                        "evaluation": evaluation,
                        "plan": plan
                    }
                )

                # Send the final reply
                await manager.send_message(conversation_id, json.dumps({
                    "type": "assistant_response",
                    "content": response,
                    "evaluation": evaluation
                }))

    except WebSocketDisconnect:
        manager.disconnect(conversation_id)
    except Exception as e:
        # NOTE(review): if the error was caused by the socket closing, this
        # send may fail too — confirm against ConnectionManager behavior.
        logger.error(f"WebSocket error: {str(e)}")
        await manager.send_message(conversation_id, json.dumps({
            "type": "error",
            "content": f"حدث خطأ: {str(e)}"
        }))
668
 
669
@app.post("/api/project/create")
async def create_project(request: ProjectRequest, api_key: str = Depends(API_KEY_HEADER)):
    """Create a new project (requires a valid API key)."""
    if not authenticate(api_key):
        raise HTTPException(status_code=403, detail="غير مصرح به")

    project_id = memory.create_project(request.name, request.description, request.project_type)

    # Set up the project folder on disk.
    project_dir = f"projects/{project_id}"
    os.makedirs(project_dir, exist_ok=True)

    # Seed it with a README describing the project.
    readme_text = f"# {request.name}\n\n{request.description}\n\nCreated by MarkAI at {datetime.now()}"
    with open(f"{project_dir}/README.md", "w", encoding="utf-8") as f:
        f.write(readme_text)

    return {"project_id": project_id, "path": project_dir}
686
+
687
@app.post("/api/code/improve")
async def improve_code(request: CodeImprovementRequest):
    """Analyze the submitted code and return an improved version."""
    analysis = await engine.analyze_code(request.code, request.language)
    improved_code = await engine.improve_code(request.code, request.language, request.improvements)

    # Persist the improved snippet alongside its provenance.
    snippet_meta = {
        "original_code": request.code,
        "improvements": request.improvements,
        "analyzed_at": str(datetime.now()),
    }
    code_id = memory.save_code_snippet(
        code=improved_code,
        language=request.language,
        purpose="Improved code",
        metadata=snippet_meta,
    )

    return {
        "improved_code": improved_code,
        "analysis": analysis,
        "code_id": code_id,
    }
710
+
711
@app.get("/api/conversation/list")
async def list_conversations(project_id: Optional[str] = None):
    """Return conversations, optionally limited to one project."""
    if project_id and project_id in memory.projects:
        linked_ids = memory.projects[project_id]["conversations"]
        convs = [memory.conversations[cid] for cid in linked_ids if cid in memory.conversations]
    else:
        convs = list(memory.conversations.values())

    return {"conversations": convs}
720
+
721
@app.get("/api/project/list")
async def list_projects():
    """Return every known project."""
    all_projects = list(memory.projects.values())
    return {"projects": all_projects}
725
+
726
@app.get("/api/code/list")
async def list_code_snippets(language: Optional[str] = None):
    """Return saved code snippets, optionally filtered by language."""
    snippets = list(memory.code_repository.values())

    if language:
        wanted = language.lower()
        snippets = [s for s in snippets if s["language"].lower() == wanted]

    return {"snippets": snippets}
735
+
736
# 10. Automatic backup system
async def backup_scheduler():
    """Create a data backup once per hour, forever."""
    while True:
        await asyncio.sleep(3600)  # one hour between backups
        try:
            backup_dir = memory.backup_data()
        except Exception as e:
            logger.error(f"فشل النسخ الاحتياطي: {str(e)}")
        else:
            logger.info(f"تم إنشاء نسخة احتياطية في: {backup_dir}")
745
 
746
# 11. User interface (serves files from ./static under /static)
app.mount("/static", StaticFiles(directory="static"), name="static")
 
752
  async def read_root(request: Request):
753
  return templates.TemplateResponse("index.html", {"request": request})
754
 
755
@app.get("/chat/{conversation_id}", response_class=HTMLResponse)
async def chat_interface(request: Request, conversation_id: str):
    """Render the chat page for an existing conversation."""
    conversation = memory.conversations.get(conversation_id)
    if conversation is None:
        raise HTTPException(status_code=404, detail="المحادثة غير موجودة")

    context = {"request": request, "conversation": conversation}
    return templates.TemplateResponse("chat.html", context)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
764
 
765
# 12. Start background tasks
# NOTE(review): @app.on_event is deprecated in recent FastAPI versions;
# consider migrating to a lifespan handler when upgrading.
@app.on_event("startup")
async def startup_event():
    """Warm up the service: schedule backups and preload core models."""
    # Hourly backup loop; runs for the lifetime of the process.
    asyncio.create_task(backup_scheduler())

    # Preload the most-used models so the first request is fast.
    await engine.load_model("text")
    await engine.load_model("code")

    logger.info("تم بدء تشغيل MarkAI بنجاح")
775
 
776
# 13. Run the application
if __name__ == "__main__":
    import uvicorn

    # reload=True only works with an import string ("module:app"); when an
    # application object is passed, uvicorn ignores reload with a warning.
    uvicorn.run("app:app", host="0.0.0.0", port=7860, reload=True)