File size: 6,375 Bytes
a44ef87
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
import logging
import json
from llm_provider import llm
from tools import (
    structural_beam_analysis, slab_analysis, column_analysis, 
    foundation_analysis, draw_beam_section, generate_beam_dxf,
    retaining_wall_analysis, stair_analysis
)
from rules_engine import check_structural_safety
from database import SessionLocal, ProjectSettings, MemoryEntry

# Module-level logger shared by all agent classes in this file.
logger = logging.getLogger("agents")

# --- 1. Design agent ---
class StructuralAgent:
    """Keyword-driven structural design agent (beams, retaining walls, stairs)."""

    def __init__(self):
        self.name = "Blue-Struct"

    def analyze(self, text, nums):
        """Run every structural tool whose keyword appears in *text*.

        *nums* supplies positional numeric inputs extracted upstream; sensible
        defaults are substituted when values are missing. Returns a dict of
        display sections plus optional ``image_data``/``dxf_data`` payloads.
        """
        out = {}

        # Beam / ground-beam request ("كمرة" = beam, "ميدة" = ground beam)
        if ("كمرة" in text) or ("ميدة" in text):
            span = nums[0] if nums else 5.0
            load = nums[1] if len(nums) > 1 else 2.0
            beam = structural_beam_analysis(span, load)
            if beam["success"]:
                best = beam["scenarios"][0]
                safety = check_structural_safety(best["raw_values"])
                out["📊 النتائج"] = f"Best: {best['label']} | Depth: {best['depth_mm']}mm | Cost: {best['cost_egp']} EGP"
                if not safety["is_safe"]:
                    out["⚠️ تحذير"] = "\n".join(safety["warnings"])
                # First token of the steel spec is the bar count (e.g. "4 T16" -> 4).
                bar_count = int(best["steel_bars"].split()[0])
                out["image_data"] = draw_beam_section(best["width_mm"], best["depth_mm"], bar_count)
                out["dxf_data"] = generate_beam_dxf(best["width_mm"], best["depth_mm"], bar_count)

        # Retaining-wall request
        if "جدار استنادي" in text or "retaining wall" in text:
            wall = retaining_wall_analysis(
                nums[0] if nums else 3.0,          # wall height
                nums[1] if len(nums) > 1 else 20.0  # soil load
            )
            if wall["success"]:
                out["📐 نتائج الجدار"] = wall["results"]["explanation"]
                out["raw_values"] = wall["results"]["raw_values"]

        # Stair request — NOTE(review): reuses the "raw_values" key, so a text
        # matching both wall and stair keeps only the stair values (as before).
        if "سلم" in text or "stair" in text:
            stair = stair_analysis(nums[0] if nums else 3.0)
            if stair["success"]:
                out["📐 نتائج السلم"] = stair["results"]["explanation"]
                out["raw_values"] = stair["results"]["raw_values"]

        return out

# --- 2. Vision agent ---
class VisionAgent:
    """Thin async wrapper around the multimodal LLM endpoints."""

    def __init__(self):
        self.name = "Blue-Vision"

    async def analyze_image(self, image_base64, prompt):
        """Forward an arbitrary base64 image plus *prompt* to the vision model."""
        return await llm.analyze_image(image_base64, prompt)

    async def detect_defects(self, image_base64):
        """Ask the vision model to report visible construction defects as JSON."""
        qc_prompt = """
        Analyze this construction site image as a QC Engineer.
        Identify any visible defects (Honeycombing, Cracks, Exposed Rebar).
        Return JSON: {"defects": [{"type":"...", "severity":"High/Med/Low"}]}
        """
        return await llm.analyze_image(image_base64, qc_prompt)

# --- 3. Reporting and reasoning agent (memory enabled) ---
class ReasoningAgent:
    """LLM-backed reasoning/reporting agent with optional per-project memory.

    Fix vs. original: every ``SessionLocal()`` session is now closed in a
    ``finally`` block — previously ``db.close()`` sat on the happy path only,
    so a raising query/commit leaked the session.
    """

    def __init__(self):
        self.name = "Blue-Logic"

    async def generate_summary(self, data):
        """Summarize analysis *data* in Arabic, focusing on safety and cost."""
        prompt = f"""
        You are an expert engineer. Summarize this data in Arabic.
        Focus on safety and cost. 
        Data: {json.dumps(data)}
        """
        return await llm.generate_text(prompt, model_preference=["deepseek", "gpt-4o", "gemini"])

    def _fetch_memory_context(self, project_id):
        """Return an Arabic preamble built from the project's 5 newest chat memories ("" on failure/none)."""
        try:
            db = SessionLocal()
            try:
                memories = db.query(MemoryEntry).filter(
                    MemoryEntry.project_id == project_id,
                    MemoryEntry.entry_type == 'chat'
                ).order_by(MemoryEntry.created_at.desc()).limit(5).all()
                if not memories:
                    return ""
                memory_lines = [
                    f"{m.created_at.strftime('%Y-%m-%d')}: {m.content[:100]}"
                    for m in memories
                ]
                return "ذكريات سابقة من هذا المشروع:\n" + "\n".join(memory_lines) + "\n\n"
            finally:
                db.close()  # always release the session, even if the query raises
        except Exception as e:
            # Memory is best-effort: log and continue without context.
            logger.warning(f"Could not fetch memory: {e}")
            return ""

    def _preferred_model(self, project_id):
        """Return the project's configured AI model, falling back to "gemini"."""
        preferred = "gemini"
        try:
            db = SessionLocal()
            try:
                settings = db.query(ProjectSettings).filter(
                    ProjectSettings.project_id == project_id
                ).first()
                if settings and settings.preferred_ai_model:
                    preferred = settings.preferred_ai_model
            finally:
                db.close()
        except Exception as e:
            logger.warning(f"Could not fetch project settings: {e}")
        return preferred

    def _save_chat_memory(self, project_id, text, response):
        """Best-effort persistence of the Q/A pair as a project memory entry."""
        try:
            db = SessionLocal()
            try:
                memory = MemoryEntry(
                    project_id=project_id,
                    entry_type="chat",
                    title=f"User query: {text[:50]}",
                    content=f"Q: {text}\nA: {response[:200]}",
                    tags="chat,auto"
                )
                db.add(memory)
                db.commit()
            finally:
                db.close()
        except Exception as e:
            logger.warning(f"Could not save memory: {e}")

    async def chat(self, text, history=None, project_id=None):
        """Answer *text*, optionally enriched with conversation *history* and project memory.

        When *project_id* is given: recent memories are prepended to the prompt,
        the project's preferred model leads the fallback chain, and non-trivial
        exchanges are written back to memory. All storage is best-effort.
        """
        memory_context = self._fetch_memory_context(project_id) if project_id else ""

        full_prompt = memory_context + text
        if history:
            # Keep only the last 3 turns to bound prompt size.
            context = "\n".join([f"{m['role']}: {m['content']}" for m in history[-3:]])
            full_prompt = f"Context of conversation:\n{context}\n\n{memory_context}User: {text}\nAssistant:"

        preferred_model = self._preferred_model(project_id) if project_id else "gemini"

        # Fallback chain: project preference first, then cheap-to-expensive providers.
        model_preference = [
            preferred_model,
            "mistral-small",
            "mistral-medium",
            "mistral-large",
            "openrouter-llama32-3b",
            "openrouter-gemma3-4b",
            "openrouter-gemma3n-4b",
            "openrouter-gemma3-12b",
            "openrouter-gemma3n-2b",
            "openrouter-zai-glm",
            "gemini",
            "gpt-4o",
            "deepseek",
            "grok"
        ]
        # De-duplicate while preserving order (preferred_model may repeat a default).
        seen = set()
        unique_models = [m for m in model_preference if not (m in seen or seen.add(m))]

        response = await llm.generate_text(full_prompt, model_preference=unique_models)

        # Persist the exchange, skipping trivially short queries.
        if project_id and len(text) > 5:
            self._save_chat_memory(project_id, text, response)

        return response

# Initialize the agents: module-level singletons shared by the rest of the app.
struct_agent = StructuralAgent()
vision_agent = VisionAgent()
reasoning_agent = ReasoningAgent()