| import os |
| import json |
| import requests |
| from typing import List, Dict, Any |
|
|
| try: |
| from groq import Groq |
| except ImportError: |
| Groq = None |
|
|
| try: |
| import openai |
| except ImportError: |
| openai = None |
|
|
| |
| try: |
| from server.smart_action_engine import generate_smart_action_plan |
| except ImportError: |
| generate_smart_action_plan = None |
|
|
| def _run_rule_engine(audit_data: dict) -> List[Dict[str, str]]: |
| """ |
| Step 1: Simple Rule Engine - Arabic output (Legacy fallback) |
| """ |
| actions = [] |
| pages = audit_data.get('pages', []) |
| if not pages: |
| return actions |
|
|
| for page in pages: |
| url = page.get('url', '') |
| tags = [h.get('tag', '') for h in page.get('headings', [])] |
| if 'h1' not in tags: |
| actions.append({ |
| "type": "technical", |
| "task": f"أضف وسم H1 إلى الصفحة: {url}", |
| "priority": "high" |
| }) |
| paras = page.get('paragraphs', []) |
| if paras: |
| avg_words = sum(len(str(p).split()) for p in paras) / len(paras) |
| if avg_words < 20: |
| actions.append({ |
| "type": "content", |
| "task": f"توسيع المحتوى الضعيف في {url} (متوسط الكلمات: {int(avg_words)} كلمة/فقرة)", |
| "priority": "medium" |
| }) |
| else: |
| actions.append({ |
| "type": "content", |
| "task": f"أضف فقرات نصية ومحتوى كافياً إلى الصفحة: {url}", |
| "priority": "high" |
| }) |
|
|
| ai_vis = audit_data.get('ai_visibility', {}) |
| if ai_vis and not ai_vis.get('results', []): |
| actions.append({ |
| "type": "authority", |
| "task": "لا يوجد ظهور في الذكاء الاصطناعي! أنشئ فقرة 'من نحن' قوية وانشر بيانات منظمة JSON-LD", |
| "priority": "high" |
| }) |
|
|
| return actions |
|
|
def _call_ollama(prompt: str, model: str = "mistral", base_url: str = None) -> str:
    """Call a local Ollama instance and return the raw model response text.

    Args:
        prompt: Prompt text to send to the model.
        model: Ollama model name (default "mistral").
        base_url: Ollama server root. Falls back to the OLLAMA_URL env var,
            then to the standard localhost port — consistent with how the
            Groq/OpenAI helpers read their config from the environment.

    Returns:
        The generated text, or "" on any failure (best-effort, never raises).
    """
    base = base_url or os.getenv("OLLAMA_URL", "http://localhost:11434")
    try:
        payload = {
            "model": model,
            "prompt": prompt,
            "stream": False,  # single JSON body instead of a token stream
            "format": "json"  # ask Ollama to constrain output to valid JSON
        }
        resp = requests.post(f"{base}/api/generate", json=payload, timeout=30)
        resp.raise_for_status()
        return resp.json().get("response", "")
    except Exception as e:
        # Ollama is optional local infrastructure; swallow the error so the
        # caller can fall through to its next provider.
        print(f"Ollama error: {e}")
        return ""
|
|
def _call_groq(prompt: str, api_key: str) -> str:
    """Query Groq's chat API for a JSON action plan; return "" on any failure.

    Model is taken from the GROQ_MODEL env var (default llama-3.3-70b-versatile).
    Requires the optional `groq` package and a non-empty API key.
    """
    if not Groq or not api_key:
        return ""
    try:
        model_name = os.getenv('GROQ_MODEL', 'llama-3.3-70b-versatile')
        chat_messages = [
            {'role': 'system', 'content': 'You are an AI Growth Engine. Output valid JSON only.'},
            {'role': 'user', 'content': prompt}
        ]
        # response_format forces the model to emit a single JSON object.
        response = Groq(api_key=api_key).chat.completions.create(
            model=model_name,
            messages=chat_messages,
            response_format={"type": "json_object"},
            temperature=0.2
        )
        return response.choices[0].message.content
    except Exception as e:
        print(f"Groq logic error: {e}")
        return ""
|
|
def _call_openai(prompt: str, api_key: str) -> str:
    """Query OpenAI's chat API for a JSON action plan; return "" on any failure.

    Model is taken from the OPENAI_MODEL env var (default gpt-4o-mini).
    Requires the optional `openai` package and a non-empty API key.
    """
    if not openai or not api_key:
        return ""
    try:
        model_name = os.getenv('OPENAI_MODEL', 'gpt-4o-mini')
        chat_messages = [
            {'role': 'system', 'content': 'You are an AI Growth Engine. Output valid JSON only.'},
            {'role': 'user', 'content': prompt}
        ]
        # response_format forces the model to emit a single JSON object.
        response = openai.OpenAI(api_key=api_key).chat.completions.create(
            model=model_name,
            messages=chat_messages,
            response_format={"type": "json_object"},
            temperature=0.2
        )
        return response.choices[0].message.content
    except Exception as e:
        print(f"OpenAI logic error: {e}")
        return ""
|
|
def generate_action_plan(audit_data: dict, api_keys: dict = None) -> dict:
    """
    Main Action Engine logic - Now uses Smart Action Engine for enhanced recommendations.

    Falls back to the legacy pipeline (rule engine + LLM provider cascade
    Groq -> OpenAI -> local Ollama) when the smart engine is unavailable
    or fails.

    Args:
        audit_data: Audit payload; reads 'pages', 'ai_visibility',
            'org_name', and optionally 'id'.
        api_keys: Optional mapping with 'groq' / 'openai' keys; the
            GROQ_API_KEY / OPENAI_API_KEY env vars are used as fallbacks.

    Returns:
        {"ok": True, "actions": [...]} with de-duplicated action dicts.
    """
    api_keys = api_keys or {}

    # Preferred path: delegate to the Smart Action Engine when importable.
    if generate_smart_action_plan:
        try:
            smart_result = generate_smart_action_plan(audit_data, api_keys)
            if smart_result.get('ok'):
                print("✅ Using Smart Action Engine")
                return smart_result
        except Exception as e:
            print(f"⚠️ Smart Action Engine failed: {e}, falling back to legacy engine")

    print("📋 Using Legacy Action Engine")

    # Deterministic rule-based findings always run.
    rule_actions = _run_rule_engine(audit_data)

    # Build a compact Arabic prompt summarizing the audit for the LLM.
    brand = audit_data.get('org_name', 'Unknown')
    pages = audit_data.get('pages', [])
    ai_vis = audit_data.get('ai_visibility', {})

    page_summary = f"Pages crawled: {len(pages)}. "
    if pages:
        titles = [p.get('title') for p in pages[:5] if p.get('title')]
        page_summary += f"Top pages: {', '.join(titles)}."

    prompt = f"""
حلّل بيانات SEO هذه وأعد خطة عمل باللغة العربية.
العلامة التجارية: {brand}
{page_summary}
حالة الظهور في AI: {'جيد' if ai_vis.get('results') else 'ضعيف / غير موجود'}

اكتب أهم 6 إجراءات استراتيجية بالعربية لتحسين SEO والظهور في الذكاء الاصطناعي.

الصيغة المطلوبة حصراً:
{{
"actions": [
{{
"type": "content|تقني|سلطة|تواصل",
"task": "وصف قصير واضح بالعربية",
"priority": "high|medium|low"
}}
]
}}
"""

    # Provider cascade: Groq first, then OpenAI, then a local Ollama.
    raw_ai_response = ""

    groq_key = api_keys.get('groq') or os.getenv('GROQ_API_KEY')
    if groq_key:
        raw_ai_response = _call_groq(prompt, groq_key)

    if not raw_ai_response:
        openai_key = api_keys.get('openai') or os.getenv('OPENAI_API_KEY')
        if openai_key:
            raw_ai_response = _call_openai(prompt, openai_key)

    if not raw_ai_response:
        raw_ai_response = _call_ollama(prompt)

    # Parse the LLM output defensively: it may be empty, invalid JSON, or a
    # JSON value that is not the expected {"actions": [...]} object.
    ai_actions = []
    if raw_ai_response:
        try:
            parsed = json.loads(raw_ai_response)
            if isinstance(parsed, dict):
                candidate = parsed.get("actions", [])
                if isinstance(candidate, list):
                    ai_actions = candidate
        except Exception as e:
            print(f"Failed to parse AI action response: {e}")

    combined_actions = rule_actions + ai_actions

    # Best-effort knowledge-graph build. The import lives INSIDE the try so
    # a stripped deployment without server.entity_extractor cannot crash
    # the whole plan (previously the bare import sat outside the guard).
    try:
        from server import entity_extractor
        job_id = audit_data.get('id')
        entity_extractor.build_knowledge_graph(job_id, audit_data)
    except Exception as e:
        print(f"Entity Graph Error: {e}")

    # De-duplicate by normalized task text, keeping the first occurrence
    # (rule-engine actions win over LLM duplicates).
    unique_actions = []
    seen = set()
    for act in combined_actions:
        if not isinstance(act, dict):
            continue  # guard against malformed LLM items (e.g. bare strings)
        t = act.get('task', '').strip().lower()
        if t not in seen and t:
            seen.add(t)
            unique_actions.append(act)

    return {
        "ok": True,
        "actions": unique_actions
    }
|
|