AumCoreAI committed on
Commit
efe66b2
·
verified ·
1 Parent(s): 9073727

Update modules/orchestrator.py

Browse files
Files changed (1) hide show
  1. modules/orchestrator.py +139 -175
modules/orchestrator.py CHANGED
@@ -9,240 +9,204 @@ import time
9
  import psutil
10
  import threading
11
  import functools
12
- import importlib.util
13
- from abc import ABC, abstractmethod
14
  from datetime import datetime
15
- from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor
16
  from typing import Dict, Any, List, Optional, Union, Callable
17
  from dataclasses import dataclass, field
18
  from fastapi import APIRouter, Form, Request
19
 
 
 
 
 
 
20
  # =================================================================
21
- # 1. ENTERPRISE CONFIGURATION & ARCHITECTURE CONSTANTS
22
  # =================================================================
23
  @dataclass
24
  class MasterConfig:
25
- version: str = "3.5.0-Enterprise"
26
  max_workers: int = os.cpu_count() or 4
27
- timeout: int = 120
28
  memory_limit_mb: int = 1024
29
- db_path: str = "data/system_state.json"
30
- log_file: str = "logs/orchestrator.log"
31
- diagnostics_log: str = "logs/diagnostics.log"
32
- enable_telemetry: bool = True
33
- username: str = "AumCore AI"
34
  repo_name: str = "aumcore-m7b-docker"
 
 
35
 
36
  # =================================================================
37
- # 2. ADVANCED LOGGING & AUDIT SYSTEM
38
  # =================================================================
39
- class AumAuditLogger:
40
- """Professional Audit System with multi-sink capability"""
41
- def __init__(self, name: str, log_file: str):
42
  if not os.path.exists('logs'): os.makedirs('logs')
43
- self.logger = logging.getLogger(name)
44
  self.logger.setLevel(logging.DEBUG)
45
-
46
- # Prevent duplicate handlers
47
  if not self.logger.handlers:
48
- fmt = logging.Formatter('%(asctime)s | [%(levelname)s] | %(name)s | %(message)s')
49
-
50
  fh = logging.FileHandler(log_file)
51
- fh.setFormatter(fmt)
52
  self.logger.addHandler(fh)
53
-
54
  ch = logging.StreamHandler()
55
- ch.setFormatter(fmt)
56
  self.logger.addHandler(ch)
57
 
58
- def info(self, msg): self.logger.info(msg)
59
- def error(self, msg): self.logger.error(msg)
60
- def warn(self, msg): self.logger.warning(msg)
61
- def critical(self, msg): self.logger.critical(msg)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
62
 
63
  # =================================================================
64
- # 3. AI PIPELINE: THE BRAIN OF THE ORCHESTRATOR
65
  # =================================================================
66
- class AIPipelineManager:
67
- """Manages the flow between Intelligence, Reviewer, and Formatter"""
68
- def __init__(self, client, config: MasterConfig, logger: AumAuditLogger):
69
  self.client = client
70
- self.config = config
71
  self.logger = logger
 
72
 
73
- async def run_professional_workflow(self, message: str) -> str:
74
- """Sequential Execution of AI Modules"""
75
- req_id = f"AI-{uuid.uuid4().hex[:6]}"
76
- self.logger.info(f"[{req_id}] Initiating Professional Coding Workflow")
 
 
 
 
 
77
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
78
  try:
79
- # Step 1: Logic Analysis (Calling Code Intelligence)
80
- logic_data = await self._invoke_module_feature("code_intelligence", "analyze_logic", message)
81
-
82
- # Step 2: Expert Persona Generation
83
- raw_response = await self._generate_with_expert_persona(logic_data or message)
84
-
85
- # Step 3: Code Quality Review
86
- reviewed_code = await self._invoke_module_feature("code_reviewer", "review_code", raw_response)
87
- final_code = reviewed_code if reviewed_code else raw_response
88
-
89
- # Step 4: Formatting & Beautification
90
- formatted_res = await self._invoke_module_feature("code_formatter", "format_response", final_code)
91
-
92
- self.logger.info(f"[{req_id}] Workflow completed successfully.")
93
- return formatted_res if formatted_res else final_code
94
-
95
  except Exception as e:
96
- self.logger.error(f"[{req_id}] Workflow Crash: {str(e)}")
97
- return f"❌ Pipeline Error: {str(e)}"
98
 
99
- async def _invoke_module_feature(self, mod_name: str, method_name: str, data: Any):
100
- """Safely call a module if it exists in sys.modules"""
101
  module = sys.modules.get(mod_name)
102
- if module and hasattr(module, method_name):
103
  try:
104
- self.logger.info(f"Calling {mod_name}.{method_name}")
105
- func = getattr(module, method_name)
106
- if asyncio.iscoroutinefunction(func):
107
- return await func(data)
108
- return func(data)
109
  except Exception as e:
110
- self.logger.warn(f"Failed to execute {mod_name}: {e}")
111
  return None
112
 
113
- async def _generate_with_expert_persona(self, prompt: str) -> str:
114
- """Calls Groq with the Senior Developer System Prompt"""
115
- if not self.client:
116
- return "Groq Client Not Available"
117
-
118
- system_prompt = (
119
- f"You are the Core AI Engine of {self.config.username}. "
120
- "Your task is to provide TOP-TIER PROFESSIONAL CODING. "
121
- "Rules: 1. Always use PEP 8. 2. Include robust error handling. "
122
- "3. Use type hints. 4. Output MUST be in beautiful Markdown with language tags."
123
- )
124
-
125
- try:
126
- loop = asyncio.get_event_loop()
127
- response = await loop.run_in_executor(
128
- None,
129
- lambda: self.client.chat.completions.create(
130
- model="llama-3.3-70b-versatile",
131
- messages=[
132
- {"role": "system", "content": system_prompt},
133
- {"role": "user", "content": prompt}
134
- ],
135
- temperature=0.1,
136
- max_tokens=2048
137
- )
138
- )
139
- return response.choices[0].message.content
140
- except Exception as e:
141
- raise Exception(f"LLM Generation Failed: {str(e)}")
142
-
143
  # =================================================================
144
- # 4. RESOURCE & DIAGNOSTICS ENGINE
145
  # =================================================================
146
- class AumDiagnostics:
147
- def __init__(self, config: MasterConfig, logger: AumAuditLogger):
148
- self.config = config
149
- self.logger = logger
150
-
151
- async def get_system_report(self) -> Dict:
152
- """Generates a deep-dive system health report"""
153
  vmem = psutil.virtual_memory()
154
- cpu = psutil.cpu_percent(interval=0.5)
155
-
156
- report = {
157
- "timestamp": datetime.now().isoformat(),
158
- "os": sys.platform,
159
- "python_version": sys.version,
160
- "resources": {
161
- "cpu_usage": f"{cpu}%",
162
- "memory_used": f"{vmem.percent}%",
163
- "memory_available_gb": round(vmem.available / (1024**3), 2)
164
- },
165
- "active_modules": [m for m in sys.modules if m.startswith('code_') or m == 'orchestrator'],
166
- "status": "EXCELLENT" if vmem.percent < 80 else "WARNING"
167
  }
168
- return report
169
 
170
  # =================================================================
171
- # 5. THE MASTER ORCHESTRATOR CLASS
172
  # =================================================================
173
  class AumCoreMaster:
174
- """The High-Level Controller that integrates everything"""
175
- def __init__(self, client=None):
176
  self.config = MasterConfig()
177
- self.logger = AumAuditLogger("Master", self.config.log_file)
178
- self.diagnostics = AumDiagnostics(self.config, self.logger)
179
- self.pipeline = AIPipelineManager(client, self.config, self.logger)
180
- self.is_active = True
181
-
182
- # Graceful shutdown handling
183
- signal.signal(signal.SIGINT, self._shutdown_handler)
184
-
185
- def _shutdown_handler(self, sig, frame):
186
- self.logger.warn("Shutdown signal received. Finalizing tasks...")
187
- self.is_active = False
188
-
189
- async def process_professional_request(self, message: str) -> str:
190
- """Entry point for app.py chat logic"""
191
- # Feature detection: Is this a coding task?
192
- coding_triggers = ["code", "python", "script", "function", "fix error", "build"]
193
- is_coding = any(t in message.lower() for t in coding_triggers)
194
 
195
- if is_coding:
196
- return await self.pipeline.run_professional_workflow(message)
197
-
198
- # Fallback to standard logic if not coding
199
- return "Not a coding request. Handled by fallback."
200
 
201
  # =================================================================
202
- # 6. FASTAPI MODULE REGISTRATION (INTERFACE)
203
  # =================================================================
204
  def register_module(app, client, username):
205
- """
206
- NEVER CHANGE THIS: This connects Orchestrator to app.py
207
- """
208
  master = AumCoreMaster(client)
209
- router = APIRouter(prefix="/system", tags=["Core Orchestration"])
210
-
211
- @router.get("/status")
212
- async def get_status():
213
- report = await master.diagnostics.get_system_report()
214
- return {"success": True, "report": report}
215
 
216
  @router.post("/orchestrate")
217
- async def orchestrate_task(message: str = Form(...)):
218
- """The primary endpoint for professional AI logic"""
219
- response = await master.process_professional_request(message)
220
  return {"response": response}
221
 
222
- @router.get("/health/deep")
223
- async def deep_health():
224
- return {
225
- "status": "online",
226
- "uptime": "active",
227
- "module_version": master.config.version,
228
- "repo": master.config.repo_name
229
- }
230
 
231
- # Dynamic Registration
232
  app.include_router(router)
233
-
234
- # Store master in app state for access if needed
235
  app.state.orchestrator = master
236
-
237
- print("=" * 50)
238
- print(f"🚀 {master.config.version} DEPLOYED")
239
- print(f"✅ Professional Coding Pipeline: ACTIVE")
240
- print(f"✅ System Diagnostics: ONLINE")
241
- print("=" * 50)
242
 
243
- # =================================================================
244
- # 7. STANDALONE BOOTSTRAP (For testing)
245
- # =================================================================
 
 
 
246
  if __name__ == "__main__":
247
- print("Starting AumCore Orchestrator in standalone mode...")
248
- # Standalone testing logic here
 
9
  import psutil
10
  import threading
11
  import functools
12
+ import re
 
13
  from datetime import datetime
 
14
  from typing import Dict, Any, List, Optional, Union, Callable
15
  from dataclasses import dataclass, field
16
  from fastapi import APIRouter, Form, Request
17
 
18
+ # Pygments for Colorful Coding
19
+ from pygments import highlight
20
+ from pygments.lexers import get_lexer_by_name, PythonLexer
21
+ from pygments.formatters import HtmlFormatter
22
+
23
  # =================================================================
24
+ # 1. ADVANCED ENTERPRISE CONFIGURATION
25
  # =================================================================
26
@dataclass
class MasterConfig:
    """Central runtime configuration for the Titan orchestrator."""
    # Version string shown in the startup banner by register_module().
    version: str = "4.0.0-Titan"
    # Worker pool size; falls back to 4 when os.cpu_count() returns None.
    max_workers: int = os.cpu_count() or 4
    # NOTE(review): timeout and memory_limit_mb are not referenced by the
    # visible code in this module — presumably enforced elsewhere; confirm.
    timeout: int = 150
    memory_limit_mb: int = 1024
    # Audit log destination, consumed by AumAuditSystem via AumCoreMaster.
    log_file: str = "logs/orchestrator_titan.log"
    username: str = "AumCoreAI"
    repo_name: str = "aumcore-m7b-docker"
    # Styling for Colorful Output
    code_style: str = "monokai"
37
 
38
  # =================================================================
39
+ # 2. PROFESSIONAL AUDIT & LOGGING SYSTEM
40
  # =================================================================
41
class AumAuditSystem:
    """File + console audit logger for the Titan orchestrator."""

    def __init__(self, log_file: str):
        """Configure the shared "TitanOrchestrator" logger with two sinks.

        Args:
            log_file: Path of the file the FileHandler writes to.
        """
        # FIX: exist_ok avoids the check-then-create race the old
        # `if not os.path.exists(...)` guard had.
        os.makedirs('logs', exist_ok=True)
        self.logger = logging.getLogger("TitanOrchestrator")
        self.logger.setLevel(logging.DEBUG)

        # Guard against duplicate handlers when constructed more than once
        # (getLogger returns the same logger object on every call).
        if not self.logger.handlers:
            formatter = logging.Formatter('%(asctime)s | [%(levelname)s] | %(message)s')

            fh = logging.FileHandler(log_file)
            fh.setFormatter(formatter)
            self.logger.addHandler(fh)

            ch = logging.StreamHandler()
            ch.setFormatter(formatter)
            self.logger.addHandler(ch)

    def log_event(self, level: str, msg: str) -> None:
        """Log *msg* at *level* ("info" | "error" | "warn").

        FIX: unknown levels were previously dropped silently; they now fall
        back to INFO so no audit message is ever lost.
        """
        sinks = {
            "info": self.logger.info,
            "error": self.logger.error,
            "warn": self.logger.warning,
        }
        sinks.get(level, self.logger.info)(msg)
59
+
60
+ # =================================================================
61
+ # 3. COLORFUL FORMATTING ENGINE (PYGMENTS INTEGRATION)
62
+ # =================================================================
63
class ColorfulFormatter:
    """The Engine that kills Black & White responses"""

    # Matches fenced markdown blocks: ```lang\ncode\n```. Compiled once at
    # class load instead of re-parsing the pattern on every inject_colors call.
    _FENCE_RE = re.compile(r"```(\w+)\n(.*?)\n```", re.DOTALL)

    def __init__(self, style: str):
        """Build the Pygments HTML formatter for *style* (e.g. "monokai")."""
        # noclasses=True inlines CSS so the HTML renders without a stylesheet.
        self.formatter = HtmlFormatter(style=style, noclasses=True, nowrap=True)
        self.style_defs = HtmlFormatter(style=style).get_style_defs('.highlight')

    def inject_colors(self, text: str) -> str:
        """Finds markdown blocks and injects colorful HTML"""

        def replacer(match):
            lang = match.group(1)
            code = match.group(2)
            try:
                lexer = get_lexer_by_name(lang, stripall=True)
            except Exception:
                # FIX: was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit. Unknown languages fall back
                # to Python lexing as before.
                lexer = PythonLexer(stripall=True)

            colored_code = highlight(code, lexer, self.formatter)
            return (
                f'<div style="background-color: #272822; color: #f8f8f2; border-radius: 10px; '
                f'padding: 15px; margin: 10px 0; font-family: Consolas, monospace; overflow-x: auto; '
                f'border: 1px solid #444;">'
                f'<div style="color: #66d9ef; margin-bottom: 5px; font-size: 0.8em; '
                f'text-transform: uppercase;">{lang} Editor</div>'
                f'<pre style="margin: 0;">{colored_code}</pre></div>'
            )

        return self._FENCE_RE.sub(replacer, text)
93
 
94
  # =================================================================
95
+ # 4. INTELLIGENT PIPELINE MANAGER
96
  # =================================================================
97
class PipelineManager:
    """Coordinates the AI flow: reason -> generate -> review -> colorize."""

    def __init__(self, client, logger: AumAuditSystem):
        self.client = client  # chat-completions client; may be None (guarded below)
        self.logger = logger
        self.formatter = ColorfulFormatter("monokai")

    async def execute_workflow(self, message: str) -> str:
        """Main AI Flow: Reason -> Generate -> Review -> Colorize"""
        self.logger.log_event("info", f"Processing Request: {message[:50]}...")

        # 1. Logic Expansion (Call Intelligence if available)
        enhanced_prompt = await self._call_module("code_intelligence", "analyze_logic", message)

        # 2. Master Generation
        raw_ai_response = await self._generate_core_ai(enhanced_prompt or message)

        # 3. Code Review (Call Reviewer if available)
        reviewed_response = await self._call_module("code_reviewer", "review_code", raw_ai_response)

        # 4. Final Formatting (Injection of Colors)
        return self.formatter.inject_colors(reviewed_response or raw_ai_response)

    async def _generate_core_ai(self, prompt: str) -> str:
        """Run the blocking LLM call in the default executor; return its text.

        Never raises: failures are logged and returned as an error string.
        """
        # FIX: previously a missing client crashed with AttributeError; fail
        # soft with the same error envelope the except-branch uses.
        if self.client is None:
            self.logger.log_event("error", "LLM Crash: Groq client not configured")
            return "❌ System Error in Generation: Groq client not configured"

        system_instructions = (
            "You are the Titan-Class AI Architect of AumCore. "
            "You write PRODUCTION-READY code with PEP 8 standards. "
            "Always wrap code in ```python blocks. Never use simple text for logic. "
            "Think step-by-step and provide clean, modular, and well-documented scripts."
        )
        try:
            # Use executor for blocking Groq call. FIX: get_running_loop() is
            # the non-deprecated way to reach the loop inside a coroutine.
            loop = asyncio.get_running_loop()
            res = await loop.run_in_executor(None, lambda: self.client.chat.completions.create(
                model="llama-3.3-70b-versatile",
                messages=[{"role": "system", "content": system_instructions},
                          {"role": "user", "content": prompt}],
                temperature=0.3,
                max_tokens=3000
            ))
            return res.choices[0].message.content
        except Exception as e:
            self.logger.log_event("error", f"LLM Crash: {e}")
            return f"❌ System Error in Generation: {str(e)}"

    async def _call_module(self, mod_name: str, method: str, data: str):
        """Best-effort call into an optional sibling module.

        Returns the module's result (awaited if the target is a coroutine
        function), or None when the module/method is absent or fails.
        """
        module = sys.modules.get(mod_name)
        if module and hasattr(module, method):
            try:
                func = getattr(module, method)
                return await func(data) if asyncio.iscoroutinefunction(func) else func(data)
            except Exception as e:
                self.logger.log_event("warn", f"Module {mod_name} failed: {e}")
        return None
151
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
152
  # =================================================================
153
+ # 5. DIAGNOSTICS & SYSTEM MONITOR
154
  # =================================================================
155
class TitanDiagnostics:
    """Stateless health probe backed by psutil readings."""

    @staticmethod
    async def run_check():
        """Return a point-in-time snapshot of host health metrics."""
        memory_info = psutil.virtual_memory()
        cpu_now = psutil.cpu_percent()
        seconds_up = round(time.time() - psutil.boot_time())
        return {
            "status": "Healthy",
            "cpu_load": f"{cpu_now}%",
            "memory": f"{memory_info.percent}% used",
            "uptime": f"{seconds_up}s",
        }
 
165
 
166
  # =================================================================
167
+ # 6. MASTER ORCHESTRATOR INTERFACE
168
  # =================================================================
169
class AumCoreMaster:
    """Top-level facade wiring config, audit logging, and the AI pipeline."""

    def __init__(self, client):
        self.config = MasterConfig()
        self.audit = AumAuditSystem(self.config.log_file)
        self.pipeline = PipelineManager(client, self.audit)
        self.is_running = True

    async def handle_request(self, message: str) -> str:
        """Route a message: coding requests get the full pipeline workflow,
        everything else gets plain generation plus color injection."""
        # Check for code intent
        lowered = message.lower()
        coding_triggers = ("code", "python", "script", "build", "debug", "function")
        if any(trigger in lowered for trigger in coding_triggers):
            return await self.pipeline.execute_workflow(message)

        # Standard Chat with basic color injection
        plain_reply = await self.pipeline._generate_core_ai(message)
        return self.pipeline.formatter.inject_colors(plain_reply)
 
 
185
 
186
  # =================================================================
187
+ # 7. FASTAPI REGISTRATION (THE BRIDGE)
188
  # =================================================================
189
def register_module(app, client, username):
    """Attach the Titan orchestrator routes and state to a FastAPI app.

    Args:
        app: FastAPI application to register routes on.
        client: Chat-completions client forwarded to the pipeline.
        username: Accepted for interface compatibility; not used here.
    """
    master = AumCoreMaster(client)
    router = APIRouter(prefix="/system")

    @router.post("/orchestrate")
    async def process_task(message: str = Form(...)):
        # Primary entry point: routing/coloring is delegated to the master.
        response = await master.handle_request(message)
        return {"response": response}

    @router.get("/titan/health")
    async def health():
        return await TitanDiagnostics.run_check()

    app.include_router(router)
    # Expose the orchestrator so other app code can reach it via app.state.
    app.state.orchestrator = master

    print("\n" + "=" * 60)
    print(f"🚀 TITAN ORCHESTRATOR v{master.config.version} ONLINE")
    # FIX: dropped pointless f-prefixes on placeholder-free strings (F541).
    print("✅ Colorful Injection Engine: READY")
    print("✅ Expert Coding Pipeline: ACTIVE")
    print("=" * 60 + "\n")
210
+
211
# Standalone entry point: only reached when this module is run directly,
# not when imported by app.py via register_module().
if __name__ == "__main__":
    print("Orchestrator testing mode...")