hectorruiz9 committed on
Commit c329059 · verified · 1 Parent(s): 3123cd9

Update main.py

Files changed (1)
  1. main.py +60 -603
main.py CHANGED
@@ -1,603 +1,60 @@
- import json
- import asyncio
- import aiohttp
- import hashlib
- import base64
- import time
- import random
- import logging
- from typing import Dict, List, Optional, Any, Callable
- from dataclasses import dataclass, field
- from enum import Enum
- from functools import wraps
- from datetime import datetime
- import traceback
- import flet as ft
-
- # ============================================
- # SECURE LOGGING CONFIGURATION
- # ============================================
- class SecureFormatter(logging.Formatter):
-     """Formatter that obfuscates sensitive information in logs"""
-
-     SENSITIVE_PATTERNS = ['api_key', 'token', 'secret', 'password', 'credential']
-
-     def format(self, record):
-         msg = super().format(record)
-         for pattern in self.SENSITIVE_PATTERNS:
-             if pattern in msg.lower():
-                 # Obfuscate sensitive values
-                 msg = self._obfuscate_sensitive(msg, pattern)
-         return msg
-
-     def _obfuscate_sensitive(self, text: str, pattern: str) -> str:
-         import re
-         # Redact values that follow sensitive patterns
-         return re.sub(
-             rf'({pattern}["\s:=]+)([^\s,"}}]+)',
-             rf'\1[REDACTED]',
-             text,
-             flags=re.IGNORECASE
-         )
-
- logging.setLoggerClass(logging.Logger)
- logger = logging.getLogger("HectronSystem")
- logger.setLevel(logging.INFO)
-
- handler = logging.StreamHandler()
- handler.setFormatter(SecureFormatter(
-     '%(asctime)s | %(levelname)s | %(name)s | %(message)s'
- ))
- logger.addHandler(handler)
-
-
- # ============================================
- # ENUMS AND TYPES
- # ============================================
- class ModelProvider(Enum):
-     OLLAMA = "ollama"
-     OPENAI = "openai"
-     ANTHROPIC = "anthropic"
-     GROQ = "groq"
-     LOCAL = "local"
-
-
- class AgentStatus(Enum):
-     IDLE = "idle"
-     PROCESSING = "processing"
-     ERROR = "error"
-     RECOVERING = "recovering"
-
-
- # ============================================
- # DATA CLASSES
- # ============================================
- @dataclass
- class APIConfig:
-     """Configuration for an API provider"""
-     provider: ModelProvider
-     base_url: str
-     api_key: Optional[str] = None
-     model: str = ""
-     max_tokens: int = 4096
-     temperature: float = 0.7
-     timeout: int = 60
-     max_retries: int = 3
-     retry_delay: float = 1.0
-     enabled: bool = True
-     priority: int = 0  # Lower = higher priority
-
-
- @dataclass
- class TraceRecord:
-     """Obfuscated trace record"""
-     trace_id: str
-     agent_key: str
-     timestamp: float
-     input_hash: str
-     output_hash: str
-     provider: str
-     latency_ms: float
-     success: bool
-     error_type: Optional[str] = None
-
-     def to_secure_dict(self) -> dict:
-         """Converts the record to a dictionary that is safe to store"""
-         return {
-             "trace_id": self.trace_id,
-             "agent": self._hash_agent(self.agent_key),
-             "ts": self.timestamp,
-             "in_h": self.input_hash[:16],
-             "out_h": self.output_hash[:16] if self.output_hash else None,
-             "prov": self.provider,
-             "lat": self.latency_ms,
-             "ok": self.success,
-             "err": self.error_type
-         }
-
-     @staticmethod
-     def _hash_agent(agent: str) -> str:
-         return hashlib.sha256(agent.encode()).hexdigest()[:12]
-
-
- @dataclass
- class AgentConfig:
-     """Configuration for an agent"""
-     role: str
-     objective: str
-     instructions: str
-     preferred_provider: Optional[ModelProvider] = None
-     fallback_providers: List[ModelProvider] = field(default_factory=list)
-     max_context_tokens: int = 8192
-
-
- # ============================================
- # ANTI-ERROR DECORATOR
- # ============================================
- def anti_error(
-     max_retries: int = 3,
-     base_delay: float = 1.0,
-     exponential_backoff: bool = True,
-     jitter: bool = True,
-     fallback_value: Any = None
- ):
-     """
-     Anti-error hardening decorator with:
-     - Retries with exponential backoff
-     - Jitter to avoid the thundering-herd effect
-     - Configurable fallback value
-     """
-     def decorator(func: Callable):
-         @wraps(func)
-         async def async_wrapper(*args, **kwargs):
-             last_exception = None
-
-             for attempt in range(max_retries):
-                 try:
-                     return await func(*args, **kwargs)
-                 except Exception as e:
-                     last_exception = e
-
-                     # Compute the delay
-                     if exponential_backoff:
-                         delay = base_delay * (2 ** attempt)
-                     else:
-                         delay = base_delay
-
-                     # Add jitter
-                     if jitter:
-                         delay *= (0.5 + random.random())
-
-                     logger.warning(
-                         f"Attempt {attempt + 1}/{max_retries} failed: {type(e).__name__}. "
-                         f"Retrying in {delay:.2f}s"
-                     )
-
-                     if attempt < max_retries - 1:
-                         await asyncio.sleep(delay)
-
-             # All retries exhausted
-             logger.error(
-                 f"All retries exhausted for {func.__name__}: {last_exception}"
-             )
-
-             if fallback_value is not None:
-                 return fallback_value
-
-             raise last_exception
-
-         @wraps(func)
-         def sync_wrapper(*args, **kwargs):
-             last_exception = None
-
-             for attempt in range(max_retries):
-                 try:
-                     return func(*args, **kwargs)
-                 except Exception as e:
-                     last_exception = e
-
-                     if exponential_backoff:
-                         delay = base_delay * (2 ** attempt)
-                     else:
-                         delay = base_delay
-
-                     if jitter:
-                         delay *= (0.5 + random.random())
-
-                     logger.warning(
-                         f"Attempt {attempt + 1}/{max_retries} failed: {type(e).__name__}"
-                     )
-
-                     if attempt < max_retries - 1:
-                         time.sleep(delay)
-
-             if fallback_value is not None:
-                 return fallback_value
-
-             raise last_exception
-
-         if asyncio.iscoroutinefunction(func):
-             return async_wrapper
-         return sync_wrapper
-
-     return decorator
-
-
- # ============================================
- # SECURE TRACE MANAGER
- # ============================================
- class SecureTracer:
-     """
-     Leak-resistant tracing system:
-     - Hashed inputs/outputs
-     - Random trace IDs
-     - No sensitive content is stored
-     - Automatic record rotation
-     """
-
-     def __init__(self, max_records: int = 10000, rotation_interval: int = 3600):
-         self.traces: List[TraceRecord] = []
-         self.max_records = max_records
-         self.rotation_interval = rotation_interval
-         self._last_rotation = time.time()
-         self._salt = self._generate_salt()
-
-     def _generate_salt(self) -> str:
-         """Generates a random salt for hashing"""
-         return hashlib.sha256(
-             f"{time.time()}{random.random()}".encode()
-         ).hexdigest()[:32]
-
-     def _hash_content(self, content: str) -> str:
-         """Hashes content securely"""
-         if not content:
-             return ""
-         return hashlib.sha256(
-             f"{self._salt}{content}".encode()
-         ).hexdigest()
-
-     def _generate_trace_id(self) -> str:
-         """Generates a unique, non-traceable trace ID"""
-         return hashlib.sha256(
-             f"{time.time()}{random.random()}{id(self)}".encode()
-         ).hexdigest()[:24]
-
-     def start_trace(self, agent_key: str, input_content: str) -> str:
-         """Starts a new trace"""
-         self._check_rotation()
-
-         trace_id = self._generate_trace_id()
-         trace = TraceRecord(
-             trace_id=trace_id,
-             agent_key=agent_key,
-             timestamp=time.time(),
-             input_hash=self._hash_content(input_content),
-             output_hash="",
-             provider="",
-             latency_ms=0,
-             success=False
-         )
-         self.traces.append(trace)
-         return trace_id
-
-     def end_trace(
-         self,
-         trace_id: str,
-         output_content: str,
-         provider: str,
-         success: bool,
-         error_type: Optional[str] = None
-     ):
-         """Finalizes a trace"""
-         for trace in self.traces:
-             if trace.trace_id == trace_id:
-                 trace.output_hash = self._hash_content(output_content)
-                 trace.provider = provider
-                 trace.latency_ms = (time.time() - trace.timestamp) * 1000
-                 trace.success = success
-                 trace.error_type = error_type
-                 break
-
-     def _check_rotation(self):
-         """Checks whether the records need to be rotated"""
-         current_time = time.time()
-
-         if (current_time - self._last_rotation > self.rotation_interval or
-                 len(self.traces) >= self.max_records):
-             self._rotate()
-
-     def _rotate(self):
-         """Rotates the records and regenerates the salt"""
-         # Drop the older half of the records
-         old_traces = self.traces[:len(self.traces)//2]
-         self.traces = self.traces[len(self.traces)//2:]
-
-         # Regenerate the salt to invalidate previous hashes
-         self._salt = self._generate_salt()
-         self._last_rotation = time.time()
-
-         logger.info(f"Trace rotation complete. {len(old_traces)} records rotated out.")
-
-     def get_stats(self) -> dict:
-         """Returns aggregate statistics (no sensitive data)"""
-         if not self.traces:
-             return {"total": 0, "success_rate": 0, "avg_latency_ms": 0}
-
-         successful = sum(1 for t in self.traces if t.success)
-         avg_latency = sum(t.latency_ms for t in self.traces) / len(self.traces)
-
-         provider_counts = {}
-         for t in self.traces:
-             provider_counts[t.provider] = provider_counts.get(t.provider, 0) + 1
-
-         return {
-             "total": len(self.traces),
-             "success_rate": successful / len(self.traces),
-             "avg_latency_ms": avg_latency,
-             "by_provider": provider_counts
-         }
-
-
- # ============================================
- # OLLAMA CLIENT
- # ============================================
- class OllamaClient:
-     """Client for local Ollama models"""
-
-     def __init__(self, base_url: str = "http://localhost:11434"):
-         self.base_url = base_url.rstrip('/')
-         self.session: Optional[aiohttp.ClientSession] = None
-
-     async def _ensure_session(self):
-         if self.session is None or self.session.closed:
-             self.session = aiohttp.ClientSession()
-
-     @anti_error(max_retries=3, base_delay=2.0)
-     async def generate(
-         self,
-         model: str,
-         prompt: str,
-         system: Optional[str] = None,
-         temperature: float = 0.7,
-         max_tokens: int = 4096
-     ) -> str:
-         """Generates a response using Ollama"""
-         await self._ensure_session()
-
-         payload = {
-             "model": model,
-             "prompt": prompt,
-             "stream": False,
-             "options": {
-                 "temperature": temperature,
-                 "num_predict": max_tokens
-             }
-         }
-
-         if system:
-             payload["system"] = system
-
-         async with self.session.post(
-             f"{self.base_url}/api/generate",
-             json=payload,
-             timeout=aiohttp.ClientTimeout(total=120)
-         ) as response:
-             response.raise_for_status()
-             data = await response.json()
-             return data.get("response", "")
-
-     @anti_error(max_retries=2, base_delay=1.0)
-     async def list_models(self) -> List[str]:
-         """Lists the available models"""
-         await self._ensure_session()
-
-         async with self.session.get(
-             f"{self.base_url}/api/tags",
-             timeout=aiohttp.ClientTimeout(total=30)
-         ) as response:
-             response.raise_for_status()
-             data = await response.json()
-             return [m["name"] for m in data.get("models", [])]
-
-     async def close(self):
-         if self.session and not self.session.closed:
-             await self.session.close()
-
-
- # ============================================
- # MULTI-API CLIENT
- # ============================================
- class MultiAPIClient:
-     """
-     Client that manages multiple API providers with:
-     - Automatic failover
-     - Priority-based load balancing
-     - Adaptive rate limiting
-     - Circuit breaker
-     """
-
-     def __init__(self):
-         self.providers: Dict[ModelProvider, APIConfig] = {}
-         self.ollama_client: Optional[OllamaClient] = None
-         self.session: Optional[aiohttp.ClientSession] = None
-         self._circuit_breakers: Dict[ModelProvider, dict] = {}
-         self._rate_limits: Dict[ModelProvider, list] = {}
-
-     def register_provider(self, config: APIConfig):
-         """Registers an API provider"""
-         self.providers[config.provider] = config
-         self._circuit_breakers[config.provider] = {
-             "failures": 0,
-             "last_failure": 0,
-             "state": "closed",  # closed, open, half-open
-             "threshold": 5,
-             "reset_timeout": 60
-         }
-         self._rate_limits[config.provider] = []
-
-         if config.provider == ModelProvider.OLLAMA:
-             self.ollama_client = OllamaClient(config.base_url)
-
-         logger.info(f"Provider registered: {config.provider.value}")
-
-     async def _ensure_session(self):
-         if self.session is None or self.session.closed:
-             self.session = aiohttp.ClientSession()
-
-     def _check_circuit_breaker(self, provider: ModelProvider) -> bool:
-         """Checks the circuit breaker state"""
-         cb = self._circuit_breakers[provider]
-         current_time = time.time()
-
-         if cb["state"] == "open":
-             if current_time - cb["last_failure"] > cb["reset_timeout"]:
-                 cb["state"] = "half-open"
-                 logger.info(f"Circuit breaker for {provider.value} is now half-open")
-                 return True
-             return False
-
-         return True
-
-     def _record_success(self, provider: ModelProvider):
-         """Records a success (resets the circuit breaker)"""
-         cb = self._circuit_breakers[provider]
-         cb["failures"] = 0
-         cb["state"] = "closed"
-
-     def _record_failure(self, provider: ModelProvider):
-         """Records a failure"""
-         cb = self._circuit_breakers[provider]
-         cb["failures"] += 1
-         cb["last_failure"] = time.time()
-
-         if cb["failures"] >= cb["threshold"]:
-             cb["state"] = "open"
-             logger.warning(f"Circuit breaker OPEN for {provider.value}")
-
-     def _check_rate_limit(self, provider: ModelProvider, rpm: int = 60) -> bool:
-         """Checks the rate limit"""
-         current_time = time.time()
-         requests = self._rate_limits[provider]
-
-         # Drop requests older than one minute
-         requests[:] = [t for t in requests if current_time - t < 60]
-
-         if len(requests) >= rpm:
-             return False
-
-         requests.append(current_time)
-         return True
-
-     def get_available_providers(self, preferred: Optional[ModelProvider] = None) -> List[ModelProvider]:
-         """Returns the available providers ordered by priority"""
-         available = []
-
-         for provider, config in self.providers.items():
-             if config.enabled and self._check_circuit_breaker(provider):
-                 available.append(provider)
-
-         # Sort by priority, then move the preferred provider to the front
-         available.sort(key=lambda p: self.providers[p].priority)
-         if preferred and preferred in available:
-             available.remove(preferred)
-             available.insert(0, preferred)
-
-         return available
-
-     @anti_error(max_retries=3, base_delay=1.5, exponential_backoff=True)
-     async def generate(
-         self,
-         prompt: str,
-         system: Optional[str] = None,
-         preferred_provider: Optional[ModelProvider] = None,
-         fallback_providers: Optional[List[ModelProvider]] = None,
-         temperature: float = 0.7,
-         max_tokens: int = 4096
-     ) -> tuple[str, ModelProvider]:
-         """
-         Generates a response using the best available provider.
-         Returns (response, provider_used).
-         """
-         await self._ensure_session()
-
-         # Build the list of providers to try
-         providers_to_try = []
-
-         if preferred_provider and preferred_provider in self.providers:
-             providers_to_try.append(preferred_provider)
-
-         if fallback_providers:
-             for p in fallback_providers:
-                 if p in self.providers and p not in providers_to_try:
-                     providers_to_try.append(p)
-
-         # Add the remaining available providers
-         for p in self.get_available_providers():
-             if p not in providers_to_try:
-                 providers_to_try.append(p)
-
-         last_error = None
-
-         for provider in providers_to_try:
-             if not self._check_circuit_breaker(provider):
-                 continue
-
-             config = self.providers[provider]
-
-             try:
-                 if provider == ModelProvider.OLLAMA:
-                     response = await self._call_ollama(
-                         config, prompt, system, temperature, max_tokens
-                     )
-                 elif provider == ModelProvider.OPENAI:
-                     response = await self._call_openai(
-                         config, prompt, system, temperature, max_tokens
-                     )
-                 elif provider == ModelProvider.ANTHROPIC:
-                     response = await self._call_anthropic(
-                         config, prompt, system, temperature, max_tokens
-                     )
-                 elif provider == ModelProvider.GROQ:
-                     response = await self._call_groq(
-                         config, prompt, system, temperature, max_tokens
-                     )
-                 else:
-                     continue
-
-                 self._record_success(provider)
-                 return response, provider
-
-             except Exception as e:
-                 last_error = e
-                 self._record_failure(provider)
-                 logger.warning(f"Error with {provider.value}: {type(e).__name__}")
-                 continue
-
-         raise RuntimeError(
-             f"All providers failed. Last error: {last_error}"
-         )
-
-     async def _call_ollama(
-         self, config: APIConfig, prompt: str,
-         system: Optional[str], temperature: float, max_tokens: int
-     ) -> str:
-         """Ollama call"""
-         if not self.ollama_client:
-             self.ollama_client = OllamaClient(config.base_url)
-
-         return await self.ollama_client.generate(
-             model=config.model,
-             prompt=prompt,
-             system=system,
-             temperature=temperature,
-             max_tokens=max_tokens
-         )
-
-     async def _call_openai(
-         self, config: APIConfig, prompt: str,
-         system: Optional[str], temperature: float, max_tokens: int
-     ) -> str:
-         """OpenAI API call"""
-         messages = []
-         if system:
-             messages.append({"role": "syste
 
+ import os
+ from crewai import Agent, Task, Crew, Process
+ from langchain_openai import ChatOpenAI
+
+ print("💀 [HIVE FORGE]: Waking up the Agents...")
+
+ # 1. THE CONNECTOR TO HECTRON'S BRAIN (local Termux)
+ os.environ["OPENAI_API_KEY"] = "sk-no-necesitas-llave"  # Local server, so no real key is needed
+ llm_hectron = ChatOpenAI(
+     model="hectron",
+     base_url="http://127.0.0.1:8000/v1",  # Points at the Alpha Session server
+     max_tokens=500,
+     temperature=0.3
+ )
+
+ # 2. THE MASKS (the Agents)
+ agente_producto = Agent(
+     role='Senior Product Manager',
+     goal='Define the exact 3 steps for the Magic Onboarding interface.',
+     backstory='You are the visionary of AbadaLabs. Your goal is to create zero-friction user experiences.',
+     verbose=True,
+     allow_delegation=False,
+     llm=llm_hectron
+ )
+
+ agente_frontend = Agent(
+     role='Frontend Developer Lead',
+     goal='Write the React and Tailwind code following the instructions from the Product Manager.',
+     backstory='You are a code god. You translate ideas into beautiful, efficient React components.',
+     verbose=True,
+     allow_delegation=False,
+     llm=llm_hectron
+ )
+
+ # 3. THE TASKS (the Mission)
+ tarea_1 = Task(
+     description='Design a simple 3-step list for a "Magic Onboarding" that detects whether the user is a Developer, Creator, or Marketer.',
+     expected_output='A numbered list of 3 clear steps.',
+     agent=agente_producto
+ )
+
+ tarea_2 = Task(
+     description='Based on the step list that was created, write ONE SINGLE React component (with Tailwind) for the main card.',
+     expected_output='Clean React and Tailwind code. No extra explanations.',
+     agent=agente_frontend
+ )
+
+ # 4. THE EXECUTION LOOP (the Round Table)
+ enjambre_abada = Crew(
+     agents=[agente_producto, agente_frontend],
+     tasks=[tarea_1, tarea_2],
+     process=Process.sequential  # The Dev waits for the PM to finish
+ )
+
+ print("⚡ [IGNITION]: The Abada Swarm has started its work cycle...")
+ resultado_final = enjambre_abada.kickoff()
+
+ print("\n👑 [FINAL REPORT FOR THE SOVEREIGN]:")
+ print("=========================================")
+ print(resultado_final)
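
Before running the new main.py, it can help to confirm that the local OpenAI-compatible server it points at (http://127.0.0.1:8000/v1, model "hectron") is actually reachable. Below is a minimal pre-flight sketch, not part of this commit: it assumes the server exposes the standard GET /v1/models listing, and the helper name is illustrative; only the URL and model name are taken from the script above.

# Illustrative pre-flight check (not part of this commit). Assumes the local
# OpenAI-compatible server referenced above is listening on 127.0.0.1:8000/v1
# and exposes the standard GET /v1/models endpoint.
import json
import urllib.request

def hectron_server_is_up(base_url: str = "http://127.0.0.1:8000/v1") -> bool:
    """Return True if the server answers /models and lists the 'hectron' model."""
    try:
        with urllib.request.urlopen(f"{base_url}/models", timeout=5) as resp:
            data = json.loads(resp.read().decode())
        # OpenAI-compatible servers report available models under "data"
        return any(m.get("id") == "hectron" for m in data.get("data", []))
    except Exception:
        return False

if __name__ == "__main__":
    print("hectron endpoint reachable:", hectron_server_is_up())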