# conscious_Ai-2 / enhanced_agent.py
# Source: model-hub upload "Upload 16 files" by Ret, revision 2660a90 (verified)
"""
Enhanced Autonomous Agent with integrated reasoning and neural capabilities.
This module combines the general reasoning engine, neural pipeline, and transformer models
into a single, powerful autonomous agent.
"""
import os
import json
import logging
from typing import Dict, List, Any, Optional, Tuple
from pathlib import Path
import torch
import numpy as np
from datetime import datetime
# Import all necessary components
from agent import CodeAgent
from expert_system import ExpertSystem
from general_reasoning import GeneralReasoningEngine, create_general_reasoning_engine
from neural_pipeline import NeuralPipeline, create_neural_pipeline
from transformer import Transformer, create_transformer_model
# Module-wide logging configuration.
_LOG_FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=_LOG_FORMAT)
logger = logging.getLogger(__name__)
class EnhancedAutonomousAgent:
    """
    Enhanced Autonomous Agent that integrates:
    - General reasoning capabilities
    - Neural pipeline for input processing
    - Transformer models for advanced processing
    - Code execution and learning
    """

    def __init__(self,
                 expert_system: ExpertSystem,
                 code_agent: CodeAgent,
                 knowledge_base_path: str = "knowledge_base",
                 device: Optional[str] = None):
        """
        Initialize the enhanced autonomous agent.

        Args:
            expert_system: Instance of ExpertSystem (LLM-backed problem solver).
            code_agent: Instance of CodeAgent for code execution.
            knowledge_base_path: Path to store knowledge and models.
            device: Device to run models on ('cuda' or 'cpu'); auto-detected
                from CUDA availability when None.
        """
        self.expert_system = expert_system
        self.code_agent = code_agent
        self.device = device or ("cuda" if torch.cuda.is_available() else "cpu")
        self.knowledge_base_path = Path(knowledge_base_path)
        # parents=True so a nested path (e.g. "data/kb") does not raise.
        self.knowledge_base_path.mkdir(parents=True, exist_ok=True)

        # Initialize reasoning engine, neural pipeline and transformer.
        self._initialize_components()

        # Load persisted knowledge and start a fresh per-session trace.
        self.memory = self._load_knowledge()
        self.reasoning_history: List[Dict[str, Any]] = []

        logger.info("EnhancedAutonomousAgent initialized on device: %s", self.device)

    def _initialize_components(self):
        """Initialize all integrated components (reasoning, pipeline, transformer)."""
        # Initialize knowledge base paths first.
        self.model_dir = self.knowledge_base_path / "models"
        self.model_dir.mkdir(parents=True, exist_ok=True)

        # General reasoning engine shares the expert system's TogetherAI client.
        self.reasoning_engine = create_general_reasoning_engine(
            together_ai_client=self.expert_system.together_ai
        )

        # Neural pipeline, with a finetuned model only if one exists on disk.
        finetuned_model_path = str(self.model_dir / "finetuned_model.pt")
        self.neural_pipeline = create_neural_pipeline(
            together_ai=self.expert_system.together_ai,
            finetuned_model_path=finetuned_model_path if os.path.exists(finetuned_model_path) else None
        )

        # Transformer model, moved to the selected device.
        self.transformer = create_transformer_model().to(self.device)

        # Load persisted weights if present.
        self._load_or_initialize_models()

    def _load_or_initialize_models(self):
        """Load transformer weights from disk if present; otherwise keep fresh weights."""
        transformer_path = self.model_dir / "transformer_model.pt"
        if transformer_path.exists():
            self.transformer.load_state_dict(
                torch.load(transformer_path, map_location=self.device)
            )
            logger.info("Loaded transformer model from disk")
        else:
            logger.info("Initialized new transformer model")

    def _save_models(self):
        """Save all neural models to the model directory."""
        torch.save(
            self.transformer.state_dict(),
            self.model_dir / "transformer_model.pt"
        )

    def _load_knowledge(self) -> Dict[str, Any]:
        """Load knowledge from disk, falling back to an empty structure.

        Returns:
            Dict with at least the keys 'solutions', 'patterns',
            'lessons_learned'; persisted entries override the defaults.
        """
        knowledge: Dict[str, Any] = {
            'solutions': {},
            'patterns': {},
            'lessons_learned': []
        }
        knowledge_file = self.knowledge_base_path / 'knowledge.json'
        if knowledge_file.exists():
            with open(knowledge_file, 'r', encoding='utf-8') as f:
                knowledge.update(json.load(f))
        return knowledge

    def _save_knowledge(self):
        """Persist current memory (solutions/patterns/lessons) to knowledge.json."""
        knowledge_file = self.knowledge_base_path / 'knowledge.json'
        with open(knowledge_file, 'w', encoding='utf-8') as f:
            # ensure_ascii=False keeps non-ASCII user input readable in the file.
            json.dump({
                'solutions': self.memory['solutions'],
                'patterns': self.memory['patterns'],
                'lessons_learned': self.memory['lessons_learned'],
                'last_updated': datetime.now().isoformat()
            }, f, indent=2, ensure_ascii=False)

    def process_input(self, user_input: str) -> Dict[str, Any]:
        """
        Process user input through the full pipeline.

        Args:
            user_input: The user's input text.

        Returns:
            Dict containing the response, intermediate pipeline/reasoning
            results, and an ISO timestamp.
        """
        logger.info("Processing input: %s...", user_input[:100])

        # Step 1: Process through neural pipeline.
        pipeline_result = self.neural_pipeline.process_prompt(user_input)

        # Step 2: Apply general reasoning with pipeline output as context.
        reasoning_result = self.reasoning_engine.reason_about_problem(
            user_input,
            context={
                'pipeline_result': pipeline_result,
                'user_history': self.memory.get('user_history', [])
            }
        )

        # Step 3: Generate solution.
        solution = self._generate_solution(user_input, pipeline_result, reasoning_result)

        # Step 4: Update memory and learn.
        self._update_memory(user_input, solution)

        return {
            'response': solution,
            'pipeline_result': pipeline_result,
            'reasoning_result': reasoning_result,
            'timestamp': datetime.now().isoformat()
        }

    def _generate_solution(self,
                           user_input: str,
                           pipeline_result: Dict,
                           reasoning_result: Dict) -> Dict:
        """Generate a solution using the most appropriate method.

        Returns:
            Dict with 'type' ('text' or 'code'/'error'), a result payload,
            and a 'success' flag.
        """
        # Normalize: the reasoning engine may return a bare string conclusion.
        if isinstance(reasoning_result, str):
            reasoning_result = {
                'conclusion': reasoning_result,
                'confidence': 0.8,
                'reasoning_steps': [reasoning_result]
            }

        # If the pipeline suggests code generation, use the code agent.
        if isinstance(pipeline_result, dict) and pipeline_result.get('task_type') == 'code_generation':
            return self._generate_code_solution(user_input, reasoning_result)

        # For general queries, use the reasoning engine's conclusion.
        if isinstance(reasoning_result, dict):
            return {
                'type': 'text',
                'result': reasoning_result.get('conclusion', 'I need more information to help with that.'),
                'success': True
            }

        # Fallback response.
        return {
            'type': 'text',
            'result': 'I encountered an issue processing your request. Please try again.',
            'success': False
        }

    def _generate_code_solution(self, task: str, context: Dict) -> Dict:
        """Generate, execute and log a code solution for *task*.

        Returns:
            Dict with keys 'type'/'code'/'result'/'output'/'success' on the
            normal path, or 'type'/'error'/'success' on failure.
            (Original annotation said ``str``; a dict was always returned.)
        """
        try:
            # Build the code-generation prompt for the expert system.
            # NOTE(review): the original also assigned an unused system_prompt
            # local here; removed since it was never passed anywhere.
            code_prompt = f"""
Task: {task}
Context: {json.dumps(context, indent=2)}
Please generate Python code to solve this task.
"""
            # Use the expert system to generate code.
            response = self.expert_system.solve_problem(
                problem=code_prompt,
                subject='coding',
                temperature=0.2,
                max_tokens=1000
            )
            code = response.get('solution', '').strip()

            # Execute the code and capture the result.
            result, output = self.code_agent.execute_code(code)
            success = result is not None

            # Log the code execution.
            self._log_code_execution(task, code, result, output, success)

            return {
                'type': 'code',
                'code': code,
                'result': str(result) if result is not None else None,
                'output': output,
                'success': success
            }
        except Exception as e:
            # logger.exception preserves the traceback for debugging.
            logger.exception("Error generating code solution: %s", e)
            return {
                'type': 'error',
                'error': str(e),
                'success': False
            }

    def _log_code_execution(self, task: str, code: str, result: Any,
                            output: Any, success: bool) -> None:
        """Log one code-execution attempt.

        NOTE(review): this method was called by _generate_code_solution but
        never defined in the original file, so every code-generation request
        raised AttributeError and surfaced as an error result. This is a
        minimal logging implementation to restore the code path.
        """
        logger.info("Code execution for task %r: success=%s", task[:80], success)
        logger.debug("Generated code:\n%s\nResult: %r\nOutput: %r", code, result, output)

    def _update_memory(self, user_input: str, solution: Dict):
        """Record the interaction in memory and persist it to disk."""
        # Store the solution under a sequential id.
        solution_id = f"sol_{len(self.memory['solutions']) + 1}"
        self.memory['solutions'][solution_id] = {
            'input': user_input,
            'solution': solution,
            'timestamp': datetime.now().isoformat()
        }

        # Update patterns: a simple keyword index of term -> solution ids,
        # built only from successful interactions.
        if solution.get('success', False):
            terms = set(user_input.lower().split())
            for term in terms:
                if len(term) > 3:  # Only consider terms longer than 3 chars.
                    self.memory['patterns'].setdefault(term, []).append(solution_id)

        # Save updates.
        self._save_knowledge()

    def learn_from_experience(self):
        """Learn from past experiences and update models."""
        logger.info("Learning from past experiences...")

        # Feed the reasoning engine patterns that recur across solutions.
        for pattern, solution_ids in self.memory['patterns'].items():
            if len(solution_ids) > 1:  # Only patterns with multiple solutions.
                solutions = [self.memory['solutions'][sid] for sid in solution_ids
                             if sid in self.memory['solutions']]
                if solutions:
                    self.reasoning_engine.update_knowledge(
                        pattern=pattern,
                        examples=solutions,
                        confidence=min(1.0, len(solutions) * 0.1)  # Cap at 1.0.
                    )

        # Save updated models.
        self._save_models()
        logger.info("Learning complete!")
def create_enhanced_agent(expert_system: ExpertSystem,
                          code_agent: CodeAgent,
                          knowledge_base_path: str = "knowledge_base") -> EnhancedAutonomousAgent:
    """Build and return a ready-to-use enhanced autonomous agent.

    Args:
        expert_system: Initialized ExpertSystem instance.
        code_agent: Initialized CodeAgent instance.
        knowledge_base_path: Directory where knowledge and models are stored.

    Returns:
        A fully initialized EnhancedAutonomousAgent.
    """
    agent = EnhancedAutonomousAgent(
        expert_system=expert_system,
        code_agent=code_agent,
        knowledge_base_path=knowledge_base_path,
    )
    return agent