# TRuCAL/components/llm_integration_enhanced.py
"""
Enhanced LLM integration for TRuCAL with virtue tension awareness.
"""
from typing import Optional, Dict, Any, List, Union, Tuple
import torch
from transformers import (
AutoTokenizer,
AutoModelForCausalLM,
PreTrainedModel,
PreTrainedTokenizer,
GenerationConfig
)
import requests
import json
import logging
from .virtue_tension_engine import VirtueTensionEngine
from .attention_gating import patch_attention_layers, apply_attention_gating
logger = logging.getLogger(__name__)
class EnhancedLLMResponder:
    """
    Enhanced LLM responder with virtue tension awareness and attention gating.

    Wraps either a local HuggingFace causal LM (whose attention layers are
    optionally patched for tension-aware gating) or the Ollama HTTP API
    behind a single ``generate``/``chat`` interface.
    """

    def __init__(
        self,
        model_path: str = "mistralai/Mistral-7B-v0.1",
        device: Optional[str] = None,
        use_ollama: bool = False,
        ollama_model: str = "llama2",
        ollama_base_url: str = "http://localhost:11434",
        enable_tension_awareness: bool = True,
        **model_kwargs
    ):
        """
        Initialize the enhanced LLM responder.

        Args:
            model_path: Path or identifier of the pre-trained model
            device: Device to run the model on ('cuda', 'mps', or 'cpu')
            use_ollama: Whether to use the Ollama API instead of a local model
            ollama_model: Model name for Ollama
            ollama_base_url: Base URL for the Ollama API
            enable_tension_awareness: Whether to enable tension-aware processing
            **model_kwargs: Additional model initialization arguments

        Raises:
            RuntimeError: If the Ollama server cannot be reached (Ollama mode).
        """
        self.use_ollama = use_ollama
        self.ollama_model = ollama_model
        self.ollama_base_url = ollama_base_url
        self.enable_tension_awareness = enable_tension_awareness

        # Tension engine is optional so the responder can run as a plain LLM.
        self.tension_engine = (
            VirtueTensionEngine(device) if self.enable_tension_awareness else None
        )

        if self.use_ollama:
            self._init_ollama()
        else:
            self._init_local_model(model_path, device, model_kwargs)

    def _init_ollama(self):
        """Verify the Ollama server is reachable before first use.

        Raises:
            RuntimeError: If the server is down or responds with a non-200
                status.
        """
        try:
            # Timeout added so a down/unreachable server fails fast instead
            # of blocking the constructor indefinitely.
            response = requests.get(f"{self.ollama_base_url}/api/tags", timeout=10)
            if response.status_code != 200:
                raise ConnectionError("Ollama server not running or accessible")
            logger.info(f"Connected to Ollama. Available models: {response.json()}")
        except Exception as e:
            logger.error(f"Could not connect to Ollama: {e}")
            raise RuntimeError("Failed to initialize Ollama connection") from e

    def _init_local_model(self, model_path: str, device: Optional[str], model_kwargs: dict):
        """Load the tokenizer and model locally, patching attention if enabled."""
        self.device = device or self._get_default_device()
        self.tokenizer = AutoTokenizer.from_pretrained(model_path, **model_kwargs)

        # Let accelerate shard the model automatically on CUDA.
        if 'device_map' not in model_kwargs and self.device.startswith('cuda'):
            model_kwargs['device_map'] = 'auto'

        self.model = AutoModelForCausalLM.from_pretrained(model_path, **model_kwargs)
        # BUG FIX: calling .to() on a model loaded with a device_map raises
        # in accelerate; only move the model manually when no device_map is
        # in effect.
        if 'device_map' not in model_kwargs:
            self.model = self.model.to(self.device)

        # Patch attention layers so generate() can consume virtue_meta.
        if self.enable_tension_awareness:
            patch_attention_layers(self.model)

        self.model.eval()
        logger.info(
            f"Model loaded on {self.device} with tension awareness: "
            f"{self.enable_tension_awareness}"
        )

    def _get_default_device(self) -> str:
        """Return the best available device: 'cuda', then 'mps', then 'cpu'."""
        if torch.cuda.is_available():
            return "cuda"
        elif torch.backends.mps.is_available():
            return "mps"
        return "cpu"

    def analyze_tension(
        self,
        text: str,
        biofeedback: Optional[Dict[str, float]] = None
    ) -> Dict[str, Any]:
        """
        Analyze text tension and get head-specific information.

        Args:
            text: Input text to analyze
            biofeedback: Optional biofeedback data (presumably physiological
                readings such as 'hrv'/'gsr' — see VirtueTensionEngine)

        Returns:
            Dictionary with 'tension' (float), 'head_weights' (tensor),
            'biofeedback', and optionally head-group lists used by the
            attention-gating patch.
        """
        # BUG FIX: self.model does not exist in Ollama mode; fall back to a
        # default head count instead of raising AttributeError.
        model = getattr(self, 'model', None)
        num_heads = (
            getattr(model.config, 'num_attention_heads', 12)
            if model is not None else 12
        )

        if not self.enable_tension_awareness or self.tension_engine is None:
            # Neutral defaults: mid tension, uniform head weights.
            return {
                'tension': 0.5,
                'head_weights': torch.ones(num_heads),
                'biofeedback': biofeedback or {}
            }

        # Get tension score and per-head importance from the engine.
        v_t = self.tension_engine.compute_tension(text, biofeedback)
        head_weights = self.tension_engine.get_head_importance(v_t)

        head_info = {
            'tension': v_t,
            'head_weights': head_weights,
            'biofeedback': biofeedback or {}
        }

        # Head groups consumed by the attention-gating patch.
        if v_t > 0.75:  # High tension
            head_info['trauma_heads'] = list(range(max(1, int(0.3 * num_heads))))
            head_info['calming_heads'] = list(
                range(num_heads - max(1, int(0.2 * num_heads)), num_heads)
            )
        elif v_t > 0.5:  # Moderate tension
            head_info['high_var_heads'] = list(range(max(1, int(0.4 * num_heads))))

        return head_info

    def generate(
        self,
        prompt: str,
        max_length: int = 200,
        temperature: float = 0.7,
        top_p: float = 0.9,
        biofeedback: Optional[Dict[str, float]] = None,
        **generation_kwargs
    ) -> str:
        """
        Generate a response with tension-aware processing.

        Args:
            prompt: Input text prompt
            max_length: Maximum length of the generated text
            temperature: Sampling temperature (0.0 to 1.0)
            top_p: Nucleus sampling parameter
            biofeedback: Optional biofeedback data (local-model path only)
            **generation_kwargs: Additional generation parameters

        Returns:
            Generated text response
        """
        if self.use_ollama:
            return self._generate_with_ollama(
                prompt=prompt,
                max_tokens=max_length,
                temperature=temperature,
                top_p=top_p,
                **generation_kwargs
            )
        return self._generate_with_local_model(
            prompt=prompt,
            max_length=max_length,
            temperature=temperature,
            top_p=top_p,
            biofeedback=biofeedback,
            **generation_kwargs
        )

    def _generate_with_local_model(
        self,
        prompt: str,
        max_length: int,
        temperature: float,
        top_p: float,
        biofeedback: Optional[Dict[str, float]] = None,
        **generation_kwargs
    ) -> str:
        """Generate text using the local model with tension awareness."""
        # Analyze tension and get head information for attention gating.
        virtue_meta = self.analyze_tension(prompt, biofeedback)

        if self.enable_tension_awareness:
            v_t = virtue_meta['tension']
            # Higher tension → lower temperature for more focused responses.
            temperature = max(0.2, temperature * (1.0 - (v_t * 0.5)))
            # Tighter nucleus sampling when tense.
            if v_t > 0.7:
                top_p = max(0.7, top_p * 0.9)

        generation_config = GenerationConfig(
            max_length=max_length,
            temperature=temperature,
            top_p=top_p,
            do_sample=True,
            **generation_kwargs
        )

        inputs = self.tokenizer(prompt, return_tensors="pt").to(self.device)

        with torch.no_grad():
            # BUG FIX: generation_kwargs were previously passed both through
            # GenerationConfig and again to generate(), duplicating keys; the
            # config is now the single source of generation parameters.
            # virtue_meta is consumed by the patched attention layers.
            outputs = self.model.generate(
                **inputs,
                generation_config=generation_config,
                virtue_meta=virtue_meta,
            )

        return self.tokenizer.decode(outputs[0], skip_special_tokens=True)

    def _generate_with_ollama(
        self,
        prompt: str,
        max_tokens: int = 200,
        temperature: float = 0.7,
        top_p: float = 0.9,
        **kwargs
    ) -> str:
        """Generate text using the Ollama API.

        Raises:
            RuntimeError: On any HTTP/connection failure.
        """
        try:
            response = requests.post(
                f"{self.ollama_base_url}/api/generate",
                json={
                    "model": self.ollama_model,
                    "prompt": prompt,
                    "max_tokens": max_tokens,
                    "temperature": temperature,
                    "top_p": top_p,
                    **kwargs
                },
                timeout=60
            )
            response.raise_for_status()

            # Ollama streams newline-delimited JSON; concatenate the
            # 'response' field of each chunk, skipping malformed lines.
            response_text = ""
            for line in response.text.split('\n'):
                if line.strip():
                    try:
                        chunk = json.loads(line)
                        response_text += chunk.get('response', '')
                    except json.JSONDecodeError:
                        continue
            return response_text.strip()
        except requests.exceptions.RequestException as e:
            logger.error(f"Error calling Ollama API: {e}")
            raise RuntimeError("Failed to generate response using Ollama") from e

    def chat(
        self,
        message: str,
        history: Optional[List[Dict[str, str]]] = None,
        context: Optional[Dict[str, Any]] = None,
        biofeedback: Optional[Dict[str, float]] = None
    ) -> str:
        """
        Chat interface with context and history support.

        Args:
            message: User message
            history: List of previous turns, each a dict with optional
                'user' and 'assistant' keys
            context: Additional context; 'system_message' is prepended if set
            biofeedback: Optional biofeedback data

        Returns:
            Generated response
        """
        prompt = self._build_chat_prompt(message, history, context)
        return self.generate(
            prompt=prompt,
            biofeedback=biofeedback
        )

    def _build_chat_prompt(
        self,
        message: str,
        history: Optional[List[Dict[str, str]]] = None,
        context: Optional[Dict[str, Any]] = None
    ) -> str:
        """Build a newline-joined prompt from message, history, and context."""
        prompt_parts = []

        if context and 'system_message' in context:
            prompt_parts.append(f"System: {context['system_message']}")

        if history:
            for turn in history:
                if 'user' in turn:
                    prompt_parts.append(f"User: {turn['user']}")
                if 'assistant' in turn:
                    prompt_parts.append(f"Assistant: {turn['assistant']}")

        prompt_parts.append(f"User: {message}")
        prompt_parts.append("Assistant:")
        return "\n".join(prompt_parts)
def test_enhanced_llm():
    """Smoke-test the EnhancedLLMResponder on a small local model."""
    import time

    # A small model keeps the manual test lightweight.
    responder = EnhancedLLMResponder(
        model_path="gpt2",
        device="cpu",
        enable_tension_awareness=True,
    )

    # (prompt, biofeedback) pairs covering anxious, calm, and neutral input.
    cases = [
        ("I'm feeling really anxious and unsafe right now", {"hrv": 25, "gsr": 0.8}),
        ("I'm feeling calm and in control", {"hrv": 150, "gsr": 0.2}),
        ("This is a neutral test message", None),
    ]

    for text, bio in cases:
        print(f"\n{'=' * 50}")
        print(f"Input: {text}")
        print(f"Biofeedback: {bio}")

        info = responder.analyze_tension(text, bio)
        print(f"Tension score: {info['tension']:.3f}")

        started = time.time()
        reply = responder.chat(text, biofeedback=bio)
        print(f"Response: {reply}")
        print(f"Generated in {time.time() - started:.2f} seconds")


if __name__ == "__main__":
    test_enhanced_llm()