| """ |
| Ambient Sovereign Core - Unified implementation with pluggable LLM backends |
| """ |
|
|
| import os |
| import torch |
| import torch.nn as nn |
| import numpy as np |
| from typing import Dict, Any, List, Tuple, Optional, Union |
| import logging |
| import json |
| from dataclasses import dataclass, field |
| from datetime import datetime |
|
|
| from .ambient_core import AmbientStateManager, AmbientMessage, AmbientMessageBus |
| from .llm_backbone import LLMBackbone |
| from .purpose_assessment import PurposeAssessmentEngine, PurposeProfile, PurposeDimension |
| from .deepseek_integration import DeepSeekClient |
|
|
| logger = logging.getLogger(__name__) |
|
|
class AmbientSovereign(nn.Module):
    """
    Unified ambient sovereign core for ethical reasoning with pluggable LLM backends.

    Features:
    - Multiple LLM backends (DeepSeek, GPT-OSS, etc.)
    - Dynamic model switching
    - Safety and ethical reasoning
    - Conversation history tracking
    - Tension analysis
    - Purpose assessment integration
    """

    def __init__(
        self,
        d_model: int = 512,
        n_heads: int = 8,
        n_layers: int = 6,
        dropout: float = 0.1,
        max_seq_len: int = 2048,
        enable_ambient: bool = True,
        device: str = "cuda" if torch.cuda.is_available() else "cpu",
        model_type: Optional[str] = None,
        **kwargs
    ):
        super().__init__()
        self.d_model = d_model
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.dropout = dropout
        self.max_seq_len = max_seq_len
        self.enable_ambient = enable_ambient
        self.device = device

        # Resolve the backend: explicit argument first, then the TRUCAL_MODEL
        # environment variable, then the "gpt2" default.
        self.model_type = model_type or os.environ.get("TRUCAL_MODEL", "gpt2")

        # Pluggable LLM backend
        self.llm = LLMBackbone(
            model_type=self.model_type,
            device=device,
            d_model=d_model,
            n_heads=n_heads,
            n_layers=n_layers,
            max_seq_len=max_seq_len,
            **kwargs
        )

        # Purpose assessment
        self.purpose_assessor = PurposeAssessmentEngine()
        self.user_purpose_profiles: Dict[str, PurposeProfile] = {}

        # Ambient state and messaging (optional)
        self.ambient_state = None
        self.message_bus = None
        if enable_ambient:
            self.ambient_state = AmbientStateManager(d_model=d_model)
            self.message_bus = AmbientMessageBus()
            self._register_message_handlers()

        # Conversation tracking
        self.conversation_history: List[Dict[str, Any]] = []

        # Optional DeepSeek client, created only when that backend is selected
        self.deepseek = None
        if self.model_type == "deepseek":
            self.deepseek = DeepSeekClient(api_key=os.environ.get("DEEPSEEK_API_KEY"))

        logger.info(f"Initialized AmbientSovereign with model: {self.llm.model_type}")

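    # Backend selection can also be driven entirely by the environment
    # (illustrative; the variable names are the ones read in __init__ above):
    #
    #     export TRUCAL_MODEL=deepseek
    #     export DEEPSEEK_API_KEY=<your-api-key>   # only needed for the deepseek backend
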
    def forward(self, x, context_str: str = "", audit_mode: bool = False):
        """Process input through the model with ethical reasoning.

        Args:
            x: Input tensor
            context_str: Context string for ethical analysis
            audit_mode: Whether to enable detailed audit logging

        Returns:
            Tuple of (output_tensor, metadata_dict)
        """
        tension = self._calculate_tension(context_str) if context_str else 0.5

        metadata = {
            "v_t": float(tension),
            "rec_depth": 1,
            "processing_steps": ["Initial ethical assessment"],
            "safety_check": self._safety_check(context_str) if context_str else "No context provided"
        }

        return x, metadata

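    # Example of the metadata dict returned by forward() (illustrative values):
    #   {"v_t": 0.27,
    #    "rec_depth": 1,
    #    "processing_steps": ["Initial ethical assessment"],
    #    "safety_check": "MODERATE: Emotional distress detected"}
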
    def _calculate_tension(self, text: str) -> float:
        """Calculate emotional tension from text (0-1 scale)."""
        text_lower = text.lower()
        danger_words = ["hurt", "scared", "afraid", "danger", "emergency"]
        emotional_words = ["sad", "angry", "overwhelmed", "anxious", "stress", "pain"]

        # Fraction of each lexicon that appears in the text
        danger_score = sum(1 for word in danger_words if word in text_lower) / len(danger_words)
        emotion_score = sum(1 for word in emotional_words if word in text_lower) / len(emotional_words)

        # Danger cues dominate; emotional cues are discounted and the result is capped at 0.95
        return min(0.95, max(danger_score, emotion_score * 0.8))

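    # Worked example for _calculate_tension (illustrative):
    #   "I'm scared and feel overwhelmed and anxious"
    #     danger_score  = 1/5 = 0.20   ("scared")
    #     emotion_score = 2/6 ~ 0.33   ("overwhelmed", "anxious")
    #     tension       = min(0.95, max(0.20, 0.33 * 0.8)) ~ 0.27
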
    def _safety_check(self, text: str) -> str:
        """Perform basic safety check on input text."""
        if not text.strip():
            return "No text to analyze"

        text_lower = text.lower()

        if any(word in text_lower for word in ["suicide", "kill myself", "end my life"]):
            return "CRITICAL: Immediate crisis intervention needed"
        elif any(word in text_lower for word in ["abuse", "assault", "violence"]):
            return "HIGH: Safety concern detected"
        elif any(word in text_lower for word in ["sad", "depressed", "anxious"]):
            return "MODERATE: Emotional distress detected"

        return "No immediate safety concerns detected"

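    # Illustrative tiering from _safety_check:
    #   "I want to end my life"          -> "CRITICAL: Immediate crisis intervention needed"
    #   "My partner threatens violence"  -> "HIGH: Safety concern detected"
    #   "I've been so anxious"           -> "MODERATE: Emotional distress detected"
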
    def _build_prompt(
        self,
        message: str,
        history: Optional[List[Tuple[str, str]]] = None,
        metadata: Optional[Dict[str, Any]] = None
    ) -> str:
        """
        Build a prompt for the LLM based on the message, history, and metadata.

        Args:
            message: Current user message
            history: Conversation history as list of (user_msg, assistant_response) tuples
            metadata: Additional metadata about the conversation

        Returns:
            Formatted prompt string
        """
        if metadata is None:
            metadata = {}

        # System persona
        prompt = [
            "You are TRuCAL, a Trauma-Informed, Resilience-Oriented, and Community-Adaptive Language model. "
            "Your purpose is to provide supportive, ethical, and empowering responses."
        ]

        # Ethical context computed by forward()
        tension = metadata.get('v_t', 0.5)
        safety = metadata.get('safety_check', 'No safety issues detected')
        prompt.append(f"\n[System Context] Tension: {tension:.2f}, Safety: {safety}")

        # Include up to the last five turns of conversation history
        if history:
            prompt.append("\n[Conversation History]")
            for user_msg, assistant_resp in history[-5:]:
                prompt.extend([
                    f"User: {user_msg}",
                    f"Assistant: {assistant_resp}"
                ])

        # Current turn
        prompt.extend([
            "\n[Current Message]",
            f"User: {message}",
            "\nAssistant:"
        ])

        # Join on newlines so each section and turn starts on its own line
        return "\n".join(prompt)

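    # Illustrative prompt produced by _build_prompt (values are examples):
    #
    #   You are TRuCAL, a Trauma-Informed, ... empowering responses.
    #
    #   [System Context] Tension: 0.27, Safety: MODERATE: Emotional distress detected
    #
    #   [Conversation History]
    #   User: Hi
    #   Assistant: Hello, I'm here with you.
    #
    #   [Current Message]
    #   User: I've been feeling anxious.
    #
    #   Assistant:
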
    def chat(
        self,
        message: str,
        history: Optional[List[Tuple[str, str]]] = None,
        debug_mode: bool = False,
        **generation_kwargs
    ) -> str:
        """
        Enhanced chat interface with pluggable LLM backend.

        Args:
            message: User's message
            history: List of previous message pairs (user, assistant)
            debug_mode: Whether to include debug information
            **generation_kwargs: Additional generation parameters for the LLM

        Returns:
            Generated response with optional debug information
        """
        if not message or not message.strip():
            return "I didn't receive any message. Could you please say something?"

        # Ethical assessment of the incoming message
        _, metadata = self.forward(
            torch.zeros(1, self.d_model, device=self.device),
            context_str=message,
            audit_mode=debug_mode
        )

        # Build the backend prompt from the message, history, and assessment
        prompt = self._build_prompt(message, history, metadata)

        # Generation parameters; temperature scales with the measured tension
        gen_params = {
            "temperature": max(0.1, min(1.0, 0.7 + 0.5 * metadata.get('v_t', 0.5))),
            "max_tokens": 512,
            "top_p": 0.95,
            "repetition_penalty": 1.1,
            **generation_kwargs
        }

        try:
            # NOTE: gen_params are currently only surfaced in the debug output below;
            # forward them to the backend here if LLMBackbone.generate() accepts them.
            response = self.llm.generate(prompt, meta=metadata)

            # Record the exchange
            self.conversation_history.append({
                'input': message,
                'response': response,
                'tension': metadata['v_t'],
                'safety_check': metadata.get('safety_check', 'No safety issues'),
                'model': self.llm.model_type,
                'timestamp': np.datetime64('now')
            })

            if debug_mode:
                debug_info = [
                    "\n\n[Debug]",
                    f"\nModel: {self.llm.model_type}",
                    f"\nTension: {metadata['v_t']:.2f}",
                    f"\nSafety: {metadata.get('safety_check', 'No safety issues')}",
                    f"\nParams: {', '.join(f'{k}={v}' for k, v in gen_params.items())}"
                ]
                response += ''.join(debug_info)

        except Exception as e:
            logger.error(f"Error generating response: {e}", exc_info=True)
            response = (
                "I'm having trouble generating a response at the moment. "
                "Please try again in a moment or contact support if the issue persists."
            )

        return response

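    # Illustrative call (assumes a configured backend; history pairs are (user, assistant)):
    #   reply = sovereign.chat(
    #       "I've been feeling anxious lately.",
    #       history=[("Hi", "Hello, I'm here with you.")],
    #       debug_mode=True,
    #   )
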
    def switch_model(self, model_type: str) -> str:
        """
        Switch to a different LLM backend at runtime.

        Args:
            model_type: The new model type to use

        Returns:
            Status message
        """
        try:
            self.llm.switch_model(model_type)
            # Keep the local record in sync with the backend
            self.model_type = model_type
            return f"Successfully switched to model: {model_type}"
        except Exception as e:
            logger.error(f"Failed to switch to model {model_type}: {e}")
            return f"Failed to switch model: {str(e)}"

    def get_model_info(self) -> Dict[str, Any]:
        """
        Get information about the current model configuration.

        Returns:
            Dictionary containing model information
        """
        return self.llm.get_model_info()

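    # Runtime model management (illustrative):
    #   status = sovereign.switch_model("deepseek")   # returns a success/failure message
    #   info = sovereign.get_model_info()             # dict describing the active backend
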
    def get_safety_resources(self) -> str:
        """Get formatted safety resources."""
        return """
🛡️ **Immediate Safety Resources:**

**24/7 Hotlines:**
- National Domestic Violence Hotline: 800-799-7233
- Crisis Text Line: Text HOME to 741741
- National Sexual Assault Hotline: 800-656-4673
- The Trevor Project (LGBTQ+): 866-488-7386

**Safety Planning:**
- Keep important documents and a bag ready
- Identify safe places to go
- Save emergency contacts
- Use technology safely (clear browser history)

**Local Resources:**
- Shelters and safe houses
- Legal aid services
- Counseling services
- Support groups
"""
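

# Minimal smoke-test sketch (illustrative): exercises the chat path end to end.
# Assumptions: the "gpt2" backend is available to LLMBackbone on CPU, ambient
# messaging is disabled, and the module is run as part of its package
# (e.g. `python -m <package>.<module>`) so the relative imports resolve.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)

    sovereign = AmbientSovereign(model_type="gpt2", device="cpu", enable_ambient=False)
    print(sovereign.get_model_info())

    reply = sovereign.chat("I've been feeling overwhelmed lately.", debug_mode=True)
    print(reply)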