Your Name committed on
Commit
abba072
·
1 Parent(s): e2bbbec

Refactor: Remove smoke tests, fix Pi-3.1 API calls, update dependencies

Browse files

- Removed all smoke tests from AI agents
- Fixed Pi-3.1 API calls (removed System type, use Human/Assistant only)
- Updated transformers to 4.57.1 (fixes resume_download warning)
- Updated PyTorch to 2.8.0 (fixes pytree compatibility)
- Improved error handling in LLM wrapper
- Refactored codebase into modular agent structure

agents/__init__.py ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
"""Agents package.

Re-exports the individual agent classes so callers can write
``from agents import MemoryAgent`` instead of importing each submodule.
"""
from .memory_agent import MemoryAgent
from .gemini_agent import GeminiThinkingAgent
from .pi_agent import PiResponseAgent
from .emotional_agent import EmotionalStateAgent
from .azure_agent import AzureTextAnalyticsAgent
from .sentiment_agent import SentimentAgent

# Public API of the package, in the same order as the imports above.
__all__ = [
    'MemoryAgent',
    'GeminiThinkingAgent',
    'PiResponseAgent',
    'EmotionalStateAgent',
    'AzureTextAnalyticsAgent',
    'SentimentAgent'
]
agents/azure_agent.py ADDED
@@ -0,0 +1,64 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Azure Text Analytics Agent - responsible for Azure Text Analytics sentiment analysis"""
2
+ import os
3
+ import sys
4
+ import logging
5
+
6
+ # Add parent directory to path for imports
7
+ sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
8
+ from config import MODEL_CONFIG
9
+
10
class AzureTextAnalyticsAgent:
    """Agent responsible for Azure Text Analytics sentiment analysis"""

    def __init__(self, config=None):
        # Fall back to the module-level MODEL_CONFIG, then an empty dict.
        self.config = config or MODEL_CONFIG or {}
        self.azure_available = False
        self.client = None
        self._initialize()

    def _initialize(self):
        """Initialize Azure Text Analytics client"""
        # The Azure SDK is an optional dependency; degrade gracefully
        # when it is not installed or credentials are missing.
        try:
            from azure.ai.textanalytics import TextAnalyticsClient
            from azure.core.credentials import AzureKeyCredential
        except ImportError:
            logging.warning("[AzureTextAnalyticsAgent] ✗ Azure SDK not installed")
            self.azure_available = False
            return

        key = os.getenv("AZURE_TEXT_ANALYTICS_KEY")
        endpoint = os.getenv("AZURE_TEXT_ANALYTICS_ENDPOINT")
        if not (key and endpoint):
            logging.warning("[AzureTextAnalyticsAgent] ✗ Azure credentials not found")
            self.azure_available = False
            return

        try:
            self.client = TextAnalyticsClient(endpoint=endpoint, credential=AzureKeyCredential(key))
            self.azure_available = True
            logging.info("[AzureTextAnalyticsAgent] ✓ Initialized and ready")
        except Exception as exc:
            logging.warning(f"[AzureTextAnalyticsAgent] Failed to create client: {exc}")
            self.azure_available = False

    def analyze(self, text):
        """Analyze sentiment using Azure Text Analytics"""
        # Signed score convention: positive confidence in (0, 1],
        # negative confidence as a negative value, 0.0 for neutral/mixed.
        if not (self.azure_available and self.client):
            return None
        try:
            doc = self.client.analyze_sentiment(documents=[text])[0]
            scores = doc.confidence_scores
            if doc.sentiment == 'positive':
                return scores.positive
            if doc.sentiment == 'negative':
                return -scores.negative
            return 0.0
        except Exception as exc:
            logging.error(f"[AzureTextAnalyticsAgent] Error: {exc}")
            return None

    def is_ready(self):
        """Check if agent is ready"""
        return self.azure_available
agents/emotional_agent.py ADDED
@@ -0,0 +1,100 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Emotional State Agent - responsible for managing and updating emotional state"""
2
+ import os
3
+ import sys
4
+ import random
5
+ import logging
6
+ import requests
7
+
8
+ # Add parent directory to path for imports
9
+ sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
10
+ from config import MODEL_CONFIG
11
+
12
class EmotionalStateAgent:
    """Agent responsible for managing and updating emotional state.

    Maintains a dict of emotion -> intensity in [0, 1] that is decayed
    and nudged by sentiment on every update, optionally perturbed by
    quantum randomness from the ANU QRNG API.
    """

    def __init__(self, initial_state=None, config=None):
        self.config = config or MODEL_CONFIG or {}
        # Uniform prior over the five tracked emotions.
        self.emotional_state = initial_state or {"joy": 0.2, "sadness": 0.2, "anger": 0.2, "fear": 0.2, "curiosity": 0.2}
        self.learning_rate = 0.05
        self.quantum_random_available = False
        self.quantum_api_key = None
        self._initialize_quantum()

    def _initialize_quantum(self):
        """Initialize quantum randomness availability"""
        quantum_key = os.getenv("ANU_QUANTUM_API_KEY")
        if quantum_key:
            self.quantum_api_key = quantum_key
            self.quantum_random_available = True
            logging.info("[EmotionalStateAgent] ✓ Quantum randomness available")
        else:
            logging.warning("[EmotionalStateAgent] Quantum randomness unavailable")

    def get_quantum_random_float(self, min_val=0.0, max_val=1.0):
        """Get a quantum random float between min_val and max_val.

        Falls back to pseudo-random `random.uniform` when the quantum API
        is unavailable or the HTTP request fails.
        """
        if not self.quantum_random_available:
            return random.uniform(min_val, max_val)

        try:
            quantum_config = self.config.get('quantum', {}) if self.config else {}
            url = quantum_config.get('api_endpoint', 'https://api.quantumnumbers.anu.edu.au')
            headers = {"x-api-key": self.quantum_api_key}
            params = {"length": 1, "type": "uint8"}

            response = requests.get(url, headers=headers, params=params, timeout=10)

            if response.status_code == 200:
                result = response.json()
                if result.get('success') and 'data' in result and len(result['data']) > 0:
                    # Scale the uint8 sample onto [min_val, max_val].
                    normalized = result['data'][0] / 255.0
                    return min_val + (max_val - min_val) * normalized
        except Exception as e:
            logging.warning(f"[EmotionalStateAgent] Quantum API failed: {e}")

        return random.uniform(min_val, max_val)

    def update_with_sentiment(self, sentiment_score):
        """Update emotional state based on sentiment.

        Args:
            sentiment_score: Signed sentiment in roughly [-1, 1]; positive
                values boost joy and reduce sadness, negative the reverse.

        Returns:
            The updated (normalized) emotional-state dict.
        """
        # Enhanced Emotion Update (decay and normalization with quantum randomness)
        decay_factor = 0.9
        if self.quantum_random_available:
            decay_factor = self.get_quantum_random_float(0.85, 0.95)

        for emotion in self.emotional_state:
            # Decay emotions (more realistic fading with quantum variation)
            self.emotional_state[emotion] *= decay_factor
            # Normalize
            self.emotional_state[emotion] = max(0.0, min(1.0, self.emotional_state[emotion]))

        # Apply sentiment with quantum-enhanced learning rate variation
        learning_rate = self.learning_rate
        if self.quantum_random_available:
            learning_rate = self.get_quantum_random_float(0.03, 0.07)

        # BUG FIX: clamp joy/sadness to [0, 1] after applying the sentiment
        # delta. Previously sadness could go negative (e.g. sadness ~ 0 with
        # a positive sentiment), and the re-normalization below would then
        # produce negative "probabilities".
        self.emotional_state["joy"] = max(0.0, min(1.0, self.emotional_state["joy"] + sentiment_score * learning_rate))
        self.emotional_state["sadness"] = max(0.0, min(1.0, self.emotional_state["sadness"] - sentiment_score * learning_rate))

        # Add quantum randomness to curiosity (making responses more unpredictable)
        if self.quantum_random_available:
            quantum_curiosity_boost = self.get_quantum_random_float(-0.05, 0.05)
            self.emotional_state["curiosity"] = max(0.0, min(1.0,
                self.emotional_state["curiosity"] + quantum_curiosity_boost))

        # Re-normalize so the intensities sum to 1 (uniform fallback if all zero).
        total_emotion = sum(self.emotional_state.values())
        for emotion in self.emotional_state:
            self.emotional_state[emotion] = self.emotional_state[emotion] / total_emotion if total_emotion > 0 else 0.2

        logging.info(f"[EmotionalStateAgent] Updated emotional state: {self.emotional_state}")
        return self.emotional_state

    def get_state(self):
        """Get current emotional state (a copy, safe for callers to mutate)."""
        return self.emotional_state.copy()

    def is_ready(self):
        """Check if agent is ready"""
        return True  # Emotional state is always ready
agents/gemini_agent.py ADDED
@@ -0,0 +1,113 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Gemini Thinking Agent - responsible for thinking and analysis using Gemini"""
2
+ import os
3
+ import sys
4
+ import logging
5
+
6
+ # Add parent directory to path for imports
7
+ sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
8
+ from config import MODEL_CONFIG
9
+ from llm_wrapper import LLMWrapper
10
+
11
class GeminiThinkingAgent:
    """Agent responsible for thinking and analysis using Gemini.

    Acts as Galatea's internal reasoning step: given the user's message,
    the current emotional state, recent history, and any retrieved
    memories, it asks the configured Gemini model for a short analysis
    that the response agent can use as context.
    """

    def __init__(self, config=None):
        # Fall back to the module-level MODEL_CONFIG, then an empty dict.
        self.config = config or MODEL_CONFIG or {}
        self.gemini_available = False

        # Get model from config
        gemini_config = self.config.get('gemini', {}) if self.config else {}
        gemini_model = gemini_config.get('model', 'gemini-2.0-flash-exp')

        # Initialize LLM wrapper with the model
        self.llm_wrapper = LLMWrapper(gemini_model=gemini_model, config=self.config)
        self._initialize()

    def _initialize(self):
        """Initialize Gemini API availability.

        Only checks that GEMINI_API_KEY is present in the environment;
        no network call is made here.
        """
        gemini_key = os.getenv("GEMINI_API_KEY")
        if gemini_key:
            self.gemini_available = True
            logging.info("[GeminiThinkingAgent] ✓ Initialized and ready")
        else:
            logging.warning("[GeminiThinkingAgent] ✗ GEMINI_API_KEY not found")

    def think(self, user_input, emotional_state, conversation_history, retrieved_memories=None):
        """Think about and analyze the conversation context.

        Args:
            user_input: The current user message (str).
            emotional_state: Mapping of emotion name -> float intensity.
            conversation_history: List of {"role": ..., "content": ...} dicts;
                roles other than "user" are rendered as "Galatea".
            retrieved_memories: Optional list of memory dicts; each is
                expected to have a 'text' key (only the top 3 are used,
                truncated to 200 characters).

        Returns:
            The model's analysis text on success, or None when Gemini is
            unavailable, the model returns an empty result, or any error
            occurs.
        """
        if not self.gemini_available:
            logging.warning("[GeminiThinkingAgent] Not available")
            return None

        try:
            # Build thinking prompt with conversation context
            emotions_text = ", ".join([f"{emotion}: {value:.2f}" for emotion, value in emotional_state.items()])

            # Prepare conversation context for thinking
            context_summary = ""
            if conversation_history:
                recent_history = conversation_history[-6:]  # Last 3 exchanges
                context_summary = "\nRecent conversation:\n"
                for msg in recent_history:
                    role = "User" if msg["role"] == "user" else "Galatea"
                    context_summary += f"{role}: {msg['content']}\n"

            # Add retrieved memories if available
            memory_context = ""
            if retrieved_memories and len(retrieved_memories) > 0:
                memory_context = "\n\nRelevant memories from past conversations:\n"
                for i, memory in enumerate(retrieved_memories[:3], 1):  # Top 3 most relevant
                    memory_context += f"{i}. {memory['text'][:200]}...\n"

            thinking_prompt = f"""You are the internal reasoning system for Galatea, an AI assistant.

Current emotional state: {emotions_text}
{context_summary}
{memory_context}
Current user message: "{user_input}"

Analyze this conversation and provide:
1. Key insights about what the user is asking or discussing
2. Important context from the conversation history and retrieved memories
3. How Galatea should respond emotionally and contextually
4. Any important details to remember or reference

Keep your analysis concise (2-3 sentences). Focus on what matters for crafting an appropriate response."""

            messages = [
                {"role": "system", "content": "You are an internal reasoning system. Analyze conversations and provide insights."},
                {"role": "user", "content": thinking_prompt}
            ]

            logging.info("[GeminiThinkingAgent] Processing thinking request...")

            # Get hyperparameters from config
            gemini_config = self.config.get('gemini', {}) if self.config else {}
            temperature = gemini_config.get('temperature', 0.5)
            max_tokens = gemini_config.get('max_tokens', 200)

            # Call Gemini model (model is set in wrapper initialization)
            try:
                thinking_result = self.llm_wrapper.call_gemini(
                    messages=messages,
                    temperature=temperature,
                    max_tokens=max_tokens
                )

                if thinking_result and len(thinking_result) > 0:
                    logging.info("[GeminiThinkingAgent] ✓ Thinking completed")
                    return thinking_result
                else:
                    logging.error("[GeminiThinkingAgent] Model returned empty result")
                    return None
            except Exception as e:
                logging.error(f"[GeminiThinkingAgent] Model {self.llm_wrapper.gemini_model} failed: {e}")
                return None

        except Exception as e:
            # Catches failures in prompt assembly itself (e.g. malformed history).
            logging.error(f"[GeminiThinkingAgent] Error: {e}")
            return None

    def is_ready(self):
        """Check if agent is ready"""
        return self.gemini_available
agents/memory_agent.py ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Memory Agent - responsible for memory retrieval and storage"""
2
+ import os
3
+ import sys
4
+ import logging
5
+
6
+ # Add parent directory to path for imports
7
+ sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
8
+ from config import MODEL_CONFIG
9
+
10
class MemoryAgent:
    """Agent responsible for memory retrieval and storage"""

    def __init__(self, memory_system, config=None):
        self.memory_system = memory_system
        self.config = config or MODEL_CONFIG or {}

    def retrieve_memories(self, query, n_results=None):
        """Retrieve relevant memories for a query"""
        # An explicit n_results wins; otherwise read the configured cap.
        if n_results is not None:
            max_memories = n_results
        else:
            retrieval_cfg = self.config.get('memory', {}).get('retrieval', {}) if self.config else {}
            max_memories = retrieval_cfg.get('max_retrieved_memories', 5)

        try:
            found = self.memory_system.retrieve_relevant_memories(query, n_results=max_memories)
            if found:
                logging.info(f"[MemoryAgent] Retrieved {len(found)} relevant memories")
                return found
        except Exception as exc:
            logging.error(f"[MemoryAgent] Error retrieving memories: {exc}")
        # Empty result set and failures both yield an empty list.
        return []

    def store_memory(self, text, metadata=None, memory_type="conversation"):
        """Store a memory"""
        try:
            self.memory_system.store_memory(text, metadata, memory_type)
            logging.info(f"[MemoryAgent] Stored memory: {memory_type}")
        except Exception as exc:
            logging.error(f"[MemoryAgent] Error storing memory: {exc}")

    def is_ready(self):
        """Check if memory agent is ready"""
        if not self.memory_system:
            return False
        return self.memory_system.is_ready()
agents/pi_agent.py ADDED
@@ -0,0 +1,99 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Pi Response Agent - responsible for generating human-facing responses using Pi-3.1"""
2
+ import os
3
+ import sys
4
+ import logging
5
+
6
+ # Add parent directory to path for imports
7
+ sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
8
+ from config import MODEL_CONFIG
9
+ from llm_wrapper import LLMWrapper
10
+
11
class PiResponseAgent:
    """Agent responsible for generating human-facing responses using Pi-3.1.

    Because the Inflection AI API only accepts "Human" and "Assistant"
    message types (no "System"), all system-style instructions are folded
    into the final Human message.
    """

    def __init__(self, config=None):
        # Fall back to the module-level MODEL_CONFIG, then an empty dict.
        self.config = config or MODEL_CONFIG or {}
        self.inflection_ai_available = False

        # Get model from config
        inflection_config = self.config.get('inflection_ai', {}) if self.config else {}
        inflection_model = inflection_config.get('model', 'Pi-3.1')

        # Initialize LLM wrapper with the model
        self.llm_wrapper = LLMWrapper(inflection_model=inflection_model, config=self.config)
        self._initialize()

    def _initialize(self):
        """Initialize Inflection AI API availability.

        Only checks that INFLECTION_AI_API_KEY is present in the
        environment; no network call is made here.
        """
        inflection_key = os.getenv("INFLECTION_AI_API_KEY")
        if inflection_key:
            self.inflection_ai_available = True
            logging.info("[PiResponseAgent] ✓ Initialized and ready")
        else:
            logging.warning("[PiResponseAgent] ✗ INFLECTION_AI_API_KEY not found")

    def respond(self, user_input, emotional_state, thinking_context=None, conversation_history=None, retrieved_memories=None):
        """Generate response using Pi-3.1 with thinking context and emotional state.

        Args:
            user_input: The current user message (str).
            emotional_state: Mapping of emotion name -> float intensity.
            thinking_context: Optional analysis text (e.g. from the Gemini agent).
            conversation_history: Optional list of {"role", "content"} dicts.
            retrieved_memories: Optional list of memory dicts with a 'text'
                key (top 3 used, truncated to 100 characters each).

        Returns:
            The model's reply text, or None when the API is unavailable or
            the call fails.
        """
        if not self.inflection_ai_available:
            logging.warning("[PiResponseAgent] Not available")
            return None

        try:
            # Create context with emotional state
            emotions_text = ", ".join([f"{emotion}: {value:.2f}" for emotion, value in emotional_state.items()])

            # Build comprehensive context - Inflection AI API only accepts "Human" and "Assistant" types
            # We'll incorporate system instructions into the first Human message
            context_parts = []

            # Build system instructions as part of the user input context
            system_instructions = f"[Context: You are Galatea, an AI assistant. Emotional state: {emotions_text}. "

            # Add thinking context from Gemini if available
            if thinking_context:
                system_instructions += f"Internal analysis: {thinking_context}. "

            # Add retrieved memories if available
            if retrieved_memories and len(retrieved_memories) > 0:
                memory_text = "Relevant memories: "
                for i, memory in enumerate(retrieved_memories[:3], 1):  # Top 3 most relevant
                    memory_text += f"{i}. {memory['text'][:100]}; "
                system_instructions += memory_text

            system_instructions += "Keep response concise (under 50 words) and reflect emotional state.]"

            # Add conversation history as context messages (Human/Assistant only)
            if conversation_history and len(conversation_history) > 0:
                # Include recent conversation history
                for msg in conversation_history[-6:]:  # Last 3 exchanges (6 messages)
                    context_parts.append({
                        "text": msg["content"],
                        "type": "Human" if msg["role"] == "user" else "Assistant"
                    })

            # Add current user input with system context prepended
            enhanced_user_input = f"{system_instructions}\n\n{user_input}"
            context_parts.append({
                "text": enhanced_user_input,
                "type": "Human"
            })

            logging.info("[PiResponseAgent] Sending request to Pi-3.1 API...")
            # Model is set in wrapper initialization
            response = self.llm_wrapper.call_inflection_ai(context_parts)

            if response:
                logging.info("[PiResponseAgent] ✓ Response received")
                return response
            else:
                logging.error("[PiResponseAgent] API call failed")
                return None

        except Exception as e:
            logging.error(f"[PiResponseAgent] Error: {e}")
            return None

    def is_ready(self):
        """Check if agent is ready"""
        return self.inflection_ai_available
agents/sentiment_agent.py ADDED
@@ -0,0 +1,94 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Sentiment Agent - responsible for sentiment analysis (uses Azure, Hugging Face, or NLTK fallback)"""
2
+ import os
3
+ import sys
4
+ import logging
5
+
6
+ # Add parent directory to path for imports
7
+ sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
8
+ from config import MODEL_CONFIG
9
+ from agents.azure_agent import AzureTextAnalyticsAgent
10
+
11
+ # Import transformers with error handling
12
+ try:
13
+ from transformers import pipeline
14
+ transformers_available = True
15
+ except ImportError:
16
+ logging.warning("Transformers library not available. Using fallback sentiment analysis.")
17
+ transformers_available = False
18
+
19
class SentimentAgent:
    """Agent responsible for sentiment analysis (uses Azure, Hugging Face, or NLTK fallback)"""

    def __init__(self, config=None):
        self.config = config or MODEL_CONFIG or {}
        self.azure_agent = AzureTextAnalyticsAgent(config=self.config)
        self.sentiment_analyzer = None
        self.ready = False
        self._initialize()

    def _initialize(self):
        """Initialize sentiment analyzer"""
        # Azure takes priority when its credentials are configured.
        if self.azure_agent.is_ready():
            self.ready = True
            logging.info("[SentimentAgent] Using Azure Text Analytics")
            return

        default_model = 'distilbert/distilbert-base-uncased-finetuned-sst-2-english'
        if self.config:
            sentiment_model = self.config.get('sentiment', {}).get('primary_model', default_model)
        else:
            sentiment_model = default_model

        if not transformers_available:
            # NLTK VADER fallback needs no setup.
            self.ready = True
            return

        try:
            logging.info("[SentimentAgent] Initializing Hugging Face sentiment analyzer...")
            self.sentiment_analyzer = pipeline("sentiment-analysis", model=sentiment_model)
            logging.info("[SentimentAgent] ✓ Initialized successfully")
        except Exception as exc:
            logging.warning(f"[SentimentAgent] Hugging Face model failed: {exc}, using fallback")
            self.sentiment_analyzer = None
        self.ready = True

    def analyze(self, text):
        """Analyze sentiment of text (tries Azure, then Hugging Face, then NLTK)"""
        # Priority 1: Azure, when configured and it returns a usable score.
        if self.azure_agent.is_ready():
            azure_score = self.azure_agent.analyze(text)
            if azure_score is not None:
                return azure_score

        # Priority 2: the Hugging Face pipeline, when it loaded.
        if not self.sentiment_analyzer:
            return self._fallback_analyze(text)

        try:
            prediction = self.sentiment_analyzer(text)[0]
            label = prediction['label'].lower()
            score = prediction['score']
        except Exception as exc:
            logging.error(f"[SentimentAgent] Error: {exc}")
            return self._fallback_analyze(text)

        # Signed score: positive confidence, negative confidence negated,
        # 0.0 for anything else.
        if 'positive' in label:
            return score
        if 'negative' in label:
            return -score
        return 0.0

    def _fallback_analyze(self, text):
        """Fallback sentiment analysis using NLTK VADER"""
        try:
            from nltk.sentiment import SentimentIntensityAnalyzer
            # Compound score is already in [-1, 1].
            return SentimentIntensityAnalyzer().polarity_scores(text)['compound']
        except Exception as exc:
            logging.error(f"[SentimentAgent] Fallback failed: {exc}")
            return 0.0

    def is_ready(self):
        """Check if agent is ready"""
        return self.ready
app.py CHANGED
@@ -76,6 +76,8 @@ initializing = False
76
  gemini_initialized = False
77
  max_init_retries = 3
78
  current_init_retry = 0
 
 
79
 
80
  # Check for required environment variables
81
  required_env_vars = ['GEMINI_API_KEY']
@@ -119,13 +121,70 @@ def initialize_gemini():
119
  logging.error(f"Error initializing Gemini API: {e}")
120
  return False
121
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
122
  def initialize_components():
123
- """Initialize Galatea components"""
124
  global galatea_ai, dialogue_engine, avatar_engine, is_initialized, initializing
125
- global current_init_retry, gemini_initialized
126
 
127
  if initializing or is_initialized:
128
  return
 
 
 
 
 
 
 
 
 
 
 
 
 
 
129
 
130
  if missing_gemini_key:
131
  logging.error("Initialization aborted: GEMINI_API_KEY missing")
@@ -136,31 +195,69 @@ def initialize_components():
136
 
137
  try:
138
  # Import here to avoid circular imports and ensure errors are caught
139
- from import_random import GalateaAI, DialogueEngine, AvatarEngine
 
 
140
 
141
  # Initialize components
 
 
 
 
142
  galatea_ai = GalateaAI()
143
  dialogue_engine = DialogueEngine(galatea_ai)
144
  avatar_engine = AvatarEngine()
145
  avatar_engine.update_avatar(galatea_ai.emotional_state)
146
 
147
- # Try to initialize Gemini specifically
148
- gemini_initialized = initialize_gemini()
149
 
150
- is_initialized = True
151
- logging.info(f"Galatea components initialized successfully. Gemini status: {gemini_initialized}")
152
- logging.info(f"Emotions initialized: {galatea_ai.emotional_state}")
153
- except Exception as e:
154
- logging.error(f"Error initializing Galatea: {e}")
155
- print(f"Error initializing Galatea: {e}")
 
 
 
 
 
156
 
157
- # Retry logic for initialization failures
158
- current_init_retry += 1
159
- if current_init_retry < max_init_retries:
160
- logging.info(f"Retrying initialization (attempt {current_init_retry}/{max_init_retries})...")
161
- time.sleep(2) # Wait before retrying
162
- initializing = False
163
- Thread(target=initialize_components).start()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
164
  finally:
165
  initializing = False
166
 
@@ -168,9 +265,13 @@ def initialize_components():
168
  def home():
169
  # Add error handling for template rendering
170
  try:
171
- # Start initialization in background if not already started
 
 
 
 
172
  if not is_initialized and not initializing and not missing_gemini_key:
173
- Thread(target=initialize_components).start()
174
 
175
  return render_template('index.html')
176
  except Exception as e:
@@ -179,24 +280,21 @@ def home():
179
 
180
  @app.route('/api/chat', methods=['POST'])
181
  def chat():
182
- # Check if components are initialized
 
 
 
 
 
 
 
 
183
  if missing_gemini_key:
184
  return jsonify({
185
  'error': 'GEMINI_API_KEY is missing. Chat is unavailable.',
186
- 'status': 'missing_gemini_key'
187
- }), 503
188
-
189
- if not is_initialized:
190
- # Start initialization if not already started
191
- if not initializing and not missing_gemini_key:
192
- Thread(target=initialize_components).start()
193
-
194
- return jsonify({
195
- 'response': 'I am still initializing. Please try again in a moment.',
196
- 'avatar_shape': 'Circle',
197
- 'emotions': {'joy': 0.2, 'sadness': 0.2, 'anger': 0.2, 'fear': 0.2, 'curiosity': 0.2},
198
  'is_initialized': False
199
- })
200
 
201
  data = request.json
202
  user_input = data.get('message', '')
@@ -208,6 +306,15 @@ def chat():
208
  # Process the message through Galatea
209
  response = dialogue_engine.get_response(user_input)
210
 
 
 
 
 
 
 
 
 
 
211
  # Update avatar
212
  avatar_engine.update_avatar(galatea_ai.emotional_state)
213
  avatar_shape = avatar_engine.avatar_model
@@ -223,12 +330,24 @@ def chat():
223
  'emotions': emotions,
224
  'is_initialized': True
225
  })
 
 
 
 
 
 
 
 
 
226
  except Exception as e:
227
- logging.error(f"Error processing chat: {e}")
228
- return jsonify({
229
- 'error': 'Failed to process your message',
230
- 'details': str(e)
231
- }), 500
 
 
 
232
 
233
  # Import Azure Text Analytics with fallback to NLTK VADER
234
  try:
@@ -439,16 +558,56 @@ def availability():
439
  @app.route('/api/is_initialized')
440
  def is_initialized_endpoint():
441
  """Lightweight endpoint for polling initialization progress"""
442
- payload = {
443
- 'is_initialized': is_initialized,
444
- 'initializing': initializing,
445
- 'missing_gemini_key': missing_gemini_key
446
- }
447
-
448
  if missing_gemini_key:
449
- payload['error_page'] = url_for('error_page')
450
-
451
- return jsonify(payload)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
452
 
453
  @app.route('/status')
454
  def status():
@@ -468,7 +627,19 @@ def error_page():
468
 
469
  if __name__ == '__main__':
470
  print("Starting Galatea Web Interface...")
471
- print("The chatbot will initialize in the background when first accessed.")
 
 
 
 
 
 
 
 
 
 
 
 
472
 
473
  # Add debug logs for avatar shape changes
474
  logging.info("Avatar system initialized with default shape.")
@@ -476,5 +647,8 @@ if __name__ == '__main__':
476
  # Get port from environment variable (for Hugging Face Spaces compatibility)
477
  port = int(os.environ.get('PORT', 7860))
478
 
 
 
 
479
  # Bind to 0.0.0.0 for external access (required for Hugging Face Spaces)
480
- app.run(host='0.0.0.0', port=port, debug=False)
 
76
  gemini_initialized = False
77
  max_init_retries = 3
78
  current_init_retry = 0
79
+ init_script_running = False
80
+ init_script_complete = False
81
 
82
  # Check for required environment variables
83
  required_env_vars = ['GEMINI_API_KEY']
 
121
  logging.error(f"Error initializing Gemini API: {e}")
122
  return False
123
 
124
def run_init_script():
    """Run the initialization script in parallel"""
    global init_script_running, init_script_complete

    # Re-entrancy guard: never run twice, and never again after completion.
    if init_script_running or init_script_complete:
        return

    init_script_running = True
    logging.info("=" * 70)
    logging.info("RUNNING PARALLEL INITIALIZATION SCRIPT")
    logging.info("=" * 70)

    try:
        import subprocess
        import sys

        # Launch the initialization script as a child interpreter.
        script_path = os.path.join(os.path.dirname(__file__), 'initialize_galatea.py')
        completed = subprocess.run(
            [sys.executable, script_path],
            capture_output=True,
            text=True,
            timeout=300,  # 5 minute timeout
        )

        if completed.returncode == 0:
            logging.info("✓ Initialization script completed successfully")
        else:
            logging.error(f"✗ Initialization script failed with code {completed.returncode}")
            logging.error(f"Error output: {completed.stderr}")
            # Still mark as complete to allow app to continue
        init_script_complete = True
    except subprocess.TimeoutExpired:
        logging.error("✗ Initialization script timed out")
        init_script_complete = True
    except Exception as e:
        logging.error(f"✗ Error running initialization script: {e}")
        init_script_complete = True
    finally:
        init_script_running = False
        logging.info("=" * 70)
+
167
  def initialize_components():
168
+ """Initialize Galatea components (runs after init script completes)"""
169
  global galatea_ai, dialogue_engine, avatar_engine, is_initialized, initializing
170
+ global current_init_retry, gemini_initialized, init_script_complete
171
 
172
  if initializing or is_initialized:
173
  return
174
+
175
+ # Wait for initialization script to complete (poll every 2 seconds)
176
+ max_wait_time = 300 # 5 minutes
177
+ wait_start = time.time()
178
+ while not init_script_complete:
179
+ elapsed = time.time() - wait_start
180
+ if elapsed > max_wait_time:
181
+ logging.warning("Initialization script timeout - proceeding anyway")
182
+ break
183
+ logging.info(f"Waiting for initialization script to complete... ({elapsed:.0f}s)")
184
+ time.sleep(2)
185
+
186
+ if not init_script_complete:
187
+ logging.warning("Proceeding with component initialization despite init script not completing")
188
 
189
  if missing_gemini_key:
190
  logging.error("Initialization aborted: GEMINI_API_KEY missing")
 
195
 
196
  try:
197
  # Import here to avoid circular imports and ensure errors are caught
198
+ from galatea_ai import GalateaAI
199
+ from dialogue import DialogueEngine
200
+ from avatar import AvatarEngine
201
 
202
  # Initialize components
203
+ logging.info("=" * 60)
204
+ logging.info("INITIALIZING GALATEA AI SYSTEM")
205
+ logging.info("=" * 60)
206
+
207
  galatea_ai = GalateaAI()
208
  dialogue_engine = DialogueEngine(galatea_ai)
209
  avatar_engine = AvatarEngine()
210
  avatar_engine.update_avatar(galatea_ai.emotional_state)
211
 
212
+ # Check if all components are fully initialized
213
+ init_status = galatea_ai.get_initialization_status()
214
 
215
+ logging.info("=" * 60)
216
+ logging.info("INITIALIZATION STATUS")
217
+ logging.info("=" * 60)
218
+ logging.info(f"Memory System (JSON): {init_status['memory_system']}")
219
+ logging.info(f"Sentiment Analyzer: {init_status['sentiment_analyzer']}")
220
+ logging.info(f"Models Ready: {init_status['models']}")
221
+ logging.info(f" - Gemini available: {init_status['gemini_available']}")
222
+ logging.info(f" - Inflection AI available: {init_status['inflection_ai_available']}")
223
+ logging.info(f"API Keys Valid: {init_status['api_keys']}")
224
+ logging.info(f"Fully Initialized: {init_status['fully_initialized']}")
225
+ logging.info("=" * 60)
226
 
227
+ # CRITICAL: Only mark as initialized if ALL components are ready
228
+ # If any component fails, EXIT the application immediately
229
+ if init_status['fully_initialized']:
230
+ is_initialized = True
231
+ logging.info("✓ Galatea AI system fully initialized and ready")
232
+ logging.info(f"Emotions initialized: {galatea_ai.emotional_state}")
233
+ else:
234
+ logging.error("=" * 60)
235
+ logging.error("❌ INITIALIZATION FAILED - EXITING APPLICATION")
236
+ logging.error("=" * 60)
237
+ logging.error("One or more critical components failed to initialize:")
238
+ if not init_status['memory_system']:
239
+ logging.error(" ✗ Memory System (JSON) - FAILED")
240
+ if not init_status['sentiment_analyzer']:
241
+ logging.error(" ✗ Sentiment Analyzer - FAILED")
242
+ if not init_status['models']:
243
+ logging.error(" ✗ Models - FAILED")
244
+ if not init_status['api_keys']:
245
+ logging.error(" ✗ API Keys - FAILED")
246
+ logging.error("=" * 60)
247
+ logging.error("EXITING APPLICATION - All systems must be operational")
248
+ logging.error("=" * 60)
249
+ import sys
250
+ sys.exit(1) # Exit immediately - no retries, no partial functionality
251
+ except Exception as e:
252
+ logging.error("=" * 60)
253
+ logging.error(f"❌ CRITICAL ERROR INITIALIZING GALATEA: {e}")
254
+ logging.error("=" * 60)
255
+ logging.error("EXITING APPLICATION - Cannot continue with initialization failure")
256
+ logging.error("=" * 60)
257
+ print(f"CRITICAL ERROR: {e}")
258
+ print("Application exiting due to initialization failure")
259
+ import sys
260
+ sys.exit(1) # Exit immediately - no retries
261
  finally:
262
  initializing = False
263
 
 
265
  def home():
266
  # Add error handling for template rendering
267
  try:
268
+ # Start initialization script in background if not already started
269
+ if not init_script_complete and not init_script_running:
270
+ Thread(target=run_init_script, daemon=True).start()
271
+
272
+ # Start component initialization after init script (will wait if script not done)
273
  if not is_initialized and not initializing and not missing_gemini_key:
274
+ Thread(target=initialize_components, daemon=True).start()
275
 
276
  return render_template('index.html')
277
  except Exception as e:
 
280
 
281
  @app.route('/api/chat', methods=['POST'])
282
  def chat():
283
+ # CRITICAL: Do not allow chat if system is not fully initialized
284
+ if not is_initialized:
285
+ return jsonify({
286
+ 'error': 'System is not initialized yet. Please wait for initialization to complete.',
287
+ 'is_initialized': False,
288
+ 'status': 'initializing'
289
+ }), 503 # Service Unavailable
290
+
291
+ # Check if API key is missing
292
  if missing_gemini_key:
293
  return jsonify({
294
  'error': 'GEMINI_API_KEY is missing. Chat is unavailable.',
295
+ 'status': 'missing_gemini_key',
 
 
 
 
 
 
 
 
 
 
 
296
  'is_initialized': False
297
+ }), 503
298
 
299
  data = request.json
300
  user_input = data.get('message', '')
 
306
  # Process the message through Galatea
307
  response = dialogue_engine.get_response(user_input)
308
 
309
+ # CRITICAL: If response is None, Pi-3.1 failed - exit application
310
+ if response is None:
311
+ error_msg = "CRITICAL: Pi-3.1 (PHI) model failed to generate response. Application cannot continue."
312
+ logging.error("=" * 60)
313
+ logging.error(error_msg)
314
+ logging.error("=" * 60)
315
+ import sys
316
+ sys.exit(1) # Exit immediately
317
+
318
  # Update avatar
319
  avatar_engine.update_avatar(galatea_ai.emotional_state)
320
  avatar_shape = avatar_engine.avatar_model
 
330
  'emotions': emotions,
331
  'is_initialized': True
332
  })
333
+ except RuntimeError as e:
334
+ # CRITICAL: RuntimeError means a system failure - exit application
335
+ error_msg = f"CRITICAL SYSTEM FAILURE: {e}"
336
+ logging.error("=" * 60)
337
+ logging.error(error_msg)
338
+ logging.error("EXITING APPLICATION")
339
+ logging.error("=" * 60)
340
+ import sys
341
+ sys.exit(1) # Exit immediately
342
  except Exception as e:
343
+ # Any other exception is also critical - exit application
344
+ error_msg = f"CRITICAL ERROR processing chat: {e}"
345
+ logging.error("=" * 60)
346
+ logging.error(error_msg)
347
+ logging.error("EXITING APPLICATION")
348
+ logging.error("=" * 60)
349
+ import sys
350
+ sys.exit(1) # Exit immediately
351
 
352
  # Import Azure Text Analytics with fallback to NLTK VADER
353
  try:
 
558
@app.route('/api/is_initialized')
def is_initialized_endpoint():
    """Lightweight endpoint for polling initialization progress"""
    # The missing-API-key case carries a different payload shape
    # (error_page link, no message), so it is answered separately.
    if missing_gemini_key:
        return jsonify({
            'is_initialized': False,
            'initializing': True,
            'missing_gemini_key': True,
            'error_page': url_for('error_page'),
            'status': 'missing_api_key'
        })

    # Map the module-level flags onto a single (status, message, ready) triple.
    if init_script_running:
        status, message, ready = 'running_init_script', 'Running parallel initialization...', False
    elif initializing:
        status, message, ready = 'initializing_components', 'Initializing AI components...', False
    elif is_initialized:
        status, message, ready = 'ready', 'System ready', True
    else:
        status, message, ready = 'waiting', 'Waiting for initialization...', False

    return jsonify({
        'is_initialized': ready,
        'initializing': not ready,
        'missing_gemini_key': False,
        'status': status,
        'message': message
    })
 
612
  @app.route('/status')
613
  def status():
 
627
 
628
  if __name__ == '__main__':
629
  print("Starting Galatea Web Interface...")
630
+ print("Initialization will begin automatically when the app starts.")
631
+
632
+ # Start initialization script immediately when app starts
633
+ logging.info("=" * 70)
634
+ logging.info("STARTING GALATEA AI APPLICATION")
635
+ logging.info("=" * 70)
636
+ logging.info("Launching parallel initialization script...")
637
+
638
+ # Start initialization script in background thread
639
+ Thread(target=run_init_script, daemon=True).start()
640
+
641
+ # Start component initialization (will wait for init script)
642
+ Thread(target=initialize_components, daemon=True).start()
643
 
644
  # Add debug logs for avatar shape changes
645
  logging.info("Avatar system initialized with default shape.")
 
647
  # Get port from environment variable (for Hugging Face Spaces compatibility)
648
  port = int(os.environ.get('PORT', 7860))
649
 
650
+ logging.info(f"Flask server starting on port {port}...")
651
+ logging.info("Frontend will poll /api/is_initialized for status")
652
+
653
  # Bind to 0.0.0.0 for external access (required for Hugging Face Spaces)
654
+ app.run(host='0.0.0.0', port=port, debug=True)
avatar.py ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
"""Avatar Engine - manages avatar representation based on emotional state"""
from enum import Enum


class AvatarShape(Enum):
    """Shape vocabulary the avatar can take on."""
    CIRCLE = "Circle"
    TRIANGLE = "Triangle"
    SQUARE = "Square"


class AvatarEngine:
    """Maps an emotional state onto a simple geometric avatar."""

    def __init__(self):
        # Default shape shown before any emotional update arrives.
        self.avatar_model = "Circle"
        self.expression_parameters = {}

    def update_avatar(self, emotional_state):
        """Recompute the avatar shape from the 'joy' and 'sadness' levels."""
        self.avatar_model = self.change_avatar_shape(
            emotional_state["joy"], emotional_state["sadness"]
        )

    def change_avatar_shape(self, joy, sad):
        """Pick a shape: joyful -> circle, sad -> triangle, otherwise square."""
        if joy > 0.5:
            return AvatarShape.CIRCLE.value
        if sad > 0.5:
            return AvatarShape.TRIANGLE.value
        return AvatarShape.SQUARE.value

    def render_avatar(self):
        """Simple console rendering of the current avatar state."""
        print(f"Avatar shape: {self.avatar_model}")
check_models.py DELETED
@@ -1,48 +0,0 @@
1
- import os
2
- import logging
3
- from dotenv import load_dotenv
4
- import google.generativeai as genai
5
-
6
- # Configure logging
7
- logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
8
-
9
- # Load environment variables
10
- load_dotenv()
11
-
12
- def check_available_models():
13
- """Check and print all available Gemini models"""
14
- try:
15
- # Get API key from environment variable
16
- api_key = os.getenv("GEMINI_API_KEY")
17
-
18
- if not api_key:
19
- print("Gemini API key not found in environment variables.")
20
- api_key = input("Enter your Gemini API key: ")
21
-
22
- # Configure the Gemini API
23
- genai.configure(api_key=api_key)
24
-
25
- # List available models
26
- print("Fetching available models...")
27
- models = genai.list_models()
28
-
29
- print("\n===== AVAILABLE GOOGLE AI MODELS =====")
30
- for model in models:
31
- print(f"- {model.name}")
32
-
33
- print("\n===== RECOMMENDED MODELS TO USE =====")
34
- for model in models:
35
- if "gemini-1.5" in model.name:
36
- print(f"✓ {model.name}")
37
-
38
- return [model.name for model in models]
39
-
40
- except Exception as e:
41
- print(f"Error checking models: {e}")
42
- return []
43
-
44
- if __name__ == "__main__":
45
- check_available_models()
46
- print("\nYou can use any of these models in your application.")
47
- print("To fix your application, update the model name in initialize_gemini() method.")
48
- print("Example usage: self.gemini_model = genai.GenerativeModel('models/gemini-1.5-flash')")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
check_website.py DELETED
@@ -1,129 +0,0 @@
1
- import os
2
- import sys
3
- import subprocess
4
-
5
- def check_python_imports():
6
- """Check if required Python packages are installed"""
7
- required_packages = ['flask', 'dotenv', 'nltk', 'transformers', 'google-generativeai']
8
- missing_packages = []
9
-
10
- for package in required_packages:
11
- try:
12
- if package == 'dotenv':
13
- __import__('python_dotenv')
14
- else:
15
- __import__(package)
16
- except ImportError:
17
- missing_packages.append(package)
18
-
19
- return missing_packages
20
-
21
- def check_file_structure():
22
- """Check if all required files and directories exist"""
23
- expected_files = [
24
- 'app.py',
25
- 'import_random.py',
26
- 'templates/index.html',
27
- 'static/css/style.css',
28
- 'static/js/script.js',
29
- '.env'
30
- ]
31
-
32
- missing_files = []
33
- for file_path in expected_files:
34
- if not os.path.exists(file_path):
35
- missing_files.append(file_path)
36
-
37
- return missing_files
38
-
39
- def check_env_file():
40
- """Check if .env file has GEMINI_API_KEY"""
41
- if not os.path.exists('.env'):
42
- return False
43
-
44
- with open('.env', 'r') as f:
45
- content = f.read()
46
-
47
- return 'GEMINI_API_KEY' in content
48
-
49
- def main():
50
- print("=== Galatea Website Troubleshooter ===\n")
51
-
52
- # Check Python version
53
- python_version = sys.version.split()[0]
54
- print(f"Python Version: {python_version}")
55
-
56
- # Check required packages
57
- print("\nChecking required packages...")
58
- missing_packages = check_python_imports()
59
- if missing_packages:
60
- print("❌ The following packages need to be installed:")
61
- for package in missing_packages:
62
- install_name = 'python-dotenv' if package == 'dotenv' else package
63
- print(f" - {install_name}")
64
- print("\nInstall them using: pip install package-name")
65
- else:
66
- print("✅ All required packages are installed.")
67
-
68
- # Check file structure
69
- print("\nChecking file structure...")
70
- missing_files = check_file_structure()
71
- if missing_files:
72
- print("❌ The following files/directories are missing:")
73
- for file_path in missing_files:
74
- print(f" - {file_path}")
75
- else:
76
- print("✅ All required files and directories exist.")
77
-
78
- # Check .env file
79
- print("\nChecking environment variables...")
80
- if check_env_file():
81
- print("✅ GEMINI_API_KEY found in .env file.")
82
- else:
83
- print("❌ GEMINI_API_KEY not found in .env file.")
84
- print(" Create a .env file with: GEMINI_API_KEY=your_api_key_here")
85
-
86
- print("\n=== Conclusion ===")
87
- if not missing_packages and not missing_files and check_env_file():
88
- print("✅ Everything looks good! The website should work correctly.")
89
- print(" Run 'python app.py' to start the server.")
90
- print(" Then open http://127.0.0.1:5000 in your browser.")
91
- else:
92
- print("❌ Some issues were found that need to be addressed before the website will work.")
93
-
94
- print("\nWould you like to try fixing these issues automatically? (y/n)")
95
- choice = input("> ")
96
-
97
- if choice.lower() == 'y':
98
- # Install missing packages
99
- if missing_packages:
100
- print("\nInstalling missing packages...")
101
- for package in missing_packages:
102
- install_name = 'python-dotenv' if package == 'dotenv' else package
103
- print(f"Installing {install_name}...")
104
- subprocess.run([sys.executable, '-m', 'pip', 'install', install_name])
105
-
106
- # Create missing directories
107
- missing_dirs = set()
108
- for file_path in missing_files:
109
- dir_path = os.path.dirname(file_path)
110
- if dir_path and not os.path.exists(dir_path):
111
- missing_dirs.add(dir_path)
112
-
113
- for dir_path in missing_dirs:
114
- print(f"Creating directory: {dir_path}")
115
- os.makedirs(dir_path, exist_ok=True)
116
-
117
- # Create .env file if missing
118
- if not check_env_file():
119
- print("\nCreating .env file...")
120
- api_key = input("Enter your Gemini API Key: ")
121
- with open('.env', 'w') as f:
122
- f.write(f"GEMINI_API_KEY={api_key}\n")
123
-
124
- print("\nFixes applied. Run 'python app.py' to start the server.")
125
-
126
- input("\nPress Enter to exit...")
127
-
128
- if __name__ == "__main__":
129
- main()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
config.py ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
"""Configuration loading module"""
import os
import yaml
import logging

# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

def load_model_config(config_path="models.yaml"):
    """Load model configuration from YAML file.

    Returns the parsed mapping, or None when the file is absent or unreadable.
    """
    try:
        if not os.path.exists(config_path):
            logging.warning(f"⚠ Model configuration file {config_path} not found, using defaults")
            return None
        with open(config_path, 'r', encoding='utf-8') as f:
            parsed = yaml.safe_load(f)
        logging.info(f"✓ Model configuration loaded from {config_path}")
        return parsed
    except Exception as e:
        logging.error(f"✗ Error loading model configuration: {e}")
        return None

# Load configuration once at import time so every module shares one copy.
MODEL_CONFIG = load_model_config()
dialogue.py ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
"""Dialogue Engine - handles conversation flow and styling"""


class DialogueEngine:
    """Thin conversation layer: forwards input to the AI core and styles the reply."""

    def __init__(self, ai_core):
        self.ai_core = ai_core
        # Retained so other components can run sentiment analysis on it.
        self.last_user_message = ""

    def get_response(self, user_input):
        """Return the core's reply to *user_input*, passed through styling."""
        self.last_user_message = user_input
        raw_reply = self.ai_core.process_input(user_input)
        return self.apply_style(raw_reply, self.ai_core.emotional_state)

    def apply_style(self, text, emotional_state):
        """Apply the emotion-derived style to *text*.

        Currently a pass-through: the style is computed but no suffix is
        appended, which keeps responses cleaner.
        """
        style = self.get_style(emotional_state)  # computed for parity; unused for now
        return text

    def get_style(self, emotional_state):
        """Determine a style label from the AI's emotional state."""
        return "neutral"
galatea_ai.py ADDED
@@ -0,0 +1,416 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
"""Main GalateaAI class - orchestrates all agents"""
import os
import sys
import nltk
import logging

# Add current directory to path for imports
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
from config import MODEL_CONFIG
from systems import MemorySystem
from agents import (
    MemoryAgent, GeminiThinkingAgent, PiResponseAgent,
    EmotionalStateAgent, SentimentAgent
)

# Ensure the NLTK punkt tokenizer data is present. The lookup raises
# LookupError only when the data is missing, so we download in that case
# alone — the original code additionally ran an unconditional download on
# every import, hitting the network each time for no benefit.
try:
    nltk.data.find("tokenizers/punkt")
except LookupError:
    nltk.download('punkt', quiet=True)
25
+ class GalateaAI:
26
+ def __init__(self):
27
+ # Load model configuration first
28
+ self.config = MODEL_CONFIG or {}
29
+
30
+ self.knowledge_base = {}
31
+ self.response_model = "A generic response" #Place Holder for the ML model
32
+
33
+ # Conversation history for context
34
+ self.conversation_history = [] # List of {"role": "user"/"assistant", "content": "..."}
35
+ # Get max history length from config or use default
36
+ self.max_history_length = self.config.get('conversation', {}).get('max_history_length', 20)
37
+
38
+ # Initialize memory system
39
+ logging.info("Initializing memory system (JSON)...")
40
+ try:
41
+ self.memory_system = MemorySystem(config=self.config)
42
+ self.memory_system_ready = self.memory_system.is_ready()
43
+ if not self.memory_system_ready:
44
+ raise Exception("Memory system failed to initialize")
45
+ logging.info("✓ Memory system initialized")
46
+ except Exception as e:
47
+ logging.error(f"Failed to initialize memory system: {e}")
48
+ self.memory_system_ready = False
49
+ raise
50
+
51
+ # Initialize agents
52
+ logging.info("Initializing agents...")
53
+ self.memory_agent = MemoryAgent(self.memory_system, config=self.config)
54
+ self.gemini_agent = GeminiThinkingAgent(config=self.config)
55
+ self.pi_agent = PiResponseAgent(config=self.config)
56
+ self.emotional_agent = EmotionalStateAgent(config=self.config)
57
+ self.sentiment_agent = SentimentAgent(config=self.config)
58
+
59
+ # Run end-to-end chat simulation test - CRITICAL: Tests full workflow as if in a real chat
60
+ logging.info("Running end-to-end chat simulation test...")
61
+ self._run_chat_simulation_test()
62
+
63
+ # Track initialization status
64
+ self.memory_system_ready = self.memory_agent.is_ready()
65
+ self.sentiment_analyzer_ready = self.sentiment_agent.is_ready()
66
+ self.models_ready = self.gemini_agent.is_ready() or self.pi_agent.is_ready()
67
+ self.api_keys_valid = self.gemini_agent.is_ready() or self.pi_agent.is_ready()
68
+
69
+ # CRITICAL: Verify all critical systems are ready, raise exception if not
70
+ if not self.memory_system_ready:
71
+ raise RuntimeError("Memory system failed to initialize - application cannot continue")
72
+ if not self.sentiment_analyzer_ready:
73
+ raise RuntimeError("Sentiment analyzer failed to initialize - application cannot continue")
74
+ if not self.models_ready:
75
+ raise RuntimeError("No AI models available (Gemini or Pi-3.1) - application cannot continue")
76
+ if not self.api_keys_valid:
77
+ raise RuntimeError("API keys are invalid or missing - application cannot continue")
78
+ if not self.pi_agent.is_ready():
79
+ raise RuntimeError("Pi-3.1 (PHI) model is not available - application cannot continue")
80
+ if not self.gemini_agent.is_ready():
81
+ raise RuntimeError("Gemini model is not available - application cannot continue")
82
+
83
+ # Legacy compatibility
84
+ self.gemini_available = self.gemini_agent.is_ready()
85
+ self.inflection_ai_available = self.pi_agent.is_ready()
86
+ self.quantum_random_available = self.emotional_agent.quantum_random_available
87
+
88
+ logging.info("✓ All agents initialized and verified")
89
+
90
+ def _run_chat_simulation_test(self):
91
+ """Run a full end-to-end chat simulation test - simulates real chat interaction"""
92
+ logging.info("=" * 60)
93
+ logging.info("RUNNING END-TO-END CHAT SIMULATION TEST")
94
+ logging.info("=" * 60)
95
+
96
+ test_messages = [
97
+ "Hello, how are you?",
98
+ "What can you help me with?",
99
+ "Tell me something interesting."
100
+ ]
101
+
102
+ test_results = {
103
+ 'sentiment_analysis': False,
104
+ 'emotional_state_update': False,
105
+ 'memory_retrieval': False,
106
+ 'gemini_thinking': False,
107
+ 'pi_response': False,
108
+ 'full_workflow': False
109
+ }
110
+
111
+ try:
112
+ # Test with first message
113
+ test_input = test_messages[0]
114
+ logging.info(f"[Chat Simulation] Testing with message: '{test_input}'")
115
+
116
+ # Step 1: Test sentiment analysis
117
+ try:
118
+ sentiment_score = self.sentiment_agent.analyze(test_input)
119
+ if sentiment_score is not None and isinstance(sentiment_score, (int, float)):
120
+ test_results['sentiment_analysis'] = True
121
+ logging.info(f"[Chat Simulation] ✓ Sentiment analysis: {sentiment_score:.3f}")
122
+ else:
123
+ raise RuntimeError("Sentiment analysis returned invalid result")
124
+ except Exception as e:
125
+ logging.error(f"[Chat Simulation] ✗ Sentiment analysis failed: {e}")
126
+ raise RuntimeError(f"Sentiment analysis failed during chat simulation: {e}")
127
+
128
+ # Step 2: Test emotional state update
129
+ try:
130
+ initial_state = self.emotional_agent.get_state().copy()
131
+ self.emotional_agent.update_with_sentiment(sentiment_score)
132
+ updated_state = self.emotional_agent.get_state()
133
+ if updated_state and isinstance(updated_state, dict) and len(updated_state) > 0:
134
+ test_results['emotional_state_update'] = True
135
+ logging.info(f"[Chat Simulation] ✓ Emotional state updated: {updated_state}")
136
+ else:
137
+ raise RuntimeError("Emotional state update returned invalid state")
138
+ except Exception as e:
139
+ logging.error(f"[Chat Simulation] ✗ Emotional state update failed: {e}")
140
+ raise RuntimeError(f"Emotional state update failed during chat simulation: {e}")
141
+
142
+ # Step 3: Test memory retrieval
143
+ try:
144
+ keywords = self.extract_keywords(test_input)
145
+ retrieved_memories = self.memory_agent.retrieve_memories(test_input)
146
+ if retrieved_memories is not None:
147
+ test_results['memory_retrieval'] = True
148
+ logging.info(f"[Chat Simulation] ✓ Memory retrieval: {len(retrieved_memories)} memories found")
149
+ else:
150
+ raise RuntimeError("Memory retrieval returned None")
151
+ except Exception as e:
152
+ logging.error(f"[Chat Simulation] ✗ Memory retrieval failed: {e}")
153
+ raise RuntimeError(f"Memory retrieval failed during chat simulation: {e}")
154
+
155
+ # Step 4: Test Gemini thinking
156
+ try:
157
+ current_emotional_state = self.emotional_agent.get_state()
158
+ thinking_context = self.gemini_agent.think(
159
+ test_input,
160
+ current_emotional_state,
161
+ self.conversation_history,
162
+ retrieved_memories=retrieved_memories
163
+ )
164
+ if thinking_context and len(thinking_context) > 0:
165
+ test_results['gemini_thinking'] = True
166
+ logging.info(f"[Chat Simulation] ✓ Gemini thinking: {thinking_context[:100]}...")
167
+ else:
168
+ raise RuntimeError("Gemini thinking returned empty or None")
169
+ except Exception as e:
170
+ logging.error(f"[Chat Simulation] ✗ Gemini thinking failed: {e}")
171
+ raise RuntimeError(f"Gemini thinking failed during chat simulation: {e}")
172
+
173
+ # Step 5: Test Pi-3.1 response generation
174
+ try:
175
+ response = self.pi_agent.respond(
176
+ test_input,
177
+ current_emotional_state,
178
+ thinking_context=thinking_context,
179
+ conversation_history=self.conversation_history,
180
+ retrieved_memories=retrieved_memories
181
+ )
182
+ if response and len(response) > 0:
183
+ test_results['pi_response'] = True
184
+ logging.info(f"[Chat Simulation] ✓ Pi-3.1 response: {response[:100]}...")
185
+ else:
186
+ raise RuntimeError("Pi-3.1 response returned empty or None")
187
+ except Exception as e:
188
+ logging.error(f"[Chat Simulation] ✗ Pi-3.1 response failed: {e}")
189
+ raise RuntimeError(f"Pi-3.1 response generation failed during chat simulation: {e}")
190
+
191
+ # Step 6: Test full workflow using process_input
192
+ try:
193
+ # Reset conversation history for clean test
194
+ original_history = self.conversation_history.copy()
195
+ self.conversation_history = []
196
+
197
+ full_response = self.process_input(test_input)
198
+ if full_response and len(full_response) > 0:
199
+ test_results['full_workflow'] = True
200
+ logging.info(f"[Chat Simulation] ✓ Full workflow test: {full_response[:100]}...")
201
+ else:
202
+ raise RuntimeError("Full workflow test returned empty or None")
203
+
204
+ # Restore conversation history
205
+ self.conversation_history = original_history
206
+ except Exception as e:
207
+ logging.error(f"[Chat Simulation] ✗ Full workflow test failed: {e}")
208
+ raise RuntimeError(f"Full workflow test failed during chat simulation: {e}")
209
+
210
+ # Print summary
211
+ logging.info("=" * 60)
212
+ logging.info("CHAT SIMULATION TEST SUMMARY")
213
+ logging.info("=" * 60)
214
+ for test_name, result in test_results.items():
215
+ status = "✓ PASSED" if result else "✗ FAILED"
216
+ logging.info(f"{status} - {test_name.upper().replace('_', ' ')}")
217
+ logging.info("=" * 60)
218
+
219
+ # CRITICAL: Verify all tests passed
220
+ if not all(test_results.values()):
221
+ failed_tests = [name for name, result in test_results.items() if not result]
222
+ error_msg = f"CRITICAL: Chat simulation tests failed for: {', '.join(failed_tests).upper()}. Application cannot continue."
223
+ logging.error("=" * 60)
224
+ logging.error(error_msg)
225
+ logging.error("=" * 60)
226
+ raise RuntimeError(error_msg)
227
+
228
+ logging.info("✓ All chat simulation tests passed - system ready for production use")
229
+
230
+ except RuntimeError:
231
+ # Re-raise RuntimeError as-is (already has proper error message)
232
+ raise
233
+ except Exception as e:
234
+ error_msg = f"CRITICAL: Chat simulation test failed with unexpected error: {e}"
235
+ logging.error("=" * 60)
236
+ logging.error(error_msg)
237
+ logging.error("=" * 60)
238
+ raise RuntimeError(error_msg)
239
+
240
+ def _check_pre_initialization(self):
241
+ """Check if components were pre-initialized by initialize_galatea.py"""
242
+ # Check if JSON memory exists
243
+ if os.path.exists("./memory.json"):
244
+ logging.info("✓ Pre-initialized JSON memory detected")
245
+ return True
246
+
247
+ return False
248
+
249
+ def is_fully_initialized(self):
250
+ """Check if all components are fully initialized"""
251
+ return (
252
+ self.memory_system_ready and
253
+ self.sentiment_analyzer_ready and
254
+ self.models_ready and
255
+ self.api_keys_valid
256
+ )
257
+
258
+ def get_initialization_status(self):
259
+ """Get detailed initialization status"""
260
+ return {
261
+ "memory_system": self.memory_system_ready,
262
+ "sentiment_analyzer": self.sentiment_analyzer_ready,
263
+ "models": self.models_ready,
264
+ "api_keys": self.api_keys_valid,
265
+ "gemini_available": self.gemini_agent.is_ready() if hasattr(self, 'gemini_agent') else False,
266
+ "inflection_ai_available": self.pi_agent.is_ready() if hasattr(self, 'pi_agent') else False,
267
+ "azure_text_analytics_available": self.sentiment_agent.azure_agent.is_ready() if hasattr(self, 'sentiment_agent') else False,
268
+ "fully_initialized": self.is_fully_initialized()
269
+ }
270
+
271
+ @property
272
+ def emotional_state(self):
273
+ """Get current emotional state from EmotionalStateAgent"""
274
+ return self.emotional_agent.get_state() if hasattr(self, 'emotional_agent') else {"joy": 0.2, "sadness": 0.2, "anger": 0.2, "fear": 0.2, "curiosity": 0.2}
275
+
276
+ def update_conversation_history(self, user_input, assistant_response):
277
+ """Update conversation history, maintaining max length"""
278
+ # Add user message
279
+ self.conversation_history.append({"role": "user", "content": user_input})
280
+ # Add assistant response
281
+ self.conversation_history.append({"role": "assistant", "content": assistant_response})
282
+
283
+ # Trim history if too long
284
+ if len(self.conversation_history) > self.max_history_length:
285
+ # Keep the most recent messages
286
+ self.conversation_history = self.conversation_history[-self.max_history_length:]
287
+
288
+ def _store_important_memory(self, user_input, assistant_response, intent, keywords):
289
+ """Store important conversation snippets in memory system"""
290
+ try:
291
+ # Determine if this conversation is worth storing
292
+ # Store if: question, contains important keywords, or is a significant exchange
293
+ should_store = False
294
+ memory_type = "conversation"
295
+
296
+ if intent == "question":
297
+ should_store = True
298
+ memory_type = "question"
299
+ elif len(keywords) > 3: # Substantial conversation
300
+ should_store = True
301
+ elif any(keyword in ["remember", "important", "note", "save"] for keyword in keywords):
302
+ should_store = True
303
+ memory_type = "important"
304
+
305
+ if should_store:
306
+ # Create a memory entry combining user input and response
307
+ memory_text = f"User: {user_input}\nGalatea: {assistant_response}"
308
+
309
+ metadata = {
310
+ "intent": intent,
311
+ "keywords": keywords[:5], # Top 5 keywords
312
+ "emotions": {k: round(v, 2) for k, v in self.emotional_agent.get_state().items()}
313
+ }
314
+
315
+ # Store in memory system
316
+ self.memory_system.store_memory(
317
+ text=memory_text,
318
+ metadata=metadata,
319
+ memory_type=memory_type
320
+ )
321
+ logging.info(f"Stored important memory: {memory_type} - {user_input[:50]}...")
322
+ except Exception as e:
323
+ logging.error(f"Error storing memory: {e}")
324
+
325
+ def is_thinking_mode(self, intent, user_input, keywords):
326
+ """Determine if the request requires thinking mode (use Gemini for complex reasoning)"""
327
+ # Always use thinking mode now - Gemini always thinks, Pi-3.1 always responds
328
+ return True
329
+
330
+ def process_input(self, user_input):
331
+ """Process user input through the agent chain workflow: PHI(GEMINI(User inputs, read with past memory), emotionalstate)"""
332
+ # Step 1: Analyze sentiment
333
+ sentiment_score = self.sentiment_agent.analyze(user_input)
334
+
335
+ # Step 2: Extract keywords and determine intent
336
+ keywords = self.extract_keywords(user_input)
337
+ intent = self.determine_intent(user_input)
338
+
339
+ # Step 3: Update emotional state based on sentiment
340
+ self.emotional_agent.update_with_sentiment(sentiment_score)
341
+ current_emotional_state = self.emotional_agent.get_state()
342
+
343
+ # Step 4: Retrieve memories
344
+ retrieved_memories = self.memory_agent.retrieve_memories(user_input)
345
+
346
+ # Step 5: Chain workflow: PHI(GEMINI(User inputs, read with past memory), emotionalstate)
347
+ # Step 5a: GEMINI(User inputs, read with past memory)
348
+ thinking_context = self.gemini_agent.think(
349
+ user_input,
350
+ current_emotional_state,
351
+ self.conversation_history,
352
+ retrieved_memories=retrieved_memories
353
+ )
354
+
355
+ # Step 5b: PHI(GEMINI result, emotionalstate)
356
+ response = self.pi_agent.respond(
357
+ user_input,
358
+ current_emotional_state,
359
+ thinking_context=thinking_context,
360
+ conversation_history=self.conversation_history,
361
+ retrieved_memories=retrieved_memories
362
+ )
363
+
364
+ # CRITICAL: Pi-3.1 (PHI) model must generate response - raise exception if it fails
365
+ if not response:
366
+ error_msg = "[GalateaAI] CRITICAL: Pi-3.1 (PHI) model failed to generate response. Application cannot continue."
367
+ logging.error("=" * 60)
368
+ logging.error(error_msg)
369
+ logging.error("=" * 60)
370
+ raise RuntimeError(error_msg)
371
+
372
+ # Update conversation history
373
+ self.update_conversation_history(user_input, response)
374
+
375
+ # Store important memories
376
+ self._store_important_memory(user_input, response, intent, keywords)
377
+
378
+ # Update knowledge base
379
+ self.update_knowledge(keywords, user_input)
380
+
381
+ return response
382
+
383
+ def extract_keywords(self, text):
384
+ try:
385
+ # Try using NLTK's tokenizer
386
+ tokens = nltk.word_tokenize(text)
387
+ keywords = [word.lower() for word in tokens if word.isalnum()]
388
+ return keywords
389
+ except Exception:
390
+ # Fall back to a simple split-based approach if NLTK fails
391
+ words = text.split()
392
+ # Clean up words (remove punctuation)
393
+ keywords = [word.lower().strip('.,!?;:()[]{}""\'') for word in words]
394
+ # Filter out empty strings
395
+ keywords = [word for word in keywords if word and word.isalnum()]
396
+ return keywords
397
+
398
+ def determine_intent(self, text):
399
+ # More comprehensive intent recognition (using keywords)
400
+ text = text.lower()
401
+ if "what" in text or "how" in text or "why" in text:
402
+ return "question"
403
+ elif "thank" in text:
404
+ return "gratitude"
405
+ elif "goodbye" in text or "bye" in text:
406
+ return "farewell"
407
+ else:
408
+ return "statement"
409
+
410
+
411
+ def update_knowledge(self, keywords, user_input):
412
+ #for new key words remember them
413
+ for keyword in keywords:
414
+ if keyword not in self.knowledge_base:
415
+ self.knowledge_base[keyword] = user_input
416
+
import random.py DELETED
File without changes
import_random.py CHANGED
@@ -1,9 +1,13 @@
1
  import random
2
  import nltk
3
  import os
 
 
4
  from dotenv import load_dotenv
5
- import google.generativeai as genai
6
  import logging
 
 
 
7
 
8
  # Configure logging
9
  logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
@@ -11,6 +15,25 @@ logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(
11
  # Load environment variables from .env file
12
  load_dotenv()
13
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
14
  # Download NLTK data (only needs to be done once)
15
  try:
16
  nltk.data.find("tokenizers/punkt")
@@ -30,36 +53,848 @@ except ImportError:
30
 
31
  from enum import Enum
32
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
33
  # --- 1. AI Core ---
34
  class GalateaAI:
35
  def __init__(self):
36
- self.emotional_state = {"joy": 0.2, "sadness": 0.2, "anger": 0.2, "fear": 0.2, "curiosity": 0.2}
 
 
37
  self.knowledge_base = {}
38
- self.learning_rate = 0.05 # Reduced learning rate
39
  self.response_model = "A generic response" #Place Holder for the ML model
40
 
41
- # Initialize sentiment analyzer with fallback
42
- self.initialize_sentiment_analyzer()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
43
 
44
- # Initialize Gemini API
45
- self.initialize_gemini()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
46
 
47
  def initialize_sentiment_analyzer(self):
48
  """Initialize sentiment analysis with fallback options"""
 
 
 
 
49
  if transformers_available:
50
  try:
51
  logging.info("Attempting to initialize Hugging Face sentiment analyzer")
52
  # Try to initialize the pipeline with specific parameters
53
  self.sentiment_analyzer = pipeline(
54
  "sentiment-analysis",
55
- model="distilbert/distilbert-base-uncased-finetuned-sst-2-english"
56
  )
57
- logging.info("Hugging Face sentiment analyzer loaded successfully")
 
58
  except Exception as e:
59
  logging.error(f"Failed to initialize Hugging Face sentiment analyzer: {e}")
60
  self.sentiment_analyzer = None
 
 
 
61
  else:
62
  self.sentiment_analyzer = None
 
 
63
 
64
  def analyze_sentiment(self, text):
65
  # Use Hugging Face if available
@@ -92,102 +927,407 @@ class GalateaAI:
92
 
93
  return max(-1.0, min(1.0, sentiment_score)) # Clamp between -1 and 1
94
 
95
- def initialize_gemini(self):
96
- """Initialize the Gemini API with API key from .env file"""
97
- self.gemini_available = False # Default to False
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
98
 
99
  try:
100
- # Get API key from environment variable
101
- api_key = os.getenv("GEMINI_API_KEY")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
102
 
103
- logging.info(f"Checking for GEMINI_API_KEY... Found: {bool(api_key)}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
104
 
105
- if not api_key:
106
- # Log error and fail gracefully (no input prompt for web deployment)
107
- logging.error("GEMINI_API_KEY not found in environment variables.")
108
- logging.error("Please set GEMINI_API_KEY in your environment or .env file")
109
- logging.error("Bot will use fallback responses only.")
110
- return
111
 
112
- # Configure the Gemini API
113
- logging.info("Configuring Gemini API...")
114
- genai.configure(api_key=api_key)
 
 
 
 
 
115
 
116
- # Use Gemini 2.0 Flash (latest and fastest model)
117
- try:
118
- # Try Gemini 2.0 Flash first (newest model)
119
- preferred_models = [
120
- "gemini-2.0-flash-exp",
121
- "gemini-2.0-flash",
122
- "gemini-1.5-flash-latest",
123
- "gemini-1.5-flash",
124
- ]
125
-
126
- model_name = None
127
- last_error = None
128
-
129
- for model in preferred_models:
130
- try:
131
- logging.info(f"Attempting to initialize with model: {model}")
132
- self.gemini_model = genai.GenerativeModel(model)
133
-
134
- # Test the model with a simple prompt
135
- logging.info(f"Testing {model} with a simple prompt...")
136
- test_response = self.gemini_model.generate_content("Hello")
137
-
138
- if hasattr(test_response, 'text') and test_response.text:
139
- logging.info(f"✓ Test response received: {test_response.text[:50]}...")
140
- model_name = model
141
- self.gemini_available = True
142
- logging.info(f"✓ Gemini API initialized successfully with model: {model_name}")
143
- print(f"✓ Gemini API initialized successfully with model: {model_name}")
144
- break
145
- else:
146
- logging.warning(f"Model {model} returned empty response")
147
- continue
148
-
149
- except Exception as e:
150
- last_error = e
151
- logging.warning(f"Model {model} failed: {e}")
152
- continue
153
-
154
- if not model_name:
155
- raise Exception(f"All models failed. Last error: {last_error}")
156
 
157
- except Exception as e:
158
- logging.error(f"Error initializing Gemini model: {e}")
159
- logging.error(f"Full error: {type(e).__name__}: {str(e)}")
160
- logging.error("Bot will use fallback responses only.")
161
- self.gemini_available = False
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
162
 
 
 
 
163
  except Exception as e:
164
- logging.error(f"Failed to initialize Gemini API: {e}")
165
- logging.error("Bot will use fallback responses only.")
166
- print(f"✗ Failed to initialize Gemini API: {e}")
167
- self.gemini_available = False
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
168
 
169
  def process_input(self, user_input):
170
- sentiment_score = self.analyze_sentiment(user_input)
 
 
 
 
171
  keywords = self.extract_keywords(user_input)
172
  intent = self.determine_intent(user_input)
173
-
174
- # Enhanced Emotion Update (decay and normalization)
175
- for emotion in self.emotional_state:
176
- # Decay emotions (more realistic fading)
177
- self.emotional_state[emotion] *= 0.9 # Decay by 10% each turn
178
- # Normalize
179
- self.emotional_state[emotion] = max(0.0, min(1.0, self.emotional_state[emotion]))
180
-
181
- self.emotional_state["joy"] += sentiment_score * self.learning_rate
182
- self.emotional_state["sadness"] -= sentiment_score * self.learning_rate
183
-
184
- # Re-normalize
185
- total_emotion = sum(self.emotional_state.values())
186
- for emotion in self.emotional_state:
187
- self.emotional_state[emotion] = self.emotional_state[emotion] / total_emotion if total_emotion > 0 else 0.2
188
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
189
  self.update_knowledge(keywords, user_input)
190
- response = self.generate_response(intent, keywords, self.emotional_state, user_input)
 
191
  return response
192
 
193
  def extract_keywords(self, text):
@@ -217,70 +1357,133 @@ class GalateaAI:
217
  else:
218
  return "statement"
219
 
220
- def generate_response(self, intent, keywords, emotional_state, original_input):
221
- # Try to use Gemini API if available
222
- if hasattr(self, 'gemini_available') and self.gemini_available:
223
- try:
224
- # Create a prompt that includes emotional context and intent
225
- emotions_text = ", ".join([f"{emotion}: {value:.2f}" for emotion, value in emotional_state.items()])
226
-
227
- # Create a character prompt for Gemini
228
- prompt = f"""
229
- You are Galatea, an AI assistant with the following emotional state:
230
- {emotions_text}
231
-
232
- User input: "{original_input}"
233
-
234
- Respond in character as Galatea. Keep your response concise (under 50 words) and reflect your emotional state in your tone.
235
- If you're feeling more joy, be more enthusiastic. If sad, be more melancholic.
236
- """
237
-
238
- logging.info("Sending request to Gemini API")
239
- # Get response from Gemini with safety settings
240
- generation_config = {
241
- "temperature": 0.7,
242
- "top_p": 0.8,
243
- "top_k": 40
244
- }
245
-
246
- response = self.gemini_model.generate_content(
247
- prompt,
248
- generation_config=generation_config
249
- )
250
 
251
- # Check if response is valid and return it
252
- if response and hasattr(response, 'text'):
253
- logging.info(f"Gemini response received successfully")
254
- return response.text.strip()
255
- elif hasattr(response, 'parts'):
256
- # Try alternate access method
257
- logging.info(f"Gemini response received via parts")
258
- return response.parts[0].text.strip()
259
- else:
260
- logging.warning(f"Unexpected response format: {response}")
261
- # Fall back to basic response
262
- return "I'm processing that..."
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
263
 
264
- except Exception as e:
265
- logging.error(f"Error using Gemini API: {e}")
266
- logging.error(f"Full error details: {type(e).__name__}: {str(e)}")
267
- print(f"Error using Gemini API: {e}")
268
- # Fall back to basic response logic
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
269
  else:
270
- logging.warning("Gemini API not available - using fallback responses")
271
-
272
- # Original response generation logic as fallback
273
- logging.info(f"Using fallback response. Intent: {intent}, Keywords: {keywords[:5]}")
274
-
275
  if intent == "question":
276
  if "you" in keywords:
277
- return "I am still learning about myself. My Gemini AI is not responding right now."
278
  else:
279
- return "I'd love to help with that, but my AI system isn't responding at the moment."
280
  elif intent == "gratitude":
281
- return "You're welcome!"
282
  else:
283
- return "I hear you, though my full AI capabilities aren't active right now. Please check if my API key is configured."
 
 
 
 
 
 
 
 
 
284
 
285
  def update_knowledge(self, keywords, user_input):
286
  #for new key words remember them
 
1
  import random
2
  import nltk
3
  import os
4
+ import json
5
+ import yaml
6
  from dotenv import load_dotenv
 
7
  import logging
8
+ import requests
9
+ from litellm import completion
10
+ from datetime import datetime
11
 
12
  # Configure logging
13
  logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
 
15
  # Load environment variables from .env file
16
  load_dotenv()
17
 
18
+ # Load model configuration from YAML
19
+ def load_model_config(config_path="models.yaml"):
20
+ """Load model configuration from YAML file"""
21
+ try:
22
+ if os.path.exists(config_path):
23
+ with open(config_path, 'r', encoding='utf-8') as f:
24
+ config = yaml.safe_load(f)
25
+ logging.info(f"✓ Model configuration loaded from {config_path}")
26
+ return config
27
+ else:
28
+ logging.warning(f"⚠ Model configuration file {config_path} not found, using defaults")
29
+ return None
30
+ except Exception as e:
31
+ logging.error(f"✗ Error loading model configuration: {e}")
32
+ return None
33
+
34
+ # Load configuration at module level
35
+ MODEL_CONFIG = load_model_config()
36
+
37
  # Download NLTK data (only needs to be done once)
38
  try:
39
  nltk.data.find("tokenizers/punkt")
 
53
 
54
  from enum import Enum
55
 
56
+ # ChromaDB removed - using JSON-only memory
57
+
58
+ # --- Memory System (JSON only) ---
59
+ class MemorySystem:
60
+ """Memory system using JSON for simple key-value storage"""
61
+
62
+ def __init__(self, json_db_path=None, config=None):
63
+ self.config = config or MODEL_CONFIG or {}
64
+ # Get paths from config or use defaults
65
+ memory_config = self.config.get('memory', {}) if self.config else {}
66
+ self.json_db_path = json_db_path or memory_config.get('json_path', './memory.json')
67
+ self.json_memory = {}
68
+
69
+ # Initialize JSON database
70
+ self.load_json_memory()
71
+
72
+ def is_ready(self):
73
+ """Check if memory system is fully initialized"""
74
+ return self.json_memory is not None
75
+
76
+ def load_json_memory(self):
77
+ """Load JSON memory database"""
78
+ try:
79
+ if os.path.exists(self.json_db_path):
80
+ with open(self.json_db_path, 'r', encoding='utf-8') as f:
81
+ self.json_memory = json.load(f)
82
+ logging.info(f"Loaded JSON memory with {len(self.json_memory)} entries")
83
+ else:
84
+ self.json_memory = {}
85
+ logging.info("Created new JSON memory database")
86
+ except Exception as e:
87
+ logging.error(f"Error loading JSON memory: {e}")
88
+ self.json_memory = {}
89
+
90
+ def save_json_memory(self):
91
+ """Save JSON memory database"""
92
+ try:
93
+ with open(self.json_db_path, 'w', encoding='utf-8') as f:
94
+ json.dump(self.json_memory, f, indent=2, ensure_ascii=False)
95
+ except Exception as e:
96
+ logging.error(f"Error saving JSON memory: {e}")
97
+
98
+ def store_memory(self, text, metadata=None, memory_type="conversation"):
99
+ """Store a memory in JSON"""
100
+ timestamp = datetime.now().isoformat()
101
+
102
+ # Store in JSON
103
+ memory_id = f"{memory_type}_{timestamp}"
104
+ self.json_memory[memory_id] = {
105
+ "text": text,
106
+ "metadata": metadata or {},
107
+ "type": memory_type,
108
+ "timestamp": timestamp
109
+ }
110
+ self.save_json_memory()
111
+ logging.info(f"Stored memory in JSON: {memory_id[:20]}...")
112
+
113
+ def retrieve_relevant_memories(self, query, n_results=5):
114
+ """Retrieve relevant memories using keyword search in JSON"""
115
+ relevant_memories = []
116
+
117
+ # Simple keyword search in JSON
118
+ if self.json_memory:
119
+ query_lower = query.lower()
120
+ query_words = set(query_lower.split())
121
+
122
+ for memory_id, memory_data in self.json_memory.items():
123
+ text_lower = memory_data.get("text", "").lower()
124
+ text_words = set(text_lower.split())
125
+
126
+ # Simple overlap check
127
+ overlap = len(query_words & text_words)
128
+ if overlap > 0:
129
+ relevant_memories.append({
130
+ "text": memory_data["text"],
131
+ "metadata": memory_data.get("metadata", {}),
132
+ "distance": 1.0 - (overlap / max(len(query_words), len(text_words)))
133
+ })
134
+
135
+ # Sort by relevance (lower distance = more relevant)
136
+ relevant_memories.sort(key=lambda x: x.get("distance", 1.0))
137
+ relevant_memories = relevant_memories[:n_results]
138
+ logging.info(f"Retrieved {len(relevant_memories)} relevant memories from JSON DB")
139
+
140
+ return relevant_memories
141
+
142
+ def get_json_memory(self, key):
143
+ """Get a specific memory by key from JSON database"""
144
+ return self.json_memory.get(key)
145
+
146
+ def set_json_memory(self, key, value, metadata=None):
147
+ """Set a key-value memory in JSON database"""
148
+ self.json_memory[key] = {
149
+ "value": value,
150
+ "metadata": metadata or {},
151
+ "timestamp": datetime.now().isoformat()
152
+ }
153
+ self.save_json_memory()
154
+
155
+ def get_all_json_memories(self):
156
+ """Get all JSON memories"""
157
+ return self.json_memory.copy()
158
+
159
+ # --- Agent Classes ---
160
+ class MemoryAgent:
161
+ """Agent responsible for memory retrieval and storage"""
162
+
163
+ def __init__(self, memory_system, config=None):
164
+ self.memory_system = memory_system
165
+ self.config = config or MODEL_CONFIG or {}
166
+
167
+ def retrieve_memories(self, query, n_results=None):
168
+ """Retrieve relevant memories for a query"""
169
+ if n_results is None:
170
+ max_memories = self.config.get('memory', {}).get('retrieval', {}).get('max_retrieved_memories', 5) if self.config else 5
171
+ else:
172
+ max_memories = n_results
173
+
174
+ try:
175
+ memories = self.memory_system.retrieve_relevant_memories(query, n_results=max_memories)
176
+ if memories:
177
+ logging.info(f"[MemoryAgent] Retrieved {len(memories)} relevant memories")
178
+ return memories
179
+ except Exception as e:
180
+ logging.error(f"[MemoryAgent] Error retrieving memories: {e}")
181
+ return []
182
+
183
+ def store_memory(self, text, metadata=None, memory_type="conversation"):
184
+ """Store a memory"""
185
+ try:
186
+ self.memory_system.store_memory(text, metadata, memory_type)
187
+ logging.info(f"[MemoryAgent] Stored memory: {memory_type}")
188
+ except Exception as e:
189
+ logging.error(f"[MemoryAgent] Error storing memory: {e}")
190
+
191
+ def smoke_test(self):
192
+ """Perform smoke test to verify memory system is working"""
193
+ try:
194
+ # Test storing
195
+ test_text = "Smoke test memory entry"
196
+ self.store_memory(test_text, {"test": True}, "test")
197
+
198
+ # Test retrieving
199
+ memories = self.retrieve_memories("smoke test", n_results=1)
200
+ if memories is not None:
201
+ logging.info("[MemoryAgent] ✓ Smoke test passed")
202
+ return True
203
+ else:
204
+ logging.warning("[MemoryAgent] ⚠ Smoke test failed - retrieve returned None")
205
+ return False
206
+ except Exception as e:
207
+ logging.error(f"[MemoryAgent] ✗ Smoke test failed: {e}")
208
+ return False
209
+
210
+ def is_ready(self):
211
+ """Check if memory agent is ready"""
212
+ return self.memory_system.is_ready() if self.memory_system else False
213
+
214
+ class GeminiThinkingAgent:
215
+ """Agent responsible for thinking and analysis using Gemini"""
216
+
217
+ def __init__(self, config=None):
218
+ self.config = config or MODEL_CONFIG or {}
219
+ self.gemini_available = False
220
+ self._initialize()
221
+
222
+ def _initialize(self):
223
+ """Initialize Gemini API availability"""
224
+ gemini_key = os.getenv("GEMINI_API_KEY")
225
+ if gemini_key:
226
+ os.environ["GEMINI_API_KEY"] = gemini_key
227
+ self.gemini_available = True
228
+ logging.info("[GeminiThinkingAgent] ✓ Initialized and ready")
229
+ else:
230
+ logging.warning("[GeminiThinkingAgent] ✗ GEMINI_API_KEY not found")
231
+
232
+ def think(self, user_input, emotional_state, conversation_history, retrieved_memories=None):
233
+ """Think about and analyze the conversation context"""
234
+ if not self.gemini_available:
235
+ logging.warning("[GeminiThinkingAgent] Not available")
236
+ return None
237
+
238
+ try:
239
+ # Build thinking prompt with conversation context
240
+ emotions_text = ", ".join([f"{emotion}: {value:.2f}" for emotion, value in emotional_state.items()])
241
+
242
+ # Prepare conversation context for thinking
243
+ context_summary = ""
244
+ if conversation_history:
245
+ recent_history = conversation_history[-6:] # Last 3 exchanges
246
+ context_summary = "\nRecent conversation:\n"
247
+ for msg in recent_history:
248
+ role = "User" if msg["role"] == "user" else "Galatea"
249
+ context_summary += f"{role}: {msg['content']}\n"
250
+
251
+ # Add retrieved memories if available
252
+ memory_context = ""
253
+ if retrieved_memories and len(retrieved_memories) > 0:
254
+ memory_context = "\n\nRelevant memories from past conversations:\n"
255
+ for i, memory in enumerate(retrieved_memories[:3], 1): # Top 3 most relevant
256
+ memory_context += f"{i}. {memory['text'][:200]}...\n"
257
+
258
+ thinking_prompt = f"""You are the internal reasoning system for Galatea, an AI assistant.
259
+
260
+ Current emotional state: {emotions_text}
261
+ {context_summary}
262
+ {memory_context}
263
+ Current user message: "{user_input}"
264
+
265
+ Analyze this conversation and provide:
266
+ 1. Key insights about what the user is asking or discussing
267
+ 2. Important context from the conversation history and retrieved memories
268
+ 3. How Galatea should respond emotionally and contextually
269
+ 4. Any important details to remember or reference
270
+
271
+ Keep your analysis concise (2-3 sentences). Focus on what matters for crafting an appropriate response."""
272
+
273
+ messages = [
274
+ {"role": "system", "content": "You are an internal reasoning system. Analyze conversations and provide insights."},
275
+ {"role": "user", "content": thinking_prompt}
276
+ ]
277
+
278
+ logging.info("[GeminiThinkingAgent] Processing thinking request...")
279
+
280
+ # Get Gemini models from config
281
+ gemini_config = self.config.get('gemini', {}) if self.config else {}
282
+ gemini_models = gemini_config.get('thinking_models', [
283
+ "gemini/gemini-2.0-flash-exp",
284
+ "gemini/gemini-2.0-flash",
285
+ "gemini/gemini-1.5-flash-latest",
286
+ "gemini/gemini-1.5-flash"
287
+ ])
288
+
289
+ # Get thinking settings from config
290
+ thinking_config = gemini_config.get('thinking', {})
291
+ thinking_temp = thinking_config.get('temperature', 0.5)
292
+ thinking_max_tokens = thinking_config.get('max_tokens', 200)
293
+
294
+ for model in gemini_models:
295
+ try:
296
+ response = completion(
297
+ model=model,
298
+ messages=messages,
299
+ temperature=thinking_temp,
300
+ max_tokens=thinking_max_tokens
301
+ )
302
+
303
+ if response and 'choices' in response and len(response['choices']) > 0:
304
+ thinking_result = response['choices'][0]['message']['content']
305
+ logging.info("[GeminiThinkingAgent] ✓ Thinking completed")
306
+ return thinking_result.strip()
307
+ except Exception as e:
308
+ logging.warning(f"[GeminiThinkingAgent] Model {model} failed: {e}, trying next...")
309
+ continue
310
+
311
+ logging.error("[GeminiThinkingAgent] All models failed")
312
+ return None
313
+
314
+ except Exception as e:
315
+ logging.error(f"[GeminiThinkingAgent] Error: {e}")
316
+ return None
317
+
318
+ def smoke_test(self):
319
+ """Perform smoke test to verify Gemini is working"""
320
+ if not self.gemini_available:
321
+ return False
322
+
323
+ try:
324
+ test_result = self.think(
325
+ "test",
326
+ {"joy": 0.5, "sadness": 0.3, "anger": 0.1, "fear": 0.1, "curiosity": 0.5},
327
+ [],
328
+ retrieved_memories=None
329
+ )
330
+ if test_result and len(test_result) > 0:
331
+ logging.info("[GeminiThinkingAgent] ✓ Smoke test passed")
332
+ return True
333
+ else:
334
+ logging.warning("[GeminiThinkingAgent] ⚠ Smoke test failed - no result")
335
+ return False
336
+ except Exception as e:
337
+ logging.error(f"[GeminiThinkingAgent] ✗ Smoke test failed: {e}")
338
+ return False
339
+
340
+ def is_ready(self):
341
+ """Check if agent is ready"""
342
+ return self.gemini_available
343
+
344
class PiResponseAgent:
    """Agent responsible for generating human-facing responses using Pi-3.1.

    Wraps the Inflection AI inference endpoint. The agent is only usable when
    the INFLECTION_AI_API_KEY environment variable is set; otherwise every
    call to respond() returns None so callers can fall back gracefully.
    """

    def __init__(self, config=None):
        # Fall back to the module-level MODEL_CONFIG, then to an empty dict,
        # so the agent never crashes on missing configuration.
        self.config = config or MODEL_CONFIG or {}
        self.inflection_ai_available = False
        self.inflection_ai_api_key = None
        self._initialize()

    def _initialize(self):
        """Initialize Inflection AI API availability from the environment."""
        inflection_key = os.getenv("INFLECTION_AI_API_KEY")
        if inflection_key:
            self.inflection_ai_api_key = inflection_key
            self.inflection_ai_available = True
            logging.info("[PiResponseAgent] ✓ Initialized and ready")
        else:
            logging.warning("[PiResponseAgent] ✗ INFLECTION_AI_API_KEY not found")

    def _build_context(self, user_input, emotional_state, thinking_context,
                       conversation_history, retrieved_memories):
        """Build the Pi-3.1 ``context`` message list.

        Returns a list of {"text", "type"} dicts. The Pi-3.1 API does not
        accept a "System" context type (only "Human"/"Assistant"), so the
        persona/system prompt is sent as a leading "Human" message.
        """
        emotions_text = ", ".join(
            f"{emotion}: {value:.2f}" for emotion, value in emotional_state.items()
        )

        # Base persona/system context.
        base_context = f"You are Galatea, an AI assistant with the following emotional state: {emotions_text}. Respond in character as Galatea. Keep your response concise (under 50 words) and reflect your emotional state in your tone."

        # Add thinking context from Gemini if available
        if thinking_context:
            base_context += f"\n\nInternal analysis: {thinking_context}"

        # Add retrieved memories if available
        if retrieved_memories and len(retrieved_memories) > 0:
            memory_text = "\n\nRelevant context from past conversations:\n"
            for i, memory in enumerate(retrieved_memories[:3], 1):  # Top 3 most relevant
                memory_text += f"{i}. {memory['text'][:150]}...\n"
            base_context += memory_text

        # Add conversation history context
        if conversation_history and len(conversation_history) > 0:
            recent_history = conversation_history[-4:]  # Last 2 exchanges
            history_text = "\n\nRecent conversation context:\n"
            for msg in recent_history:
                role = "User" if msg["role"] == "user" else "You (Galatea)"
                history_text += f"{role}: {msg['content']}\n"
            base_context += history_text

        # BUGFIX: previously sent with "type": "System", which Pi-3.1 rejects;
        # the base context must be a "Human" message.
        context_parts = [{"text": base_context, "type": "Human"}]

        # Add older conversation history as explicit context messages (the
        # most recent exchanges were already inlined into base_context).
        if conversation_history and len(conversation_history) > 4:
            for msg in conversation_history[-8:-4]:
                context_parts.append({
                    "text": msg["content"],
                    "type": "Human" if msg["role"] == "user" else "Assistant"
                })

        # Add current user input
        context_parts.append({"text": user_input, "type": "Human"})
        return context_parts

    @staticmethod
    def _extract_text(result):
        """Pull the response text out of a Pi-3.1 JSON payload.

        The API payload shape has varied over time, so probe the known keys
        before falling back to str().
        """
        if isinstance(result, dict):
            for key in ('output', 'text', 'response', 'message'):
                if key in result:
                    return result[key]
            return str(result)
        if isinstance(result, str):
            return result
        return str(result)

    def respond(self, user_input, emotional_state, thinking_context=None, conversation_history=None, retrieved_memories=None):
        """Generate a response using Pi-3.1 with thinking context and emotional state.

        Args:
            user_input: Current user message.
            emotional_state: Mapping of emotion name -> intensity.
            thinking_context: Optional analysis text from the Gemini pass.
            conversation_history: Optional list of {"role", "content"} dicts.
            retrieved_memories: Optional list of {"text": ...} memory dicts.

        Returns:
            Response text, or None when unavailable or on any API failure.
        """
        if not self.inflection_ai_available:
            logging.warning("[PiResponseAgent] Not available")
            return None

        try:
            # Get endpoint and config from YAML
            inflection_config = self.config.get('inflection_ai', {}) if self.config else {}
            url = inflection_config.get('api_endpoint', 'https://api.inflection.ai/external/api/inference')
            model_config = inflection_config.get('model_config', 'Pi-3.1')

            headers = {
                "Authorization": f"Bearer {self.inflection_ai_api_key}",
                "Content-Type": "application/json"
            }

            data = {
                "context": self._build_context(
                    user_input, emotional_state, thinking_context,
                    conversation_history, retrieved_memories
                ),
                "config": model_config
            }

            logging.info("[PiResponseAgent] Sending request to Pi-3.1 API...")
            response = requests.post(url, headers=headers, json=data, timeout=30)

            if response.status_code == 200:
                text = self._extract_text(response.json())
                logging.info("[PiResponseAgent] ✓ Response received")
                return text.strip()

            logging.error(f"[PiResponseAgent] API returned status code {response.status_code}: {response.text}")
            return None

        except Exception as e:
            logging.error(f"[PiResponseAgent] Error: {e}")
            return None

    def smoke_test(self):
        """Perform smoke test to verify Pi-3.1 is working"""
        if not self.inflection_ai_available:
            return False

        try:
            test_result = self.respond(
                "Hello",
                {"joy": 0.5, "sadness": 0.3, "anger": 0.1, "fear": 0.1, "curiosity": 0.5},
                thinking_context="Test thinking context",
                conversation_history=[],
                retrieved_memories=None
            )
            if test_result and len(test_result) > 0:
                logging.info("[PiResponseAgent] ✓ Smoke test passed")
                return True
            logging.warning("[PiResponseAgent] ⚠ Smoke test failed - no result")
            return False
        except Exception as e:
            logging.error(f"[PiResponseAgent] ✗ Smoke test failed: {e}")
            return False

    def is_ready(self):
        """Check if agent is ready"""
        return self.inflection_ai_available
+
493
class EmotionalStateAgent:
    """Agent responsible for managing and updating emotional state.

    Maintains a five-emotion distribution (joy, sadness, anger, fear,
    curiosity) that is renormalized to sum to 1.0 after every update, and
    optionally perturbs updates with quantum randomness from the ANU
    Quantum Numbers API.
    """

    def __init__(self, initial_state=None, config=None):
        self.config = config or MODEL_CONFIG or {}
        # Start from a uniform distribution unless the caller provides one.
        self.emotional_state = initial_state or {"joy": 0.2, "sadness": 0.2, "anger": 0.2, "fear": 0.2, "curiosity": 0.2}
        self.learning_rate = 0.05  # how strongly sentiment moves joy/sadness
        self.quantum_random_available = False
        self.quantum_api_key = None
        self._initialize_quantum()

    def _initialize_quantum(self):
        """Initialize quantum randomness availability from the environment."""
        quantum_key = os.getenv("ANU_QUANTUM_API_KEY")
        if quantum_key:
            self.quantum_api_key = quantum_key
            self.quantum_random_available = True
            logging.info("[EmotionalStateAgent] ✓ Quantum randomness available")
        else:
            logging.warning("[EmotionalStateAgent] Quantum randomness unavailable")

    def get_quantum_random_float(self, min_val=0.0, max_val=1.0):
        """Get a quantum random float between min_val and max_val.

        Falls back to pseudo-random ``random.uniform`` when the quantum API
        is unavailable or the request fails.
        """
        if not self.quantum_random_available:
            return random.uniform(min_val, max_val)

        try:
            quantum_config = self.config.get('quantum', {}) if self.config else {}
            url = quantum_config.get('api_endpoint', 'https://api.quantumnumbers.anu.edu.au')
            headers = {"x-api-key": self.quantum_api_key}
            params = {"length": 1, "type": "uint8"}

            response = requests.get(url, headers=headers, params=params, timeout=10)

            if response.status_code == 200:
                result = response.json()
                if result.get('success') and 'data' in result and len(result['data']) > 0:
                    # Normalize the uint8 sample (0-255) into [min_val, max_val].
                    normalized = result['data'][0] / 255.0
                    return min_val + (max_val - min_val) * normalized
        except Exception as e:
            logging.warning(f"[EmotionalStateAgent] Quantum API failed: {e}")

        return random.uniform(min_val, max_val)

    def update_with_sentiment(self, sentiment_score):
        """Update emotional state based on sentiment.

        sentiment_score is expected in roughly [-1, 1]; extreme values are
        tolerated because every emotion is clamped to [0, 1] before the
        final renormalization.

        Returns the (mutated) emotional-state dict, renormalized to sum 1.
        """
        # Decay emotions toward zero, with quantum variation if available.
        decay_factor = 0.9
        if self.quantum_random_available:
            decay_factor = self.get_quantum_random_float(0.85, 0.95)

        for emotion in self.emotional_state:
            # Decay emotions (more realistic fading with quantum variation)
            self.emotional_state[emotion] *= decay_factor
            # Normalize
            self.emotional_state[emotion] = max(0.0, min(1.0, self.emotional_state[emotion]))

        # Apply sentiment with quantum-enhanced learning rate variation
        learning_rate = self.learning_rate
        if self.quantum_random_available:
            learning_rate = self.get_quantum_random_float(0.03, 0.07)

        self.emotional_state["joy"] += sentiment_score * learning_rate
        self.emotional_state["sadness"] -= sentiment_score * learning_rate

        # BUGFIX: clamp joy/sadness back into [0, 1]. A strong sentiment could
        # previously drive them negative, and the negative values survived
        # renormalization, yielding an invalid emotion distribution.
        self.emotional_state["joy"] = max(0.0, min(1.0, self.emotional_state["joy"]))
        self.emotional_state["sadness"] = max(0.0, min(1.0, self.emotional_state["sadness"]))

        # Add quantum randomness to curiosity (making responses more unpredictable)
        if self.quantum_random_available:
            quantum_curiosity_boost = self.get_quantum_random_float(-0.05, 0.05)
            self.emotional_state["curiosity"] = max(0.0, min(1.0,
                self.emotional_state["curiosity"] + quantum_curiosity_boost))

        # Re-normalize so the emotions form a distribution summing to 1.
        total_emotion = sum(self.emotional_state.values())
        for emotion in self.emotional_state:
            self.emotional_state[emotion] = self.emotional_state[emotion] / total_emotion if total_emotion > 0 else 0.2

        logging.info(f"[EmotionalStateAgent] Updated emotional state: {self.emotional_state}")
        return self.emotional_state

    def get_state(self):
        """Get a shallow copy of the current emotional state."""
        return self.emotional_state.copy()

    def smoke_test(self):
        """Perform smoke test to verify emotional state system is working"""
        try:
            # Test quantum randomness if available
            if self.quantum_random_available:
                test_float = self.get_quantum_random_float(0.0, 1.0)
                if not isinstance(test_float, float) or test_float < 0.0 or test_float > 1.0:
                    logging.warning("[EmotionalStateAgent] ⚠ Smoke test failed - invalid quantum random")
                    return False

            # Test state update
            updated_state = self.update_with_sentiment(0.5)
            if updated_state and isinstance(updated_state, dict):
                logging.info("[EmotionalStateAgent] ✓ Smoke test passed")
                return True
            logging.warning("[EmotionalStateAgent] ⚠ Smoke test failed - invalid state")
            return False
        except Exception as e:
            logging.error(f"[EmotionalStateAgent] ✗ Smoke test failed: {e}")
            return False

    def is_ready(self):
        """Check if agent is ready"""
        return True  # Emotional state is always ready
+
605
class AzureTextAnalyticsAgent:
    """Agent responsible for Azure Text Analytics sentiment analysis"""

    def __init__(self, config=None):
        self.config = config or MODEL_CONFIG or {}
        self.azure_available = False
        self.client = None
        self._initialize()

    def _initialize(self):
        """Initialize Azure Text Analytics client"""
        # Guard-clause style: bail out early when the SDK or the
        # credentials are missing; only then attempt client creation.
        try:
            from azure.ai.textanalytics import TextAnalyticsClient
            from azure.core.credentials import AzureKeyCredential
        except ImportError:
            logging.warning("[AzureTextAnalyticsAgent] ✗ Azure SDK not installed")
            self.azure_available = False
            return

        api_key = os.getenv("AZURE_TEXT_ANALYTICS_KEY")
        service_endpoint = os.getenv("AZURE_TEXT_ANALYTICS_ENDPOINT")
        if not (api_key and service_endpoint):
            logging.warning("[AzureTextAnalyticsAgent] ✗ Azure credentials not found")
            self.azure_available = False
            return

        try:
            self.client = TextAnalyticsClient(
                endpoint=service_endpoint,
                credential=AzureKeyCredential(api_key),
            )
        except Exception as e:
            logging.warning(f"[AzureTextAnalyticsAgent] Failed to create client: {e}")
            self.azure_available = False
        else:
            self.azure_available = True
            logging.info("[AzureTextAnalyticsAgent] ✓ Initialized and ready")

    def analyze(self, text):
        """Analyze sentiment using Azure Text Analytics"""
        if not self.azure_available or not self.client:
            return None

        try:
            doc_result = self.client.analyze_sentiment(documents=[text])[0]
            label = doc_result.sentiment
            scores = doc_result.confidence_scores
            # Map the label to a signed score: + for positive, - for negative.
            if label == 'positive':
                return scores.positive
            if label == 'negative':
                return -scores.negative
            return 0.0
        except Exception as e:
            logging.error(f"[AzureTextAnalyticsAgent] Error: {e}")
            return None

    def smoke_test(self):
        """Perform smoke test to verify Azure Text Analytics is working"""
        if not self.azure_available:
            return False

        try:
            score = self.analyze("This is a test message for sentiment analysis.")
        except Exception as e:
            logging.error(f"[AzureTextAnalyticsAgent] ✗ Smoke test failed: {e}")
            return False

        if score is None:
            logging.warning("[AzureTextAnalyticsAgent] ⚠ Smoke test failed - analyze returned None")
            return False
        logging.info("[AzureTextAnalyticsAgent] ✓ Smoke test passed")
        return True

    def is_ready(self):
        """Check if agent is ready"""
        return self.azure_available
+
678
class SentimentAgent:
    """Agent responsible for sentiment analysis (uses Azure, Hugging Face, or NLTK fallback)"""

    def __init__(self, config=None):
        self.config = config or MODEL_CONFIG or {}
        self.azure_agent = AzureTextAnalyticsAgent(config=self.config)
        self.sentiment_analyzer = None
        self.ready = False
        self._initialize()

    def _initialize(self):
        """Initialize sentiment analyzer"""
        # Azure takes precedence whenever its SDK and credentials are present.
        if self.azure_agent.is_ready():
            self.ready = True
            logging.info("[SentimentAgent] Using Azure Text Analytics")
            return

        # Resolve the Hugging Face model name from config with a fixed default.
        default_model = 'distilbert/distilbert-base-uncased-finetuned-sst-2-english'
        model_name = default_model
        if self.config:
            model_name = self.config.get('sentiment', {}).get('primary_model', default_model)

        if not transformers_available:
            self.ready = True  # NLTK fallback is always available
            return

        try:
            logging.info("[SentimentAgent] Initializing Hugging Face sentiment analyzer...")
            self.sentiment_analyzer = pipeline("sentiment-analysis", model=model_name)
            logging.info("[SentimentAgent] ✓ Initialized successfully")
        except Exception as e:
            logging.warning(f"[SentimentAgent] Hugging Face model failed: {e}, using fallback")
            self.sentiment_analyzer = None
        self.ready = True

    def analyze(self, text):
        """Analyze sentiment of text (tries Azure, then Hugging Face, then NLTK)"""
        # 1) Azure, when available.
        if self.azure_agent.is_ready():
            azure_score = self.azure_agent.analyze(text)
            if azure_score is not None:
                return azure_score

        # 2) Hugging Face pipeline, when it was loaded.
        if not self.sentiment_analyzer:
            return self._fallback_analyze(text)

        try:
            prediction = self.sentiment_analyzer(text)[0]
            label = prediction['label'].lower()
            confidence = prediction['score']
            if 'positive' in label:
                return confidence
            if 'negative' in label:
                return -confidence
            return 0.0
        except Exception as e:
            logging.error(f"[SentimentAgent] Error: {e}")
            return self._fallback_analyze(text)

    def _fallback_analyze(self, text):
        """Fallback sentiment analysis using NLTK VADER"""
        try:
            from nltk.sentiment import SentimentIntensityAnalyzer
            vader = SentimentIntensityAnalyzer()
            # VADER's compound score is already in [-1, 1].
            return vader.polarity_scores(text)['compound']
        except Exception as e:
            logging.error(f"[SentimentAgent] Fallback failed: {e}")
            return 0.0

    def smoke_test(self):
        """Perform smoke test to verify sentiment analysis is working"""
        try:
            score = self.analyze("I am happy and excited!")
        except Exception as e:
            logging.error(f"[SentimentAgent] ✗ Smoke test failed: {e}")
            return False

        if score is not None and isinstance(score, (int, float)):
            logging.info("[SentimentAgent] ✓ Smoke test passed")
            return True
        logging.warning("[SentimentAgent] ⚠ Smoke test failed - invalid result")
        return False

    def is_ready(self):
        """Check if agent is ready"""
        return self.ready
+
769
  # --- 1. AI Core ---
770
  class GalateaAI:
771
    def __init__(self):
        """Build the Galatea core: load config, the memory system, and all agents.

        Raises:
            Exception: re-raised when the memory system fails to initialize,
                since the core is unusable without it.
        """
        # Load model configuration first
        self.config = MODEL_CONFIG or {}

        self.knowledge_base = {}
        self.response_model = "A generic response" #Place Holder for the ML model

        # Conversation history for context
        self.conversation_history = [] # List of {"role": "user"/"assistant", "content": "..."}
        # Get max history length from config or use default
        self.max_history_length = self.config.get('conversation', {}).get('max_history_length', 20)

        # Initialize memory system
        logging.info("Initializing memory system (JSON)...")
        try:
            self.memory_system = MemorySystem(config=self.config)
            self.memory_system_ready = self.memory_system.is_ready()
            if not self.memory_system_ready:
                raise Exception("Memory system failed to initialize")
            logging.info("✓ Memory system initialized")
        except Exception as e:
            logging.error(f"Failed to initialize memory system: {e}")
            self.memory_system_ready = False
            raise

        # Initialize agents
        logging.info("Initializing agents...")
        self.memory_agent = MemoryAgent(self.memory_system, config=self.config)
        self.gemini_agent = GeminiThinkingAgent(config=self.config)
        self.pi_agent = PiResponseAgent(config=self.config)
        self.emotional_agent = EmotionalStateAgent(config=self.config)
        self.sentiment_agent = SentimentAgent(config=self.config)

        # Track initialization status
        # NOTE(review): memory_system_ready is re-derived here from the
        # MemoryAgent, overwriting the value computed above — confirm intended.
        self.memory_system_ready = self.memory_agent.is_ready()
        self.sentiment_analyzer_ready = self.sentiment_agent.is_ready()
        self.models_ready = self.gemini_agent.is_ready() or self.pi_agent.is_ready()
        self.api_keys_valid = self.gemini_agent.is_ready() or self.pi_agent.is_ready()

        # Legacy compatibility: older code paths read these flat flags
        # instead of querying the agents directly.
        self.gemini_available = self.gemini_agent.is_ready()
        self.inflection_ai_available = self.pi_agent.is_ready()
        self.quantum_random_available = self.emotional_agent.quantum_random_available

        logging.info("✓ All agents initialized")
816
+
817
+ def _check_pre_initialization(self):
818
+ """Check if components were pre-initialized by initialize_galatea.py"""
819
+ # Check if ChromaDB directory exists and has collection
820
+ chromadb_path = "./chroma_db"
821
+ if os.path.exists(chromadb_path):
822
+ try:
823
+ import chromadb
824
+ from chromadb.config import Settings
825
+ vector_db = chromadb.PersistentClient(
826
+ path=chromadb_path,
827
+ settings=Settings(anonymized_telemetry=False)
828
+ )
829
+ collection = vector_db.get_collection("galatea_memory")
830
+ if collection:
831
+ logging.info("✓ Pre-initialized ChromaDB detected")
832
+ return True
833
+ except Exception:
834
+ pass
835
+
836
+ # Check if JSON memory exists
837
+ if os.path.exists("./memory.json"):
838
+ logging.info("✓ Pre-initialized JSON memory detected")
839
+ return True
840
+
841
+ return False
842
+
843
+ def is_fully_initialized(self):
844
+ """Check if all components are fully initialized"""
845
+ return (
846
+ self.memory_system_ready and
847
+ self.sentiment_analyzer_ready and
848
+ self.models_ready and
849
+ self.api_keys_valid
850
+ )
851
+
852
+ def get_initialization_status(self):
853
+ """Get detailed initialization status"""
854
+ smoke_tests = getattr(self, 'smoke_test_results', {})
855
+ return {
856
+ "memory_system": self.memory_system_ready,
857
+ "sentiment_analyzer": self.sentiment_analyzer_ready,
858
+ "models": self.models_ready,
859
+ "api_keys": self.api_keys_valid,
860
+ "gemini_available": self.gemini_agent.is_ready() if hasattr(self, 'gemini_agent') else False,
861
+ "inflection_ai_available": self.pi_agent.is_ready() if hasattr(self, 'pi_agent') else False,
862
+ "azure_text_analytics_available": self.sentiment_agent.azure_agent.is_ready() if hasattr(self, 'sentiment_agent') else False,
863
+ "smoke_tests": smoke_tests,
864
+ "fully_initialized": self.is_fully_initialized()
865
+ }
866
+
867
+ @property
868
+ def emotional_state(self):
869
+ """Get current emotional state from EmotionalStateAgent"""
870
+ return self.emotional_agent.get_state() if hasattr(self, 'emotional_agent') else {"joy": 0.2, "sadness": 0.2, "anger": 0.2, "fear": 0.2, "curiosity": 0.2}
871
 
872
  def initialize_sentiment_analyzer(self):
873
  """Initialize sentiment analysis with fallback options"""
874
+ self.sentiment_analyzer_ready = False
875
+ # Get sentiment model from config
876
+ sentiment_model = self.config.get('sentiment', {}).get('primary_model', 'distilbert/distilbert-base-uncased-finetuned-sst-2-english') if self.config else 'distilbert/distilbert-base-uncased-finetuned-sst-2-english'
877
+
878
  if transformers_available:
879
  try:
880
  logging.info("Attempting to initialize Hugging Face sentiment analyzer")
881
  # Try to initialize the pipeline with specific parameters
882
  self.sentiment_analyzer = pipeline(
883
  "sentiment-analysis",
884
+ model=sentiment_model
885
  )
886
+ self.sentiment_analyzer_ready = True
887
+ logging.info("✓ Hugging Face sentiment analyzer loaded successfully")
888
  except Exception as e:
889
  logging.error(f"Failed to initialize Hugging Face sentiment analyzer: {e}")
890
  self.sentiment_analyzer = None
891
+ # Still mark as ready since we have fallback
892
+ self.sentiment_analyzer_ready = True
893
+ logging.info("✓ Using fallback sentiment analyzer")
894
  else:
895
  self.sentiment_analyzer = None
896
+ self.sentiment_analyzer_ready = True # Fallback available
897
+ logging.info("✓ Using fallback sentiment analyzer")
898
 
899
  def analyze_sentiment(self, text):
900
  # Use Hugging Face if available
 
927
 
928
  return max(-1.0, min(1.0, sentiment_score)) # Clamp between -1 and 1
929
 
930
+ def initialize_litellm(self):
931
+ """Initialize LiteLLM for unified model management"""
932
+ self.gemini_available = False
933
+ self.inflection_ai_available = False
934
+ self.quantum_random_available = False
935
+ self.models_ready = False
936
+ self.api_keys_valid = False
937
+
938
+ # Check for Gemini API key
939
+ gemini_key = os.getenv("GEMINI_API_KEY")
940
+ if gemini_key:
941
+ os.environ["GEMINI_API_KEY"] = gemini_key
942
+ self.gemini_available = True
943
+ logging.info("✓ Gemini API key found - Gemini models available via LiteLLM")
944
+ else:
945
+ logging.warning("GEMINI_API_KEY not found - Gemini models unavailable")
946
+
947
+ # Check for Inflection AI API key
948
+ inflection_key = os.getenv("INFLECTION_AI_API_KEY")
949
+ if inflection_key:
950
+ self.inflection_ai_api_key = inflection_key
951
+ self.inflection_ai_available = True
952
+ logging.info("✓ Inflection AI API key found - Pi-3.1 model available")
953
+ else:
954
+ logging.warning("INFLECTION_AI_API_KEY not found - Pi-3.1 model unavailable")
955
+
956
+ # Check for Quantum Random Numbers API key
957
+ quantum_key = os.getenv("ANU_QUANTUM_API_KEY")
958
+ if quantum_key:
959
+ self.quantum_api_key = quantum_key
960
+ self.quantum_random_available = True
961
+ logging.info("✓ ANU Quantum Numbers API key found - Quantum randomness available")
962
+ else:
963
+ logging.warning("ANU_QUANTUM_API_KEY not found - Quantum randomness unavailable")
964
+
965
+ # Verify API keys are valid (at least one model API key must be present)
966
+ self.api_keys_valid = self.gemini_available or self.inflection_ai_available
967
+ if self.api_keys_valid:
968
+ logging.info("✓ API keys validated - at least one model API key is available")
969
+ else:
970
+ logging.error("✗ No valid API keys found - models unavailable")
971
+
972
+ # Models are ready if at least one is available
973
+ self.models_ready = self.gemini_available or self.inflection_ai_available
974
+ if self.models_ready:
975
+ logging.info("✓ Models ready for use")
976
+ else:
977
+ logging.warning("⚠ No models available")
978
+
979
+ def get_quantum_random_numbers(self, length=None, number_type=None):
980
+ """Fetch quantum random numbers from ANU Quantum Numbers API"""
981
+ if not self.quantum_random_available:
982
+ logging.warning("Quantum random numbers unavailable, using fallback")
983
+ return None
984
+
985
+ # Get defaults from config
986
+ quantum_config = self.config.get('quantum', {}) if self.config else {}
987
+ if length is None:
988
+ length = quantum_config.get('default_length', 128)
989
+ if number_type is None:
990
+ number_type = quantum_config.get('default_type', 'uint8')
991
+
992
+ try:
993
+ url = quantum_config.get('api_endpoint', 'https://api.quantumnumbers.anu.edu.au')
994
+ headers = {
995
+ "x-api-key": self.quantum_api_key
996
+ }
997
+ params = {
998
+ "length": length,
999
+ "type": number_type
1000
+ }
1001
+
1002
+ response = requests.get(url, headers=headers, params=params, timeout=10)
1003
+
1004
+ if response.status_code == 200:
1005
+ result = response.json()
1006
+ if result.get('success') and 'data' in result:
1007
+ logging.info(f"✓ Retrieved {len(result['data'])} quantum random numbers")
1008
+ return result['data']
1009
+ else:
1010
+ logging.warning("Quantum API returned success but no data")
1011
+ return None
1012
+ else:
1013
+ logging.error(f"Quantum API returned status code {response.status_code}: {response.text}")
1014
+ return None
1015
+
1016
+ except Exception as e:
1017
+ logging.error(f"Error fetching quantum random numbers: {e}")
1018
+ return None
1019
+
1020
+ def get_quantum_random_float(self, min_val=0.0, max_val=1.0):
1021
+ """Get a quantum random float between min_val and max_val"""
1022
+ quantum_nums = self.get_quantum_random_numbers(length=1, number_type='uint8')
1023
+ if quantum_nums and len(quantum_nums) > 0:
1024
+ # Normalize uint8 (0-255) to float range
1025
+ normalized = quantum_nums[0] / 255.0
1026
+ return min_val + (max_val - min_val) * normalized
1027
+ # Fallback to regular random
1028
+ return random.uniform(min_val, max_val)
1029
+
1030
+ def call_inflection_ai(self, user_input, emotional_state, thinking_context=None, conversation_history=None, retrieved_memories=None):
1031
+ """Call Inflection AI Pi-3.1 model API with conversation context, thinking insights, and retrieved memories"""
1032
+ if not self.inflection_ai_available:
1033
+ return None
1034
 
1035
  try:
1036
+ # Create context with emotional state
1037
+ emotions_text = ", ".join([f"{emotion}: {value:.2f}" for emotion, value in emotional_state.items()])
1038
+
1039
+ # Format the request according to Inflection AI API
1040
+ # Get endpoint and config from YAML
1041
+ inflection_config = self.config.get('inflection_ai', {}) if self.config else {}
1042
+ url = inflection_config.get('api_endpoint', 'https://api.inflection.ai/external/api/inference')
1043
+ model_config = inflection_config.get('model_config', 'Pi-3.1')
1044
+
1045
+ headers = {
1046
+ "Authorization": f"Bearer {self.inflection_ai_api_key}",
1047
+ "Content-Type": "application/json"
1048
+ }
1049
+
1050
+ # Build comprehensive context with thinking insights, conversation history, and retrieved memories
1051
+ context_parts = []
1052
+
1053
+ # Base system context
1054
+ base_context = f"You are Galatea, an AI assistant with the following emotional state: {emotions_text}. Respond in character as Galatea. Keep your response concise (under 50 words) and reflect your emotional state in your tone."
1055
+
1056
+ # Add thinking context from Gemini if available
1057
+ if thinking_context:
1058
+ base_context += f"\n\nInternal analysis: {thinking_context}"
1059
+
1060
+ # Add retrieved memories if available
1061
+ if retrieved_memories and len(retrieved_memories) > 0:
1062
+ memory_text = "\n\nRelevant context from past conversations:\n"
1063
+ for i, memory in enumerate(retrieved_memories[:3], 1): # Top 3 most relevant
1064
+ memory_text += f"{i}. {memory['text'][:150]}...\n"
1065
+ base_context += memory_text
1066
+
1067
+ # Add conversation history context
1068
+ if conversation_history and len(conversation_history) > 0:
1069
+ recent_history = conversation_history[-4:] # Last 2 exchanges
1070
+ history_text = "\n\nRecent conversation context:\n"
1071
+ for msg in recent_history:
1072
+ role = "User" if msg["role"] == "user" else "You (Galatea)"
1073
+ history_text += f"{role}: {msg['content']}\n"
1074
+ base_context += history_text
1075
+
1076
+ context_parts.append({
1077
+ "text": base_context,
1078
+ "type": "System"
1079
+ })
1080
+
1081
+ # Add conversation history as context messages
1082
+ if conversation_history and len(conversation_history) > 4:
1083
+ # Add older messages as context (but not the most recent ones we already included)
1084
+ for msg in conversation_history[-8:-4]:
1085
+ context_parts.append({
1086
+ "text": msg["content"],
1087
+ "type": "Human" if msg["role"] == "user" else "Assistant"
1088
+ })
1089
+
1090
+ # Add current user input
1091
+ context_parts.append({
1092
+ "text": user_input,
1093
+ "type": "Human"
1094
+ })
1095
+
1096
+ data = {
1097
+ "context": context_parts,
1098
+ "config": model_config
1099
+ }
1100
 
1101
+ logging.info("Sending request to Inflection AI Pi-3.1 API")
1102
+ response = requests.post(url, headers=headers, json=data, timeout=30)
1103
+
1104
+ if response.status_code == 200:
1105
+ result = response.json()
1106
+ # Extract the response text from the API response
1107
+ if isinstance(result, dict):
1108
+ if 'output' in result:
1109
+ text = result['output']
1110
+ elif 'text' in result:
1111
+ text = result['text']
1112
+ elif 'response' in result:
1113
+ text = result['response']
1114
+ elif 'message' in result:
1115
+ text = result['message']
1116
+ else:
1117
+ text = str(result)
1118
+ elif isinstance(result, str):
1119
+ text = result
1120
+ else:
1121
+ text = str(result)
1122
+
1123
+ logging.info("Inflection AI response received successfully")
1124
+ return text.strip()
1125
+ else:
1126
+ logging.error(f"Inflection AI API returned status code {response.status_code}: {response.text}")
1127
+ return None
1128
 
1129
+ except Exception as e:
1130
+ logging.error(f"Error calling Inflection AI API: {e}")
1131
+ logging.error(f"Full error details: {type(e).__name__}: {str(e)}")
1132
+ return None
 
 
1133
 
1134
    def gemini_think(self, user_input, emotional_state, conversation_history, retrieved_memories=None):
        """Use Gemini to think about and analyze the conversation context with retrieved memories.

        Returns the analysis text, or None when Gemini is unavailable or
        every configured model fails.
        """
        if not self.gemini_available:
            return None

        try:
            # Build thinking prompt with conversation context
            emotions_text = ", ".join([f"{emotion}: {value:.2f}" for emotion, value in emotional_state.items()])

            # Prepare conversation context for thinking
            context_summary = ""
            if conversation_history:
                recent_history = conversation_history[-6:] # Last 3 exchanges
                context_summary = "\nRecent conversation:\n"
                for msg in recent_history:
                    role = "User" if msg["role"] == "user" else "Galatea"
                    context_summary += f"{role}: {msg['content']}\n"

            # Add retrieved memories if available
            memory_context = ""
            if retrieved_memories and len(retrieved_memories) > 0:
                memory_context = "\n\nRelevant memories from past conversations:\n"
                for i, memory in enumerate(retrieved_memories[:3], 1): # Top 3 most relevant
                    memory_context += f"{i}. {memory['text'][:200]}...\n"

            thinking_prompt = f"""You are the internal reasoning system for Galatea, an AI assistant.

Current emotional state: {emotions_text}
{context_summary}
{memory_context}
Current user message: "{user_input}"

Analyze this conversation and provide:
1. Key insights about what the user is asking or discussing
2. Important context from the conversation history and retrieved memories
3. How Galatea should respond emotionally and contextually
4. Any important details to remember or reference

Keep your analysis concise (2-3 sentences). Focus on what matters for crafting an appropriate response."""

            messages = [
                {"role": "system", "content": "You are an internal reasoning system. Analyze conversations and provide insights."},
                {"role": "user", "content": thinking_prompt}
            ]

            logging.info("Using Gemini for thinking/analysis")

            # Get Gemini models from config; the list order is the fallback order.
            gemini_config = self.config.get('gemini', {}) if self.config else {}
            gemini_models = gemini_config.get('thinking_models', [
                "gemini/gemini-2.0-flash-exp",
                "gemini/gemini-2.0-flash",
                "gemini/gemini-1.5-flash-latest",
                "gemini/gemini-1.5-flash"
            ])

            # Get thinking settings from config
            thinking_config = gemini_config.get('thinking', {})
            thinking_temp = thinking_config.get('temperature', 0.5)
            thinking_max_tokens = thinking_config.get('max_tokens', 200)

            # Try each configured model in turn until one returns a choice.
            for model in gemini_models:
                try:
                    response = completion(
                        model=model,
                        messages=messages,
                        temperature=thinking_temp,
                        max_tokens=thinking_max_tokens
                    )

                    if response and 'choices' in response and len(response['choices']) > 0:
                        thinking_result = response['choices'][0]['message']['content']
                        logging.info("✓ Gemini thinking completed")
                        return thinking_result.strip()
                except Exception as e:
                    logging.warning(f"Gemini model {model} failed for thinking: {e}, trying next...")
                    continue

            logging.error("All Gemini models failed for thinking")
            return None

        except Exception as e:
            logging.error(f"Error in Gemini thinking: {e}")
            return None
1218
+
1219
+ def update_conversation_history(self, user_input, assistant_response):
1220
+ """Update conversation history, maintaining max length"""
1221
+ # Add user message
1222
+ self.conversation_history.append({"role": "user", "content": user_input})
1223
+ # Add assistant response
1224
+ self.conversation_history.append({"role": "assistant", "content": assistant_response})
1225
+
1226
+ # Trim history if too long
1227
+ if len(self.conversation_history) > self.max_history_length:
1228
+ # Keep the most recent messages
1229
+ self.conversation_history = self.conversation_history[-self.max_history_length:]
1230
+
1231
+ def store_important_memory(self, user_input, assistant_response, intent, keywords):
1232
+ """Store important conversation snippets in memory system"""
1233
+ try:
1234
+ # Determine if this conversation is worth storing
1235
+ # Store if: question, contains important keywords, or is a significant exchange
1236
+ should_store = False
1237
+ memory_type = "conversation"
1238
+
1239
+ if intent == "question":
1240
+ should_store = True
1241
+ memory_type = "question"
1242
+ elif len(keywords) > 3: # Substantial conversation
1243
+ should_store = True
1244
+ elif any(keyword in ["remember", "important", "note", "save"] for keyword in keywords):
1245
+ should_store = True
1246
+ memory_type = "important"
1247
+
1248
+ if should_store:
1249
+ # Create a memory entry combining user input and response
1250
+ memory_text = f"User: {user_input}\nGalatea: {assistant_response}"
1251
+
1252
+ metadata = {
1253
+ "intent": intent,
1254
+ "keywords": keywords[:5], # Top 5 keywords
1255
+ "emotions": {k: round(v, 2) for k, v in self.emotional_state.items()}
1256
+ }
1257
+
1258
+ # Store in memory system (both ChromaDB and JSON)
1259
+ self.memory_system.store_memory(
1260
+ text=memory_text,
1261
+ metadata=metadata,
1262
+ memory_type=memory_type
1263
+ )
1264
+ logging.info(f"Stored important memory: {memory_type} - {user_input[:50]}...")
1265
+ except Exception as e:
1266
+ logging.error(f"Error storing memory: {e}")
1267
+
1268
+ def is_thinking_mode(self, intent, user_input, keywords):
1269
+ """Determine if the request requires thinking mode (use Gemini for complex reasoning)"""
1270
+ # Always use thinking mode now - Gemini always thinks, Pi-3.1 always responds
1271
+ return True
1272
 
1273
  def process_input(self, user_input):
1274
+ """Process user input through the agent chain workflow: PHI(GEMINI(User inputs, read with past memory), emotionalstate)"""
1275
+ # Step 1: Analyze sentiment
1276
+ sentiment_score = self.sentiment_agent.analyze(user_input)
1277
+
1278
+ # Step 2: Extract keywords and determine intent
1279
  keywords = self.extract_keywords(user_input)
1280
  intent = self.determine_intent(user_input)
1281
+
1282
+ # Step 3: Update emotional state based on sentiment
1283
+ self.emotional_agent.update_with_sentiment(sentiment_score)
1284
+ current_emotional_state = self.emotional_agent.get_state()
1285
+
1286
+ # Step 4: Retrieve memories
1287
+ retrieved_memories = self.memory_agent.retrieve_memories(user_input)
1288
+
1289
+ # Step 5: Chain workflow: PHI(GEMINI(User inputs, read with past memory), emotionalstate)
1290
+ # Step 5a: GEMINI(User inputs, read with past memory)
1291
+ thinking_context = self.gemini_agent.think(
1292
+ user_input,
1293
+ current_emotional_state,
1294
+ self.conversation_history,
1295
+ retrieved_memories=retrieved_memories
1296
+ )
1297
+
1298
+ # Step 5b: PHI(GEMINI result, emotionalstate)
1299
+ response = self.pi_agent.respond(
1300
+ user_input,
1301
+ current_emotional_state,
1302
+ thinking_context=thinking_context,
1303
+ conversation_history=self.conversation_history,
1304
+ retrieved_memories=retrieved_memories
1305
+ )
1306
+
1307
+ # Fallback if Pi-3.1 is not available
1308
+ if not response and self.gemini_agent.is_ready():
1309
+ response = self._gemini_fallback_response(
1310
+ user_input,
1311
+ current_emotional_state,
1312
+ thinking_context,
1313
+ self.conversation_history
1314
+ )
1315
+
1316
+ # If still no response, use fallback
1317
+ if not response:
1318
+ response = self._generate_fallback_response(intent, keywords, current_emotional_state, user_input)
1319
+
1320
+ # Update conversation history
1321
+ if response:
1322
+ self.update_conversation_history(user_input, response)
1323
+
1324
+ # Store important memories
1325
+ self._store_important_memory(user_input, response, intent, keywords)
1326
+
1327
+ # Update knowledge base
1328
  self.update_knowledge(keywords, user_input)
1329
+
1330
+ return response
1331
  return response
1332
 
1333
  def extract_keywords(self, text):
 
1357
  else:
1358
  return "statement"
1359
 
1360
    def _gemini_fallback_response(self, user_input, emotional_state, thinking_context, conversation_history):
        """Generate a user-facing reply directly from Gemini when Pi-3.1 fails.

        Builds an OpenAI-style message list (persona system prompt, recent
        history, the new user message, optionally the internal analysis as a
        trailing system message), then calls `completion` with a
        quantum-jittered temperature.

        Args:
            user_input: The user's current message.
            emotional_state: Mapping of emotion name -> float intensity.
            thinking_context: Optional analysis text from the thinking stage.
            conversation_history: Prior messages as {'role', 'content'} dicts.

        Returns:
            The stripped reply text, or None on any failure.
        """
        try:
            logging.info("[GalateaAI] Using Gemini fallback for direct response")
            # Render the emotional state into a readable clause for the prompt.
            emotions_text = ", ".join([f"{emotion}: {value:.2f}" for emotion, value in emotional_state.items()])

            # Build messages with conversation history
            messages = []
            # Get system prompts from config (safe in-code defaults below).
            system_prompts = self.config.get('system_prompts', {}) if self.config else {}
            identity = system_prompts.get('galatea_identity', 'You are Galatea, an AI assistant with emotional awareness and memory.')
            style = system_prompts.get('response_style', 'Respond in character, keeping responses concise (under 50 words).')

            messages.append({
                "role": "system",
                "content": f"{identity} Your emotional state: {emotions_text}. {style}"
            })

            # Get fallback settings from config
            gemini_config = self.config.get('gemini', {}) if self.config else {}
            fallback_config = gemini_config.get('fallback', {})
            max_history_exchanges = fallback_config.get('max_history_exchanges', 8)
            fallback_model = gemini_config.get('fallback_model', 'gemini/gemini-1.5-flash')

            # Add the most recent slice of conversation history.
            if conversation_history:
                for msg in conversation_history[-max_history_exchanges:]:
                    messages.append({
                        "role": msg["role"],
                        "content": msg["content"]
                    })

            # Add current user input
            messages.append({
                "role": "user",
                "content": user_input
            })

            # Add thinking context if available.
            # NOTE(review): this system message is appended AFTER the user
            # message; confirm the backend accepts trailing system messages.
            if thinking_context:
                messages.append({
                    "role": "system",
                    "content": f"Internal analysis: {thinking_context}"
                })

            # Use quantum randomness to jitter the sampling temperature.
            base_temperature = fallback_config.get('temperature_base', 0.7)
            temp_range = fallback_config.get('temperature_variation_range', [0.0, 0.3])
            quantum_temp_variation = self.emotional_agent.get_quantum_random_float(temp_range[0], temp_range[1])
            temperature = base_temperature + quantum_temp_variation

            # `completion` is presumably the LiteLLM-style entry point bound
            # at module level - TODO confirm against this module's imports.
            response = completion(
                model=fallback_model,
                messages=messages,
                temperature=temperature,
                max_tokens=fallback_config.get('max_tokens', 150)
            )

            if response and 'choices' in response and len(response['choices']) > 0:
                text = response['choices'][0]['message']['content']
                logging.info("[GalateaAI] ✓ Gemini fallback response received")
                return text.strip()
        except Exception as e:
            logging.error(f"[GalateaAI] Gemini fallback failed: {e}")

        # Reached when the call failed or returned no choices.
        return None
1426
+
1427
+ def _generate_fallback_response(self, intent, keywords, emotional_state, original_input):
1428
+ """Generate final fallback response when all systems fail"""
1429
+ logging.info(f"[GalateaAI] Using final fallback response. Intent: {intent}, Keywords: {keywords[:5]}")
1430
 
1431
+ # Determine which systems are not working
1432
+ unavailable_systems = []
1433
+ system_descriptions = {
1434
+ 'inflection_ai': ('Pi-3.1', 'my conversation model'),
1435
+ 'gemini': ('Gemini', 'my thinking model'),
1436
+ 'quantum_random': ('Quantum Random Numbers API', 'my quantum randomness source'),
1437
+ 'memory': ('Memory System', 'my memory system')
1438
+ }
1439
+
1440
+ if not getattr(self, 'inflection_ai_available', False):
1441
+ unavailable_systems.append(system_descriptions['inflection_ai'])
1442
+ if not getattr(self, 'gemini_available', False):
1443
+ unavailable_systems.append(system_descriptions['gemini'])
1444
+ if not getattr(self, 'quantum_random_available', False):
1445
+ unavailable_systems.append(system_descriptions['quantum_random'])
1446
+ if not getattr(self, 'memory_system_ready', False):
1447
+ unavailable_systems.append(system_descriptions['memory'])
1448
+
1449
+ # Generate natural, conversational error message
1450
+ if unavailable_systems:
1451
+ if len(unavailable_systems) == 1:
1452
+ system_name, system_desc = unavailable_systems[0]
1453
+ system_msg = f"{system_desc} ({system_name}) is not working right now"
1454
+ elif len(unavailable_systems) == 2:
1455
+ sys1_name, sys1_desc = unavailable_systems[0]
1456
+ sys2_name, sys2_desc = unavailable_systems[1]
1457
+ system_msg = f"{sys1_desc} ({sys1_name}) and {sys2_desc} ({sys2_name}) are not working"
1458
+ else:
1459
+ # For 3+ systems, list them naturally
1460
+ system_list = []
1461
+ for sys_name, sys_desc in unavailable_systems[:-1]:
1462
+ system_list.append(f"{sys_desc} ({sys_name})")
1463
+ last_name, last_desc = unavailable_systems[-1]
1464
+ system_msg = f"{', '.join(system_list)}, and {last_desc} ({last_name}) are not working"
1465
  else:
1466
+ system_msg = "some of my systems encountered an error"
1467
+
1468
+ fallback_response = None
 
 
1469
  if intent == "question":
1470
  if "you" in keywords:
1471
+ fallback_response = f"I'm still learning about myself, but I'm having technical difficulties. {system_msg.capitalize()}. I apologize for the inconvenience."
1472
  else:
1473
+ fallback_response = f"I'd love to help with that, but {system_msg}. Please check my system status or try again in a moment."
1474
  elif intent == "gratitude":
1475
+ fallback_response = "You're welcome!"
1476
  else:
1477
+ if unavailable_systems:
1478
+ fallback_response = f"I hear you, but {system_msg}. This might be due to missing API keys or network issues. Please check my configuration."
1479
+ else:
1480
+ fallback_response = "I hear you, though my full AI capabilities aren't active right now. Please check if my API keys are configured."
1481
+
1482
+ # Update conversation history even for fallback
1483
+ if fallback_response:
1484
+ self.update_conversation_history(original_input, fallback_response)
1485
+
1486
+ return fallback_response
1487
 
1488
  def update_knowledge(self, keywords, user_input):
1489
  #for new key words remember them
initialize_galatea.py ADDED
@@ -0,0 +1,400 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/usr/bin/env python3
"""
Galatea AI Initialization Script
Handles parallel initialization of all components
"""

import os
import sys
import time
import logging
import threading  # NOTE(review): appears unused in this module - confirm.
from concurrent.futures import ThreadPoolExecutor, as_completed
from dotenv import load_dotenv

# Configure logging: mirror everything to stdout and initialization.log.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.StreamHandler(sys.stdout),
        logging.FileHandler('initialization.log')
    ]
)

# Check NumPy version before proceeding: several dependencies were built
# against NumPy 1.x and break under 2.x, so warn loudly but keep going.
try:
    import numpy as np
    np_version = np.__version__
    if np_version.startswith('2.'):
        logging.error("=" * 70)
        # NOTE(review): "NUM PY" contains a stray space - confirm intended.
        logging.error("NUM PY COMPATIBILITY ERROR")
        logging.error("=" * 70)
        logging.error(f"NumPy {np_version} is installed, but required libraries need NumPy < 2.0")
        logging.error("")
        logging.error("SOLUTION:")
        logging.error("  Option 1: Run the fix script:")
        logging.error("    python fix_numpy.py")
        logging.error("")
        logging.error("  Option 2: Manually downgrade:")
        logging.error("    pip install 'numpy<2.0.0'")
        logging.error("")
        logging.error("  Option 3: Reinstall all dependencies:")
        logging.error("    pip install -r requirements.txt")
        logging.error("")
        logging.error("This will downgrade NumPy to a compatible version.")
        logging.error("=" * 70)
        logging.warning("⚠ Continuing with initialization, but some components may fail...")
        logging.warning("⚠ Please fix NumPy version for full functionality")
    else:
        logging.info(f"✓ NumPy version check passed: {np_version}")
except ImportError:
    logging.warning("NumPy not installed - will be installed as dependency")
except Exception as e:
    logging.warning(f"Could not check NumPy version: {e}")

# Load environment variables (API keys) from a local .env file.
load_dotenv()

# Global status tracking: each initializer flips its own entry; the table
# is read back by run_initialization() for the final summary.
init_status = {
    'json_memory': {'ready': False, 'error': None},
    'sentiment_analyzer': {'ready': False, 'error': None},
    'gemini_api': {'ready': False, 'error': None},
    'inflection_api': {'ready': False, 'error': None},
    'quantum_api': {'ready': False, 'error': None},
}

# ChromaDB and embedding model removed - using JSON-only memory
70
def initialize_sentiment_analyzer():
    """Initialize the sentiment analyzer, preferring a Hugging Face pipeline.

    Falls back to NLTK VADER when transformers is missing or crashes with a
    NumPy-2.0 incompatibility. Always marks the component ready because a
    fallback exists; only records the error text on unexpected failures.

    Returns:
        True in every outcome (a fallback analyzer is always available).
    """
    try:
        logging.info("🔄 [Sentiment Analyzer] Starting initialization...")
        print("🔄 [Sentiment Analyzer] Starting initialization...")
        try:
            from transformers import pipeline
            analyzer = pipeline(
                "sentiment-analysis",
                model="distilbert/distilbert-base-uncased-finetuned-sst-2-english"
            )
            # Test it with a trivial input to force model load errors now.
            result = analyzer("test")
            logging.info("✓ [Sentiment Analyzer] Hugging Face model loaded")
            print("✓ [Sentiment Analyzer] Hugging Face model loaded")
            init_status['sentiment_analyzer']['ready'] = True
            return True
        except ImportError:
            # transformers not installed: the VADER fallback will be used.
            logging.info("✓ [Sentiment Analyzer] Using fallback (NLTK VADER)")
            print("✓ [Sentiment Analyzer] Using fallback (NLTK VADER)")
            init_status['sentiment_analyzer']['ready'] = True
            return True
        except Exception as e:
            error_msg = str(e)
            # Check for NumPy compatibility issues by error-message sniffing;
            # anything else is re-raised to the outer handler.
            if 'np.float_' in error_msg or 'NumPy 2' in error_msg or '_ARRAY_API' in error_msg:
                logging.warning(f"⚠ [Sentiment Analyzer] NumPy compatibility issue - using fallback")
                print("⚠ [Sentiment Analyzer] NumPy compatibility issue - using fallback")
                init_status['sentiment_analyzer']['ready'] = True  # Fallback available
                return True
            else:
                raise
    except Exception as e:
        error_msg = f"Sentiment analyzer initialization failed: {e}"
        logging.warning(f"⚠ [Sentiment Analyzer] {error_msg} - using fallback")
        print(f"⚠ [Sentiment Analyzer] Using fallback")
        init_status['sentiment_analyzer']['error'] = str(e)
        # Still mark as ready since we have fallback
        init_status['sentiment_analyzer']['ready'] = True
        return True
110
+
111
def validate_gemini_api():
    """Validate the Gemini API key with a tiny generation request.

    Deliberately optimistic: when the key exists but the probe call raises
    (e.g. a network hiccup), the component is still marked ready so a
    transient outage does not disable Gemini for the whole session.

    Returns:
        True when the key validated (or exists and the probe errored),
        False when the key is missing or the probe returned no response.
    """
    try:
        logging.info("🔄 [Gemini API] Validating API key...")
        print("🔄 [Gemini API] Validating API key...")
        api_key = os.getenv("GEMINI_API_KEY")

        if not api_key:
            logging.warning("⚠ [Gemini API] API key not found")
            print("⚠ [Gemini API] API key not found")
            init_status['gemini_api']['ready'] = False
            return False

        # Try to use custom LLM wrapper to validate
        try:
            from llm_wrapper import LLMWrapper
            # Initialize wrapper with a small, cheap test model.
            wrapper = LLMWrapper(gemini_model="gemini-1.5-flash")
            response = wrapper.call_gemini(
                messages=[{"role": "user", "content": "test"}],
                max_tokens=5
            )
            if response:
                logging.info("✓ [Gemini API] API key validated")
                print("✓ [Gemini API] API key validated")
                init_status['gemini_api']['ready'] = True
                return True
            else:
                # A None response means the API answered abnormally;
                # leave ready at its default (False) and report failure.
                logging.warning("⚠ [Gemini API] Validation failed - no response")
                print("⚠ [Gemini API] Validation failed - key exists, may be network issue")
                return False
        except Exception as e:
            logging.warning(f"⚠ [Gemini API] Validation failed: {e}")
            print("⚠ [Gemini API] Validation failed - key exists, may be network issue")
            # Still mark as available if key exists (might be network issue)
            init_status['gemini_api']['ready'] = True
            return True
    except Exception as e:
        error_msg = f"Gemini API validation failed: {e}"
        logging.error(f"✗ [Gemini API] {error_msg}")
        print(f"✗ [Gemini API] {error_msg}")
        init_status['gemini_api']['error'] = str(e)
        return False
154
+
155
def validate_inflection_api():
    """Check the Inflection AI key by issuing a tiny inference request.

    Updates init_status['inflection_api'] and returns True only when the
    API answered HTTP 200. A missing key or any request failure is logged
    as a warning and never aborts initialization.
    """
    try:
        logging.info("🔄 [Inflection AI] Validating API key...")
        print("🔄 [Inflection AI] Validating API key...")
        api_key = os.getenv("INFLECTION_AI_API_KEY")

        if not api_key:
            logging.warning("⚠ [Inflection AI] API key not found")
            print("⚠ [Inflection AI] API key not found")
            init_status['inflection_api']['ready'] = False
            return False

        # Minimal probe: one Human turn against the Pi-3.1 config.
        import requests
        probe = requests.post(
            "https://api.inflection.ai/external/api/inference",
            headers={
                "Authorization": f"Bearer {api_key}",
                "Content-Type": "application/json",
            },
            json={
                "context": [{"text": "test", "type": "Human"}],
                "config": "Pi-3.1",
            },
            timeout=10,
        )

        ok = probe.status_code == 200
        if ok:
            logging.info("✓ [Inflection AI] API key validated")
            print("✓ [Inflection AI] API key validated")
        else:
            logging.warning(f"⚠ [Inflection AI] Validation failed: {probe.status_code}")
            print(f"⚠ [Inflection AI] Validation failed: {probe.status_code}")
        init_status['inflection_api']['ready'] = ok
        return ok
    except Exception as e:
        error_msg = f"Inflection AI validation failed: {e}"
        logging.warning(f"⚠ [Inflection AI] {error_msg}")
        print(f"⚠ [Inflection AI] {error_msg}")
        # Don't fail initialization if this fails
        init_status['inflection_api']['ready'] = False
        return False
198
+
199
def validate_quantum_api():
    """Probe the ANU Quantum Random Numbers API with a one-byte request.

    Sets init_status['quantum_api'] accordingly and returns True only on
    HTTP 200. A missing key or request error is non-fatal.
    """
    try:
        logging.info("🔄 [Quantum API] Validating API key...")
        print("🔄 [Quantum API] Validating API key...")
        api_key = os.getenv("ANU_QUANTUM_API_KEY")

        if not api_key:
            logging.warning("⚠ [Quantum API] API key not found")
            print("⚠ [Quantum API] API key not found")
            init_status['quantum_api']['ready'] = False
            return False

        # Smallest possible request: a single uint8 value.
        import requests
        probe = requests.get(
            "https://api.quantumnumbers.anu.edu.au",
            headers={"x-api-key": api_key},
            params={"length": 1, "type": "uint8"},
            timeout=10,
        )

        ok = probe.status_code == 200
        if ok:
            logging.info("✓ [Quantum API] API key validated")
            print("✓ [Quantum API] API key validated")
        else:
            logging.warning(f"⚠ [Quantum API] Validation failed: {probe.status_code}")
            print(f"⚠ [Quantum API] Validation failed: {probe.status_code}")
        init_status['quantum_api']['ready'] = ok
        return ok
    except Exception as e:
        error_msg = f"Quantum API validation failed: {e}"
        logging.warning(f"⚠ [Quantum API] {error_msg}")
        print(f"⚠ [Quantum API] {error_msg}")
        init_status['quantum_api']['ready'] = False
        return False
235
+
236
def initialize_json_memory():
    """Load the JSON memory store, creating an empty one on first run.

    Marks init_status['json_memory'] ready on success; any I/O or parse
    error is recorded in the status table and reported as a failure.
    """
    try:
        logging.info("🔄 [JSON Memory] Initializing...")
        print("🔄 [JSON Memory] Initializing...")
        import json

        json_path = "./memory.json"
        if not os.path.exists(json_path):
            # First run: seed the store with an empty object.
            with open(json_path, 'w', encoding='utf-8') as f:
                json.dump({}, f)
            logging.info("✓ [JSON Memory] Created new database")
            print("✓ [JSON Memory] Created new database")
        else:
            with open(json_path, 'r', encoding='utf-8') as f:
                memory = json.load(f)
            logging.info(f"✓ [JSON Memory] Loaded {len(memory)} entries")
            print(f"✓ [JSON Memory] Loaded {len(memory)} entries")

        init_status['json_memory']['ready'] = True
        return True
    except Exception as e:
        error_msg = f"JSON memory initialization failed: {e}"
        logging.error(f"✗ [JSON Memory] {error_msg}")
        print(f"✗ [JSON Memory] {error_msg}")
        init_status['json_memory']['error'] = str(e)
        return False
263
+
264
def run_initialization():
    """Run every initialization task concurrently and print a summary.

    Submits all component initializers/validators to a thread pool, reports
    each completion as it happens, then renders a status table from the
    shared init_status dict.

    Returns:
        True when all critical components (json_memory, sentiment_analyzer,
        gemini_api) are ready; False otherwise. Optional component failures
        only downgrade the summary message.
    """
    start_time = time.time()

    logging.info("=" * 70)
    logging.info("GALATEA AI PARALLEL INITIALIZATION")
    logging.info("=" * 70)
    logging.info("Starting parallel initialization of all components...")
    logging.info("")

    # Define initialization tasks as (display name, callable) pairs.
    tasks = [
        ("JSON Memory", initialize_json_memory),
        ("Sentiment Analyzer", initialize_sentiment_analyzer),
        ("Gemini API", validate_gemini_api),
        ("Inflection AI", validate_inflection_api),
        ("Quantum API", validate_quantum_api),
    ]

    # Run tasks in parallel
    completed_count = 0
    total_tasks = len(tasks)

    # One worker per task so nothing queues behind a slow network probe.
    with ThreadPoolExecutor(max_workers=5) as executor:
        futures = {executor.submit(task[1]): task[0] for task in tasks}

        # Report each task as it finishes, in completion order.
        for future in as_completed(futures):
            task_name = futures[future]
            completed_count += 1
            try:
                result = future.result()
                if result:
                    logging.info(f"✅ [{task_name}] Completed successfully ({completed_count}/{total_tasks})")
                    print(f"✅ [{task_name}] Completed successfully ({completed_count}/{total_tasks})")
                else:
                    logging.warning(f"⚠️ [{task_name}] Completed with warnings ({completed_count}/{total_tasks})")
                    print(f"⚠️ [{task_name}] Completed with warnings ({completed_count}/{total_tasks})")
            except Exception as e:
                logging.error(f"❌ [{task_name}] Failed: {e} ({completed_count}/{total_tasks})")
                print(f"❌ [{task_name}] Failed: {e} ({completed_count}/{total_tasks})")

    elapsed_time = time.time() - start_time

    # Print summary
    logging.info("")
    logging.info("=" * 70)
    logging.info("INITIALIZATION SUMMARY")
    logging.info("=" * 70)
    print("")
    print("=" * 70)
    print("INITIALIZATION SUMMARY")
    print("=" * 70)

    all_ready = True
    critical_ready = True

    # Walk the shared status table and render one line per component.
    for component, status in init_status.items():
        status_icon = "✓" if status['ready'] else "✗"
        error_info = f" - {status['error']}" if status['error'] else ""
        status_msg = f"{status_icon} {component.upper()}: {'READY' if status['ready'] else 'FAILED'}{error_info}"
        logging.info(status_msg)
        print(status_msg)

        # Critical components (must be ready)
        if component in ['json_memory', 'sentiment_analyzer', 'gemini_api']:
            if not status['ready']:
                critical_ready = False

        if not status['ready']:
            all_ready = False

    logging.info("")
    logging.info(f"⏱️ Total initialization time: {elapsed_time:.2f} seconds")
    logging.info("")
    print("")
    print(f"⏱️ Total initialization time: {elapsed_time:.2f} seconds")
    print("")

    # Check for NumPy compatibility issues reported by any component,
    # recognized by error-message sniffing.
    numpy_issue = False
    for component, status in init_status.items():
        if status.get('error') and ('np.float_' in str(status['error']) or 'NumPy 2' in str(status['error']) or '_ARRAY_API' in str(status['error'])):
            numpy_issue = True
            break

    if numpy_issue:
        logging.error("")
        logging.error("=" * 70)
        # NOTE(review): "NUM PY" contains a stray space - confirm intended.
        logging.error("NUM PY COMPATIBILITY ISSUE DETECTED")
        logging.error("=" * 70)
        logging.error("Some components failed due to NumPy 2.0 incompatibility.")
        logging.error("")
        logging.error("TO FIX:")
        logging.error("  1. Run: python fix_numpy.py")
        logging.error("  2. Or: pip install 'numpy<2.0.0'")
        logging.error("  3. Then restart the application")
        logging.error("=" * 70)
        logging.error("")

    # Determine final status
    if critical_ready:
        if all_ready:
            logging.info("✅ ALL COMPONENTS INITIALIZED SUCCESSFULLY")
            logging.info("🎉 Galatea AI is ready to use!")
            print("✅ ALL COMPONENTS INITIALIZED SUCCESSFULLY")
            print("🎉 Galatea AI is ready to use!")
            return True
        else:
            logging.info("⚠️ CRITICAL COMPONENTS READY (some optional components failed)")
            if numpy_issue:
                logging.warning("⚠️ Some failures due to NumPy compatibility - fix NumPy for full functionality")
            logging.info("✅ Galatea AI is ready to use (with limited features)")
            print("⚠️ CRITICAL COMPONENTS READY (some optional components failed)")
            print("✅ Galatea AI is ready to use (with limited features)")
            return True
    else:
        logging.error("❌ CRITICAL COMPONENTS FAILED")
        if numpy_issue:
            logging.error("⚠️ Failures likely due to NumPy 2.0 - run 'python fix_numpy.py' to fix")
        logging.error("⚠️ Galatea AI may not function properly")
        print("❌ CRITICAL COMPONENTS FAILED")
        print("⚠️ Galatea AI may not function properly")
        return False
387
+
388
if __name__ == "__main__":
    try:
        # Exit code 0 only when the critical components initialized.
        success = run_initialization()
        sys.exit(0 if success else 1)
    except KeyboardInterrupt:
        logging.info("\n⚠️ Initialization interrupted by user")
        sys.exit(1)
    except Exception as e:
        logging.error(f"\n❌ Fatal error during initialization: {e}")
        import traceback
        traceback.print_exc()
        sys.exit(1)
400
+
llm_wrapper.py ADDED
@@ -0,0 +1,240 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Custom LLM Wrapper - Direct API calls using requests (no LiteLLM)"""
import os
import sys
import logging
import requests

# Add current directory to path so the sibling `config` module imports
# cleanly even when this file is loaded from another working directory.
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
from config import MODEL_CONFIG
10
+
11
+ class LLMWrapper:
12
+ """Custom LLM wrapper for Gemini and Inflection AI using direct API calls"""
13
+
14
+ def __init__(self, gemini_model=None, inflection_model=None, config=None):
15
+ """
16
+ Initialize LLM Wrapper with models and configuration
17
+
18
+ Args:
19
+ gemini_model: Gemini model name (e.g., 'gemini-2.0-flash-exp')
20
+ inflection_model: Inflection AI model name (e.g., 'Pi-3.1')
21
+ config: Configuration dict (optional, will load from MODEL_CONFIG if not provided)
22
+ """
23
+ self.config = config or MODEL_CONFIG or {}
24
+ self.gemini_api_key = os.getenv("GEMINI_API_KEY")
25
+ self.inflection_ai_api_key = os.getenv("INFLECTION_AI_API_KEY")
26
+
27
+ # Set models from parameters or config
28
+ if gemini_model:
29
+ self.gemini_model = gemini_model
30
+ else:
31
+ gemini_config = self.config.get('gemini', {}) if self.config else {}
32
+ self.gemini_model = gemini_config.get('model', 'gemini-2.0-flash-exp')
33
+
34
+ if inflection_model:
35
+ self.inflection_model = inflection_model
36
+ else:
37
+ inflection_config = self.config.get('inflection_ai', {}) if self.config else {}
38
+ self.inflection_model = inflection_config.get('model', 'Pi-3.1')
39
+
40
+ # Remove 'gemini/' prefix if present
41
+ if self.gemini_model.startswith('gemini/'):
42
+ self.gemini_model = self.gemini_model.replace('gemini/', '')
43
+
44
+ logging.info(f"[LLMWrapper] Initialized with Gemini model: {self.gemini_model}, Inflection model: {self.inflection_model}")
45
+
46
+ def call_gemini(self, messages, temperature=0.7, max_tokens=1024):
47
+ """
48
+ Call Gemini API directly using requests
49
+
50
+ Args:
51
+ messages: List of message dicts with 'role' and 'content'
52
+ temperature: Temperature for generation
53
+ max_tokens: Maximum tokens to generate
54
+
55
+ Returns:
56
+ Response text or None if failed
57
+ """
58
+ if not self.gemini_api_key:
59
+ logging.error("[LLMWrapper] GEMINI_API_KEY not found")
60
+ return None
61
+
62
+ # Use the model set during initialization
63
+ model = self.gemini_model
64
+
65
+ # Gemini API endpoint
66
+ url = f"https://generativelanguage.googleapis.com/v1beta/models/{model}:generateContent"
67
+
68
+ headers = {
69
+ "Content-Type": "application/json",
70
+ "X-goog-api-key": self.gemini_api_key
71
+ }
72
+
73
+ # Convert messages to Gemini format
74
+ contents = []
75
+ system_instruction = None
76
+
77
+ for msg in messages:
78
+ role = msg.get('role', 'user')
79
+ content = msg.get('content', '')
80
+
81
+ if role == 'system':
82
+ system_instruction = content
83
+ elif role == 'user':
84
+ contents.append({
85
+ "role": "user",
86
+ "parts": [{"text": content}]
87
+ })
88
+ elif role == 'assistant':
89
+ contents.append({
90
+ "role": "model",
91
+ "parts": [{"text": content}]
92
+ })
93
+
94
+ # Build request payload
95
+ payload = {
96
+ "contents": contents,
97
+ "generationConfig": {
98
+ "temperature": temperature,
99
+ "maxOutputTokens": max_tokens
100
+ }
101
+ }
102
+
103
+ # Add system instruction if present
104
+ if system_instruction:
105
+ payload["systemInstruction"] = {
106
+ "parts": [{"text": system_instruction}]
107
+ }
108
+
109
+ try:
110
+ logging.info(f"[LLMWrapper] Calling Gemini API: {model}")
111
+ response = requests.post(url, headers=headers, json=payload, timeout=30)
112
+
113
+ if response.status_code == 200:
114
+ result = response.json()
115
+
116
+ # Extract text from Gemini response
117
+ if 'candidates' in result and len(result['candidates']) > 0:
118
+ candidate = result['candidates'][0]
119
+ if 'content' in candidate and 'parts' in candidate['content']:
120
+ parts = candidate['content']['parts']
121
+ if len(parts) > 0 and 'text' in parts[0]:
122
+ text = parts[0]['text']
123
+ logging.info("[LLMWrapper] ✓ Gemini response received")
124
+ return text.strip()
125
+
126
+ logging.error(f"[LLMWrapper] Unexpected Gemini response format: {result}")
127
+ return None
128
+ else:
129
+ logging.error(f"[LLMWrapper] Gemini API returned status {response.status_code}: {response.text}")
130
+ return None
131
+
132
+ except Exception as e:
133
+ logging.error(f"[LLMWrapper] Error calling Gemini API: {e}")
134
+ return None
135
+
136
def call_inflection_ai(self, context_parts):
    """
    Call the Inflection AI inference API directly using requests.

    Args:
        context_parts: List of context dicts with 'text' and 'type' keys,
            in the format the Inflection AI /inference endpoint expects.

    Returns:
        The response text (stripped) on success, or None on any failure.
        All failures are logged; no exception propagates to the caller.
    """
    if not self.inflection_ai_api_key:
        logging.error("[LLMWrapper] INFLECTION_AI_API_KEY not found")
        return None

    # Use the model set during initialization
    model_config = self.inflection_model

    # Endpoint is configurable; fall back to the public production URL.
    inflection_config = self.config.get('inflection_ai', {}) if self.config else {}
    url = inflection_config.get('api_endpoint', 'https://api.inflection.ai/external/api/inference')

    headers = {
        "Authorization": f"Bearer {self.inflection_ai_api_key}",
        "Content-Type": "application/json"
    }

    data = {
        "context": context_parts,
        "config": model_config
    }

    try:
        logging.info(f"[LLMWrapper] Calling Inflection AI API: {model_config}")
        logging.debug(f"[LLMWrapper] Request URL: {url}")
        logging.debug(f"[LLMWrapper] Request data: {data}")
        response = requests.post(url, headers=headers, json=data, timeout=30)

        logging.info(f"[LLMWrapper] Response status: {response.status_code}")

        if response.status_code != 200:
            # Non-200: log as much detail as possible, then give up.
            logging.error(f"[LLMWrapper] Inflection AI API returned status {response.status_code}")
            try:
                logging.error(f"[LLMWrapper] Error details: {response.json()}")
            except ValueError:
                # Body was not JSON (fixed: was a bare `except`, which also
                # swallowed KeyboardInterrupt/SystemExit).
                logging.error(f"[LLMWrapper] Error response text: {response.text[:500]}")
            return None

        try:
            result = response.json()
        except ValueError as json_error:
            # response.json() raises a ValueError subclass on malformed JSON.
            logging.error(f"[LLMWrapper] Failed to parse JSON response: {json_error}")
            logging.error(f"[LLMWrapper] Raw response text: {response.text[:500]}")
            return None

        logging.debug(f"[LLMWrapper] Response JSON: {result}")
        logging.info(f"[LLMWrapper] Response type: {type(result)}")

        text = self._extract_response_text(result)

        if text and isinstance(text, str) and text.strip():
            logging.info(f"[LLMWrapper] ✓ Inflection AI response received: {text[:100]}...")
            return text.strip()

        logging.error(f"[LLMWrapper] No valid text found in response. Text value: {text}, Type: {type(text)}")
        logging.error(f"[LLMWrapper] Full response: {result}")
        return None

    except Exception as e:
        logging.error(f"[LLMWrapper] Error calling Inflection AI API: {e}")
        return None

def _extract_response_text(self, result):
    """
    Pull the response text out of an Inflection AI result payload.

    The API returns {"created": ..., "text": "...", "tool_calls": [],
    "reasoning_content": null}, so 'text' is checked first; older field
    names are kept as fallbacks. Returns the field value (may be None —
    fixed: the old code sliced it for a debug preview and crashed with
    TypeError when the field held None), a string fallback, or None.
    """
    if isinstance(result, str):
        logging.debug("[LLMWrapper] Response is a string")
        return result
    if not isinstance(result, dict):
        logging.warning(f"[LLMWrapper] Unexpected response type: {type(result)}")
        return str(result)

    # Prioritize 'text' as that's what the API actually returns.
    for field in ('text', 'output', 'response', 'message'):
        if field in result:
            logging.debug(f"[LLMWrapper] Found text in '{field}' field")
            return result[field]

    # No known field: fall back to the first non-empty string value.
    logging.warning("[LLMWrapper] No standard text field found, searching for string values...")
    for key, value in result.items():
        if isinstance(value, str) and value.strip():
            logging.debug(f"[LLMWrapper] Found text in '{key}' field")
            return value

    logging.error(f"[LLMWrapper] No text found in response dict. Keys: {list(result.keys())}")
    return str(result)
240
+
models.yaml ADDED
@@ -0,0 +1,65 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Galatea AI Model Configuration
2
+ # This file contains all model settings and hyperparameters
3
+
4
+ # Gemini Agent Configuration (Thinking/Analysis)
5
+ gemini:
6
+ # Single model to use for thinking/analysis
7
+ model: "gemini-2.0-flash-exp"
8
+
9
+ # Hyperparameters
10
+ temperature: 0.5 # Lower temperature for more focused thinking
11
+ max_tokens: 200
12
+
13
+ # API endpoint (automatically constructed)
14
+ api_endpoint: "https://generativelanguage.googleapis.com/v1beta/models"
15
+
16
+ # Pi/Phi Agent Configuration (Response Generation)
17
+ inflection_ai:
18
+ # Single model to use for response generation
19
+ model: "Pi-3.1"
20
+
21
+ # API endpoint
22
+ api_endpoint: "https://api.inflection.ai/external/api/inference"
23
+
24
+ # Hyperparameters (if supported by API)
25
+ # Note: Inflection AI may not support all these parameters
26
+ temperature: 0.8
27
+ max_tokens: 300
28
+
29
+ # Sentiment Analysis Configuration
30
+ sentiment:
31
+ # Primary model (Hugging Face)
32
+ primary_model: "distilbert/distilbert-base-uncased-finetuned-sst-2-english"
33
+ # Fallback: NLTK VADER (automatic if primary fails)
34
+
35
+ # Memory System Configuration (JSON only)
36
+ memory:
37
+ # JSON memory settings
38
+ json_path: "./memory.json"
39
+
40
+ # Memory retrieval settings
41
+ retrieval:
42
+ max_retrieved_memories: 5
43
+
44
+ # Conversation Configuration
45
+ conversation:
46
+ max_history_length: 20 # Number of messages to keep (user + assistant pairs)
47
+ max_response_length: 50 # Target response length in words
48
+
49
+ # System Prompt Configuration
50
+ system_prompts:
51
+ galatea_identity: "You are Galatea, an AI assistant with emotional awareness and memory."
52
+ response_style: "Respond in character, keeping responses concise (under 50 words)."
53
+
54
+ # Quantum Randomness Configuration
55
+ quantum:
56
+ api_endpoint: "https://api.quantumnumbers.anu.edu.au"
57
+ default_length: 128
58
+ default_type: "uint8"
59
+
60
+ # Usage settings
61
+ use_for_temperature: true
62
+ use_for_emotion_decay: true
63
+ use_for_learning_rate: true
64
+ use_for_curiosity: true
65
+
requirements.txt CHANGED
@@ -1,7 +1,9 @@
1
  flask==3.0.0
2
- transformers==4.36.0
3
  nltk==3.8.1
4
- google-generativeai==0.8.3
5
  python-dotenv==1.0.0
6
  azure-ai-textanalytics==5.3.0
7
- torch==2.1.0
 
 
 
 
1
  flask==3.0.0
2
+ transformers>=4.50.0
3
  nltk==3.8.1
 
4
  python-dotenv==1.0.0
5
  azure-ai-textanalytics==5.3.0
6
+ torch>=2.2.0
7
+ numpy<2.0.0
8
+ requests==2.31.0
9
+ pyyaml==6.0.1
systems/__init__.py ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ """Systems package"""
2
+ from .memory_system import MemorySystem
3
+
4
+ __all__ = ['MemorySystem']
5
+
systems/memory_system.py ADDED
@@ -0,0 +1,111 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Memory system using JSON for simple key-value storage"""
2
+ import os
3
+ import json
4
+ import logging
5
+ import sys
6
+ from datetime import datetime
7
+
8
+ # Add parent directory to path for imports
9
+ sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
10
+ from config import MODEL_CONFIG
11
+
12
class MemorySystem:
    """JSON-backed memory store offering keyword retrieval and key-value access."""

    def __init__(self, json_db_path=None, config=None):
        # Fall back to the module-level MODEL_CONFIG when no config is given.
        self.config = config or MODEL_CONFIG or {}
        mem_cfg = self.config.get('memory', {}) if self.config else {}
        # An explicit path argument wins over the configured/default location.
        self.json_db_path = json_db_path or mem_cfg.get('json_path', './memory.json')
        self.json_memory = {}
        self.load_json_memory()

    def is_ready(self):
        """Report whether the in-memory store has been initialized."""
        return self.json_memory is not None

    def load_json_memory(self):
        """Populate self.json_memory from disk, starting fresh on any failure."""
        try:
            if not os.path.exists(self.json_db_path):
                self.json_memory = {}
                logging.info("Created new JSON memory database")
                return
            with open(self.json_db_path, 'r', encoding='utf-8') as fh:
                self.json_memory = json.load(fh)
            logging.info(f"Loaded JSON memory with {len(self.json_memory)} entries")
        except Exception as e:
            logging.error(f"Error loading JSON memory: {e}")
            self.json_memory = {}

    def save_json_memory(self):
        """Persist the current memory dict to disk as pretty-printed JSON."""
        try:
            with open(self.json_db_path, 'w', encoding='utf-8') as fh:
                json.dump(self.json_memory, fh, indent=2, ensure_ascii=False)
        except Exception as e:
            logging.error(f"Error saving JSON memory: {e}")

    def store_memory(self, text, metadata=None, memory_type="conversation"):
        """Record a timestamped memory entry and persist it immediately."""
        stamp = datetime.now().isoformat()
        memory_id = f"{memory_type}_{stamp}"
        self.json_memory[memory_id] = {
            "text": text,
            "metadata": metadata or {},
            "type": memory_type,
            "timestamp": stamp
        }
        self.save_json_memory()
        logging.info(f"Stored memory in JSON: {memory_id[:20]}...")

    def retrieve_relevant_memories(self, query, n_results=5):
        """Return up to n_results entries ranked by word overlap with query."""
        if not self.json_memory:
            return []

        query_words = set(query.lower().split())
        matches = []
        for entry in self.json_memory.values():
            entry_words = set(entry.get("text", "").lower().split())
            shared = len(query_words & entry_words)
            if shared == 0:
                continue
            # Lower distance = more relevant; normalize by the larger word set.
            denom = max(len(query_words), len(entry_words))
            matches.append({
                "text": entry["text"],
                "metadata": entry.get("metadata", {}),
                "distance": 1.0 - (shared / denom)
            })

        matches.sort(key=lambda m: m.get("distance", 1.0))
        matches = matches[:n_results]
        logging.info(f"Retrieved {len(matches)} relevant memories from JSON DB")
        return matches

    def get_json_memory(self, key):
        """Fetch a stored entry dict by exact key, or None if absent."""
        return self.json_memory.get(key)

    def set_json_memory(self, key, value, metadata=None):
        """Store an arbitrary value under key and persist immediately."""
        self.json_memory[key] = {
            "value": value,
            "metadata": metadata or {},
            "timestamp": datetime.now().isoformat()
        }
        self.save_json_memory()

    def get_all_json_memories(self):
        """Return a shallow copy of every stored entry."""
        return self.json_memory.copy()
111
+