Your Name committed on
Commit
25ac68e
·
1 Parent(s): 46c32a9

Remove unused import_random.py file (code has been refactored into separate modules)

Browse files
Files changed (1) hide show
  1. import_random.py +0 -1581
import_random.py DELETED
@@ -1,1581 +0,0 @@
1
- import random
2
- import nltk
3
- import os
4
- import json
5
- import yaml
6
- from dotenv import load_dotenv
7
- import logging
8
- import requests
9
- from litellm import completion
10
- from datetime import datetime
11
-
12
# Configure root logging: INFO level, timestamped "time - LEVEL - message" records.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

# Load environment variables (API keys etc.) from a local .env file into os.environ.
load_dotenv()
17
-
18
# Load model configuration from YAML
def load_model_config(config_path="models.yaml"):
    """Read the model configuration YAML file.

    Args:
        config_path: Path to the YAML configuration file.

    Returns:
        The parsed configuration (as returned by ``yaml.safe_load``), or
        ``None`` when the file is missing or unreadable — callers treat
        ``None`` as "use built-in defaults".
    """
    # Missing file is an expected condition, not an error.
    if not os.path.exists(config_path):
        logging.warning(f"⚠ Model configuration file {config_path} not found, using defaults")
        return None
    try:
        with open(config_path, 'r', encoding='utf-8') as handle:
            loaded = yaml.safe_load(handle)
        logging.info(f"✓ Model configuration loaded from {config_path}")
        return loaded
    except Exception as exc:
        # Never let a bad config file crash startup; fall back to defaults.
        logging.error(f"✗ Error loading model configuration: {exc}")
        return None
33
-
34
# Load configuration once at import time; None when models.yaml is absent or unreadable.
MODEL_CONFIG = load_model_config()
36
-
37
# Ensure the NLTK "punkt" tokenizer data is available (only needs to be done once).
# The original code performed a second, unconditional nltk.download('punkt', quiet=True)
# immediately after the guarded one — redundant work on every import. A single guarded
# download is sufficient and leaves punkt installed in both cases.
try:
    nltk.data.find("tokenizers/punkt")
except LookupError:
    # Not present locally: fetch it quietly (no progress output in logs).
    nltk.download('punkt', quiet=True)
45
-
46
# Import transformers with error handling: the Hugging Face pipeline is optional.
# When it is missing, sentiment analysis falls back to NLTK (see SentimentAgent).
try:
    from transformers import pipeline
    transformers_available = True
except ImportError:
    logging.warning("Transformers library not available. Using fallback sentiment analysis.")
    transformers_available = False

from enum import Enum  # NOTE(review): Enum appears unused in the visible code — confirm before removing.
55
-
56
- # ChromaDB removed - using JSON-only memory
57
-
58
- # --- Memory System (JSON only) ---
59
class MemorySystem:
    """Memory system using JSON for simple key-value storage.

    Persists a single dict to ``json_db_path``; retrieval is a naive
    keyword-overlap search over stored entry text.
    """

    def __init__(self, json_db_path=None, config=None):
        # Fall back to the module-level config, then to an empty dict.
        self.config = config or MODEL_CONFIG or {}
        mem_cfg = self.config.get('memory', {}) if self.config else {}
        # An explicit path argument wins over the configured one.
        self.json_db_path = json_db_path or mem_cfg.get('json_path', './memory.json')
        self.json_memory = {}
        self.load_json_memory()

    def is_ready(self):
        """Check if memory system is fully initialized."""
        return self.json_memory is not None

    def load_json_memory(self):
        """Load the JSON memory database from disk (or start empty)."""
        try:
            if not os.path.exists(self.json_db_path):
                self.json_memory = {}
                logging.info("Created new JSON memory database")
                return
            with open(self.json_db_path, 'r', encoding='utf-8') as handle:
                self.json_memory = json.load(handle)
            logging.info(f"Loaded JSON memory with {len(self.json_memory)} entries")
        except Exception as e:
            # A corrupt file must not prevent startup; begin with an empty store.
            logging.error(f"Error loading JSON memory: {e}")
            self.json_memory = {}

    def save_json_memory(self):
        """Persist the in-memory store to disk as pretty-printed JSON."""
        try:
            with open(self.json_db_path, 'w', encoding='utf-8') as handle:
                json.dump(self.json_memory, handle, indent=2, ensure_ascii=False)
        except Exception as e:
            logging.error(f"Error saving JSON memory: {e}")

    def store_memory(self, text, metadata=None, memory_type="conversation"):
        """Store a memory entry keyed by type and ISO timestamp."""
        stamp = datetime.now().isoformat()
        memory_id = f"{memory_type}_{stamp}"
        self.json_memory[memory_id] = {
            "text": text,
            "metadata": metadata or {},
            "type": memory_type,
            "timestamp": stamp,
        }
        self.save_json_memory()
        logging.info(f"Stored memory in JSON: {memory_id[:20]}...")

    def retrieve_relevant_memories(self, query, n_results=5):
        """Retrieve up to ``n_results`` memories by keyword overlap.

        ``distance`` is 1 - overlap ratio, so lower means more relevant.
        """
        relevant_memories = []
        if self.json_memory:
            query_words = set(query.lower().split())
            for entry in self.json_memory.values():
                text_words = set(entry.get("text", "").lower().split())
                overlap = len(query_words & text_words)
                if overlap:
                    relevant_memories.append({
                        "text": entry["text"],
                        "metadata": entry.get("metadata", {}),
                        "distance": 1.0 - (overlap / max(len(query_words), len(text_words))),
                    })
            # Most relevant (smallest distance) first, then truncate.
            relevant_memories.sort(key=lambda item: item.get("distance", 1.0))
            relevant_memories = relevant_memories[:n_results]
            logging.info(f"Retrieved {len(relevant_memories)} relevant memories from JSON DB")
        return relevant_memories

    def get_json_memory(self, key):
        """Get a specific memory by key from the JSON database."""
        return self.json_memory.get(key)

    def set_json_memory(self, key, value, metadata=None):
        """Set a key-value memory in the JSON database and persist it."""
        self.json_memory[key] = {
            "value": value,
            "metadata": metadata or {},
            "timestamp": datetime.now().isoformat(),
        }
        self.save_json_memory()

    def get_all_json_memories(self):
        """Return a shallow copy of every stored memory."""
        return self.json_memory.copy()
158
-
159
- # --- Agent Classes ---
160
class MemoryAgent:
    """Agent responsible for memory retrieval and storage.

    Thin wrapper around a MemorySystem that adds logging and swallows
    storage/retrieval errors (memory failures must not break the chat loop).
    """

    def __init__(self, memory_system, config=None):
        self.memory_system = memory_system
        self.config = config or MODEL_CONFIG or {}

    def retrieve_memories(self, query, n_results=None):
        """Retrieve relevant memories for a query; [] on error or no match."""
        # Explicit argument wins; otherwise read the configured cap (default 5).
        if n_results is not None:
            limit = n_results
        elif self.config:
            limit = self.config.get('memory', {}).get('retrieval', {}).get('max_retrieved_memories', 5)
        else:
            limit = 5
        try:
            found = self.memory_system.retrieve_relevant_memories(query, n_results=limit)
            if found:
                logging.info(f"[MemoryAgent] Retrieved {len(found)} relevant memories")
                return found
        except Exception as e:
            logging.error(f"[MemoryAgent] Error retrieving memories: {e}")
        return []

    def store_memory(self, text, metadata=None, memory_type="conversation"):
        """Store a memory, logging (not raising) on failure."""
        try:
            self.memory_system.store_memory(text, metadata, memory_type)
            logging.info(f"[MemoryAgent] Stored memory: {memory_type}")
        except Exception as e:
            logging.error(f"[MemoryAgent] Error storing memory: {e}")

    def smoke_test(self):
        """Store and retrieve one test entry to verify the memory path works."""
        try:
            self.store_memory("Smoke test memory entry", {"test": True}, "test")
            result = self.retrieve_memories("smoke test", n_results=1)
            if result is None:
                logging.warning("[MemoryAgent] ⚠ Smoke test failed - retrieve returned None")
                return False
            logging.info("[MemoryAgent] ✓ Smoke test passed")
            return True
        except Exception as e:
            logging.error(f"[MemoryAgent] ✗ Smoke test failed: {e}")
            return False

    def is_ready(self):
        """Ready only when the underlying memory system is."""
        return bool(self.memory_system) and self.memory_system.is_ready()
213
-
214
class GeminiThinkingAgent:
    """Agent responsible for thinking and analysis using Gemini.

    Produces a short internal analysis of the conversation (via litellm's
    ``completion``) that the response agent can fold into its prompt.
    """

    def __init__(self, config=None):
        # Fall back to the module-level MODEL_CONFIG, then to an empty dict.
        self.config = config or MODEL_CONFIG or {}
        self.gemini_available = False
        self._initialize()

    def _initialize(self):
        """Initialize Gemini API availability"""
        gemini_key = os.getenv("GEMINI_API_KEY")
        if gemini_key:
            # Re-export so litellm's completion() can read the key from the environment.
            os.environ["GEMINI_API_KEY"] = gemini_key
            self.gemini_available = True
            logging.info("[GeminiThinkingAgent] ✓ Initialized and ready")
        else:
            logging.warning("[GeminiThinkingAgent] ✗ GEMINI_API_KEY not found")

    def think(self, user_input, emotional_state, conversation_history, retrieved_memories=None):
        """Think about and analyze the conversation context.

        Returns Gemini's short analysis string, or None when Gemini is
        unavailable, every configured model fails, or an error occurs.
        """
        if not self.gemini_available:
            logging.warning("[GeminiThinkingAgent] Not available")
            return None

        try:
            # Build thinking prompt with conversation context
            emotions_text = ", ".join([f"{emotion}: {value:.2f}" for emotion, value in emotional_state.items()])

            # Prepare conversation context for thinking
            context_summary = ""
            if conversation_history:
                recent_history = conversation_history[-6:]  # Last 3 exchanges
                context_summary = "\nRecent conversation:\n"
                for msg in recent_history:
                    role = "User" if msg["role"] == "user" else "Galatea"
                    context_summary += f"{role}: {msg['content']}\n"

            # Add retrieved memories if available
            memory_context = ""
            if retrieved_memories and len(retrieved_memories) > 0:
                memory_context = "\n\nRelevant memories from past conversations:\n"
                for i, memory in enumerate(retrieved_memories[:3], 1):  # Top 3 most relevant
                    memory_context += f"{i}. {memory['text'][:200]}...\n"

            thinking_prompt = f"""You are the internal reasoning system for Galatea, an AI assistant.

Current emotional state: {emotions_text}
{context_summary}
{memory_context}
Current user message: "{user_input}"

Analyze this conversation and provide:
1. Key insights about what the user is asking or discussing
2. Important context from the conversation history and retrieved memories
3. How Galatea should respond emotionally and contextually
4. Any important details to remember or reference

Keep your analysis concise (2-3 sentences). Focus on what matters for crafting an appropriate response."""

            messages = [
                {"role": "system", "content": "You are an internal reasoning system. Analyze conversations and provide insights."},
                {"role": "user", "content": thinking_prompt}
            ]

            logging.info("[GeminiThinkingAgent] Processing thinking request...")

            # Get Gemini models from config
            gemini_config = self.config.get('gemini', {}) if self.config else {}
            gemini_models = gemini_config.get('thinking_models', [
                "gemini/gemini-2.0-flash-exp",
                "gemini/gemini-2.0-flash",
                "gemini/gemini-1.5-flash-latest",
                "gemini/gemini-1.5-flash"
            ])

            # Get thinking settings from config
            thinking_config = gemini_config.get('thinking', {})
            thinking_temp = thinking_config.get('temperature', 0.5)
            thinking_max_tokens = thinking_config.get('max_tokens', 200)

            # Fallback chain: try each configured model in order until one responds.
            for model in gemini_models:
                try:
                    response = completion(
                        model=model,
                        messages=messages,
                        temperature=thinking_temp,
                        max_tokens=thinking_max_tokens
                    )

                    if response and 'choices' in response and len(response['choices']) > 0:
                        thinking_result = response['choices'][0]['message']['content']
                        logging.info("[GeminiThinkingAgent] ✓ Thinking completed")
                        return thinking_result.strip()
                except Exception as e:
                    logging.warning(f"[GeminiThinkingAgent] Model {model} failed: {e}, trying next...")
                    continue

            logging.error("[GeminiThinkingAgent] All models failed")
            return None

        except Exception as e:
            logging.error(f"[GeminiThinkingAgent] Error: {e}")
            return None

    def smoke_test(self):
        """Perform smoke test to verify Gemini is working"""
        if not self.gemini_available:
            return False

        try:
            test_result = self.think(
                "test",
                {"joy": 0.5, "sadness": 0.3, "anger": 0.1, "fear": 0.1, "curiosity": 0.5},
                [],
                retrieved_memories=None
            )
            if test_result and len(test_result) > 0:
                logging.info("[GeminiThinkingAgent] ✓ Smoke test passed")
                return True
            else:
                logging.warning("[GeminiThinkingAgent] ⚠ Smoke test failed - no result")
                return False
        except Exception as e:
            logging.error(f"[GeminiThinkingAgent] ✗ Smoke test failed: {e}")
            return False

    def is_ready(self):
        """Check if agent is ready"""
        return self.gemini_available
343
-
344
class PiResponseAgent:
    """Agent responsible for generating human-facing responses using Pi-3.1.

    Sends an HTTP request to the Inflection AI inference endpoint with a
    context list combining system instructions, history, and the user input.
    """

    def __init__(self, config=None):
        # Fall back to the module-level MODEL_CONFIG, then to an empty dict.
        self.config = config or MODEL_CONFIG or {}
        self.inflection_ai_available = False
        self.inflection_ai_api_key = None
        self._initialize()

    def _initialize(self):
        """Initialize Inflection AI API availability"""
        inflection_key = os.getenv("INFLECTION_AI_API_KEY")
        if inflection_key:
            self.inflection_ai_api_key = inflection_key
            self.inflection_ai_available = True
            logging.info("[PiResponseAgent] ✓ Initialized and ready")
        else:
            logging.warning("[PiResponseAgent] ✗ INFLECTION_AI_API_KEY not found")

    def respond(self, user_input, emotional_state, thinking_context=None, conversation_history=None, retrieved_memories=None):
        """Generate response using Pi-3.1 with thinking context and emotional state.

        Returns the response text, or None when the API is unavailable,
        returns a non-200 status, or the request raises.
        """
        if not self.inflection_ai_available:
            logging.warning("[PiResponseAgent] Not available")
            return None

        try:
            # Create context with emotional state
            emotions_text = ", ".join([f"{emotion}: {value:.2f}" for emotion, value in emotional_state.items()])

            # Get endpoint and config from YAML
            inflection_config = self.config.get('inflection_ai', {}) if self.config else {}
            url = inflection_config.get('api_endpoint', 'https://api.inflection.ai/external/api/inference')
            model_config = inflection_config.get('model_config', 'Pi-3.1')

            headers = {
                "Authorization": f"Bearer {self.inflection_ai_api_key}",
                "Content-Type": "application/json"
            }

            # Build comprehensive context with thinking insights, conversation history, and retrieved memories
            context_parts = []

            # Base system context
            base_context = f"You are Galatea, an AI assistant with the following emotional state: {emotions_text}. Respond in character as Galatea. Keep your response concise (under 50 words) and reflect your emotional state in your tone."

            # Add thinking context from Gemini if available
            if thinking_context:
                base_context += f"\n\nInternal analysis: {thinking_context}"

            # Add retrieved memories if available
            if retrieved_memories and len(retrieved_memories) > 0:
                memory_text = "\n\nRelevant context from past conversations:\n"
                for i, memory in enumerate(retrieved_memories[:3], 1):  # Top 3 most relevant
                    memory_text += f"{i}. {memory['text'][:150]}...\n"
                base_context += memory_text

            # Add conversation history context
            if conversation_history and len(conversation_history) > 0:
                recent_history = conversation_history[-4:]  # Last 2 exchanges
                history_text = "\n\nRecent conversation context:\n"
                for msg in recent_history:
                    role = "User" if msg["role"] == "user" else "You (Galatea)"
                    history_text += f"{role}: {msg['content']}\n"
                base_context += history_text

            context_parts.append({
                "text": base_context,
                "type": "System"
            })

            # Add conversation history as context messages
            if conversation_history and len(conversation_history) > 4:
                # Add older messages as context (but not the most recent ones we already included)
                for msg in conversation_history[-8:-4]:
                    context_parts.append({
                        "text": msg["content"],
                        "type": "Human" if msg["role"] == "user" else "Assistant"
                    })

            # Add current user input
            context_parts.append({
                "text": user_input,
                "type": "Human"
            })

            data = {
                "context": context_parts,
                "config": model_config
            }

            logging.info("[PiResponseAgent] Sending request to Pi-3.1 API...")
            response = requests.post(url, headers=headers, json=data, timeout=30)

            if response.status_code == 200:
                result = response.json()
                # Extract the response text from the API response
                # (payload shape varies, so probe the common field names in order).
                if isinstance(result, dict):
                    if 'output' in result:
                        text = result['output']
                    elif 'text' in result:
                        text = result['text']
                    elif 'response' in result:
                        text = result['response']
                    elif 'message' in result:
                        text = result['message']
                    else:
                        text = str(result)
                elif isinstance(result, str):
                    text = result
                else:
                    text = str(result)

                logging.info("[PiResponseAgent] ✓ Response received")
                return text.strip()
            else:
                logging.error(f"[PiResponseAgent] API returned status code {response.status_code}: {response.text}")
                return None

        except Exception as e:
            logging.error(f"[PiResponseAgent] Error: {e}")
            return None

    def smoke_test(self):
        """Perform smoke test to verify Pi-3.1 is working"""
        if not self.inflection_ai_available:
            return False

        try:
            test_result = self.respond(
                "Hello",
                {"joy": 0.5, "sadness": 0.3, "anger": 0.1, "fear": 0.1, "curiosity": 0.5},
                thinking_context="Test thinking context",
                conversation_history=[],
                retrieved_memories=None
            )
            if test_result and len(test_result) > 0:
                logging.info("[PiResponseAgent] ✓ Smoke test passed")
                return True
            else:
                logging.warning("[PiResponseAgent] ⚠ Smoke test failed - no result")
                return False
        except Exception as e:
            logging.error(f"[PiResponseAgent] ✗ Smoke test failed: {e}")
            return False

    def is_ready(self):
        """Check if agent is ready"""
        return self.inflection_ai_available
492
-
493
class EmotionalStateAgent:
    """Agent responsible for managing and updating emotional state.

    Tracks five emotions as a normalized weight vector and nudges them
    with sentiment scores; optional ANU quantum randomness perturbs the
    decay/learning parameters when an API key is configured.
    """

    def __init__(self, initial_state=None, config=None):
        self.config = config or MODEL_CONFIG or {}
        # Five base emotions, uniformly weighted unless a starting state is given.
        self.emotional_state = initial_state or {
            "joy": 0.2, "sadness": 0.2, "anger": 0.2, "fear": 0.2, "curiosity": 0.2
        }
        self.learning_rate = 0.05
        self.quantum_random_available = False
        self.quantum_api_key = None
        self._initialize_quantum()

    def _initialize_quantum(self):
        """Initialize quantum randomness availability from the environment."""
        key = os.getenv("ANU_QUANTUM_API_KEY")
        if not key:
            logging.warning("[EmotionalStateAgent] Quantum randomness unavailable")
            return
        self.quantum_api_key = key
        self.quantum_random_available = True
        logging.info("[EmotionalStateAgent] ✓ Quantum randomness available")

    def get_quantum_random_float(self, min_val=0.0, max_val=1.0):
        """Get a random float in [min_val, max_val], quantum-sourced if possible.

        Falls back to random.uniform when the quantum API is unavailable
        or the request fails.
        """
        if self.quantum_random_available:
            try:
                quantum_cfg = self.config.get('quantum', {}) if self.config else {}
                endpoint = quantum_cfg.get('api_endpoint', 'https://api.quantumnumbers.anu.edu.au')
                resp = requests.get(
                    endpoint,
                    headers={"x-api-key": self.quantum_api_key},
                    params={"length": 1, "type": "uint8"},
                    timeout=10,
                )
                if resp.status_code == 200:
                    payload = resp.json()
                    if payload.get('success') and 'data' in payload and len(payload['data']) > 0:
                        # Map the uint8 sample onto [min_val, max_val].
                        unit = payload['data'][0] / 255.0
                        return min_val + (max_val - min_val) * unit
            except Exception as e:
                logging.warning(f"[EmotionalStateAgent] Quantum API failed: {e}")
        # Pseudo-random fallback.
        return random.uniform(min_val, max_val)

    def update_with_sentiment(self, sentiment_score):
        """Decay all emotions, apply the sentiment nudge, and renormalize."""
        # Decay factor is fixed at 0.9 unless quantum noise perturbs it.
        decay_factor = (self.get_quantum_random_float(0.85, 0.95)
                        if self.quantum_random_available else 0.9)
        for name in self.emotional_state:
            decayed = self.emotional_state[name] * decay_factor
            self.emotional_state[name] = max(0.0, min(1.0, decayed))

        # Sentiment pushes joy up and sadness down (or vice versa).
        rate = (self.get_quantum_random_float(0.03, 0.07)
                if self.quantum_random_available else self.learning_rate)
        self.emotional_state["joy"] += sentiment_score * rate
        self.emotional_state["sadness"] -= sentiment_score * rate

        # Optional quantum jitter on curiosity for less predictable responses.
        if self.quantum_random_available:
            jitter = self.get_quantum_random_float(-0.05, 0.05)
            self.emotional_state["curiosity"] = max(
                0.0, min(1.0, self.emotional_state["curiosity"] + jitter))

        # Renormalize so the weights sum to 1 (uniform reset if degenerate).
        total = sum(self.emotional_state.values())
        for name in self.emotional_state:
            self.emotional_state[name] = (
                self.emotional_state[name] / total if total > 0 else 0.2)

        logging.info(f"[EmotionalStateAgent] Updated emotional state: {self.emotional_state}")
        return self.emotional_state

    def get_state(self):
        """Return a copy of the current emotional state."""
        return self.emotional_state.copy()

    def smoke_test(self):
        """Verify the random source and a state update both behave sanely."""
        try:
            if self.quantum_random_available:
                sample = self.get_quantum_random_float(0.0, 1.0)
                if not isinstance(sample, float) or sample < 0.0 or sample > 1.0:
                    logging.warning("[EmotionalStateAgent] ⚠ Smoke test failed - invalid quantum random")
                    return False

            snapshot = self.get_state().copy()
            updated = self.update_with_sentiment(0.5)
            if updated and isinstance(updated, dict):
                logging.info("[EmotionalStateAgent] ✓ Smoke test passed")
                return True
            logging.warning("[EmotionalStateAgent] ⚠ Smoke test failed - invalid state")
            return False
        except Exception as e:
            logging.error(f"[EmotionalStateAgent] ✗ Smoke test failed: {e}")
            return False

    def is_ready(self):
        """Check if agent is ready"""
        return True  # Emotional state is always ready
604
-
605
class AzureTextAnalyticsAgent:
    """Agent responsible for Azure Text Analytics sentiment analysis.

    Optional backend: stays disabled when the Azure SDK is not installed
    or the credentials are missing from the environment.
    """

    def __init__(self, config=None):
        self.config = config or MODEL_CONFIG or {}
        self.azure_available = False
        self.client = None
        self._initialize()

    def _initialize(self):
        """Initialize the Azure Text Analytics client if SDK + credentials exist."""
        try:
            from azure.ai.textanalytics import TextAnalyticsClient
            from azure.core.credentials import AzureKeyCredential
        except ImportError:
            logging.warning("[AzureTextAnalyticsAgent] ✗ Azure SDK not installed")
            self.azure_available = False
            return

        key = os.getenv("AZURE_TEXT_ANALYTICS_KEY")
        endpoint = os.getenv("AZURE_TEXT_ANALYTICS_ENDPOINT")
        if not (key and endpoint):
            logging.warning("[AzureTextAnalyticsAgent] ✗ Azure credentials not found")
            self.azure_available = False
            return

        try:
            self.client = TextAnalyticsClient(endpoint=endpoint, credential=AzureKeyCredential(key))
            self.azure_available = True
            logging.info("[AzureTextAnalyticsAgent] ✓ Initialized and ready")
        except Exception as e:
            logging.warning(f"[AzureTextAnalyticsAgent] Failed to create client: {e}")
            self.azure_available = False

    def analyze(self, text):
        """Analyze sentiment; signed confidence score, 0.0 neutral, None on failure."""
        if not (self.azure_available and self.client):
            return None
        try:
            doc = self.client.analyze_sentiment(documents=[text])[0]
            if doc.sentiment == 'positive':
                return doc.confidence_scores.positive
            if doc.sentiment == 'negative':
                # Negative sentiment maps to a negative score.
                return -doc.confidence_scores.negative
            return 0.0
        except Exception as e:
            logging.error(f"[AzureTextAnalyticsAgent] Error: {e}")
            return None

    def smoke_test(self):
        """Run one real analysis call to confirm the service is reachable."""
        if not self.azure_available:
            return False
        try:
            score = self.analyze("This is a test message for sentiment analysis.")
            if score is None:
                logging.warning("[AzureTextAnalyticsAgent] ⚠ Smoke test failed - analyze returned None")
                return False
            logging.info("[AzureTextAnalyticsAgent] ✓ Smoke test passed")
            return True
        except Exception as e:
            logging.error(f"[AzureTextAnalyticsAgent] ✗ Smoke test failed: {e}")
            return False

    def is_ready(self):
        """Check if agent is ready"""
        return self.azure_available
677
-
678
class SentimentAgent:
    """Agent responsible for sentiment analysis (uses Azure, Hugging Face, or NLTK fallback).

    Backend priority: Azure Text Analytics → Hugging Face pipeline →
    NLTK VADER. The agent is always "ready" because VADER is the last resort.
    """

    def __init__(self, config=None):
        self.config = config or MODEL_CONFIG or {}
        self.azure_agent = AzureTextAnalyticsAgent(config=self.config)
        self.sentiment_analyzer = None
        self.ready = False
        self._initialize()

    def _initialize(self):
        """Pick the best available sentiment backend."""
        # Preferred backend: Azure Text Analytics.
        if self.azure_agent.is_ready():
            self.ready = True
            logging.info("[SentimentAgent] Using Azure Text Analytics")
            return

        default_model = 'distilbert/distilbert-base-uncased-finetuned-sst-2-english'
        model_name = (self.config.get('sentiment', {}).get('primary_model', default_model)
                      if self.config else default_model)

        if not transformers_available:
            self.ready = True  # NLTK fallback is always available
            return
        try:
            logging.info("[SentimentAgent] Initializing Hugging Face sentiment analyzer...")
            self.sentiment_analyzer = pipeline("sentiment-analysis", model=model_name)
            logging.info("[SentimentAgent] ✓ Initialized successfully")
        except Exception as e:
            logging.warning(f"[SentimentAgent] Hugging Face model failed: {e}, using fallback")
            self.sentiment_analyzer = None
        self.ready = True

    def analyze(self, text):
        """Analyze sentiment of text (tries Azure, then Hugging Face, then NLTK)."""
        # Azure first, when configured.
        if self.azure_agent.is_ready():
            azure_score = self.azure_agent.analyze(text)
            if azure_score is not None:
                return azure_score

        if not self.sentiment_analyzer:
            return self._fallback_analyze(text)
        try:
            prediction = self.sentiment_analyzer(text)[0]
            label = prediction['label'].lower()
            confidence = prediction['score']
            if 'positive' in label:
                return confidence
            if 'negative' in label:
                return -confidence
            return 0.0
        except Exception as e:
            logging.error(f"[SentimentAgent] Error: {e}")
            return self._fallback_analyze(text)

    def _fallback_analyze(self, text):
        """Fallback sentiment analysis using NLTK VADER (compound in [-1, 1])."""
        try:
            from nltk.sentiment import SentimentIntensityAnalyzer
            return SentimentIntensityAnalyzer().polarity_scores(text)['compound']
        except Exception as e:
            logging.error(f"[SentimentAgent] Fallback failed: {e}")
            return 0.0

    def smoke_test(self):
        """Analyze one known-positive sentence to verify the pipeline works."""
        try:
            score = self.analyze("I am happy and excited!")
            if score is not None and isinstance(score, (int, float)):
                logging.info("[SentimentAgent] ✓ Smoke test passed")
                return True
            logging.warning("[SentimentAgent] ⚠ Smoke test failed - invalid result")
            return False
        except Exception as e:
            logging.error(f"[SentimentAgent] ✗ Smoke test failed: {e}")
            return False

    def is_ready(self):
        """Check if agent is ready"""
        return self.ready
768
-
769
- # --- 1. AI Core ---
770
- class GalateaAI:
771
- def __init__(self):
772
- # Load model configuration first
773
- self.config = MODEL_CONFIG or {}
774
-
775
- self.knowledge_base = {}
776
- self.response_model = "A generic response" #Place Holder for the ML model
777
-
778
- # Conversation history for context
779
- self.conversation_history = [] # List of {"role": "user"/"assistant", "content": "..."}
780
- # Get max history length from config or use default
781
- self.max_history_length = self.config.get('conversation', {}).get('max_history_length', 20)
782
-
783
- # Initialize memory system
784
- logging.info("Initializing memory system (JSON)...")
785
- try:
786
- self.memory_system = MemorySystem(config=self.config)
787
- self.memory_system_ready = self.memory_system.is_ready()
788
- if not self.memory_system_ready:
789
- raise Exception("Memory system failed to initialize")
790
- logging.info("✓ Memory system initialized")
791
- except Exception as e:
792
- logging.error(f"Failed to initialize memory system: {e}")
793
- self.memory_system_ready = False
794
- raise
795
-
796
- # Initialize agents
797
- logging.info("Initializing agents...")
798
- self.memory_agent = MemoryAgent(self.memory_system, config=self.config)
799
- self.gemini_agent = GeminiThinkingAgent(config=self.config)
800
- self.pi_agent = PiResponseAgent(config=self.config)
801
- self.emotional_agent = EmotionalStateAgent(config=self.config)
802
- self.sentiment_agent = SentimentAgent(config=self.config)
803
-
804
- # Track initialization status
805
- self.memory_system_ready = self.memory_agent.is_ready()
806
- self.sentiment_analyzer_ready = self.sentiment_agent.is_ready()
807
- self.models_ready = self.gemini_agent.is_ready() or self.pi_agent.is_ready()
808
- self.api_keys_valid = self.gemini_agent.is_ready() or self.pi_agent.is_ready()
809
-
810
- # Legacy compatibility
811
- self.gemini_available = self.gemini_agent.is_ready()
812
- self.inflection_ai_available = self.pi_agent.is_ready()
813
- self.quantum_random_available = self.emotional_agent.quantum_random_available
814
-
815
- logging.info("✓ All agents initialized")
816
-
817
- def _check_pre_initialization(self):
818
- """Check if components were pre-initialized by initialize_galatea.py"""
819
- # Check if ChromaDB directory exists and has collection
820
- chromadb_path = "./chroma_db"
821
- if os.path.exists(chromadb_path):
822
- try:
823
- import chromadb
824
- from chromadb.config import Settings
825
- vector_db = chromadb.PersistentClient(
826
- path=chromadb_path,
827
- settings=Settings(anonymized_telemetry=False)
828
- )
829
- collection = vector_db.get_collection("galatea_memory")
830
- if collection:
831
- logging.info("✓ Pre-initialized ChromaDB detected")
832
- return True
833
- except Exception:
834
- pass
835
-
836
- # Check if JSON memory exists
837
- if os.path.exists("./memory.json"):
838
- logging.info("✓ Pre-initialized JSON memory detected")
839
- return True
840
-
841
- return False
842
-
843
def is_fully_initialized(self):
    """True when every subsystem (memory, sentiment, models, keys) is ready."""
    return all((
        self.memory_system_ready,
        self.sentiment_analyzer_ready,
        self.models_ready,
        self.api_keys_valid,
    ))
851
-
852
def get_initialization_status(self):
    """Return a dict describing the readiness of every subsystem.

    Agent-backed entries degrade to False when the agent attribute has
    not been created yet (e.g. a failure partway through __init__).
    """
    tests = getattr(self, 'smoke_test_results', {})
    has_gemini = hasattr(self, 'gemini_agent')
    has_pi = hasattr(self, 'pi_agent')
    has_sentiment = hasattr(self, 'sentiment_agent')
    return {
        "memory_system": self.memory_system_ready,
        "sentiment_analyzer": self.sentiment_analyzer_ready,
        "models": self.models_ready,
        "api_keys": self.api_keys_valid,
        "gemini_available": self.gemini_agent.is_ready() if has_gemini else False,
        "inflection_ai_available": self.pi_agent.is_ready() if has_pi else False,
        "azure_text_analytics_available": self.sentiment_agent.azure_agent.is_ready() if has_sentiment else False,
        "smoke_tests": tests,
        "fully_initialized": self.is_fully_initialized(),
    }
866
-
867
@property
def emotional_state(self):
    """Current emotion levels, delegated to the EmotionalStateAgent.

    Falls back to a flat 0.2-everything baseline when the agent has not
    been constructed yet.
    """
    if hasattr(self, 'emotional_agent'):
        return self.emotional_agent.get_state()
    return {"joy": 0.2, "sadness": 0.2, "anger": 0.2, "fear": 0.2, "curiosity": 0.2}
871
-
872
def initialize_sentiment_analyzer(self):
    """Set up sentiment analysis: HF pipeline if possible, word-list fallback otherwise.

    Always ends with sentiment_analyzer_ready True because the simple
    keyword fallback in analyze_sentiment is always available.
    """
    self.sentiment_analyzer_ready = False
    # Model name is configurable via the 'sentiment' section of models.yaml.
    default_model = 'distilbert/distilbert-base-uncased-finetuned-sst-2-english'
    sentiment_model = self.config.get('sentiment', {}).get('primary_model', default_model) if self.config else default_model

    if not transformers_available:
        self.sentiment_analyzer = None
        self.sentiment_analyzer_ready = True  # Fallback available
        logging.info("✓ Using fallback sentiment analyzer")
        return

    try:
        logging.info("Attempting to initialize Hugging Face sentiment analyzer")
        self.sentiment_analyzer = pipeline(
            "sentiment-analysis",
            model=sentiment_model
        )
        self.sentiment_analyzer_ready = True
        logging.info("✓ Hugging Face sentiment analyzer loaded successfully")
    except Exception as e:
        logging.error(f"Failed to initialize Hugging Face sentiment analyzer: {e}")
        self.sentiment_analyzer = None
        # Still mark as ready since we have fallback
        self.sentiment_analyzer_ready = True
        logging.info("✓ Using fallback sentiment analyzer")
898
-
899
def analyze_sentiment(self, text):
    """Score *text* in [-1.0, 1.0]; positive values mean positive sentiment.

    Uses the Hugging Face pipeline when loaded; otherwise counts hits
    against small positive/negative word lists (0.2 per hit, clamped).
    """
    if self.sentiment_analyzer is not None:
        try:
            result = self.sentiment_analyzer(text)[0]
            magnitude = result['score']
            # Map the classifier label onto a signed score.
            return magnitude if result['label'] == 'POSITIVE' else -magnitude
        except Exception as e:
            logging.error(f"Error in sentiment analysis: {e}")
            # Fall through to the simple keyword-based fallback.

    positive_words = ['good', 'great', 'excellent', 'happy', 'joy', 'love', 'like', 'wonderful']
    negative_words = ['bad', 'terrible', 'sad', 'hate', 'dislike', 'awful', 'poor', 'angry']

    score = 0.0
    for token in text.lower().split():
        if token in positive_words:
            score += 0.2
        elif token in negative_words:
            score -= 0.2

    # Clamp into the same range the model path produces.
    return max(-1.0, min(1.0, score))
929
-
930
def initialize_litellm(self):
    """Detect API keys in the environment and flag which backends are usable.

    Sets gemini_available, inflection_ai_available, quantum_random_available,
    models_ready and api_keys_valid; stores the Inflection and quantum keys
    on self when present.
    """
    # Start from a clean slate; flags flip as keys are discovered.
    self.gemini_available = False
    self.inflection_ai_available = False
    self.quantum_random_available = False
    self.models_ready = False
    self.api_keys_valid = False

    # Gemini via LiteLLM — re-export so litellm picks the key up.
    gemini_key = os.getenv("GEMINI_API_KEY")
    if gemini_key:
        os.environ["GEMINI_API_KEY"] = gemini_key
        self.gemini_available = True
        logging.info("✓ Gemini API key found - Gemini models available via LiteLLM")
    else:
        logging.warning("GEMINI_API_KEY not found - Gemini models unavailable")

    # Inflection AI (Pi-3.1).
    inflection_key = os.getenv("INFLECTION_AI_API_KEY")
    if inflection_key:
        self.inflection_ai_api_key = inflection_key
        self.inflection_ai_available = True
        logging.info("✓ Inflection AI API key found - Pi-3.1 model available")
    else:
        logging.warning("INFLECTION_AI_API_KEY not found - Pi-3.1 model unavailable")

    # ANU quantum random numbers (optional entropy source).
    quantum_key = os.getenv("ANU_QUANTUM_API_KEY")
    if quantum_key:
        self.quantum_api_key = quantum_key
        self.quantum_random_available = True
        logging.info("✓ ANU Quantum Numbers API key found - Quantum randomness available")
    else:
        logging.warning("ANU_QUANTUM_API_KEY not found - Quantum randomness unavailable")

    # At least one chat backend must be configured for the app to respond.
    has_model_backend = self.gemini_available or self.inflection_ai_available
    self.api_keys_valid = has_model_backend
    if has_model_backend:
        logging.info("✓ API keys validated - at least one model API key is available")
    else:
        logging.error("✗ No valid API keys found - models unavailable")

    self.models_ready = has_model_backend
    if has_model_backend:
        logging.info("✓ Models ready for use")
    else:
        logging.warning("⚠ No models available")
978
-
979
def get_quantum_random_numbers(self, length=None, number_type=None):
    """Fetch quantum random numbers from the ANU Quantum Numbers API.

    Args:
        length: How many numbers to request; defaults from config (128).
        number_type: API number type (e.g. 'uint8'); defaults from config.

    Returns:
        A list of numbers, or None when the API is unconfigured or fails
        (callers fall back to the pseudo-random module).
    """
    if not self.quantum_random_available:
        logging.warning("Quantum random numbers unavailable, using fallback")
        return None

    # Defaults come from the 'quantum' section of models.yaml.
    quantum_config = self.config.get('quantum', {}) if self.config else {}
    if length is None:
        length = quantum_config.get('default_length', 128)
    if number_type is None:
        number_type = quantum_config.get('default_type', 'uint8')

    try:
        endpoint = quantum_config.get('api_endpoint', 'https://api.quantumnumbers.anu.edu.au')
        response = requests.get(
            endpoint,
            headers={"x-api-key": self.quantum_api_key},
            params={"length": length, "type": number_type},
            timeout=10,
        )

        if response.status_code != 200:
            logging.error(f"Quantum API returned status code {response.status_code}: {response.text}")
            return None

        result = response.json()
        if result.get('success') and 'data' in result:
            logging.info(f"✓ Retrieved {len(result['data'])} quantum random numbers")
            return result['data']

        logging.warning("Quantum API returned success but no data")
        return None

    except Exception as e:
        logging.error(f"Error fetching quantum random numbers: {e}")
        return None
1019
-
1020
def get_quantum_random_float(self, min_val=0.0, max_val=1.0):
    """Map one quantum byte onto [min_val, max_val]; PRNG fallback otherwise."""
    sample = self.get_quantum_random_numbers(length=1, number_type='uint8')
    if sample:
        # Scale a uint8 (0-255) linearly into the requested interval.
        return min_val + (max_val - min_val) * (sample[0] / 255.0)
    # No quantum data available: use the standard pseudo-random generator.
    return random.uniform(min_val, max_val)
1029
-
1030
def call_inflection_ai(self, user_input, emotional_state, thinking_context=None, conversation_history=None, retrieved_memories=None):
    """Call the Inflection AI Pi-3.1 inference API and return its reply text.

    Args:
        user_input: Current user message.
        emotional_state: Mapping of emotion name -> level in [0, 1]; embedded
            in the system prompt so Pi-3.1 can colour its tone.
        thinking_context: Optional analysis string produced by Gemini.
        conversation_history: Optional list of {"role", "content"} dicts.
        retrieved_memories: Optional list of memory dicts with a 'text' key.

    Returns:
        The stripped response text, or None when the API is unavailable,
        returns a non-200 status, or raises (callers then fall back).
    """
    if not self.inflection_ai_available:
        return None

    try:
        # Create context with emotional state
        emotions_text = ", ".join([f"{emotion}: {value:.2f}" for emotion, value in emotional_state.items()])

        # Format the request according to Inflection AI API
        # Get endpoint and config from YAML
        inflection_config = self.config.get('inflection_ai', {}) if self.config else {}
        url = inflection_config.get('api_endpoint', 'https://api.inflection.ai/external/api/inference')
        model_config = inflection_config.get('model_config', 'Pi-3.1')

        headers = {
            "Authorization": f"Bearer {self.inflection_ai_api_key}",
            "Content-Type": "application/json"
        }

        # Build comprehensive context with thinking insights, conversation history, and retrieved memories
        context_parts = []

        # Base system context
        base_context = f"You are Galatea, an AI assistant with the following emotional state: {emotions_text}. Respond in character as Galatea. Keep your response concise (under 50 words) and reflect your emotional state in your tone."

        # Add thinking context from Gemini if available
        if thinking_context:
            base_context += f"\n\nInternal analysis: {thinking_context}"

        # Add retrieved memories if available
        if retrieved_memories and len(retrieved_memories) > 0:
            memory_text = "\n\nRelevant context from past conversations:\n"
            for i, memory in enumerate(retrieved_memories[:3], 1):  # Top 3 most relevant
                memory_text += f"{i}. {memory['text'][:150]}...\n"
            base_context += memory_text

        # Add conversation history context
        if conversation_history and len(conversation_history) > 0:
            recent_history = conversation_history[-4:]  # Last 2 exchanges
            history_text = "\n\nRecent conversation context:\n"
            for msg in recent_history:
                role = "User" if msg["role"] == "user" else "You (Galatea)"
                history_text += f"{role}: {msg['content']}\n"
            base_context += history_text

        context_parts.append({
            "text": base_context,
            "type": "System"
        })

        # Add conversation history as context messages
        if conversation_history and len(conversation_history) > 4:
            # Add older messages as context (but not the most recent ones we already included)
            for msg in conversation_history[-8:-4]:
                context_parts.append({
                    "text": msg["content"],
                    "type": "Human" if msg["role"] == "user" else "Assistant"
                })

        # Add current user input
        context_parts.append({
            "text": user_input,
            "type": "Human"
        })

        data = {
            "context": context_parts,
            "config": model_config
        }

        logging.info("Sending request to Inflection AI Pi-3.1 API")
        response = requests.post(url, headers=headers, json=data, timeout=30)

        if response.status_code == 200:
            result = response.json()
            # Extract the response text from the API response.
            # NOTE(review): the exact response schema is not pinned down here —
            # several candidate keys are probed defensively.
            if isinstance(result, dict):
                if 'output' in result:
                    text = result['output']
                elif 'text' in result:
                    text = result['text']
                elif 'response' in result:
                    text = result['response']
                elif 'message' in result:
                    text = result['message']
                else:
                    text = str(result)
            elif isinstance(result, str):
                text = result
            else:
                text = str(result)

            logging.info("Inflection AI response received successfully")
            return text.strip()
        else:
            logging.error(f"Inflection AI API returned status code {response.status_code}: {response.text}")
            return None

    except Exception as e:
        logging.error(f"Error calling Inflection AI API: {e}")
        logging.error(f"Full error details: {type(e).__name__}: {str(e)}")
        return None
1133
-
1134
def gemini_think(self, user_input, emotional_state, conversation_history, retrieved_memories=None):
    """Run Gemini as the internal "thinking" step and return its analysis.

    Builds a prompt from the emotional state, the last few conversation
    turns and the top retrieved memories, then tries each configured
    Gemini model in order until one succeeds.

    Returns:
        The stripped analysis text, or None when Gemini is unavailable or
        every configured model fails.
    """
    if not self.gemini_available:
        return None

    try:
        # Build thinking prompt with conversation context
        emotions_text = ", ".join([f"{emotion}: {value:.2f}" for emotion, value in emotional_state.items()])

        # Prepare conversation context for thinking
        context_summary = ""
        if conversation_history:
            recent_history = conversation_history[-6:]  # Last 3 exchanges
            context_summary = "\nRecent conversation:\n"
            for msg in recent_history:
                role = "User" if msg["role"] == "user" else "Galatea"
                context_summary += f"{role}: {msg['content']}\n"

        # Add retrieved memories if available
        memory_context = ""
        if retrieved_memories and len(retrieved_memories) > 0:
            memory_context = "\n\nRelevant memories from past conversations:\n"
            for i, memory in enumerate(retrieved_memories[:3], 1):  # Top 3 most relevant
                memory_context += f"{i}. {memory['text'][:200]}...\n"

        thinking_prompt = f"""You are the internal reasoning system for Galatea, an AI assistant.

Current emotional state: {emotions_text}
{context_summary}
{memory_context}
Current user message: "{user_input}"

Analyze this conversation and provide:
1. Key insights about what the user is asking or discussing
2. Important context from the conversation history and retrieved memories
3. How Galatea should respond emotionally and contextually
4. Any important details to remember or reference

Keep your analysis concise (2-3 sentences). Focus on what matters for crafting an appropriate response."""

        messages = [
            {"role": "system", "content": "You are an internal reasoning system. Analyze conversations and provide insights."},
            {"role": "user", "content": thinking_prompt}
        ]

        logging.info("Using Gemini for thinking/analysis")

        # Get Gemini models from config; the hard-coded list is only a default.
        gemini_config = self.config.get('gemini', {}) if self.config else {}
        gemini_models = gemini_config.get('thinking_models', [
            "gemini/gemini-2.0-flash-exp",
            "gemini/gemini-2.0-flash",
            "gemini/gemini-1.5-flash-latest",
            "gemini/gemini-1.5-flash"
        ])

        # Get thinking settings from config
        thinking_config = gemini_config.get('thinking', {})
        thinking_temp = thinking_config.get('temperature', 0.5)
        thinking_max_tokens = thinking_config.get('max_tokens', 200)

        # Try models in priority order; the first non-empty answer wins.
        for model in gemini_models:
            try:
                response = completion(
                    model=model,
                    messages=messages,
                    temperature=thinking_temp,
                    max_tokens=thinking_max_tokens
                )

                if response and 'choices' in response and len(response['choices']) > 0:
                    thinking_result = response['choices'][0]['message']['content']
                    logging.info("✓ Gemini thinking completed")
                    return thinking_result.strip()
            except Exception as e:
                logging.warning(f"Gemini model {model} failed for thinking: {e}, trying next...")
                continue

        logging.error("All Gemini models failed for thinking")
        return None

    except Exception as e:
        logging.error(f"Error in Gemini thinking: {e}")
        return None
1218
-
1219
def update_conversation_history(self, user_input, assistant_response):
    """Record one user/assistant exchange, trimming old turns beyond the cap."""
    self.conversation_history.extend([
        {"role": "user", "content": user_input},
        {"role": "assistant", "content": assistant_response},
    ])
    # Keep only the most recent max_history_length messages.
    if len(self.conversation_history) > self.max_history_length:
        self.conversation_history = self.conversation_history[-self.max_history_length:]
1230
-
1231
def store_important_memory(self, user_input, assistant_response, intent, keywords):
    """Persist noteworthy exchanges in the long-term memory system.

    An exchange is stored when it is a question, a substantial message
    (more than three keywords), or explicitly flagged by trigger words
    like "remember"/"important". Errors are logged, never raised.
    """
    try:
        memory_type = "conversation"
        should_store = False
        # Branch order matters: a long message containing "remember"
        # is still classified by the earlier length check.
        if intent == "question":
            should_store = True
            memory_type = "question"
        elif len(keywords) > 3:  # substantial conversation
            should_store = True
        elif any(word in ["remember", "important", "note", "save"] for word in keywords):
            should_store = True
            memory_type = "important"

        if not should_store:
            return

        # Combine both sides of the exchange into one memory entry.
        memory_text = f"User: {user_input}\nGalatea: {assistant_response}"
        metadata = {
            "intent": intent,
            "keywords": keywords[:5],  # Top 5 keywords
            "emotions": {k: round(v, 2) for k, v in self.emotional_state.items()}
        }

        # Store in memory system (both ChromaDB and JSON)
        self.memory_system.store_memory(
            text=memory_text,
            metadata=metadata,
            memory_type=memory_type
        )
        logging.info(f"Stored important memory: {memory_type} - {user_input[:50]}...")
    except Exception as e:
        logging.error(f"Error storing memory: {e}")
1267
-
1268
def is_thinking_mode(self, intent, user_input, keywords):
    """Always True: every turn routes through Gemini thinking before Pi-3.1.

    The arguments are kept for interface compatibility with earlier
    versions that selected thinking mode per request.
    """
    return True
1272
-
1273
def process_input(self, user_input):
    """Run one user turn through the agent chain:
    Pi(Gemini(user input + retrieved memory), emotional state).

    Pipeline: sentiment -> keywords/intent -> emotional update -> memory
    retrieval -> Gemini thinking -> Pi-3.1 response, with a direct-Gemini
    and then a canned-status fallback.

    Returns:
        The response string (may be a fallback message), or None if even
        the fallback produced nothing.
    """
    # Step 1: score sentiment so the emotional state can react to it.
    sentiment_score = self.sentiment_agent.analyze(user_input)

    # Step 2: lightweight NLU — keywords and a coarse intent label.
    keywords = self.extract_keywords(user_input)
    intent = self.determine_intent(user_input)

    # Step 3: nudge the emotional state by this turn's sentiment.
    self.emotional_agent.update_with_sentiment(sentiment_score)
    current_emotional_state = self.emotional_agent.get_state()

    # Step 4: pull relevant long-term memories for grounding.
    retrieved_memories = self.memory_agent.retrieve_memories(user_input)

    # Step 5a: GEMINI(user input, read with past memory).
    thinking_context = self.gemini_agent.think(
        user_input,
        current_emotional_state,
        self.conversation_history,
        retrieved_memories=retrieved_memories
    )

    # Step 5b: PHI(Gemini result, emotional state).
    response = self.pi_agent.respond(
        user_input,
        current_emotional_state,
        thinking_context=thinking_context,
        conversation_history=self.conversation_history,
        retrieved_memories=retrieved_memories
    )

    # Fallback if Pi-3.1 is not available: ask Gemini directly.
    if not response and self.gemini_agent.is_ready():
        response = self._gemini_fallback_response(
            user_input,
            current_emotional_state,
            thinking_context,
            self.conversation_history
        )

    # Final fallback: canned status message explaining what is down.
    used_final_fallback = False
    if not response:
        # _generate_fallback_response records the exchange in the history
        # itself, so it must not be recorded again below (the previous
        # version double-logged fallback turns).
        response = self._generate_fallback_response(intent, keywords, current_emotional_state, user_input)
        used_final_fallback = True

    if response:
        if not used_final_fallback:
            self.update_conversation_history(user_input, response)

        # Persist noteworthy exchanges. The previous version called the
        # undefined name self._store_important_memory; the method defined
        # on this class is store_important_memory.
        self.store_important_memory(user_input, response, intent, keywords)

        # Remember first-seen keywords for later lookups.
        self.update_knowledge(keywords, user_input)

    return response
1332
-
1333
def extract_keywords(self, text):
    """Return lowercase alphanumeric tokens from *text*.

    Prefers NLTK's tokenizer; any failure (missing package, missing
    punkt data) falls back to a punctuation-stripping split.
    """
    try:
        return [tok.lower() for tok in nltk.word_tokenize(text) if tok.isalnum()]
    except Exception:
        # Simple split-based fallback: strip common punctuation and keep
        # only non-empty alphanumeric words.
        stripped = (w.lower().strip('.,!?;:()[]{}""\'') for w in text.split())
        return [w for w in stripped if w and w.isalnum()]
1347
-
1348
def determine_intent(self, text):
    """Classify *text* as question / gratitude / farewell / statement.

    Keyword-based and order-sensitive: question markers win over
    gratitude, which wins over farewell.
    """
    lowered = text.lower()
    if any(marker in lowered for marker in ("what", "how", "why")):
        return "question"
    if "thank" in lowered:
        return "gratitude"
    if "goodbye" in lowered or "bye" in lowered:
        return "farewell"
    return "statement"
1359
-
1360
def _gemini_fallback_response(self, user_input, emotional_state, thinking_context, conversation_history):
    """Produce a user-facing reply directly from Gemini when Pi-3.1 fails.

    Builds a chat message list (identity/style system prompt, recent
    history, the user turn, optional thinking context) and calls the
    configured Gemini fallback model via LiteLLM. Temperature is jittered
    with quantum randomness from the emotional agent.

    Returns:
        The stripped reply text, or None on any failure (everything is
        wrapped in one try so callers can chain further fallbacks).
    """
    try:
        logging.info("[GalateaAI] Using Gemini fallback for direct response")
        emotions_text = ", ".join([f"{emotion}: {value:.2f}" for emotion, value in emotional_state.items()])

        # Build messages with conversation history
        messages = []
        # Get system prompts from config
        system_prompts = self.config.get('system_prompts', {}) if self.config else {}
        identity = system_prompts.get('galatea_identity', 'You are Galatea, an AI assistant with emotional awareness and memory.')
        style = system_prompts.get('response_style', 'Respond in character, keeping responses concise (under 50 words).')

        messages.append({
            "role": "system",
            "content": f"{identity} Your emotional state: {emotions_text}. {style}"
        })

        # Get fallback settings from config
        gemini_config = self.config.get('gemini', {}) if self.config else {}
        fallback_config = gemini_config.get('fallback', {})
        max_history_exchanges = fallback_config.get('max_history_exchanges', 8)
        fallback_model = gemini_config.get('fallback_model', 'gemini/gemini-1.5-flash')

        # Add conversation history
        if conversation_history:
            for msg in conversation_history[-max_history_exchanges:]:
                messages.append({
                    "role": msg["role"],
                    "content": msg["content"]
                })

        # Add current user input
        messages.append({
            "role": "user",
            "content": user_input
        })

        # Add thinking context if available
        # NOTE(review): this system message is appended AFTER the user turn —
        # confirm the fallback model handles trailing system messages.
        if thinking_context:
            messages.append({
                "role": "system",
                "content": f"Internal analysis: {thinking_context}"
            })

        # Use quantum randomness for temperature
        base_temperature = fallback_config.get('temperature_base', 0.7)
        temp_range = fallback_config.get('temperature_variation_range', [0.0, 0.3])
        quantum_temp_variation = self.emotional_agent.get_quantum_random_float(temp_range[0], temp_range[1])
        temperature = base_temperature + quantum_temp_variation

        response = completion(
            model=fallback_model,
            messages=messages,
            temperature=temperature,
            max_tokens=fallback_config.get('max_tokens', 150)
        )

        if response and 'choices' in response and len(response['choices']) > 0:
            text = response['choices'][0]['message']['content']
            logging.info("[GalateaAI] ✓ Gemini fallback response received")
            return text.strip()
    except Exception as e:
        logging.error(f"[GalateaAI] Gemini fallback failed: {e}")

    return None
1426
-
1427
def _generate_fallback_response(self, intent, keywords, emotional_state, original_input):
    """Build a canned status reply when no model backend produced a response.

    Inspects the availability flags, describes which subsystems are down
    in natural language, and tailors the message to the detected intent.
    Also records the exchange in the conversation history itself.

    Returns:
        The fallback message string (never None for the intents handled).
    """
    logging.info(f"[GalateaAI] Using final fallback response. Intent: {intent}, Keywords: {keywords[:5]}")

    # Determine which systems are not working.
    # Each entry is a (display name, friendly description) tuple.
    unavailable_systems = []
    system_descriptions = {
        'inflection_ai': ('Pi-3.1', 'my conversation model'),
        'gemini': ('Gemini', 'my thinking model'),
        'quantum_random': ('Quantum Random Numbers API', 'my quantum randomness source'),
        'memory': ('Memory System', 'my memory system')
    }

    # getattr with a False default: a missing flag counts as unavailable.
    if not getattr(self, 'inflection_ai_available', False):
        unavailable_systems.append(system_descriptions['inflection_ai'])
    if not getattr(self, 'gemini_available', False):
        unavailable_systems.append(system_descriptions['gemini'])
    if not getattr(self, 'quantum_random_available', False):
        unavailable_systems.append(system_descriptions['quantum_random'])
    if not getattr(self, 'memory_system_ready', False):
        unavailable_systems.append(system_descriptions['memory'])

    # Generate natural, conversational error message
    if unavailable_systems:
        if len(unavailable_systems) == 1:
            system_name, system_desc = unavailable_systems[0]
            system_msg = f"{system_desc} ({system_name}) is not working right now"
        elif len(unavailable_systems) == 2:
            sys1_name, sys1_desc = unavailable_systems[0]
            sys2_name, sys2_desc = unavailable_systems[1]
            system_msg = f"{sys1_desc} ({sys1_name}) and {sys2_desc} ({sys2_name}) are not working"
        else:
            # For 3+ systems, list them naturally (Oxford-comma style).
            system_list = []
            for sys_name, sys_desc in unavailable_systems[:-1]:
                system_list.append(f"{sys_desc} ({sys_name})")
            last_name, last_desc = unavailable_systems[-1]
            system_msg = f"{', '.join(system_list)}, and {last_desc} ({last_name}) are not working"
    else:
        system_msg = "some of my systems encountered an error"

    fallback_response = None
    if intent == "question":
        if "you" in keywords:
            fallback_response = f"I'm still learning about myself, but I'm having technical difficulties. {system_msg.capitalize()}. I apologize for the inconvenience."
        else:
            fallback_response = f"I'd love to help with that, but {system_msg}. Please check my system status or try again in a moment."
    elif intent == "gratitude":
        fallback_response = "You're welcome!"
    else:
        if unavailable_systems:
            fallback_response = f"I hear you, but {system_msg}. This might be due to missing API keys or network issues. Please check my configuration."
        else:
            fallback_response = "I hear you, though my full AI capabilities aren't active right now. Please check if my API keys are configured."

    # Update conversation history even for fallback
    if fallback_response:
        self.update_conversation_history(original_input, fallback_response)

    return fallback_response
1487
-
1488
def update_knowledge(self, keywords, user_input):
    """Remember the first sentence each new keyword was seen in.

    Existing entries are never overwritten, so the knowledge base keeps
    the original context of each keyword.
    """
    for keyword in keywords:
        self.knowledge_base.setdefault(keyword, user_input)
1493
-
1494
-
1495
- # --- 2. Dialogue Engine ---
1496
class DialogueEngine:
    """Thin layer between the console/UI and GalateaAI.

    Forwards user input to the AI core and applies (currently pass-through)
    emotional styling to the reply.
    """

    def __init__(self, ai_core):
        self.ai_core = ai_core
        # Kept so sentiment analysis can re-inspect the latest message.
        self.last_user_message = ""

    def get_response(self, user_input):
        """Run *user_input* through the AI core and return the styled reply."""
        self.last_user_message = user_input
        raw_reply = self.ai_core.process_input(user_input)
        return self.apply_style(raw_reply, self.ai_core.emotional_state)

    def apply_style(self, text, emotional_state):
        """Return *text* unchanged; the style suffix was removed on purpose
        to keep responses clean. The style lookup is retained for future use."""
        _style = self.get_style(emotional_state)
        return text

    def get_style(self, emotional_state):
        """Placeholder: always 'neutral' until emotion-driven styles exist."""
        return "neutral"
1519
-
1520
- # --- 3. Avatar Engine ---
1521
-
1522
class AvatarShape(Enum):
    """Geometric forms the console avatar can take."""
    CIRCLE = "Circle"
    TRIANGLE = "Triangle"
    SQUARE = "Square"
1526
-
1527
class AvatarEngine:
    """Maps Galatea's emotional state onto a simple geometric avatar."""

    def __init__(self):
        self.avatar_model = "Circle"  # default starting shape
        self.expression_parameters = {}

    def update_avatar(self, emotional_state):
        """Recompute the avatar shape from the joy and sadness levels."""
        self.avatar_model = self.change_avatar_shape(
            emotional_state["joy"],
            emotional_state["sadness"],
        )

    def change_avatar_shape(self, joy, sad):
        """Pick a shape: joy dominates, then sadness; otherwise square."""
        if joy > 0.5:
            return AvatarShape.CIRCLE.value
        if sad > 0.5:
            return AvatarShape.TRIANGLE.value
        return AvatarShape.SQUARE.value

    def render_avatar(self):
        """Print the current avatar state to the console."""
        print(f"Avatar shape: {self.avatar_model}")
1552
-
1553
- # REMOVE THE MAIN PROGRAM LOOP THAT BLOCKS EXECUTION
1554
- # This is critical - the code below was causing the issue
1555
- # by creating instances outside of the Flask app's control
1556
-
1557
- # instead, only run this if the script is executed directly
1558
if __name__ == "__main__":
    # Ensure the punkt tokenizer data exists before the REPL starts.
    nltk.download('punkt', quiet=True)

    try:
        nltk.data.find("tokenizers/punkt")
    except LookupError:
        nltk.download('punkt')

    # Wire up the core, dialogue layer, and avatar renderer.
    galatea_ai = GalateaAI()
    dialogue_engine = DialogueEngine(galatea_ai)
    avatar_engine = AvatarEngine()
    avatar_engine.update_avatar(galatea_ai.emotional_state)
    # Initial avatar rendering
    avatar_engine.render_avatar()

    # Console REPL: read a line, respond, refresh the avatar.
    while True:
        user_input = input("You: ")
        response = dialogue_engine.get_response(user_input)
        print(f"Galatea: {response}")

        avatar_engine.update_avatar(galatea_ai.emotional_state)
        avatar_engine.render_avatar()