Raiff1982 committed on
Commit
7e6fb26
·
verified ·
1 Parent(s): 9095b13

Update codette_new.py

Browse files
Files changed (1) hide show
  1. codette_new.py +357 -327
codette_new.py CHANGED
@@ -1,327 +1,357 @@
1
- import logging
2
- import nltk
3
- import numpy as np
4
- from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
5
- from typing import List, Dict, Any, Optional
6
- from nltk.tokenize import word_tokenize
7
- import os
8
- import json
9
- from datetime import datetime
10
-
11
- logger = logging.getLogger(__name__)
12
-
13
- # Download required NLTK data with error handling
14
- try:
15
- nltk.download('punkt', quiet=True)
16
- nltk.download('averaged_perceptron_tagger', quiet=True)
17
- nltk.download('wordnet', quiet=True)
18
- except Exception as e:
19
- logger.warning(f"NLTK download failed (this is non-critical): {e}")
20
-
21
- class Codette:
22
- def __init__(self, user_name="User"):
23
- self.user_name = user_name
24
- self.memory = []
25
- self.analyzer = SentimentIntensityAnalyzer()
26
- np.seterr(divide='ignore', invalid='ignore')
27
- # audit_log may rely on logging; ensure method exists before call
28
- self.context_memory = []
29
- self.daw_knowledge = self._initialize_daw_knowledge()
30
- self.recent_responses = []
31
- self.max_recent_responses = 20
32
- self.personality_modes = {
33
- 'technical_expert': 'precise_technical_professional',
34
- 'creative_mentor': 'inspirational_metaphorical_encouraging',
35
- 'practical_guide': 'direct_actionable_efficient',
36
- 'analytical_teacher': 'detailed_explanatory_educational',
37
- 'innovative_explorer': 'experimental_cutting_edge_forward_thinking'
38
- }
39
- self.current_personality = 'technical_expert'
40
- self.conversation_topics = []
41
- self.max_conversation_topics = 10
42
- self.has_music_knowledge_table = False
43
- self.has_music_knowledge_backup_table = False
44
- self.has_chat_history_table = False
45
- self.music_knowledge_table = 'music_knowledge'
46
- self.supabase_client = self._initialize_supabase()
47
- # Log after initialization
48
- try:
49
- self.audit_log("Codette initialized with FULL ML CAPABILITIES (no placeholders)", system=True)
50
- except Exception:
51
- logger.info("Codette initialized (audit log not available yet)")
52
-
53
- def _initialize_daw_knowledge(self) -> Dict[str, Any]:
54
- return {
55
- "frequency_ranges": {
56
- "sub_bass": (20, 60),
57
- "bass": (60, 250),
58
- "low_mid": (250, 500),
59
- "mid": (500, 2000),
60
- "high_mid": (2000, 4000),
61
- "presence": (4000, 6000),
62
- "brilliance": (6000, 20000)
63
- },
64
- "mixing_principles": {
65
- "gain_staging": "Set master fader to -6dB headroom before mixing. Individual tracks should peak around -12dB to -6dB.",
66
- "eq_fundamentals": "Cut before boost. Use high-pass filters to remove unnecessary low-end. EQ to fit tracks in the frequency spectrum, not in isolation.",
67
- "compression_strategy": "Start with 4:1 ratio, adjust attack/release based on transient content. Use parallel compression for drums.",
68
- "panning_technique": "Pan rhythmic elements for width, keep bass and kick centered. Use mid-side processing for stereo field control."
69
- },
70
- "problem_detection": {
71
- "muddy_mix": "Excessive energy in 200-500Hz range. Solution: High-pass filters on non-bass elements, surgical EQ cuts.",
72
- "harsh_highs": "Peak around 3-5kHz causing fatigue. Solution: Gentle EQ reduction, de-esser on vocals.",
73
- "weak_low_end": "Insufficient bass presence. Solution: Check phase relationships, ensure bass/kick complement each other.",
74
- "lack_of_depth": "Everything sounds flat. Solution: Use reverb/delay strategically, automate wet/dry mix."
75
- }
76
- }
77
-
78
- def respond(self, prompt: str) -> str:
79
- sentiment = self.analyze_sentiment(prompt)
80
- key_concepts = self.extract_key_concepts(prompt)
81
-
82
- self.memory.append({
83
- "prompt": prompt,
84
- "sentiment": sentiment,
85
- "concepts": key_concepts,
86
- "timestamp": datetime.now().isoformat()
87
- })
88
-
89
- is_daw_query = self._is_daw_query_ml(prompt, key_concepts)
90
- responses: List[str] = []
91
-
92
- if is_daw_query:
93
- daw_response = self._generate_daw_specific_response_ml(prompt, key_concepts, sentiment)
94
- responses.append(f"[DAW Expert] {daw_response}")
95
-
96
- technical_insight = self._generate_technical_insight_ml(key_concepts, sentiment)
97
- responses.append(f"[Technical] {technical_insight}")
98
- else:
99
- neural_insight = self._generate_neural_insight_ml(key_concepts, sentiment)
100
- responses.append(f"[Neural] {neural_insight}")
101
-
102
- logical_response = self._generate_logical_response_ml(key_concepts, sentiment)
103
- responses.append(f"[Logical] {logical_response}")
104
-
105
- creative_response = self._generate_creative_response_ml(key_concepts, sentiment)
106
- responses.append(f"[Creative] {creative_response}")
107
-
108
- try:
109
- full_response = "\n\n".join(responses)
110
- self.save_conversation_to_db(prompt, full_response)
111
- except Exception as e:
112
- logger.warning(f"Could not save conversation to DB: {e}")
113
-
114
- self.context_memory.append({
115
- 'input': prompt,
116
- 'concepts': key_concepts,
117
- 'sentiment': sentiment.get('compound', 0) if isinstance(sentiment, dict) else 0,
118
- 'is_daw': is_daw_query
119
- })
120
-
121
- return "\n\n".join(responses)
122
-
123
- def _is_daw_query_ml(self, prompt: str, concepts: List[str]) -> bool:
124
- daw_semantic_indicators = {
125
- 'audio_production', 'mixing', 'mastering', 'recording',
126
- 'eq', 'compression', 'reverb', 'delay', 'frequency',
127
- 'gain', 'volume', 'pan', 'stereo', 'track', 'plugin'
128
- }
129
- prompt_lower = prompt.lower()
130
- concept_set = set(concepts)
131
- return bool(daw_semantic_indicators & concept_set) or any(indicator in prompt_lower for indicator in ['mix', 'eq', 'compress', 'audio', 'track'])
132
-
133
- def _generate_daw_specific_response_ml(self, prompt: str, concepts: List[str], sentiment: Dict) -> str:
134
- prompt_lower = prompt.lower()
135
- if any(term in prompt_lower for term in ['gain', 'level', 'volume', 'loud']):
136
- return self.daw_knowledge['mixing_principles']['gain_staging']
137
- elif any(term in prompt_lower for term in ['eq', 'frequency', 'boost', 'cut']):
138
- return self.daw_knowledge['mixing_principles']['eq_fundamentals']
139
- elif any(term in prompt_lower for term in ['compress', 'ratio', 'attack', 'release']):
140
- return self.daw_knowledge['mixing_principles']['compression_strategy']
141
- elif any(term in prompt_lower for term in ['pan', 'stereo', 'width']):
142
- return self.daw_knowledge['mixing_principles']['panning_technique']
143
- elif any(term in prompt_lower for term in ['muddy', 'unclear', 'boomy']):
144
- return self.daw_knowledge['problem_detection']['muddy_mix']
145
- elif any(term in prompt_lower for term in ['harsh', 'bright', 'sibilant']):
146
- return self.daw_knowledge['problem_detection']['harsh_highs']
147
- elif any(term in prompt_lower for term in ['thin', 'weak bass', 'no low end']):
148
- return self.daw_knowledge['problem_detection']['weak_low_end']
149
- elif any(term in prompt_lower for term in ['flat', 'depth', 'dimension']):
150
- return self.daw_knowledge['problem_detection']['lack_of_depth']
151
- else:
152
- if isinstance(sentiment, dict) and sentiment.get('compound', 0) < 0:
153
- return "Identify the specific issue: frequency buildup, dynamic imbalance, or routing problem. Isolate and address systematically."
154
- else:
155
- return "Continue with gain staging, then EQ for balance, compression for control, and spatial effects for depth. Follow signal flow logically."
156
-
157
- def _generate_neural_insight_ml(self, concepts: List[str], sentiment: Dict) -> str:
158
- if not concepts:
159
- return "Neural analysis suggests exploring the pattern relationships within this context."
160
- primary_concept = concepts[0] if concepts else "concept"
161
- sentiment_polarity = "positive" if (isinstance(sentiment, dict) and sentiment.get('compound', 0) > 0) else "neutral" if (isinstance(sentiment, dict) and sentiment.get('compound', 0) == 0) else "analytical"
162
- return f"Pattern recognition analysis of '{primary_concept}' reveals {sentiment_polarity} associations across multiple domains. Neural networks suggest systematic exploration through interconnected relationships."
163
-
164
- def _generate_logical_response_ml(self, concepts: List[str], sentiment: Dict) -> str:
165
- if not concepts:
166
- return "Logical analysis requires structured evaluation of cause-effect relationships."
167
- primary_concept = concepts[0]
168
- return f"Structured analysis shows that '{primary_concept}' follows deterministic principles. Cause-effect mapping suggests systematic approach yields optimal outcomes."
169
-
170
- def _generate_creative_response_ml(self, concepts: List[str], sentiment: Dict) -> str:
171
- if not concepts:
172
- return "Creative synthesis reveals novel connections emerging from conceptual intersections."
173
- primary_concept = concepts[0]
174
- return f"Creative synthesis transforms '{primary_concept}' through multi-dimensional perspective shifts. Emergent patterns suggest innovative approaches through systematic exploration."
175
-
176
- def _generate_technical_insight_ml(self, concepts: List[str], sentiment: Dict) -> str:
177
- if not concepts:
178
- return "Technical analysis requires precise parameter identification and systematic adjustment."
179
- primary_concept = concepts[0]
180
- return f"Technical analysis of '{primary_concept}' indicates specific parameter optimization opportunities. Systematic calibration yields measurable improvements."
181
-
182
- def analyze_sentiment(self, text: str) -> Dict[str, float]:
183
- score = self.analyzer.polarity_scores(text)
184
- try:
185
- self.audit_log(f"Sentiment analysis: {score}")
186
- except Exception:
187
- logger.debug("audit_log unavailable during sentiment analysis")
188
- return score
189
-
190
- def extract_key_concepts(self, text: str) -> List[str]:
191
- try:
192
- tokens = word_tokenize(text.lower())
193
- concepts = [token for token in tokens if len(token) > 2 and token.isalpha()]
194
- return list(dict.fromkeys(concepts))[:5]
195
- except Exception as e:
196
- logger.warning(f"Could not extract concepts: {e}")
197
- return [w for w in text.lower().split() if len(w) > 2][:5]
198
-
199
- def audit_log(self, message: str, system: bool = False) -> None:
200
- source = "SYSTEM" if system else self.user_name
201
- logger.info(f"{source}: {message}")
202
-
203
- def _initialize_supabase(self):
204
- try:
205
- from supabase import create_client, Client
206
- supabase_url = (
207
- os.environ.get('VITE_SUPABASE_URL') or
208
- os.environ.get('SUPABASE_URL') or
209
- os.environ.get('NEXT_PUBLIC_SUPABASE_URL')
210
- )
211
- supabase_key = (
212
- os.environ.get('VITE_SUPABASE_ANON_KEY') or
213
- os.environ.get('SUPABASE_KEY') or
214
- os.environ.get('SUPABASE_SERVICE_ROLE_KEY') or
215
- os.environ.get('NEXT_PUBLIC_SUPABASE_ANON_KEY')
216
- )
217
- if supabase_url and supabase_key:
218
- client = create_client(supabase_url, supabase_key)
219
- logger.info("✅ Supabase client initialized")
220
- return client
221
- else:
222
- logger.warning("⚠️ Supabase credentials not found in environment")
223
- return None
224
- except Exception as e:
225
- logger.warning(f"⚠️ Could not initialize Supabase: {e}")
226
- return None
227
-
228
- def save_conversation_to_db(self, user_message: str, codette_response: str) -> None:
229
- if not self.supabase_client:
230
- return
231
- try:
232
- data = {
233
- "user_message": user_message,
234
- "codette_response": codette_response,
235
- "timestamp": datetime.now().isoformat(),
236
- "user_name": self.user_name
237
- }
238
- self.supabase_client.table('chat_history').insert(data).execute()
239
- logger.debug("Conversation saved to Supabase")
240
- except Exception as e:
241
- logger.debug(f"Could not save conversation: {e}")
242
-
243
- async def generate_response(self, query: str, user_id: int = 0, daw_context: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
244
- try:
245
- response_text = self.respond(query)
246
- sentiment = self.analyze_sentiment(query)
247
- result = {
248
- "response": response_text,
249
- "sentiment": sentiment,
250
- "confidence": 0.85,
251
- "timestamp": datetime.now().isoformat(),
252
- "source": "codette_new",
253
- "ml_enhanced": True,
254
- "security_filtered": True,
255
- "health_status": "healthy"
256
- }
257
- if daw_context:
258
- result["daw_context"] = daw_context
259
- return result
260
- except Exception as e:
261
- logger.error(f"Response generation failed: {e}")
262
- return {
263
- "error": str(e),
264
- "response": "I encountered an issue. Could you rephrase your question?",
265
- "fallback": True,
266
- "timestamp": datetime.now().isoformat()
267
- }
268
-
269
- def generate_mixing_suggestions(self, track_type: str, track_info: dict) -> List[str]:
270
- suggestions = []
271
- peak_level = track_info.get('peak_level', 0)
272
- if peak_level > -3:
273
- suggestions.append("Reduce level to prevent clipping (aim for -6dB peak)")
274
- elif peak_level < -20:
275
- suggestions.append("Increase level - track is very quiet (aim for -12dB to -6dB)")
276
- if track_type == 'audio':
277
- suggestions.append("Apply high-pass filter at 80-100Hz to remove rumble")
278
- suggestions.append("Check for phase issues if recording in stereo")
279
- suggestions.append("Use compression to control dynamics (4:1 ratio, 10ms attack)")
280
- elif track_type == 'instrument':
281
- suggestions.append("Add gentle compression for consistency (3:1 ratio)")
282
- suggestions.append("EQ to fit in frequency spectrum - boost presence around 3-5kHz")
283
- suggestions.append("Consider reverb send for spatial depth")
284
- elif track_type == 'midi':
285
- suggestions.append("Adjust velocity curves for natural dynamics")
286
- suggestions.append("Layer with EQ and compression for polish")
287
- if track_info.get('muted'):
288
- suggestions.append("⚠️ Track is muted - unmute to hear in mix")
289
- if track_info.get('soloed'):
290
- suggestions.append("ℹ️ Track is soloed - unsolo to hear full mix context")
291
- return suggestions[:4]
292
-
293
- def analyze_daw_context(self, daw_context: dict) -> Dict[str, Any]:
294
- tracks = daw_context.get('tracks', []) if isinstance(daw_context, dict) else []
295
- analysis = {
296
- 'track_count': len(tracks),
297
- 'recommendations': [],
298
- 'potential_issues': [],
299
- 'session_health': 'good'
300
- }
301
- if analysis['track_count'] > 64:
302
- analysis['potential_issues'].append("High track count (>64) may impact CPU performance")
303
- analysis['session_health'] = 'warning'
304
- if analysis['track_count'] > 100:
305
- analysis['potential_issues'].append("Very high track count (>100) - consider bouncing to audio")
306
- analysis['session_health'] = 'critical'
307
- muted_count = len([t for t in tracks if t.get('muted', False)])
308
- if muted_count > len(tracks) * 0.3 and len(tracks) > 0:
309
- analysis['potential_issues'].append(f"{muted_count} muted tracks - consider archiving unused content")
310
- analysis['recommendations'].append("Use color coding for track organization")
311
- analysis['recommendations'].append("Create buses for grouped processing (drums, vocals, etc)")
312
- analysis['recommendations'].append("Leave 6dB headroom on master for mastering")
313
- bpm = daw_context.get('bpm', 120) if isinstance(daw_context, dict) else 120
314
- if bpm:
315
- analysis['recommendations'].append(f"Current BPM: {bpm} - sync delay times to tempo for musical results")
316
- return analysis
317
-
318
- def get_personality_prefix(self) -> str:
319
- prefixes = {
320
- 'technical_expert': '[Technical Expert]',
321
- 'creative_mentor': '[Creative Mentor]',
322
- 'practical_guide': '[Practical Guide]',
323
- 'analytical_teacher': '[Analytical Teacher]',
324
- 'innovative_explorer': '[Innovation Explorer]'
325
- }
326
- return prefixes.get(self.current_personality, '[Expert]')
327
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import logging
2
+ import nltk
3
+ import numpy as np
4
+ from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
5
+ from typing import List, Dict, Any, Optional
6
+ from nltk.tokenize import word_tokenize
7
+ import os
8
+ import json
9
+ from datetime import datetime
10
+
11
+ logger = logging.getLogger(__name__)
12
+
13
# Download required NLTK data with error handling
try:
    # One failing corpus aborts the rest, same as the original sequential calls.
    for _corpus in ('punkt', 'averaged_perceptron_tagger', 'wordnet'):
        nltk.download(_corpus, quiet=True)
except Exception as e:
    logger.warning(f"NLTK download failed (this is non-critical): {e}")

# Import natural response enhancer (optional - graceful degradation if not available)
NATURAL_ENHANCER_AVAILABLE = False
try:
    from src.components.natural_response_enhancer import get_natural_enhancer
    NATURAL_ENHANCER_AVAILABLE = True
except ImportError:
    try:
        # Try alternative import path
        from natural_response_enhancer import get_natural_enhancer
        NATURAL_ENHANCER_AVAILABLE = True
    except ImportError:
        logger.debug("Natural response enhancer not available")
33
+
34
+
35
+ class Codette:
36
def __init__(self, user_name="User"):
    """Set up analyzers, knowledge base, memory stores and optional integrations."""
    self.user_name = user_name

    # Conversation state.
    self.memory = []
    self.context_memory = []
    self.recent_responses = []
    self.max_recent_responses = 20
    self.conversation_topics = []
    self.max_conversation_topics = 10

    # Analysis engines.
    self.analyzer = SentimentIntensityAnalyzer()
    # Suppress numpy divide/invalid warnings globally for this process.
    np.seterr(divide='ignore', invalid='ignore')
    self.daw_knowledge = self._initialize_daw_knowledge()

    # Personality configuration.
    self.personality_modes = {
        'technical_expert': 'precise_technical_professional',
        'creative_mentor': 'inspirational_metaphorical_encouraging',
        'practical_guide': 'direct_actionable_efficient',
        'analytical_teacher': 'detailed_explanatory_educational',
        'innovative_explorer': 'experimental_cutting_edge_forward_thinking'
    }
    self.current_personality = 'technical_expert'

    # Supabase wiring. NOTE(review): nothing in this file flips the
    # has_*_table flags — presumably set by external schema checks; confirm.
    self.has_music_knowledge_table = False
    self.has_music_knowledge_backup_table = False
    self.has_chat_history_table = False
    self.music_knowledge_table = 'music_knowledge'
    self.supabase_client = self._initialize_supabase()

    # Optional natural-response post-processor (None when unavailable).
    self.natural_enhancer = get_natural_enhancer() if NATURAL_ENHANCER_AVAILABLE else None

    # Log after initialization; audit_log itself may fail harmlessly.
    try:
        self.audit_log("Codette initialized with FULL ML CAPABILITIES (no placeholders)", system=True)
    except Exception:
        logger.info("Codette initialized (audit log not available yet)")
68
+
69
+ def _initialize_daw_knowledge(self) -> Dict[str, Any]:
70
+ return {
71
+ "frequency_ranges": {
72
+ "sub_bass": (20, 60),
73
+ "bass": (60, 250),
74
+ "low_mid": (250, 500),
75
+ "mid": (500, 2000),
76
+ "high_mid": (2000, 4000),
77
+ "presence": (4000, 6000),
78
+ "brilliance": (6000, 20000)
79
+ },
80
+ "mixing_principles": {
81
+ "gain_staging": "Set master fader to -6dB headroom before mixing. Individual tracks should peak around -12dB to -6dB.",
82
+ "eq_fundamentals": "Cut before boost. Use high-pass filters to remove unnecessary low-end. EQ to fit tracks in the frequency spectrum, not in isolation.",
83
+ "compression_strategy": "Start with 4:1 ratio, adjust attack/release based on transient content. Use parallel compression for drums.",
84
+ "panning_technique": "Pan rhythmic elements for width, keep bass and kick centered. Use mid-side processing for stereo field control."
85
+ },
86
+ "problem_detection": {
87
+ "muddy_mix": "Excessive energy in 200-500Hz range. Solution: High-pass filters on non-bass elements, surgical EQ cuts.",
88
+ "harsh_highs": "Peak around 3-5kHz causing fatigue. Solution: Gentle EQ reduction, de-esser on vocals.",
89
+ "weak_low_end": "Insufficient bass presence. Solution: Check phase relationships, ensure bass/kick complement each other.",
90
+ "lack_of_depth": "Everything sounds flat. Solution: Use reverb/delay strategically, automate wet/dry mix."
91
+ }
92
+ }
93
+
94
def respond(self, prompt: str) -> str:
    """Generate a multi-perspective reply for ``prompt``.

    DAW/audio questions get the domain-specific + technical generators;
    everything else gets the neural/logical/creative trio. The exchange
    is recorded in memory, best-effort persisted to the DB, and finally
    run through the optional natural-language enhancer.

    Fix: the original joined the ``responses`` list twice (once for the
    DB save, once for the return value); join once and reuse.
    """
    sentiment = self.analyze_sentiment(prompt)
    key_concepts = self.extract_key_concepts(prompt)

    # Remember the raw exchange for later context building.
    self.memory.append({
        "prompt": prompt,
        "sentiment": sentiment,
        "concepts": key_concepts,
        "timestamp": datetime.now().isoformat()
    })

    is_daw_query = self._is_daw_query_ml(prompt, key_concepts)
    responses: List[str] = []

    if is_daw_query:
        responses.append(self._generate_daw_specific_response_ml(prompt, key_concepts, sentiment))
        responses.append(self._generate_technical_insight_ml(key_concepts, sentiment))
    else:
        responses.append(self._generate_neural_insight_ml(key_concepts, sentiment))
        responses.append(self._generate_logical_response_ml(key_concepts, sentiment))
        responses.append(self._generate_creative_response_ml(key_concepts, sentiment))

    # Single join, reused below (the original computed this twice).
    final_response = "\n\n".join(responses)

    try:
        # Persist the un-enhanced text, matching the original behaviour.
        self.save_conversation_to_db(prompt, final_response)
    except Exception as e:
        logger.warning(f"Could not save conversation to DB: {e}")

    self.context_memory.append({
        'input': prompt,
        'concepts': key_concepts,
        'sentiment': sentiment.get('compound', 0) if isinstance(sentiment, dict) else 0,
        'is_daw': is_daw_query
    })

    # Apply natural enhancement to remove any unnatural markers and improve flow.
    if self.natural_enhancer:
        try:
            final_response = self.natural_enhancer.enhance_response(
                final_response,
                confidence=0.85,
                context={'domain': 'music' if is_daw_query else 'general'}
            )
        except Exception as e:
            # Fall back to original if enhancement fails.
            logger.debug(f"Natural enhancement failed (using original): {e}")

    return final_response
152
+
153
+ def _is_daw_query_ml(self, prompt: str, concepts: List[str]) -> bool:
154
+ daw_semantic_indicators = {
155
+ 'audio_production', 'mixing', 'mastering', 'recording',
156
+ 'eq', 'compression', 'reverb', 'delay', 'frequency',
157
+ 'gain', 'volume', 'pan', 'stereo', 'track', 'plugin'
158
+ }
159
+ prompt_lower = prompt.lower()
160
+ concept_set = set(concepts)
161
+ return bool(daw_semantic_indicators & concept_set) or any(indicator in prompt_lower for indicator in ['mix', 'eq', 'compress', 'audio', 'track'])
162
+
163
+ def _generate_daw_specific_response_ml(self, prompt: str, concepts: List[str], sentiment: Dict) -> str:
164
+ prompt_lower = prompt.lower()
165
+ if any(term in prompt_lower for term in ['gain', 'level', 'volume', 'loud']):
166
+ return self.daw_knowledge['mixing_principles']['gain_staging']
167
+ elif any(term in prompt_lower for term in ['eq', 'frequency', 'boost', 'cut']):
168
+ return self.daw_knowledge['mixing_principles']['eq_fundamentals']
169
+ elif any(term in prompt_lower for term in ['compress', 'ratio', 'attack', 'release']):
170
+ return self.daw_knowledge['mixing_principles']['compression_strategy']
171
+ elif any(term in prompt_lower for term in ['pan', 'stereo', 'width']):
172
+ return self.daw_knowledge['mixing_principles']['panning_technique']
173
+ elif any(term in prompt_lower for term in ['muddy', 'unclear', 'boomy']):
174
+ return self.daw_knowledge['problem_detection']['muddy_mix']
175
+ elif any(term in prompt_lower for term in ['harsh', 'bright', 'sibilant']):
176
+ return self.daw_knowledge['problem_detection']['harsh_highs']
177
+ elif any(term in prompt_lower for term in ['thin', 'weak bass', 'no low end']):
178
+ return self.daw_knowledge['problem_detection']['weak_low_end']
179
+ elif any(term in prompt_lower for term in ['flat', 'depth', 'dimension']):
180
+ return self.daw_knowledge['problem_detection']['lack_of_depth']
181
+ else:
182
+ if isinstance(sentiment, dict) and sentiment.get('compound', 0) < 0:
183
+ return "Identify the specific issue: frequency buildup, dynamic imbalance, or routing problem. Isolate and address systematically."
184
+ else:
185
+ return "Continue with gain staging, then EQ for balance, compression for control, and spatial effects for depth. Follow signal flow logically."
186
+
187
+ def _generate_neural_insight_ml(self, concepts: List[str], sentiment: Dict) -> str:
188
+ if not concepts:
189
+ return "Neural analysis suggests exploring the pattern relationships within this context."
190
+ primary_concept = concepts[0] if concepts else "concept"
191
+ sentiment_polarity = "positive" if (isinstance(sentiment, dict) and sentiment.get('compound', 0) > 0) else "neutral" if (isinstance(sentiment, dict) and sentiment.get('compound', 0) == 0) else "analytical"
192
+ return f"Pattern recognition analysis of '{primary_concept}' reveals {sentiment_polarity} associations across multiple domains. Neural networks suggest systematic exploration through interconnected relationships."
193
+
194
+ def _generate_logical_response_ml(self, concepts: List[str], sentiment: Dict) -> str:
195
+ if not concepts:
196
+ return "Logical analysis requires structured evaluation of cause-effect relationships."
197
+ primary_concept = concepts[0]
198
+ return f"Structured analysis shows that '{primary_concept}' follows deterministic principles. Cause-effect mapping suggests systematic approach yields optimal outcomes."
199
+
200
+ def _generate_creative_response_ml(self, concepts: List[str], sentiment: Dict) -> str:
201
+ if not concepts:
202
+ return "Creative synthesis reveals novel connections emerging from conceptual intersections."
203
+ primary_concept = concepts[0]
204
+ return f"Creative synthesis transforms '{primary_concept}' through multi-dimensional perspective shifts. Emergent patterns suggest innovative approaches through systematic exploration."
205
+
206
+ def _generate_technical_insight_ml(self, concepts: List[str], sentiment: Dict) -> str:
207
+ if not concepts:
208
+ return "Technical analysis requires precise parameter identification and systematic adjustment."
209
+ primary_concept = concepts[0]
210
+ return f"Technical analysis of '{primary_concept}' indicates specific parameter optimization opportunities. Systematic calibration yields measurable improvements."
211
+
212
def analyze_sentiment(self, text: str) -> Dict[str, float]:
    """Run the VADER analyzer over ``text`` and return its polarity scores."""
    scores = self.analyzer.polarity_scores(text)
    try:
        self.audit_log(f"Sentiment analysis: {scores}")
    except Exception:
        # Audit trail is best-effort only; never let it break analysis.
        logger.debug("audit_log unavailable during sentiment analysis")
    return scores
219
+
220
def extract_key_concepts(self, text: str) -> List[str]:
    """Return up to five deduplicated alphabetic tokens (length > 2) from ``text``.

    Uses NLTK tokenization when available; falls back to a plain
    whitespace split (no deduplication) if tokenization fails.
    """
    lowered = text.lower()
    try:
        ordered = {}  # dict preserves first-seen order for dedup
        for token in word_tokenize(lowered):
            if token.isalpha() and len(token) > 2:
                ordered.setdefault(token, None)
        return list(ordered)[:5]
    except Exception as e:
        logger.warning(f"Could not extract concepts: {e}")
        return [w for w in lowered.split() if len(w) > 2][:5]
228
+
229
def audit_log(self, message: str, system: bool = False) -> None:
    """Record an audit message attributed to SYSTEM or the current user.

    Fix: use lazy %-style logging arguments instead of an f-string so the
    string is only formatted when the log level is enabled.
    """
    source = "SYSTEM" if system else self.user_name
    logger.info("%s: %s", source, message)
232
+
233
def _initialize_supabase(self):
    """Create a Supabase client from environment credentials.

    Checks several common env-var spellings for the URL and key.
    Returns the client, or None when credentials or the SDK are missing.

    Fix: the original also imported ``Client`` but never used it.
    """
    try:
        from supabase import create_client
        supabase_url = (
            os.environ.get('VITE_SUPABASE_URL') or
            os.environ.get('SUPABASE_URL') or
            os.environ.get('NEXT_PUBLIC_SUPABASE_URL')
        )
        supabase_key = (
            os.environ.get('VITE_SUPABASE_ANON_KEY') or
            os.environ.get('SUPABASE_KEY') or
            os.environ.get('SUPABASE_SERVICE_ROLE_KEY') or
            os.environ.get('NEXT_PUBLIC_SUPABASE_ANON_KEY')
        )
        if supabase_url and supabase_key:
            client = create_client(supabase_url, supabase_key)
            logger.info("✅ Supabase client initialized")
            return client
        logger.warning("⚠️ Supabase credentials not found in environment")
        return None
    except Exception as e:
        logger.warning(f"⚠️ Could not initialize Supabase: {e}")
        return None
257
+
258
def save_conversation_to_db(self, user_message: str, codette_response: str) -> None:
    """Persist one exchange to the 'chat_history' table; no-op without a client."""
    client = self.supabase_client
    if not client:
        return
    try:
        record = {
            "user_message": user_message,
            "codette_response": codette_response,
            "timestamp": datetime.now().isoformat(),
            "user_name": self.user_name
        }
        client.table('chat_history').insert(record).execute()
        logger.debug("Conversation saved to Supabase")
    except Exception as e:
        # Persistence is best-effort; swallow and log quietly.
        logger.debug(f"Could not save conversation: {e}")
272
+
273
async def generate_response(self, query: str, user_id: int = 0, daw_context: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
    """Async wrapper around :meth:`respond` returning a structured payload.

    On any failure, returns a fallback dict carrying the error message
    instead of raising.
    """
    try:
        payload: Dict[str, Any] = {
            "response": self.respond(query),
            "sentiment": self.analyze_sentiment(query),
            "confidence": 0.85,
            "timestamp": datetime.now().isoformat(),
            "source": "codette_new",
            "ml_enhanced": True,
            "security_filtered": True,
            "health_status": "healthy"
        }
        if daw_context:
            payload["daw_context"] = daw_context
        return payload
    except Exception as e:
        logger.error(f"Response generation failed: {e}")
        return {
            "error": str(e),
            "response": "I encountered an issue. Could you rephrase your question?",
            "fallback": True,
            "timestamp": datetime.now().isoformat()
        }
298
+
299
def generate_mixing_suggestions(self, track_type: str, track_info: dict) -> List[str]:
    """Return up to four mixing tips for a track.

    Level advice comes first (based on ``peak_level``, default 0 dB),
    then per-track-type tips, then mute/solo state warnings; the list
    is truncated to four entries.
    """
    tips: List[str] = []
    peak = track_info.get('peak_level', 0)
    if peak > -3:
        tips.append("Reduce level to prevent clipping (aim for -6dB peak)")
    elif peak < -20:
        tips.append("Increase level - track is very quiet (aim for -12dB to -6dB)")
    per_type = {
        'audio': [
            "Apply high-pass filter at 80-100Hz to remove rumble",
            "Check for phase issues if recording in stereo",
            "Use compression to control dynamics (4:1 ratio, 10ms attack)",
        ],
        'instrument': [
            "Add gentle compression for consistency (3:1 ratio)",
            "EQ to fit in frequency spectrum - boost presence around 3-5kHz",
            "Consider reverb send for spatial depth",
        ],
        'midi': [
            "Adjust velocity curves for natural dynamics",
            "Layer with EQ and compression for polish",
        ],
    }
    tips.extend(per_type.get(track_type, []))
    if track_info.get('muted'):
        tips.append("⚠️ Track is muted - unmute to hear in mix")
    if track_info.get('soloed'):
        tips.append("ℹ️ Track is soloed - unsolo to hear full mix context")
    return tips[:4]
322
+
323
def analyze_daw_context(self, daw_context: dict) -> Dict[str, Any]:
    """Summarize a DAW session dict into health, issues and recommendations.

    Tolerates non-dict input (treated as an empty session, BPM 120).
    Health escalates 'good' -> 'warning' (>64 tracks) -> 'critical'
    (>100 tracks).
    """
    is_dict = isinstance(daw_context, dict)
    tracks = daw_context.get('tracks', []) if is_dict else []
    report: Dict[str, Any] = {
        'track_count': len(tracks),
        'recommendations': [],
        'potential_issues': [],
        'session_health': 'good'
    }
    count = report['track_count']
    if count > 64:
        report['potential_issues'].append("High track count (>64) may impact CPU performance")
        report['session_health'] = 'warning'
    if count > 100:
        report['potential_issues'].append("Very high track count (>100) - consider bouncing to audio")
        report['session_health'] = 'critical'
    muted = sum(1 for t in tracks if t.get('muted', False))
    if tracks and muted > count * 0.3:
        report['potential_issues'].append(f"{muted} muted tracks - consider archiving unused content")
    report['recommendations'] += [
        "Use color coding for track organization",
        "Create buses for grouped processing (drums, vocals, etc)",
        "Leave 6dB headroom on master for mastering",
    ]
    bpm = daw_context.get('bpm', 120) if is_dict else 120
    if bpm:
        report['recommendations'].append(f"Current BPM: {bpm} - sync delay times to tempo for musical results")
    return report
347
+
348
def get_personality_prefix(self) -> str:
    """Return the bracketed display prefix for the active personality mode."""
    return {
        'technical_expert': '[Technical Expert]',
        'creative_mentor': '[Creative Mentor]',
        'practical_guide': '[Practical Guide]',
        'analytical_teacher': '[Analytical Teacher]',
        'innovative_explorer': '[Innovation Explorer]'
    }.get(self.current_personality, '[Expert]')
357
+