duongve commited on
Commit
531efa1
·
verified ·
1 Parent(s): e814e98

Upload check.txt.txt

Browse files
Files changed (1) hide show
  1. QA_result/check.txt.txt +620 -0
QA_result/check.txt.txt ADDED
@@ -0,0 +1,620 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Emotion Detection Service Module
3
+
4
+ Provides async emotion detection from audio with:
5
+ - Concurrent request handling via semaphores
6
+ - Audio chunking for multi-emotion detection
7
+ - Temporary file management
8
+ - Thread-safe model inference
9
+ """
10
+
11
+ import asyncio
12
+ import os
13
+ import uuid
14
+ import threading
15
+ import numpy as np
16
+ import soundfile as sf
17
+ from pathlib import Path
18
+ from typing import Dict, List, Optional, Tuple, Any
19
+ from datetime import datetime
20
+ from collections import defaultdict
21
+
22
+ from .config import EmotionConfig, get_emotion_config
23
+ from .history import EmotionHistory, EmotionTurn, build_emotion_prompt_context
24
+
25
+
26
+ def _ensure_mono_1d(audio: np.ndarray) -> np.ndarray:
27
+ """
28
+ Ensure audio is a 1D mono array.
29
+
30
+ Handles various audio formats:
31
+ - Already 1D: return as-is
32
+ - (samples, channels): transpose and average channels
33
+ - (channels, samples): average channels
34
+ - (1, samples) or (samples, 1): squeeze to 1D
35
+
36
+ Args:
37
+ audio: Audio data as numpy array (1D or 2D)
38
+
39
+ Returns:
40
+ 1D mono audio array
41
+ """
42
+ if audio.ndim == 1:
43
+ return audio
44
+
45
+ if audio.ndim != 2:
46
+ # For higher dimensional arrays, try to flatten
47
+ print(f"[EMOTION] Warning: unexpected audio shape {audio.shape}, attempting to flatten")
48
+ return audio.flatten()
49
+
50
+ # 2D array - determine format and convert to mono
51
+ rows, cols = audio.shape
52
+
53
+ # Check if one dimension is small (likely channels: 1 or 2)
54
+ if rows <= 2 and cols > 2:
55
+ # Shape is (channels, samples) - average across axis 0
56
+ if rows == 1:
57
+ return audio.squeeze(axis=0)
58
+ else:
59
+ return np.mean(audio, axis=0)
60
+
61
+ elif cols <= 2 and rows > 2:
62
+ # Shape is (samples, channels) - average across axis 1
63
+ if cols == 1:
64
+ return audio.squeeze(axis=1)
65
+ else:
66
+ return np.mean(audio, axis=1)
67
+
68
+ else:
69
+ # Both dimensions are large or both are small
70
+ # Heuristic: if rows > cols, assume (samples, channels)
71
+ if rows > cols:
72
+ if cols == 1:
73
+ return audio.squeeze(axis=1)
74
+ return np.mean(audio, axis=1)
75
+ else:
76
+ if rows == 1:
77
+ return audio.squeeze(axis=0)
78
+ return np.mean(audio, axis=0)
79
+
80
+
81
class EmotionDetectionService:
    """
    Async-safe emotion detection service.

    Handles:
    - Audio preprocessing and chunking
    - Concurrent request management via semaphores
    - Temporary file lifecycle
    - Model inference with thread safety
    """

    def __init__(self, config: Optional[EmotionConfig] = None):
        """
        Initialize the emotion detection service.

        Args:
            config: Optional EmotionConfig. If not provided, loads from .env
        """
        self.config = config or get_emotion_config()
        self._model = None  # ONNX session, loaded lazily on first inference
        self._model_lock = threading.Lock()  # guards lazy model loading
        self._semaphore: Optional[asyncio.Semaphore] = None
        self._history = EmotionHistory()
        self._initialized = False

        # Track active requests for debugging
        self._active_requests: Dict[str, datetime] = {}
        self._request_lock = threading.Lock()

        if self.config.enabled:
            self._initialize()

    def _initialize(self):
        """Initialize the service (create temp directory and semaphore)."""
        if self._initialized:
            return

        try:
            # Create temp directory
            Path(self.config.temp_audio_dir).mkdir(parents=True, exist_ok=True)
            print(f"[EMOTION] Temp audio directory: {self.config.temp_audio_dir}")

            # Create semaphore for concurrent task limiting.
            # NOTE(review): created outside a running event loop; detect_emotion()
            # also re-creates it lazily in case this step was skipped or failed.
            self._semaphore = asyncio.Semaphore(self.config.max_concurrent_tasks)
            print(f"[EMOTION] Max concurrent tasks: {self.config.max_concurrent_tasks}")

            # Lazy load model on first use
            self._initialized = True
            print(f"[EMOTION] Service initialized successfully")

        except Exception as e:
            # Initialization failure disables the feature instead of crashing the app.
            print(f"[EMOTION] Failed to initialize service: {e}")
            self.config.enabled = False

    def _ensure_model(self):
        """Ensure model is loaded (lazy loading with thread safety)."""
        # Fast path: already loaded, no lock needed.
        if self._model is not None:
            return

        with self._model_lock:
            # Double-check inside the lock: another thread may have loaded it.
            if self._model is not None:
                return

            try:
                import onnxruntime as ort

                # Prefer CUDA when available, always keep CPU as fallback.
                available = ort.get_available_providers()
                providers = []
                if "CUDAExecutionProvider" in available:
                    providers.append("CUDAExecutionProvider")
                    print(f"[EMOTION] CUDA available, using GPU acceleration")
                providers.append("CPUExecutionProvider")

                # Load model
                self._model = ort.InferenceSession(
                    self.config.model_path,
                    providers=providers
                )

                # Log model info
                actual_provider = self._model.get_providers()[0]
                print(f"[EMOTION] Model loaded from: {self.config.model_path}")
                print(f"[EMOTION] Running on: {actual_provider}")

            except Exception as e:
                print(f"[EMOTION] Failed to load model: {e}")
                self.config.enabled = False
                raise

    def _generate_request_id(self) -> str:
        """Generate unique request ID for tracking."""
        return f"emo_{uuid.uuid4().hex[:12]}_{int(datetime.now().timestamp() * 1000)}"

    def _get_temp_audio_path(self, request_id: str, chunk_idx: int = 0) -> str:
        """
        Generate temporary audio file path.

        Args:
            request_id: Unique request identifier
            chunk_idx: Index of audio chunk (for multi-emotion)

        Returns:
            Full path to temporary audio file
        """
        filename = f"{request_id}_chunk{chunk_idx}{self.config.audio_extension}"
        return os.path.join(self.config.temp_audio_dir, filename)

    def _split_audio_into_chunks(
        self,
        audio: np.ndarray,
        sample_rate: int,
        chunk_duration: int
    ) -> List[np.ndarray]:
        """
        Split audio into fixed-duration chunks.

        Args:
            audio: Audio data as numpy array (1D or 2D)
            sample_rate: Sample rate of audio
            chunk_duration: Duration of each chunk in seconds

        Returns:
            List of audio chunks as numpy arrays (1D mono); the final chunk
            is zero-padded to exactly chunk_duration seconds.
        """
        # Ensure audio is 1D mono before processing
        audio = _ensure_mono_1d(audio)

        chunk_samples = chunk_duration * sample_rate
        total_samples = len(audio)

        if total_samples <= chunk_samples:
            # Single chunk, pad if needed
            if total_samples < chunk_samples:
                audio = np.pad(audio, (0, chunk_samples - total_samples), mode='constant')
            return [audio]

        # Split into multiple chunks
        chunks = []
        for start in range(0, total_samples, chunk_samples):
            end = start + chunk_samples
            chunk = audio[start:end]

            # Pad last chunk if needed
            if len(chunk) < chunk_samples:
                chunk = np.pad(chunk, (0, chunk_samples - len(chunk)), mode='constant')

            chunks.append(chunk)

        return chunks

    async def _save_audio_chunk(
        self,
        audio: np.ndarray,
        sample_rate: int,
        path: str
    ) -> bool:
        """
        Save audio chunk to file asynchronously.

        Args:
            audio: Audio data
            sample_rate: Sample rate
            path: Output file path

        Returns:
            True if successful
        """
        try:
            # Run file I/O in thread pool so the event loop is not blocked.
            # get_running_loop() replaces the deprecated get_event_loop().
            await asyncio.get_running_loop().run_in_executor(
                None,
                lambda: sf.write(path, audio.astype(np.float32), sample_rate)
            )
            return True
        except Exception as e:
            print(f"[EMOTION] Failed to save audio chunk: {e}")
            return False

    def _cleanup_temp_file(self, path: str):
        """Remove temporary audio file (no-op if cleanup is disabled)."""
        if self.config.cleanup_temp_files:
            try:
                if os.path.exists(path):
                    os.remove(path)
            except Exception as e:
                # Best-effort cleanup: a leftover temp file is not fatal.
                print(f"[EMOTION] Failed to cleanup temp file {path}: {e}")

    def _run_inference(self, audio_path: str) -> Dict[str, float]:
        """
        Run emotion inference on audio file.

        Args:
            audio_path: Path to audio file

        Returns:
            Dict mapping emotion class to confidence score
            (empty dict on failure)
        """
        self._ensure_model()

        try:
            import librosa
            from audio_emotion_detection.preprocessing import MelSTFT

            # Get model input/output info
            input_name = self._model.get_inputs()[0].name
            output_name = self._model.get_outputs()[0].name

            # Preprocess audio into a log-mel spectrogram.
            mel = MelSTFT(n_mels=128, sr=32000, win_length=800, hopsize=320)
            waveform, _ = librosa.core.load(audio_path, sr=32000, mono=True)
            waveform = np.stack([waveform])
            spec = mel(waveform)

            # Ensure correct temporal dimension (400 frames for ~4s audio)
            if spec.shape[-1] > 400:
                spec = spec[:, :, :400]
            else:
                spec = np.pad(spec, ((0, 0), (0, 0), (0, 400 - spec.shape[-1])), mode='constant')

            spec = np.expand_dims(spec, axis=0).astype(np.float32)

            # Run inference
            output = self._model.run([output_name], {input_name: spec})

            # Softmax (max-subtracted for numerical stability)
            logits = output[0][0]
            exp_logits = np.exp(logits - np.max(logits))
            probs = exp_logits / np.sum(exp_logits)

            # Map to emotion classes
            results = {}
            for i, class_name in enumerate(self.config.class_labels):
                results[class_name] = float(probs[i])

            return results

        except Exception as e:
            print(f"[EMOTION] Inference failed: {e}")
            import traceback
            traceback.print_exc()
            return {}

    async def detect_emotion(
        self,
        audio: np.ndarray,
        sample_rate: int,
        request_id: Optional[str] = None
    ) -> Dict[str, float]:
        """
        Detect emotion from single audio segment.

        Args:
            audio: Audio data as numpy array (1D or 2D)
            sample_rate: Sample rate of audio
            request_id: Optional request ID for tracking

        Returns:
            Dict mapping emotion class to confidence score
            (empty dict when disabled, on timeout, or on failure)
        """
        if not self.config.enabled:
            return {}

        request_id = request_id or self._generate_request_id()

        # Ensure audio is 1D mono before processing
        audio = _ensure_mono_1d(audio)

        # Track request
        with self._request_lock:
            self._active_requests[request_id] = datetime.now()

        try:
            # FIX: the semaphore may be None if _initialize() failed or was
            # skipped; create it here (inside a running loop) instead of
            # raising TypeError on `async with None`.
            if self._semaphore is None:
                self._semaphore = asyncio.Semaphore(self.config.max_concurrent_tasks)

            # Acquire semaphore for concurrency control
            async with self._semaphore:
                # Resample if needed
                if sample_rate != self.config.sample_rate:
                    try:
                        import librosa
                        audio = librosa.resample(
                            audio.astype(np.float32),
                            orig_sr=sample_rate,
                            target_sr=self.config.sample_rate
                        )
                        sample_rate = self.config.sample_rate
                    except Exception as e:
                        print(f"[EMOTION] Resampling failed: {e}")

                # Ensure correct duration: truncate long audio, zero-pad short audio.
                target_samples = self.config.audio_duration * sample_rate
                if len(audio) > target_samples:
                    audio = audio[:target_samples]
                elif len(audio) < target_samples:
                    audio = np.pad(audio, (0, target_samples - len(audio)), mode='constant')

                # Save to temp file
                temp_path = self._get_temp_audio_path(request_id)
                if not await self._save_audio_chunk(audio, sample_rate, temp_path):
                    return {}

                try:
                    # Run inference in thread pool with a hard timeout.
                    result = await asyncio.wait_for(
                        asyncio.get_running_loop().run_in_executor(
                            None,
                            self._run_inference,
                            temp_path
                        ),
                        timeout=self.config.detection_timeout
                    )
                    return result
                finally:
                    # Cleanup temp file
                    self._cleanup_temp_file(temp_path)

        except asyncio.TimeoutError:
            print(f"[EMOTION] Detection timeout for request {request_id}")
            return {}
        except Exception as e:
            print(f"[EMOTION] Detection failed for request {request_id}: {e}")
            return {}
        finally:
            # Remove from active requests
            with self._request_lock:
                self._active_requests.pop(request_id, None)

    async def detect_emotions_multi(
        self,
        audio: np.ndarray,
        sample_rate: int,
        request_id: Optional[str] = None
    ) -> List[Dict[str, float]]:
        """
        Detect emotions from audio with multiple chunks.

        If multi_emotion_per_conversation is enabled, splits audio into
        chunks and detects emotion for each. Otherwise, uses first chunk only.

        Args:
            audio: Audio data as numpy array (1D or 2D)
            sample_rate: Sample rate of audio
            request_id: Optional request ID for tracking

        Returns:
            List of dicts, each mapping emotion class to confidence score
        """
        if not self.config.enabled:
            return []

        request_id = request_id or self._generate_request_id()

        # Ensure audio is 1D mono before processing
        audio = _ensure_mono_1d(audio)

        # Resample once up front so every chunk shares the target rate.
        if sample_rate != self.config.sample_rate:
            try:
                import librosa
                audio = librosa.resample(
                    audio.astype(np.float32),
                    orig_sr=sample_rate,
                    target_sr=self.config.sample_rate
                )
                sample_rate = self.config.sample_rate
            except Exception as e:
                print(f"[EMOTION] Resampling failed: {e}")
                return []

        if not self.config.multi_emotion_per_conversation:
            # Single chunk mode
            result = await self.detect_emotion(audio, sample_rate, request_id)
            return [result] if result else []

        # Multi-chunk mode
        chunks = self._split_audio_into_chunks(
            audio, sample_rate, self.config.audio_duration
        )

        print(f"[EMOTION] Processing {len(chunks)} audio chunks for request {request_id}")

        # Process chunks concurrently (bounded by the shared semaphore).
        tasks = []
        for i, chunk in enumerate(chunks):
            chunk_request_id = f"{request_id}_c{i}"
            tasks.append(self.detect_emotion(chunk, sample_rate, chunk_request_id))

        results = await asyncio.gather(*tasks, return_exceptions=True)

        # Filter out failures
        valid_results = []
        for result in results:
            if isinstance(result, dict) and result:
                valid_results.append(result)
            elif isinstance(result, Exception):
                print(f"[EMOTION] Chunk detection failed: {result}")

        return valid_results

    def aggregate_emotions(
        self,
        emotion_results: List[Dict[str, float]]
    ) -> Dict[str, float]:
        """
        Aggregate emotions from multiple chunks into single result.

        Uses average confidence across all chunks.

        Args:
            emotion_results: List of emotion dicts from each chunk

        Returns:
            Aggregated emotion dict with average confidences
        """
        if not emotion_results:
            return {}

        if len(emotion_results) == 1:
            # FIX: return a copy so callers mutating the result don't
            # alias the caller-owned input dict.
            return dict(emotion_results[0])

        # Average across all results
        aggregated = defaultdict(float)
        for result in emotion_results:
            for emotion, confidence in result.items():
                aggregated[emotion] += confidence

        n = len(emotion_results)
        return {k: v / n for k, v in aggregated.items()}

    async def process_conversation_turn(
        self,
        user_id: str,
        text: str,
        audio: np.ndarray,
        sample_rate: int
    ) -> Tuple[Optional[EmotionTurn], bool, str]:
        """
        Process a full conversation turn with emotion tracking.

        This is the main entry point for integrating emotion detection
        into the conversation pipeline.

        Args:
            user_id: User identifier for tracking
            text: Transcribed text from STT
            audio: Audio data (1D or 2D)
            sample_rate: Sample rate

        Returns:
            Tuple of:
            - EmotionTurn object (None when disabled or nothing detected)
            - bool: Whether empathy should be triggered
            - str: Emotion prompt context (empty if no empathy needed)
        """
        if not self.config.enabled or not text.strip():
            return None, False, ""

        request_id = self._generate_request_id()

        # Log audio shape for debugging
        print(f"[EMOTION] Processing turn for user {user_id}, request {request_id}, audio shape: {audio.shape}")

        # Detect emotions (multi-chunk if enabled)
        emotion_results = await self.detect_emotions_multi(audio, sample_rate, request_id)

        if not emotion_results:
            print(f"[EMOTION] No emotions detected for request {request_id}")
            return None, False, ""

        # Aggregate results
        aggregated = self.aggregate_emotions(emotion_results)

        print(f"[EMOTION] Detected emotions: {aggregated}")

        # Record in history
        turn = self._history.add_turn(
            user_id=user_id,
            text=text,
            emotions=aggregated,
            negative_classes=self.config.negative_classes,
            min_confidence_scores=self.config.min_confidence_scores
        )

        print(f"[EMOTION] Turn recorded - negative majority: {turn.is_negative_majority}")

        # Check if empathy should be triggered
        should_empathize, negative_turns = self._history.should_trigger_empathy(
            user_id=user_id,
            consecutive_threshold=self.config.consecutive_count
        )

        emotion_prompt = ""
        if should_empathize:
            print(f"[EMOTION] Triggering empathetic response for user {user_id}")
            emotion_prompt = build_emotion_prompt_context(negative_turns)

        return turn, should_empathize, emotion_prompt

    def get_user_emotion_summary(self, user_id: str) -> Dict:
        """Get emotion summary for a user."""
        return self._history.get_emotion_summary(user_id)

    def reset_user_emotions(self, user_id: str):
        """Reset emotion history for a user."""
        self._history.reset_user_history(user_id)

    def get_active_request_count(self) -> int:
        """Get count of currently active emotion detection requests."""
        with self._request_lock:
            return len(self._active_requests)
590
+
591
+
592
# Global service instance (lazy loaded)
_GLOBAL_SERVICE: Optional[EmotionDetectionService] = None
# Guards creation/replacement of the global instance across threads.
_SERVICE_LOCK = threading.Lock()
595
+
596
+
597
def get_emotion_service() -> EmotionDetectionService:
    """Return the process-wide emotion service, creating it on first use."""
    global _GLOBAL_SERVICE
    if _GLOBAL_SERVICE is not None:
        return _GLOBAL_SERVICE
    with _SERVICE_LOCK:
        # Re-check under the lock: another thread may have created it first.
        if _GLOBAL_SERVICE is None:
            _GLOBAL_SERVICE = EmotionDetectionService()
    return _GLOBAL_SERVICE
605
+
606
+
607
def initialize_emotion_service(config: Optional[EmotionConfig] = None) -> EmotionDetectionService:
    """
    Initialize or reinitialize the global emotion service.

    Args:
        config: Optional config. If not provided, loads from .env

    Returns:
        The initialized service instance
    """
    global _GLOBAL_SERVICE
    with _SERVICE_LOCK:
        # Replace any existing instance unconditionally.
        service = EmotionDetectionService(config)
        _GLOBAL_SERVICE = service
        return service