Kevin King committed on
Commit
5710525
·
1 Parent(s): d136cd5

remaking src folder

Files changed (1)
  1. app.py → src/app.py +212 -212
app.py → src/app.py RENAMED
@@ -1,213 +1,213 @@
import streamlit as st
from streamlit_webrtc import webrtc_streamer, WebRtcMode, RTCConfiguration
import av
import numpy as np
import torch
import whisper
from transformers import pipeline, AutoModelForAudioClassification, AutoFeatureExtractor
from deepface import DeepFace
import logging
import queue
import soundfile as sf
from scipy.io.wavfile import write as write_wav
import os
import tempfile

# --- Page Configuration ---
st.set_page_config(
    page_title="AffectLink Online Demo",
    page_icon="😊",
    layout="wide"
)

st.title("AffectLink: Real-time Emotion Recognition")
st.write("This demo analyzes your facial expressions in real-time and processes short audio clips for speech and text-based emotion.")

# --- Logger Configuration ---
# Suppress noisy logs from libraries
logging.basicConfig(level=logging.INFO)
logging.getLogger('deepface').setLevel(logging.ERROR)
logging.getLogger('huggingface_hub').setLevel(logging.WARNING)

# --- Emotion Mappings ---
UNIFIED_EMOTIONS = ['neutral', 'happy', 'sad', 'angry']
FACIAL_TO_UNIFIED = {
    'neutral': 'neutral', 'happy': 'happy', 'sad': 'sad', 'angry': 'angry',
    'fear': None, 'surprise': None, 'disgust': None
}
TEXT_TO_UNIFIED = {
    'neutral': 'neutral', 'joy': 'happy', 'sadness': 'sad', 'anger': 'angry',
    'fear': None, 'surprise': None, 'disgust': None
}
SER_TO_UNIFIED = {
    'neu': 'neutral', 'hap': 'happy', 'sad': 'sad', 'ang': 'angry'
}
AUDIO_SAMPLE_RATE = 16000
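# 16 kHz is the rate the HuBERT SER model expects and the rate the captured audio is
# assumed to arrive at; browser WebRTC capture often runs at 48 kHz, so resampling may
# be needed before writing the buffer out at this rate (not handled explicitly below).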

# --- Model Loading ---
# Use Streamlit's caching to load models only once.
@st.cache_resource
def load_models():
    with st.spinner("Loading AI models... This may take a moment on first run."):
        # Whisper for Speech-to-Text
        whisper_model = whisper.load_model("base")

        # Text Emotion Classifier
        text_classifier = pipeline("text-classification", model="j-hartmann/emotion-english-distilroberta-base", top_k=None)
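        # top_k=None asks the pipeline for scores on every label so they can be
        # remapped onto the unified emotion set later on.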

        # Speech Emotion Recognition (SER)
        ser_model_name = "superb/hubert-large-superb-er"
        ser_feature_extractor = AutoFeatureExtractor.from_pretrained(ser_model_name)
        ser_model = AutoModelForAudioClassification.from_pretrained(ser_model_name)

        # NEW: Pre-load the DeepFace model to prevent lag on first use
        DeepFace.build_model("Emotion")

    return whisper_model, text_classifier, ser_model, ser_feature_extractor

whisper_model, text_classifier, ser_model, ser_feature_extractor = load_models()
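# st.cache_resource keeps the loaded models in memory across reruns and sessions, so the
# downloads and initialisation above only happen once per server process.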

# --- WebRTC and Video Processing ---
webrtc_ctx = webrtc_streamer(
    key="affectlink-video",
    mode=WebRtcMode.SENDRECV,
    rtc_configuration=RTCConfiguration({"iceServers": [{"urls": ["stun:stun.l.google.com:19302"]}]}),
    media_stream_constraints={"video": True, "audio": False},
    async_processing=True,
)

if 'facial_emotion' not in st.session_state:
    st.session_state.facial_emotion = "Neutral"
if 'last_emotion_time' not in st.session_state:
    st.session_state.last_emotion_time = 0
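# Note: streamlit_webrtc invokes the frame callback below from a worker thread rather
# than the main Streamlit script thread, so updates to st.session_state made there may
# not propagate to the page reliably (behaviour depends on the streamlit_webrtc version).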

def video_frame_callback(frame: av.VideoFrame) -> av.VideoFrame:
    img = frame.to_ndarray(format="bgr24")

    # Analyze every frame when a GPU is available; otherwise only every 5 seconds to conserve CPU
    last_time = st.session_state.get('last_emotion_time', 0)
    if torch.cuda.is_available() or (frame.time is not None and frame.time - last_time > 5):
        try:
            analysis = DeepFace.analyze(
                img,
                actions=['emotion'],
                enforce_detection=False,
                silent=True
            )
            if isinstance(analysis, list) and len(analysis) > 0:
                dominant_emotion = analysis[0]['dominant_emotion']
                st.session_state.facial_emotion = dominant_emotion.capitalize()
            else:
                st.session_state.facial_emotion = "Unknown"
        except Exception as e:
            logging.error(f"DeepFace analysis failed: {e}")
            st.session_state.facial_emotion = "Error"

        st.session_state.last_emotion_time = frame.time

    return av.VideoFrame.from_ndarray(img, format="bgr24")

if webrtc_ctx.video_processor:
    webrtc_ctx.video_processor.video_frame_callback = video_frame_callback

# --- Audio Processing ---
if "audio_buffer" not in st.session_state:
    st.session_state.audio_buffer = []

def audio_frame_callback(frame: av.AudioFrame):
    sound = np.frombuffer(frame.to_ndarray(), dtype=np.int16)
    st.session_state.audio_buffer.append(sound)
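    # Each av.AudioFrame is converted to raw int16 PCM and appended to the buffer.
    # The track's native sample rate and channel layout (often 48 kHz stereo from the
    # browser) are assumed rather than checked here.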

webrtc_streamer(
    key="affectlink-audio",
    mode=WebRtcMode.RECVONLY,
    media_stream_constraints={"video": False, "audio": True},
    audio_frame_callback=audio_frame_callback,
    async_processing=True,
)
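# Note: capturing the microphone requires the browser to send audio to the server;
# depending on the streamlit_webrtc version in use, WebRtcMode.SENDONLY (browser -> server)
# may be the intended mode for this audio-only streamer rather than RECVONLY.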

# --- UI Layout ---
st.sidebar.header("Facial Emotion")
st.sidebar.metric("Current Expression", st.session_state.get('facial_emotion', 'N/A'))
st.sidebar.info("Facial emotion is updated every 5 seconds to optimize performance.")
st.sidebar.divider()

st.sidebar.header("Audio Analysis")
is_recording = st.sidebar.checkbox("Start Recording Audio")

# NEW: Set up placeholders for audio results
st.sidebar.subheader("Transcription:")
transcription_placeholder = st.sidebar.empty()
transcription_placeholder.write("_Waiting for audio..._")

st.sidebar.subheader("Text Emotion:")
text_emotion_placeholder = st.sidebar.empty()
text_emotion_placeholder.write("_Waiting for audio..._")

st.sidebar.subheader("Speech Emotion:")
ser_placeholder = st.sidebar.empty()
ser_placeholder.write("_Waiting for audio..._")


if not is_recording and st.session_state.audio_buffer:
    # Combine audio chunks
    audio_data = np.concatenate(st.session_state.audio_buffer)
    st.session_state.audio_buffer = []  # Clear buffer

    # Save to a temporary file
    with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as tmp_audio_file:
        write_wav(tmp_audio_file.name, AUDIO_SAMPLE_RATE, audio_data)
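    # The buffered int16 samples are written as a WAV at AUDIO_SAMPLE_RATE. If the
    # WebRTC track actually delivered audio at a different rate, it would need to be
    # resampled first (assumed, not handled here).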

    # 1. Speech-to-Text (Whisper)
    with st.spinner("Transcribing audio..."):
        try:
            result = whisper_model.transcribe(tmp_audio_file.name, fp16=False)
            transcribed_text = result['text']
        except Exception as e:
            transcribed_text = f"Transcription failed: {e}"
        transcription_placeholder.write(f'"{transcribed_text}"')
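    # fp16=False keeps Whisper in full precision, which avoids the FP16-on-CPU warning
    # when the demo runs without a GPU.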

    # 2. Text-based Emotion
    with st.spinner("Analyzing text emotion..."):
        if transcribed_text:
            try:
                text_emotions = text_classifier(transcribed_text)[0]
                unified_text_scores = {e: 0.0 for e in UNIFIED_EMOTIONS}
                for emo in text_emotions:
                    unified_emo = TEXT_TO_UNIFIED.get(emo['label'])
                    if unified_emo:
                        unified_text_scores[unified_emo] += emo['score']

                dominant_text_emotion = max(unified_text_scores, key=unified_text_scores.get)
            except Exception as e:
                dominant_text_emotion = f"Text analysis failed: {e}"
        else:
            dominant_text_emotion = "No text to analyze."
        text_emotion_placeholder.write(dominant_text_emotion.capitalize())

    # 3. Speech Emotion Recognition (SER)
    with st.spinner("Analyzing speech emotion..."):
        try:
            audio_array, _ = sf.read(tmp_audio_file.name)
            inputs = ser_feature_extractor(audio_array, sampling_rate=AUDIO_SAMPLE_RATE, return_tensors="pt", padding=True)
            with torch.no_grad():
                logits = ser_model(**inputs).logits
            scores = torch.nn.functional.softmax(logits, dim=1).squeeze()

            unified_ser_scores = {e: 0.0 for e in UNIFIED_EMOTIONS}
            for i, score in enumerate(scores):
                raw_emo = ser_model.config.id2label[i]
                unified_emo = SER_TO_UNIFIED.get(raw_emo)
                if unified_emo:
                    unified_ser_scores[unified_emo] += score.item()

            dominant_ser_emotion = max(unified_ser_scores, key=unified_ser_scores.get)
        except Exception as e:
            dominant_ser_emotion = f"Speech analysis failed: {e}"
        ser_placeholder.write(dominant_ser_emotion.capitalize())
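    # The SER head reports short label codes via id2label (assumed to be 'neu', 'hap',
    # 'ang' and 'sad' for superb/hubert-large-superb-er), which is why SER_TO_UNIFIED
    # maps those abbreviations onto the unified emotion names.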

    # Clean up the temporary file
    os.unlink(tmp_audio_file.name)

elif is_recording:
    st.sidebar.warning("Recording audio... Uncheck to stop and process.")
 