Kevin King committed
Commit 764dc1d · 1 Parent(s): 650fd5d

REFAC: Remove unused streamlit app files and clean up imports in streamlit_app.py

src/streamlit_app.py CHANGED
@@ -11,7 +11,7 @@ import tempfile
 import cv2
 from moviepy.editor import VideoFileClip
 import time
-import shutil # Import the shutil library for file copying
+import shutil
 
 # --- Create a cross-platform, writable cache directory for all libraries ---
 CACHE_DIR = os.path.join(tempfile.gettempdir(), "affectlink_cache")
@@ -19,7 +19,6 @@ os.makedirs(CACHE_DIR, exist_ok=True)
 os.environ['DEEPFACE_HOME'] = CACHE_DIR
 os.environ['HF_HOME'] = CACHE_DIR
 
-# === THIS IS THE NEW CODE TO PRELOAD THE DEEPFACE MODEL ===
 # Define paths for the pre-included model weights
 MODEL_NAME = "facial_expression_model_weights.h5"
 SOURCE_PATH = os.path.join("src", "weights", MODEL_NAME)
@@ -37,7 +36,6 @@ if not os.path.exists(DEST_PATH):
             print(f"Warning: Local model file not found at {SOURCE_PATH}. App will attempt to download it.")
     except Exception as e:
         print(f"Error copying model file: {e}")
-# =========================================================
 
 # --- Page Configuration ---
 st.set_page_config(page_title="AffectLink Demo", page_icon="😊", layout="wide")
@@ -45,8 +43,6 @@ st.title("AffectLink: Post-Hoc Emotion Analysis")
 st.write("Upload a short video clip (under 30 seconds) to analyze facial expressions, speech-to-text, and the emotional tone of the audio.")
 
 # --- Logger Configuration ---
-# [The rest of your code remains the same]
-# [I have included the full script below for clarity]
 
 logging.basicConfig(level=logging.INFO)
 logging.getLogger('deepface').setLevel(logging.ERROR)
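The hunks above show only the edges of the weight-preload block; the copy step between them (file lines 25-35) is elided by the diff. Below is a minimal sketch of the pattern those fragments imply, not the committed code: DEST_PATH and the copy step are assumptions, based on DeepFace resolving weights under {DEEPFACE_HOME}/.deepface/weights/.

# Sketch of the DeepFace weight-preload pattern referenced by the hunks above.
# DEST_PATH and the copy step are assumptions; the diff elides those lines.
import os
import shutil
import tempfile

CACHE_DIR = os.path.join(tempfile.gettempdir(), "affectlink_cache")
MODEL_NAME = "facial_expression_model_weights.h5"
SOURCE_PATH = os.path.join("src", "weights", MODEL_NAME)
# DeepFace looks for weights under {DEEPFACE_HOME}/.deepface/weights/ (assumed layout)
DEST_PATH = os.path.join(CACHE_DIR, ".deepface", "weights", MODEL_NAME)

if not os.path.exists(DEST_PATH):
    try:
        if os.path.exists(SOURCE_PATH):
            os.makedirs(os.path.dirname(DEST_PATH), exist_ok=True)
            # Seed the cache so DeepFace skips its network download
            shutil.copy(SOURCE_PATH, DEST_PATH)
        else:
            print(f"Warning: Local model file not found at {SOURCE_PATH}. App will attempt to download it.")
    except Exception as e:
        print(f"Error copying model file: {e}")

The point of the pattern is that the bundled weights ship with the repo while the cache lives in tempfile.gettempdir(), the one location a hosted Streamlit app can reliably write to.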
src/streamlit_app_full.py DELETED
@@ -1,178 +0,0 @@
-import os
-import streamlit as st
-
-# Set home directories for model caching to the writable /tmp folder
-os.environ['DEEPFACE_HOME'] = '/tmp/.deepface'
-os.environ['HF_HOME'] = '/tmp/huggingface'
-
-import numpy as np
-import torch
-import whisper
-from transformers import pipeline, AutoModelForAudioClassification, AutoFeatureExtractor
-from deepface import DeepFace
-import logging
-import soundfile as sf
-from scipy.io.wavfile import write as write_wav
-import tempfile
-from PIL import Image
-import cv2
-from moviepy.editor import VideoFileClip
-
-# Set home directories for model caching inside the app's writable directory
-os.environ['DEEPFACE_HOME'] = '/tmp/.deepface'
-os.environ['HF_HOME'] = '/tmp/huggingface'
-
-# --- Page Configuration ---
-st.set_page_config(
-    page_title="AffectLink Batch Demo",
-    page_icon="😊",
-    layout="wide"
-)
-
-st.title("AffectLink: Post-Hoc Emotion Analysis")
-st.write("Upload a short video clip to analyze facial expressions, speech-to-text, and the emotional tone of the audio.")
-
-# --- Logger Configuration ---
-logging.basicConfig(level=logging.INFO)
-logging.getLogger('deepface').setLevel(logging.ERROR)
-logging.getLogger('huggingface_hub').setLevel(logging.WARNING)
-logging.getLogger('moviepy').setLevel(logging.ERROR)
-
-
-# --- Emotion Mappings ---
-UNIFIED_EMOTIONS = ['neutral', 'happy', 'sad', 'angry']
-TEXT_TO_UNIFIED = {
-    'neutral': 'neutral', 'joy': 'happy', 'sadness': 'sad', 'anger': 'angry',
-    'fear': None, 'surprise': None, 'disgust': None
-}
-SER_TO_UNIFIED = {
-    'neu': 'neutral', 'hap': 'happy', 'sad': 'sad', 'ang': 'angry'
-}
-AUDIO_SAMPLE_RATE = 16000
-
-# --- Model Loading ---
-@st.cache_resource
-def load_models():
-    with st.spinner("Loading AI models, this may take a moment..."):
-        whisper_model = whisper.load_model("base", download_root="/tmp/whisper_cache")
-        text_classifier = pipeline("text-classification", model="j-hartmann/emotion-english-distilroberta-base", top_k=None)
-        ser_model_name = "superb/hubert-large-superb-er"
-        ser_feature_extractor = AutoFeatureExtractor.from_pretrained(ser_model_name)
-        ser_model = AutoModelForAudioClassification.from_pretrained(ser_model_name)
-    return whisper_model, text_classifier, ser_model, ser_feature_extractor
-
-whisper_model, text_classifier, ser_model, ser_feature_extractor = load_models()
-
-
-# --- UI and Processing Logic ---
-uploaded_file = st.file_uploader("Choose a video file...", type=["mp4", "mov", "avi"])
-
-if uploaded_file is not None:
-    # Save the uploaded file to a temporary location
-    with tempfile.NamedTemporaryFile(delete=False, suffix='.mp4') as tfile:
-        tfile.write(uploaded_file.read())
-        temp_video_path = tfile.name
-
-    st.video(temp_video_path)
-
-    if st.button("Analyze Video"):
-        facial_analysis_results = []
-        audio_analysis_results = {}
-
-        # --- Video Processing for Facial Emotion ---
-        with st.spinner("Analyzing video for facial expressions..."):
-            try:
-                cap = cv2.VideoCapture(temp_video_path)
-                fps = cap.get(cv2.CAP_PROP_FPS)
-                frame_count = 0
-                while cap.isOpened():
-                    ret, frame = cap.read()
-                    if not ret:
-                        break
-
-                    # Process one frame per second
-                    if frame_count % int(fps) == 0:
-                        timestamp = frame_count / fps
-                        analysis = DeepFace.analyze(frame, actions=['emotion'], enforce_detection=False, silent=True)
-                        if isinstance(analysis, list) and len(analysis) > 0:
-                            dominant_emotion = analysis[0]['dominant_emotion']
-                            facial_analysis_results.append((timestamp, dominant_emotion.capitalize()))
-
-                    frame_count += 1
-                cap.release()
-            except Exception as e:
-                st.error(f"An error occurred during facial analysis: {e}")
-
-
-        # --- Audio Extraction and Processing ---
-        with st.spinner("Extracting and analyzing audio..."):
-            try:
-                # Extract audio using moviepy
-                video_clip = VideoFileClip(temp_video_path)
-                with tempfile.NamedTemporaryFile(delete=False, suffix='.wav') as taudio:
-                    video_clip.audio.write_audiofile(taudio.name, fps=AUDIO_SAMPLE_RATE, logger=None)
-                    temp_audio_path = taudio.name
-
-                # 1. Speech-to-Text (Whisper)
-                result = whisper_model.transcribe(temp_audio_path, fp16=False)
-                transcribed_text = result['text']
-                audio_analysis_results['Transcription'] = transcribed_text
-
-                # 2. Text-based Emotion
-                if transcribed_text:
-                    text_emotions = text_classifier(transcribed_text)[0]
-                    unified_text_scores = {e: 0.0 for e in UNIFIED_EMOTIONS}
-                    for emo in text_emotions:
-                        unified_emo = TEXT_TO_UNIFIED.get(emo['label'])
-                        if unified_emo:
-                            unified_text_scores[unified_emo] += emo['score']
-                    dominant_text_emotion = max(unified_text_scores, key=unified_text_scores.get)
-                    audio_analysis_results['Text Emotion'] = dominant_text_emotion.capitalize()
-
-                # 3. Speech Emotion Recognition (SER)
-                audio_array, _ = sf.read(temp_audio_path)
-                inputs = ser_feature_extractor(audio_array, sampling_rate=AUDIO_SAMPLE_RATE, return_tensors="pt", padding=True)
-                with torch.no_grad():
-                    logits = ser_model(**inputs).logits
-                scores = torch.nn.functional.softmax(logits, dim=1).squeeze()
-                unified_ser_scores = {e: 0.0 for e in UNIFIED_EMOTIONS}
-                for i, score in enumerate(scores):
-                    raw_emo = ser_model.config.id2label[i]
-                    unified_emo = SER_TO_UNIFIED.get(raw_emo)
-                    if unified_emo:
-                        unified_ser_scores[unified_emo] += score.item()
-                dominant_ser_emotion = max(unified_ser_scores, key=unified_ser_scores.get)
-                audio_analysis_results['Speech Emotion'] = dominant_ser_emotion.capitalize()
-
-                # Clean up temp audio file
-                os.unlink(temp_audio_path)
-
-            except Exception as e:
-                st.error(f"An error occurred during audio analysis: {e}")
-            finally:
-                video_clip.close()
-
-
-        # --- Display Results ---
-        st.header("Analysis Results")
-        col1, col2 = st.columns(2)
-
-        with col1:
-            st.subheader("Audio Analysis")
-            if audio_analysis_results:
-                st.write(f"**Transcription:** \"{audio_analysis_results.get('Transcription', 'N/A')}\"")
-                st.metric("Emotion from Text", audio_analysis_results.get('Text Emotion', 'N/A'))
-                st.metric("Emotion from Speech", audio_analysis_results.get('Speech Emotion', 'N/A'))
-            else:
-                st.write("No audio results to display.")
-
-        with col2:
-            st.subheader("Facial Expression Timeline")
-            if facial_analysis_results:
-                for timestamp, emotion in facial_analysis_results:
-                    st.write(f"**Time {int(timestamp // 60):02d}:{int(timestamp % 60):02d}:** {emotion}")
-            else:
-                st.write("No faces detected or video processing failed.")
-
-    # Clean up temp video file
-    os.unlink(temp_video_path)
src/streamlit_app_imageFER.py DELETED
@@ -1,56 +0,0 @@
-import os
-import streamlit as st
-
-# Point the cache directory to the guaranteed writable /tmp folder
-os.environ['DEEPFACE_HOME'] = '/tmp/.deepface'
-
-from PIL import Image
-import numpy as np
-from deepface import DeepFace
-import logging
-import cv2
-
-# --- Page Configuration ---
-st.set_page_config(
-    page_title="FER Test",
-    page_icon="😀",
-    layout="centered"
-)
-
-st.title("Step 1: Facial Emotion Recognition (FER) Test")
-st.write("Upload an image with a face to test the DeepFace library.")
-
-# --- Logger Configuration ---
-logging.basicConfig(level=logging.INFO)
-logging.getLogger('deepface').setLevel(logging.ERROR)
-
-
-# --- UI and Processing Logic ---
-uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "jpeg", "png"])
-
-if uploaded_file is not None:
-    pil_image = Image.open(uploaded_file)
-    numpy_image = np.array(pil_image)
-    image_bgr = cv2.cvtColor(numpy_image, cv2.COLOR_RGB2BGR)
-
-    st.image(pil_image, caption="Image Uploaded", use_column_width=True)
-
-    with st.spinner("Analyzing image for emotion..."):
-        try:
-            # This will now download its models to /tmp/.deepface
-            analysis = DeepFace.analyze(
-                img_path=image_bgr,
-                actions=['emotion'],
-                enforce_detection=False,
-                silent=True
-            )
-
-            if isinstance(analysis, list) and len(analysis) > 0:
-                dominant_emotion = analysis[0]['dominant_emotion']
-                st.success(f"Dominant Emotion Detected: **{dominant_emotion.capitalize()}**")
-                st.write(analysis[0]['emotion'])
-            else:
-                st.warning("No face detected in the image.")
-
-        except Exception as e:
-            st.error(f"An error occurred during analysis: {e}")