Spaces:
Runtime error
Delete face_emo_analysize.py
Browse files
face_emo_analysize.py  +0 -283
DELETED
@@ -1,283 +0,0 @@
import cv2
import mediapipe as mp
import math
import numpy as np
import time
import torch
from PIL import Image
from torchvision import transforms


# Define the preprocessing function for the PyTorch model
def pth_processing(fp):
    class PreprocessInput(torch.nn.Module):
        def __init__(self):
            super(PreprocessInput, self).__init__()

        def forward(self, x):
            x = x.to(torch.float32)
            x = torch.flip(x, dims=(0,))
            x[0, :, :] -= 91.4953
            x[1, :, :] -= 103.8827
            x[2, :, :] -= 131.0912
            return x

    def get_img_torch(img):
        ttransform = transforms.Compose([
            transforms.PILToTensor(),
            PreprocessInput()
        ])
        img = img.resize((224, 224), Image.Resampling.NEAREST)
        img = ttransform(img)
        img = torch.unsqueeze(img, 0).to('cuda')
        return img

    return get_img_torch(fp)
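
# Note: torch.flip(x, dims=(0,)) reverses the channel axis (RGB -> BGR), and
# the three subtracted constants appear to be per-channel dataset means in the
# VGGFace style; presumably both match the preprocessing the TorchScript model
# below was trained with (an assumption, not documented in this file).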


# Convert normalized landmark coordinates to pixel coordinates
def norm_coordinates(normalized_x, normalized_y, image_width, image_height):
    x_px = min(math.floor(normalized_x * image_width), image_width - 1)
    y_px = min(math.floor(normalized_y * image_height), image_height - 1)
    return x_px, y_px


# Compute the face bounding box from the landmarks
def get_box(fl, w, h):
    idx_to_coors = {}
    for idx, landmark in enumerate(fl.landmark):
        landmark_px = norm_coordinates(landmark.x, landmark.y, w, h)
        if landmark_px:
            idx_to_coors[idx] = landmark_px

    x_min = np.min(np.asarray(list(idx_to_coors.values()))[:, 0])
    y_min = np.min(np.asarray(list(idx_to_coors.values()))[:, 1])
    endX = np.max(np.asarray(list(idx_to_coors.values()))[:, 0])
    endY = np.max(np.asarray(list(idx_to_coors.values()))[:, 1])

    (startX, startY) = (max(0, x_min), max(0, y_min))
    (endX, endY) = (min(w - 1, endX), min(h - 1, endY))
    return startX, startY, endX, endY


# Draw the emotion prediction on the frame
def display_EMO_PRED(img, box, label='', prob=0.0, color=(128, 128, 128), txt_color=(255, 255, 255), line_width=2):
    lw = line_width or max(round(sum(img.shape) / 2 * 0.003), 2)
    text2_color = (255, 0, 255)
    p1, p2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3]))
    cv2.rectangle(img, p1, p2, text2_color, thickness=lw, lineType=cv2.LINE_AA)
    font = cv2.FONT_HERSHEY_SIMPLEX

    tf = max(lw - 1, 1)
    text_fond = (0, 0, 0)

    # Text size of the emotion label
    label_width, label_height = cv2.getTextSize(label, font, lw / 3, tf)[0]

    # Draw the emotion label (black underlay, then colored text)
    cv2.putText(img, label,
                (p1[0], p1[1] - round(((p2[0] - p1[0]) * 20) / 360)), font,
                lw / 3, text_fond, thickness=tf, lineType=cv2.LINE_AA)
    cv2.putText(img, label,
                (p1[0], p1[1] - round(((p2[0] - p1[0]) * 20) / 360)), font,
                lw / 3, text2_color, thickness=tf, lineType=cv2.LINE_AA)

    # Draw the emotion probability next to the label
    prob_text = f"{prob:.2f}"
    prob_width, prob_height = cv2.getTextSize(prob_text, font, lw / 3, tf)[0]
    cv2.putText(img, prob_text,
                (p1[0] + label_width + 5, p1[1] - round(((p2[0] - p1[0]) * 20) / 360)), font,
                lw / 3, text_fond, thickness=tf, lineType=cv2.LINE_AA)
    cv2.putText(img, prob_text,
                (p1[0] + label_width + 5, p1[1] - round(((p2[0] - p1[0]) * 20) / 360)), font,
                lw / 3, text2_color, thickness=tf, lineType=cv2.LINE_AA)

    return img


# Draw an FPS counter in the top-right corner
def display_FPS(img, text, margin=1.0, box_scale=1.0):
    img_h, img_w, _ = img.shape
    line_width = int(min(img_h, img_w) * 0.001)  # line width
    thickness = max(int(line_width / 3), 1)  # font thickness

    font_face = cv2.FONT_HERSHEY_SIMPLEX
    font_color = (0, 0, 0)
    font_scale = thickness / 1.5

    t_w, t_h = cv2.getTextSize(text, font_face, font_scale, None)[0]

    margin_n = int(t_h * margin)
    sub_img = img[0 + margin_n: 0 + margin_n + t_h + int(2 * t_h * box_scale),
                  img_w - t_w - margin_n - int(2 * t_h * box_scale): img_w - margin_n]

    white_rect = np.ones(sub_img.shape, dtype=np.uint8) * 255

    # Blend a semi-transparent white box behind the text
    img[0 + margin_n: 0 + margin_n + t_h + int(2 * t_h * box_scale),
        img_w - t_w - margin_n - int(2 * t_h * box_scale): img_w - margin_n] = cv2.addWeighted(sub_img, 0.5, white_rect, 0.5, 1.0)

    cv2.putText(img=img,
                text=text,
                org=(img_w - t_w - margin_n - int(2 * t_h * box_scale) // 2,
                     0 + margin_n + t_h + int(2 * t_h * box_scale) // 2),
                fontFace=font_face,
                fontScale=font_scale,
                color=font_color,
                thickness=thickness,
                lineType=cv2.LINE_AA,
                bottomLeftOrigin=False)

    return img


def face_emo_analysize():
    # Initialize MediaPipe Face Mesh
    mp_face_mesh = mp.solutions.face_mesh

    # Load the TorchScript model
    name = '0_66_49_wo_gl'
    pth_model = torch.jit.load('torchscript_model_{0}.pth'.format(name)).to('cuda')
    pth_model.eval()

    # Emotion label dictionary
    DICT_EMO = {0: 'Neutral', 1: 'Happiness', 2: 'Sadness', 3: 'Surprise', 4: 'Fear', 5: 'Disgust', 6: 'Anger'}

    # Open the webcam
    cap = cv2.VideoCapture(0)
    w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = np.round(cap.get(cv2.CAP_PROP_FPS))

    # Set up the video writer
    path_save_video = 'result2.mp4'
    vid_writer = cv2.VideoWriter(path_save_video, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
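
    # NOTE: pth_processing() and the model load above both assume a CUDA
    # device; on a CPU-only host (a plausible cause of this Space's
    # "Runtime error" status) one would select the device dynamically, e.g.
    #   device = 'cuda' if torch.cuda.is_available() else 'cpu'
    # and pass that to .to() instead of the hard-coded 'cuda'.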

    # Run face detection with MediaPipe Face Mesh
    emotion_stats = {}
    with mp_face_mesh.FaceMesh(
            max_num_faces=1,
            refine_landmarks=False,
            min_detection_confidence=0.5,
            min_tracking_confidence=0.5) as face_mesh:
        while cap.isOpened():
            t1 = time.time()
            success, frame = cap.read()
            if frame is None:
                break

            frame_copy = frame.copy()
            frame_copy.flags.writeable = False
            frame_copy = cv2.cvtColor(frame_copy, cv2.COLOR_BGR2RGB)
            results = face_mesh.process(frame_copy)
            frame_copy.flags.writeable = True

            if results.multi_face_landmarks:
                for fl in results.multi_face_landmarks:
                    startX, startY, endX, endY = get_box(fl, w, h)
                    cur_face = frame_copy[startY:endY, startX:endX]

                    # Predict the emotion with the PyTorch model
                    cur_face = pth_processing(Image.fromarray(cur_face))
                    output = torch.nn.functional.softmax(pth_model(cur_face), dim=1).cpu().detach().numpy()[0]

                    # Get the emotion class and probability
                    cl = np.argmax(output)
                    label = DICT_EMO[cl]
                    prob = output[cl]

                    # Record emotion statistics
                    if label not in emotion_stats:
                        emotion_stats[label] = {'start_time': t1, 'duration': 0, 'total_prob': prob, 'count': 1}
                    else:
                        emotion_stats[label]['duration'] += (t1 - emotion_stats[label]['start_time'])
                        emotion_stats[label]['total_prob'] += prob
                        emotion_stats[label]['count'] += 1
                        emotion_stats[label]['start_time'] = t1
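                    # Caveat: 'duration' accumulates the time since this
                    # label was last seen, so it approximates on-screen time
                    # only while the same label repeats on consecutive
                    # frames; gaps between sightings are counted in as well.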

                    # Draw the emotion label and probability
                    frame = display_EMO_PRED(frame, (startX, startY, endX, endY), label, prob, line_width=3)

            t2 = time.time()

            # Draw the FPS
            frame = display_FPS(frame, 'FPS: {0:.1f}'.format(1 / (t2 - t1)), box_scale=.5)

            # Write the frame to the output video
            vid_writer.write(frame)

            # Show the frame
            cv2.imshow('Webcam', frame)
            if cv2.waitKey(1) & 0xFF == ord('\x1b'):  # ESC quits
                break

    # Release resources
    vid_writer.release()
    cap.release()
    cv2.destroyAllWindows()

    # Print the emotion statistics
    for emotion, stats in emotion_stats.items():
        avg_prob = stats['total_prob'] / stats['count']
        print(f'Emotion: {emotion}, Duration: {stats["duration"]:.2f} seconds, Average Probability: {avg_prob:.2f}')

    # At this point we have each emotion's duration and average probability.
    # If negative emotions outweigh positive ones, the overall sentiment is
    # negative; we then report the corresponding average probability.
    positive_emotions = ['Happiness', 'Surprise']
    negative_emotions = ['Anger', 'Fear', 'Sadness', 'Disgust']

    # Initialize the positive/negative aggregates
    positive_stats = {'duration': 0, 'total_prob': 0, 'count': 0}
    negative_stats = {'duration': 0, 'total_prob': 0, 'count': 0}

    # Accumulate duration and probability for positive and negative emotions
    for emotion, stats in emotion_stats.items():
        if emotion in positive_emotions:
            positive_stats['duration'] += stats['duration']
            positive_stats['total_prob'] += stats['total_prob']
            positive_stats['count'] += stats['count']
        elif emotion in negative_emotions:
            negative_stats['duration'] += stats['duration']
            negative_stats['total_prob'] += stats['total_prob']
            negative_stats['count'] += stats['count']

    # Average probability for each polarity
    if positive_stats['count'] > 0:
        positive_avg_prob = positive_stats['total_prob'] / positive_stats['count']
    else:
        positive_avg_prob = 0

    if negative_stats['count'] > 0:
        negative_avg_prob = negative_stats['total_prob'] / negative_stats['count']
    else:
        negative_avg_prob = 0

    # Compare the total durations of the two polarities
    if negative_stats['duration'] > positive_stats['duration']:
        print(f'Negative emotions lasted longer: {negative_stats["duration"]:.2f} seconds')
        print(f'Average probability of negative emotions: {negative_avg_prob:.2f}')
        outcome = "Negative, probability: " + str(negative_avg_prob)
        return outcome
    else:
        print(f'Positive emotions lasted longer: {positive_stats["duration"]:.2f} seconds')
        print(f'Average probability of positive emotions: {positive_avg_prob:.2f}')
        outcome = "Positive, probability: " + str(positive_avg_prob)
        return outcome
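
    # Worked example (illustrative numbers): with Happiness {duration: 4.0 s,
    # total_prob: 18.0, count: 20} and Anger {duration: 6.0 s, total_prob: 4.5,
    # count: 5}, the negative duration (6.0 s) exceeds the positive (4.0 s),
    # so the function returns "Negative, probability: 0.9" (4.5 / 5).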


# Convert the output video to a GIF
from moviepy.editor import VideoFileClip


def convert_mp4_to_gif(input_path, output_path, fps=10):
    clip = VideoFileClip(input_path)
    clip.write_gif(output_path, fps=fps)


# Example usage (note: the capture loop above saves to 'result2.mp4')
input_video_path = "result.mp4"
output_gif_path = "result.gif"

convert_mp4_to_gif(input_video_path, output_gif_path)
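
For reference, a minimal driver for the deleted file (a sketch under the file's own assumptions: a CUDA device, a webcam, and torchscript_model_0_66_49_wo_gl.pth in the working directory; appended to the script itself, since importing the module would also execute the example conversion above):

    # Run the webcam analysis (press ESC in the window to stop), then report
    # the overall sentiment and convert the recorded video to a GIF.
    outcome = face_emo_analysize()  # e.g. "Negative, probability: 0.71"
    print(outcome)
    convert_mp4_to_gif('result2.mp4', 'result2.gif', fps=10)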