# Hugging Face Space file header (was non-Python page residue):
# daikooo — "Update app.py" — commit f218f26 (verified)
import gradio as gr
import cv2
import numpy as np
import mediapipe as mp
from collections import defaultdict
# --- Module-level setup -------------------------------------------------
# FACEMESH_LIPS is a set of (start, end) landmark-index edges. Build an
# undirected adjacency map so the lip outlines can be traced as paths.
mp_face_mesh = mp.solutions.face_mesh

lip_connections = list(mp_face_mesh.FACEMESH_LIPS)
lip_graph = defaultdict(set)
for edge_a, edge_b in lip_connections:
    lip_graph[edge_a].add(edge_b)
    lip_graph[edge_b].add(edge_a)

# Lipstick tints, cycled on each mouth-open gesture.
palette = [
    (0, 13, 200),     # red
    (147, 20, 200),   # pink
    (13, 128, 200),   # orange
    (200, 13, 127),   # purple-pink
    (13, 200, 127),   # greenish
]

# Mutable frame-to-frame state for the open-mouth color cycler.
color_index = 0
prev_mouth_open = False
# Trace loop
def trace_contour(start_idx, visited):
    """Walk lip_graph from start_idx along unvisited neighbors until stuck.

    Mutates `visited` in place and returns the ordered list of landmark
    indices traversed (including start_idx itself).
    """
    contour = [start_idx]
    node = start_idx
    while True:
        candidates = lip_graph[node] - visited
        if not candidates:
            return contour
        node = candidates.pop()
        visited.add(node)
        contour.append(node)
        if node == start_idx:
            return contour
# Get contours
def find_lip_contours():
    """Trace every loop in lip_graph and return the two longest.

    Paths of 5 or fewer points are discarded as noise; the result is
    ordered longest-first, i.e. [outer_lip_indices, inner_lip_indices].
    """
    visited = set()
    traced = []
    for landmark in lip_graph:
        if landmark in visited:
            continue
        visited.add(landmark)
        path = trace_contour(landmark, visited)
        if len(path) > 5:
            traced.append(path)
    traced.sort(key=len, reverse=True)
    return traced[:2]
# Gradio processing function
# Gradio processing function
def process(frame):
    """Blend a semi-transparent lipstick tint over the detected lip region.

    Parameters
    ----------
    frame : np.ndarray
        H x W x 3 uint8 image from the Gradio webcam stream.

    Returns
    -------
    np.ndarray
        The horizontally flipped frame with the current palette color
        blended over the lips. An open-mouth gesture advances the color.
    """
    global color_index, prev_mouth_open

    # Perf fix: the original rebuilt (and closed) a FaceMesh model on every
    # frame, which is far too slow for live streaming and also discards the
    # tracker state that static_image_mode=False relies on. The model and
    # the lip-contour topology are invariant, so create them exactly once
    # and cache them on the function object.
    if not hasattr(process, "_face_mesh"):
        process._face_mesh = mp_face_mesh.FaceMesh(
            static_image_mode=False,
            max_num_faces=1,
            refine_landmarks=True,
            min_detection_confidence=0.5,
            min_tracking_confidence=0.5,
        )
        process._lip_contours = find_lip_contours()

    image = frame.copy()
    # NOTE(review): Gradio delivers RGB frames; this conversion mirrors the
    # original code, which treats the input as BGR (MediaPipe expects RGB).
    image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    h, w, _ = image.shape

    results = process._face_mesh.process(image_rgb)
    # Robustness: only tint when a face was found AND both lip loops traced.
    if results.multi_face_landmarks and len(process._lip_contours) == 2:
        outer_lip, inner_lip = process._lip_contours
        for face_landmarks in results.multi_face_landmarks:
            def get_points(indices):
                # Convert normalized landmark coords to pixel coordinates.
                return [(int(face_landmarks.landmark[i].x * w),
                         int(face_landmarks.landmark[i].y * h)) for i in indices]

            outer_pts = get_points(outer_lip)
            inner_pts = get_points(inner_lip)

            # Lip mask: fill the outer loop, then punch out the inner loop.
            lip_mask = np.zeros(image.shape[:2], dtype=np.uint8)
            cv2.fillPoly(lip_mask, [np.array(outer_pts, dtype=np.int32)], 255)
            cv2.fillPoly(lip_mask, [np.array(inner_pts, dtype=np.int32)], 0)

            # Mouth-open detection from landmarks 13/14 (inner lip centers);
            # the 0.03 threshold is in normalized image coordinates.
            top_lip = face_landmarks.landmark[13]
            bottom_lip = face_landmarks.landmark[14]
            mouth_open = abs(top_lip.y - bottom_lip.y) > 0.03
            # Advance the palette only on the closed -> open transition.
            if mouth_open and not prev_mouth_open:
                color_index = (color_index + 1) % len(palette)
            prev_mouth_open = mouth_open

            color = palette[color_index]
            colored_lips = np.zeros_like(image)
            colored_lips[:] = color
            colored_lips = cv2.bitwise_and(colored_lips, colored_lips, mask=lip_mask)
            # Blend the tint at 40% opacity over the original frame.
            image = cv2.addWeighted(image, 1.0, colored_lips, 0.4, 0)

    # Mirror the output so the preview behaves like a mirror.
    return cv2.flip(image, 1)
# Wire the frame processor into a live webcam interface.
webcam_input = gr.Image(source="webcam", streaming=True)

demo = gr.Interface(
    fn=process,
    inputs=webcam_input,
    outputs=gr.Image(),
    live=True,
    title="πŸ’„ Real-time Lipstick Try-On",
    description="Open your mouth to switch lip colors.",
)

if __name__ == "__main__":
    demo.launch()