Spaces:
Sleeping
Sleeping
Upload app.py
Browse files
app.py
ADDED
|
@@ -0,0 +1,222 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from fastai.vision.all import *  # star import kept FIRST so the explicit imports below win any name clashes
import gradio as gr
import pathlib
import platform
import ast
import cv2
import dlib
import numpy as np
import mediapipe as mp
from imutils import face_utils
from PIL import Image
import time
from dotenv import load_dotenv
import os
|
| 13 |
+
|
| 14 |
+
load_dotenv()

# Parse the color table and makeup-recommendation table from environment
# variables (typically supplied via a .env file).
# ast.literal_eval accepts only Python literals; the original eval() would
# execute arbitrary code taken from the environment — a code-injection hole.
colorDict = ast.literal_eval(os.getenv('color-dict'))
makeup_recommendations = ast.literal_eval(os.getenv('makeup_recommendations'))

# fastai learners exported on Linux pickle PosixPath objects; remap the class
# so export.pkl can be un-pickled on Windows.
plt = platform.system()
if plt == 'Windows': pathlib.PosixPath = pathlib.WindowsPath
|
| 21 |
+
|
| 22 |
+
def rgb_to_bgr(rgb_color):
    """Reorder an (R, G, B) color triple into OpenCV's (B, G, R) convention."""
    red, green, blue = rgb_color
    return (blue, green, red)
|
| 25 |
+
|
| 26 |
+
# Module-level state shared across Gradio callbacks: classify_and_recommend
# (via its helpers) fills these in, apply_makeup later reads them.
# NOTE(review): module globals make the app effectively single-user —
# concurrent sessions would clobber each other's state; confirm acceptable.
image_path = None
lipstick_shade = None
foundation_color = None
powder_color = None
|
| 30 |
+
|
| 31 |
+
def get_makeup_recommendation(skin_tone):
    """Return the makeup recommendation dict for *skin_tone*.

    Side effect: remembers the lipstick/foundation/powder shades in module
    globals for later use by the makeup-application step.  An unrecognized
    tone yields a template dict with empty fields and no Brands.
    """
    global lipstick_shade, foundation_color, powder_color
    if skin_tone not in makeup_recommendations:
        # Unknown tone — nothing to remember, hand back an empty template.
        return {'Foundation Shade': '', 'Lipstick Shade': '', 'Powder Shade': '', 'Brands': {}}
    rec = makeup_recommendations[skin_tone]
    lipstick_shade = rec['Lipstick Shade']
    foundation_color = rec['Foundation Shade']
    powder_color = rec['Powder Shade']
    return rec
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
def save_uploaded_image(img):
    """Persist the uploaded PIL image under a millisecond-timestamped name.

    Stores the path in the module-global `image_path` (read later by
    apply_makeup) and returns it.
    """
    global image_path
    stamp = round(time.time() * 1000)
    image_path = f"uploaded_image_{stamp}.png"
    img.save(image_path)
    return image_path
|
| 48 |
+
|
| 49 |
+
# Load the trained skin-tone classifier exported by fastai; export.pkl must
# sit next to this script (the Windows PosixPath remap above handles
# cross-platform un-pickling).
learn = load_learner('export.pkl')
|
| 51 |
+
|
| 52 |
+
def classify_and_recommend(img):
    """Classify the skin tone of *img* and pair it with makeup advice.

    Returns a (probabilities-by-class dict, recommendation dict) pair for
    the two Gradio outputs.  Side effect: the upload is saved to disk so
    apply_makeup can re-open it later.
    """
    # Persist the upload; apply_makeup reads it back from this path.
    img_path = save_uploaded_image(img)
    print(f"Image saved to {img_path}")

    # Run the fastai classifier and expand the probability tensor into a
    # {label: probability} mapping for gr.Label.
    pred, _, probs = learn.predict(img)
    result = {}
    for idx, label in enumerate(learn.dls.vocab):
        result[label] = float(probs[idx])

    recommendation = get_makeup_recommendation(pred)
    return result, recommendation
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
# Load the pre-trained dlib 68-point facial landmark predictor; the .dat
# model file must be present alongside this script.
predictor_path = "shape_predictor_68_face_landmarks.dat"
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(predictor_path)

# Landmark indices used as the lip region.
# NOTE(review): 48-60 of the 68-point scheme — presumably the outer lip
# contour plus the first inner-lip point; confirm this is the intended span.
LIP_POINTS = list(range(48, 61))
|
| 73 |
+
|
| 74 |
+
def apply_lipstick(image):
    """Tint the lips of every detected face with the recommended shade.

    Reads the module globals `lipstick_shade` (set by
    get_makeup_recommendation) and `colorDict`.  Returns a new PIL image;
    the input is returned unchanged when no face is detected.
    """
    # Look up the recommended shade and convert it to OpenCV's BGR order.
    lipstick_color = rgb_to_bgr(colorDict[lipstick_shade])
    # Convert PIL image to OpenCV (BGR) format.
    image_cv = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)

    # Detect faces in the image (second arg 1 — presumably one upsampling
    # pass to catch smaller faces; confirm against dlib docs).
    faces = detector(image_cv, 1)
    if len(faces) == 0:
        return image  # No faces detected, return the original image

    for face in faces:
        # Get the 68 facial landmarks as an (N, 2) numpy array.
        shape = predictor(image_cv, face)
        shape = face_utils.shape_to_np(shape)

        # Get the lip region points.
        lips = shape[LIP_POINTS]

        # Single-channel mask: 255 inside the filled lip polygon, 0 elsewhere.
        mask = np.zeros(image_cv.shape[:2], dtype=np.uint8)
        cv2.fillPoly(mask, [lips], 255)

        # Full-frame overlay painted solid with the lipstick color.
        overlay = np.zeros_like(image_cv, dtype=np.uint8)
        overlay[:] = lipstick_color

        # Keep the colored overlay only inside the lip mask.
        lip_area = cv2.bitwise_and(overlay, overlay, mask=mask)

        # Invert the lip mask.
        inv_mask = cv2.bitwise_not(mask)

        # Original image with the lip region blacked out.
        no_lip_area = cv2.bitwise_and(image_cv, image_cv, mask=inv_mask)

        # Recombine: full-strength face plus lipstick overlay at 0.6 opacity.
        image_cv = cv2.addWeighted(no_lip_area, 1, lip_area, 0.6, 0)

    # Convert back to PIL (RGB) format.
    return Image.fromarray(cv2.cvtColor(image_cv, cv2.COLOR_BGR2RGB))
|
| 114 |
+
|
| 115 |
+
# MediaPipe solution handles used by apply_color: coarse face detection plus
# the dense face mesh for building the face mask.
mp_face_detection = mp.solutions.face_detection
mp_face_mesh = mp.solutions.face_mesh
|
| 117 |
+
|
| 118 |
+
def apply_color(image_pil, color, alpha):
    """Blend a named color over the detected face region of *image_pil*.

    Parameters:
        image_pil: input PIL image.
        color: key into the module-level `colorDict` (a foundation or
            powder shade name).
        alpha: blend opacity; inside the face mask the result is
            (1 - alpha) * original + alpha * color.

    Returns a new PIL image.  If no face is detected, the image is
    converted and returned with no color applied.
    """
    # Fix: the original had a bare string literal here *after* executable
    # statements — a discarded expression, not a docstring.  Replaced by the
    # real docstring above; all executable logic is unchanged.
    color = rgb_to_bgr(colorDict[color])

    # Convert PIL image to OpenCV (BGR) format.
    image_cv = cv2.cvtColor(np.array(image_pil), cv2.COLOR_RGB2BGR)

    # MediaPipe expects RGB input.
    image_rgb = cv2.cvtColor(image_cv, cv2.COLOR_BGR2RGB)

    # Initialize face detection and face mesh.
    with mp_face_detection.FaceDetection(min_detection_confidence=0.5) as face_detection, \
            mp_face_mesh.FaceMesh(static_image_mode=True, max_num_faces=1, min_detection_confidence=0.5) as face_mesh:

        # Detect faces in the image.
        detection_results = face_detection.process(image_rgb)

        if detection_results.detections:
            for detection in detection_results.detections:
                # NOTE(review): face_mesh.process runs on the whole image and
                # does not depend on `detection`, so with multiple detections
                # the same work repeats — kept as-is to preserve behavior.
                mesh_results = face_mesh.process(image_rgb)
                if mesh_results.multi_face_landmarks:
                    for face_landmarks in mesh_results.multi_face_landmarks:
                        # Build a 3-channel face mask: dot every landmark,
                        # then fill the convex hull of all landmarks.
                        mask = np.zeros_like(image_cv, dtype=np.uint8)
                        ih, iw, _ = image_cv.shape
                        for landmark in face_landmarks.landmark:
                            x = int(landmark.x * iw)
                            y = int(landmark.y * ih)
                            cv2.circle(mask, (x, y), 1, (255, 255, 255), -1)

                        hull = cv2.convexHull(np.array([(int(landmark.x * iw), int(landmark.y * ih)) for landmark in face_landmarks.landmark]))
                        cv2.fillConvexPoly(mask, hull, (255, 255, 255))

                        # Blend the color over the whole frame...
                        color_image = np.full_like(image_cv, color, dtype=np.uint8)
                        blended = cv2.addWeighted(image_cv, 1 - alpha, color_image, alpha, 0)

                        # ...but keep the blend only where the mask is white.
                        image_cv = np.where(mask == np.array([255, 255, 255]), blended, image_cv)

    # Convert the result back to PIL format.
    result_image_pil = Image.fromarray(cv2.cvtColor(image_cv, cv2.COLOR_BGR2RGB))
    return result_image_pil
|
| 165 |
+
|
| 166 |
+
def apply_makeup():
    """Apply lipstick, foundation, and powder to the last uploaded image.

    Reads the module globals set by the classification step (`image_path`,
    `foundation_color`, `powder_color`; `lipstick_shade` indirectly via
    apply_lipstick), writes the edited image to a timestamped JPEG, and
    returns that path for the Gradio image component.
    """
    print(image_path)
    input_image = Image.open(image_path)

    # Low opacity keeps foundation and powder subtle.
    foundation_alpha = 0.05
    powder_alpha = 0.05

    output_image_path = f"output_image_{round(time.time() * 1000)}.jpg"

    # Layer the three makeup passes in order.
    edited = apply_lipstick(input_image)
    edited = apply_color(edited, foundation_color, foundation_alpha)
    edited = apply_color(edited, powder_color, powder_alpha)

    edited.save(output_image_path)
    return output_image_path
|
| 184 |
+
|
| 185 |
+
# Add a logo and a welcome message.
# NOTE(review): neither `logo` nor `welcome_message` is referenced by the
# Blocks UI defined later in this file — presumably leftovers from an earlier
# gr.Interface version; confirm before removing.
logo = "https://i.pinimg.com/736x/f8/34/cc/f834ccc788207ae147ab37d2085f6903.jpg"  # Replace with your logo URL
welcome_message = """
# Skin Tone Classification and Makeup Recommendations
Upload an image to classify your skin tone and receive personalized makeup recommendations. Find the perfect foundation, lipstick, and powder shades from your favorite brands!
"""
|
| 191 |
+
|
| 192 |
+
# Gradio interface: two-column layout — classification on the left, edited
# image preview on the right.
with gr.Blocks() as demo:
    # Page header.
    gr.Markdown("""
# Welcome to the Makeup Recommendation and Application Tool
Upload an image to receive personalized makeup recommendations and see how the makeup looks on you!
""")

    with gr.Row():
        # Left column: upload, classify, and show results.
        with gr.Column():
            gr.Markdown("## Upload and Classify")
            upload_image = gr.Image(type="pil", label="Upload Image")
            classify_btn = gr.Button("Submit")
            result_label = gr.Label(num_top_classes=3, label='Classification Results')
            recommendation_json = gr.JSON(label='Makeup Recommendations')

            # Classify the upload and display probabilities + recommendations.
            classify_btn.click(fn=classify_and_recommend, inputs=upload_image, outputs=[result_label, recommendation_json])

        # Right column: render the makeup-applied image.
        with gr.Column():
            gr.Markdown("## View Edited Image")
            img = gr.Image(label="Edited Image")
            show_image_btn = gr.Button("Show Edited Image")
            # apply_makeup takes no inputs — it reads module globals set by
            # the classification step, so "Submit" must be clicked first.
            show_image_btn.click(fn=apply_makeup, inputs=[], outputs=img)

    gr.Markdown("""
### Instructions:
1. **Upload an Image:** Choose a clear image of your face.
2. **Submit for Classification:** Click 'Submit' to receive makeup recommendations.
3. **View Edited Image:** Click 'Show Edited Image' to see the applied makeup.
""")

# share=True exposes a public tunnel URL; debug=True blocks and prints errors.
demo.launch(debug=True, share=True)
|