houloude9 commited on
Commit
0f7dbce
·
verified ·
1 Parent(s): cfbe0bb

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +129 -47
app.py CHANGED
@@ -1,7 +1,7 @@
1
  #!/usr/bin/env python3
2
  """
3
  Facial Recognition Service with Gradio UI
4
- CPU-only version for Hugging Face Spaces using PyTorch backend
5
  """
6
 
7
  import warnings
@@ -10,41 +10,49 @@ import sys
10
  import numpy as np
11
  import cv2
12
  import gradio as gr
 
13
 
14
  # Suppress warnings
15
  warnings.filterwarnings('ignore')
16
  os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
17
 
18
- # InsightFace PyTorch backend
19
- from insightface.app import FaceAnalysis
20
-
21
 
22
  class FacialRecognitionService:
23
  def __init__(self):
24
- """Initialize InsightFace model on CPU using PyTorch backend"""
25
- print("Loading InsightFace model (PyTorch, CPU)...")
26
- # Lightweight CPU-friendly model
27
- self.app = FaceAnalysis(name='buffalo_l', providers=['CPUExecutionProvider'])
28
- self.app.prepare(ctx_id=-1, det_size=(640, 640)) # CPU-only
29
- print("Model loaded βœ…")
 
 
30
 
31
  def extract_face_embedding(self, image: np.ndarray):
32
- """Extract face embedding from an uploaded image (numpy array)"""
33
  try:
34
  if image is None:
35
  return None
36
 
37
- if image.shape[2] == 3:
38
- img_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
 
 
 
39
  else:
40
  img_rgb = image
41
 
42
- faces = self.app.get(img_rgb)
43
- if len(faces) == 0:
44
- return None
45
-
46
- largest_face = max(faces, key=lambda x: (x.bbox[2]-x.bbox[0])*(x.bbox[3]-x.bbox[1]))
47
- return largest_face.embedding
 
 
 
 
 
48
 
49
  except Exception as e:
50
  print(f"Error extracting embedding: {e}", file=sys.stderr)
@@ -53,71 +61,145 @@ class FacialRecognitionService:
53
  def calculate_similarity(self, emb1, emb2):
54
  """Cosine similarity normalized to 0-1"""
55
  try:
56
- norm1, norm2 = np.linalg.norm(emb1), np.linalg.norm(emb2)
57
- if norm1 == 0 or norm2 == 0:
58
- return 0.0
59
- emb1, emb2 = emb1 / norm1, emb2 / norm2
60
- return float((np.dot(emb1, emb2) + 1) / 2)
61
  except:
62
- return 0.0
 
 
 
 
 
 
 
 
63
 
64
  def match_faces(self, target_image: np.ndarray, candidate_images: list, threshold: float = 0.6):
 
65
  matches = []
 
66
  target_emb = self.extract_face_embedding(target_image)
67
  if target_emb is None:
68
- return "No face detected in target image"
69
 
70
  for idx, candidate in enumerate(candidate_images):
 
 
 
71
  candidate_emb = self.extract_face_embedding(candidate)
72
  if candidate_emb is None:
73
  continue
 
74
  similarity = self.calculate_similarity(target_emb, candidate_emb)
75
  if similarity >= threshold:
76
  matches.append({
77
- 'index': idx,
78
  'confidence': similarity,
79
  'score': int(similarity * 100)
80
  })
81
 
82
  if not matches:
83
- return "No matches found"
84
 
85
- return "\n".join([f"Candidate {m['index']}: {m['score']}%" for m in matches])
 
 
 
 
 
 
 
86
 
87
 
88
  # Initialize service
 
89
  service = FacialRecognitionService()
90
 
91
 
92
  # Gradio functions
93
  def extract_face(image):
 
 
 
 
94
  embedding = service.extract_face_embedding(image)
95
  if embedding is None:
96
- return "No face detected"
97
- return f"Embedding size: {len(embedding)}"
 
98
 
99
 
100
- def match_faces_fn(target_image, *candidate_images):
 
 
 
 
101
  candidates = [img for img in candidate_images if img is not None]
102
- result = service.match_faces(target_image, candidates)
 
 
 
 
103
  return result
104
 
105
 
106
  # Gradio UI
107
- with gr.Blocks() as demo:
108
- gr.Markdown("## Facial Recognition Service (InsightFace, CPU Only)")
 
 
 
 
 
109
 
110
- with gr.Tab("Extract Embedding"):
111
- input_img = gr.Image(label="Upload Image")
112
- output_embed = gr.Textbox(label="Face Embedding")
113
- btn_extract = gr.Button("Extract")
 
 
 
 
 
114
  btn_extract.click(fn=extract_face, inputs=input_img, outputs=output_embed)
115
 
116
- with gr.Tab("Match Faces"):
117
- target_img = gr.Image(label="Target Image")
118
- candidate_imgs = [gr.Image(label=f"Candidate Image {i+1}") for i in range(5)]
119
- output_matches = gr.Textbox(label="Matches")
120
- btn_match = gr.Button("Match")
121
- btn_match.click(fn=match_faces_fn, inputs=[target_img] + candidate_imgs, outputs=output_matches)
122
-
123
- demo.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  #!/usr/bin/env python3
2
  """
3
  Facial Recognition Service with Gradio UI
4
+ Using DeepFace for Hugging Face Spaces compatibility
5
  """
6
 
7
  import warnings
 
10
  import numpy as np
11
  import cv2
12
  import gradio as gr
13
+ from deepface import DeepFace
14
 
15
  # Suppress warnings
16
  warnings.filterwarnings('ignore')
17
  os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
18
 
 
 
 
19
 
20
  class FacialRecognitionService:
21
  def __init__(self):
22
+ """Initialize DeepFace with VGG-Face model"""
23
+ print("Loading DeepFace model...")
24
+ # Pre-load model
25
+ try:
26
+ DeepFace.build_model("VGG-Face")
27
+ print("Model loaded βœ…")
28
+ except Exception as e:
29
+ print(f"Model loading warning: {e}")
30
 
31
  def extract_face_embedding(self, image: np.ndarray):
32
+ """Extract face embedding from an uploaded image"""
33
  try:
34
  if image is None:
35
  return None
36
 
37
+ # DeepFace expects RGB
38
+ if len(image.shape) == 2: # Grayscale
39
+ img_rgb = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
40
+ elif image.shape[2] == 4: # RGBA
41
+ img_rgb = cv2.cvtColor(image, cv2.COLOR_RGBA2RGB)
42
  else:
43
  img_rgb = image
44
 
45
+ # Extract embedding
46
+ embedding_objs = DeepFace.represent(
47
+ img_path=img_rgb,
48
+ model_name="VGG-Face",
49
+ enforce_detection=True,
50
+ detector_backend="opencv"
51
+ )
52
+
53
+ if len(embedding_objs) > 0:
54
+ return np.array(embedding_objs[0]["embedding"])
55
+ return None
56
 
57
  except Exception as e:
58
  print(f"Error extracting embedding: {e}", file=sys.stderr)
 
61
  def calculate_similarity(self, emb1, emb2):
62
  """Cosine similarity normalized to 0-1"""
63
  try:
64
+ from sklearn.metrics.pairwise import cosine_similarity
65
+ similarity = cosine_similarity([emb1], [emb2])[0][0]
66
+ # Convert from [-1, 1] to [0, 1]
67
+ return float((similarity + 1) / 2)
 
68
  except:
69
+ # Fallback manual calculation
70
+ try:
71
+ norm1, norm2 = np.linalg.norm(emb1), np.linalg.norm(emb2)
72
+ if norm1 == 0 or norm2 == 0:
73
+ return 0.0
74
+ emb1_norm, emb2_norm = emb1 / norm1, emb2 / norm2
75
+ return float((np.dot(emb1_norm, emb2_norm) + 1) / 2)
76
+ except:
77
+ return 0.0
78
 
79
  def match_faces(self, target_image: np.ndarray, candidate_images: list, threshold: float = 0.6):
80
+ """Match target face against candidate images"""
81
  matches = []
82
+
83
  target_emb = self.extract_face_embedding(target_image)
84
  if target_emb is None:
85
+ return "❌ No face detected in target image"
86
 
87
  for idx, candidate in enumerate(candidate_images):
88
+ if candidate is None:
89
+ continue
90
+
91
  candidate_emb = self.extract_face_embedding(candidate)
92
  if candidate_emb is None:
93
  continue
94
+
95
  similarity = self.calculate_similarity(target_emb, candidate_emb)
96
  if similarity >= threshold:
97
  matches.append({
98
+ 'index': idx + 1,
99
  'confidence': similarity,
100
  'score': int(similarity * 100)
101
  })
102
 
103
  if not matches:
104
+ return "❌ No matches found above threshold"
105
 
106
+ # Sort by confidence
107
+ matches.sort(key=lambda x: x['confidence'], reverse=True)
108
+
109
+ result = "βœ… Matches Found:\n\n"
110
+ for m in matches:
111
+ result += f"πŸ“Έ Candidate {m['index']}: {m['score']}% match\n"
112
+
113
+ return result
114
 
115
 
116
  # Initialize service
117
+ print("Initializing Facial Recognition Service...")
118
  service = FacialRecognitionService()
119
 
120
 
121
  # Gradio functions
122
  def extract_face(image):
123
+ """Extract embedding from single image"""
124
+ if image is None:
125
+ return "❌ Please upload an image"
126
+
127
  embedding = service.extract_face_embedding(image)
128
  if embedding is None:
129
+ return "❌ No face detected in image"
130
+
131
+ return f"βœ… Face detected!\n\nEmbedding size: {len(embedding)} dimensions\nModel: VGG-Face"
132
 
133
 
134
+ def match_faces_fn(target_image, threshold, *candidate_images):
135
+ """Match faces with configurable threshold"""
136
+ if target_image is None:
137
+ return "❌ Please upload a target image"
138
+
139
  candidates = [img for img in candidate_images if img is not None]
140
+
141
+ if len(candidates) == 0:
142
+ return "❌ Please upload at least one candidate image"
143
+
144
+ result = service.match_faces(target_image, candidates, threshold=threshold)
145
  return result
146
 
147
 
148
  # Gradio UI
149
+ with gr.Blocks(theme=gr.themes.Soft()) as demo:
150
+ gr.Markdown("""
151
+ # πŸ” Facial Recognition Service
152
+ ### Powered by DeepFace (VGG-Face model)
153
+
154
+ Upload images to extract face embeddings or match faces across multiple images.
155
+ """)
156
 
157
+ with gr.Tab("🎯 Extract Face Embedding"):
158
+ gr.Markdown("Upload a single image to extract facial features.")
159
+ with gr.Row():
160
+ with gr.Column():
161
+ input_img = gr.Image(label="Upload Image", type="numpy")
162
+ btn_extract = gr.Button("πŸ”Ž Extract Embedding", variant="primary")
163
+ with gr.Column():
164
+ output_embed = gr.Textbox(label="Result", lines=5)
165
+
166
  btn_extract.click(fn=extract_face, inputs=input_img, outputs=output_embed)
167
 
168
+ with gr.Tab("πŸ”„ Match Faces"):
169
+ gr.Markdown("Upload a target face and up to 5 candidate images to find matches.")
170
+
171
+ with gr.Row():
172
+ with gr.Column(scale=1):
173
+ target_img = gr.Image(label="🎯 Target Image", type="numpy")
174
+ threshold_slider = gr.Slider(
175
+ minimum=0.3,
176
+ maximum=0.9,
177
+ value=0.6,
178
+ step=0.05,
179
+ label="Match Threshold",
180
+ info="Higher = stricter matching"
181
+ )
182
+ btn_match = gr.Button("πŸ” Find Matches", variant="primary")
183
+
184
+ with gr.Column(scale=1):
185
+ output_matches = gr.Textbox(label="Match Results", lines=12)
186
+
187
+ with gr.Row():
188
+ candidate_imgs = [
189
+ gr.Image(label=f"Candidate {i+1}", type="numpy")
190
+ for i in range(5)
191
+ ]
192
+
193
+ btn_match.click(
194
+ fn=match_faces_fn,
195
+ inputs=[target_img, threshold_slider] + candidate_imgs,
196
+ outputs=output_matches
197
+ )
198
+
199
+ gr.Markdown("""
200
+ ---
201
+ **Note:** This app runs on CPU. Processing may take a few seconds per image.
202
+ """)
203
+
204
+ if __name__ == "__main__":
205
+ demo.launch()