Midnightar committed on
Commit
193f77c
·
verified ·
1 Parent(s): ed5a5f9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +55 -66
app.py CHANGED
@@ -6,16 +6,17 @@ from fastapi import FastAPI
6
  from pydantic import BaseModel
7
  import insightface
8
  import gradio as gr
 
9
  from fastapi.middleware.cors import CORSMiddleware
10
 
11
- # ---------- Load Face Detector + Recognition Model ----------
12
  model = insightface.app.FaceAnalysis(name="buffalo_l")
13
  model.prepare(ctx_id=0, det_size=(640, 640))
14
 
15
- # ---------- FastAPI App ----------
16
  app = FastAPI()
17
 
18
- # Add CORS for FlutterFlow
19
  app.add_middleware(
20
  CORSMiddleware,
21
  allow_origins=["*"],
@@ -24,71 +25,60 @@ app.add_middleware(
24
  allow_headers=["*"],
25
  )
26
 
27
- # ---------- API Request Schema ----------
28
  class CompareRequest(BaseModel):
29
- image1: str | None = None # base64
30
- image2: str | None = None # base64
31
- image1_url: str | None = None # URL
32
- image2_url: str | None = None # URL
33
 
34
- # ---------- Helper: Convert base64 to CV2 image ----------
35
  def b64_to_img(b64_string):
36
  try:
37
  img_data = base64.b64decode(b64_string)
38
- np_arr = np.frombuffer(img_data, np.uint8)
39
- img = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)
40
- return img
41
  except:
42
  return None
43
 
44
- # ---------- Helper: Convert URL to CV2 image ----------
45
  def url_to_img(url):
46
  try:
47
- resp = requests.get(url, timeout=10)
48
- np_arr = np.frombuffer(resp.content, np.uint8)
49
- img = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)
50
- return img
51
  except:
52
  return None
53
 
54
- # ---------- Helper: Extract face embedding ----------
 
 
 
 
55
  def get_embedding(img):
56
  faces = model.get(img)
57
- if len(faces) == 0:
58
  return None
59
  return faces[0].embedding
60
 
61
- # ---------- POST /compare API ----------
62
  @app.post("/compare")
63
  async def compare_faces(req: CompareRequest):
64
 
65
- # Load image 1
66
- if req.image1:
67
- img1 = b64_to_img(req.image1)
68
- elif req.image1_url:
69
- img1 = url_to_img(req.image1_url)
70
- else:
71
- return {"error": "Image1 missing"}
72
-
73
- # Load image 2
74
- if req.image2:
75
- img2 = b64_to_img(req.image2)
76
- elif req.image2_url:
77
- img2 = url_to_img(req.image2_url)
78
- else:
79
- return {"error": "Image2 missing"}
80
 
81
  if img1 is None or img2 is None:
82
- return {"error": "Invalid image data"}
83
 
84
  emb1 = get_embedding(img1)
85
  emb2 = get_embedding(img2)
86
 
87
  if emb1 is None or emb2 is None:
88
- return {"error": "Face not detected"}
89
 
90
  similarity = float(
91
- np.dot(emb1, emb2) / (np.linalg.norm(emb1) * np.linalg.norm(emb2))
 
92
  )
93
 
94
  return {
@@ -96,43 +86,42 @@ async def compare_faces(req: CompareRequest):
96
  "match": similarity > 0.55
97
  }
98
 
99
- # ---------- GRADIO UI ----------
100
- def gradio_ui(image1_input, image2_input):
101
-
102
- def load_any(x):
103
- if x.startswith("http://") or x.startswith("https://"):
104
- return url_to_img(x)
105
- else:
106
- return b64_to_img(x)
107
-
108
- img1 = load_any(image1_input)
109
- img2 = load_any(image2_input)
110
 
111
  if img1 is None or img2 is None:
112
- return "Invalid image input"
113
 
114
  emb1 = get_embedding(img1)
115
  emb2 = get_embedding(img2)
116
 
117
  if emb1 is None or emb2 is None:
118
- return "Face not detected"
119
 
120
- similarity = float(
121
- np.dot(emb1, emb2) / (np.linalg.norm(emb1) * np.linalg.norm(emb2))
122
  )
123
- matched = similarity > 0.55
124
 
125
- return f"Similarity: {similarity:.3f} | Match: {matched}"
126
 
127
- # Build a Gradio Blocks app (needed for mount_gradio_app)
128
- with gr.Blocks() as demo:
129
- gr.Markdown("# Face Comparison API (Text Input)")
130
- img1 = gr.Textbox(label="Image 1 (URL or base64)")
131
- img2 = gr.Textbox(label="Image 2 (URL or base64)")
132
- out = gr.Textbox(label="Result")
133
- btn = gr.Button("Compare")
134
-
135
- btn.click(fn=gradio_ui, inputs=[img1, img2], outputs=out)
136
 
137
- # Mount Gradio UI at "/"
138
- app = gr.mount_gradio_app(app, demo, path="/")
 
 
 
 
 
 
 
 
6
  from pydantic import BaseModel
7
  import insightface
8
  import gradio as gr
9
+ import uvicorn
10
  from fastapi.middleware.cors import CORSMiddleware
11
 
12
# -------------------- Load Face Recognition Model --------------------
# InsightFace "buffalo_l" bundle (detector + recognition model).
# ctx_id=0 selects the first device; det_size is the detector input size.
model = insightface.app.FaceAnalysis(name="buffalo_l")
model.prepare(ctx_id=0, det_size=(640, 640))

# -------------------- FASTAPI APP --------------------
# NOTE(review): model load happens at import time, so app startup blocks
# until the weights are ready.
app = FastAPI()
18
 
19
+ # Allow all CORS (FlutterFlow)
20
  app.add_middleware(
21
  CORSMiddleware,
22
  allow_origins=["*"],
 
25
  allow_headers=["*"],
26
  )
27
 
28
# -------------------- Request Schema --------------------
class CompareRequest(BaseModel):
    # For each image, callers supply EITHER the base64 payload or the URL.
    # All fields default to None so any combination validates; the endpoint
    # decides which one to use.
    image1: str | None = None       # base64-encoded image bytes
    image2: str | None = None       # base64-encoded image bytes
    image1_url: str | None = None   # HTTP(S) URL of image 1
    image2_url: str | None = None   # HTTP(S) URL of image 2
34
 
35
# -------------------- Helpers --------------------
def b64_to_img(b64_string):
    """Decode a base64 string into a BGR OpenCV image.

    Returns None when the input is not valid base64; cv2.imdecode itself
    returns None for bytes that are not a decodable image.
    """
    try:
        img_data = base64.b64decode(b64_string)
    except (TypeError, ValueError):
        # TypeError: input is None / not str-or-bytes.
        # ValueError: bad base64 (binascii.Error subclasses ValueError).
        # The previous bare `except:` hid every other bug in this path.
        return None
    arr = np.frombuffer(img_data, np.uint8)
    return cv2.imdecode(arr, cv2.IMREAD_COLOR)
43
 
 
44
def url_to_img(url):
    """Download an image from *url* and decode it into a BGR OpenCV image.

    Returns None on network failure, timeout, a non-2xx status, or bytes
    that cannot be decoded as an image.
    """
    try:
        r = requests.get(url, timeout=5)
        # Without this, a 404/500 HTML error page would be handed to
        # cv2.imdecode; fail explicitly on bad status instead.
        r.raise_for_status()
    except requests.RequestException:
        # Covers connection errors, timeouts, invalid URLs, bad status.
        # Narrowed from the original bare `except:` which also swallowed
        # unrelated bugs (even KeyboardInterrupt).
        return None
    arr = np.frombuffer(r.content, np.uint8)
    return cv2.imdecode(arr, cv2.IMREAD_COLOR)
51
 
52
def load_any(x):
    """Load an image given either an HTTP(S) URL or a base64 string.

    Dispatches on the "http(s)://" prefix: URLs are fetched via
    url_to_img, anything else is treated as base64 via b64_to_img.
    Returns None for empty/None input instead of raising AttributeError
    on `.startswith`, so callers may pass optional fields straight through.
    """
    if not x:
        return None
    if x.startswith(("http://", "https://")):
        return url_to_img(x)
    return b64_to_img(x)
56
+
57
def get_embedding(img):
    """Return the embedding vector of the first face detected in *img*.

    Returns None when the detector finds no face. When several faces are
    present, only the first one reported by the model is used.
    """
    faces = model.get(img)
    return faces[0].embedding if faces else None
62
 
63
+ # -------------------- /compare API --------------------
64
  @app.post("/compare")
65
  async def compare_faces(req: CompareRequest):
66
 
67
+ img1 = load_any(req.image1 or req.image1_url or "")
68
+ img2 = load_any(req.image2 or req.image2_url or "")
 
 
 
 
 
 
 
 
 
 
 
 
 
69
 
70
  if img1 is None or img2 is None:
71
+ return {"error": "Invalid image1/image2 or URLs"}
72
 
73
  emb1 = get_embedding(img1)
74
  emb2 = get_embedding(img2)
75
 
76
  if emb1 is None or emb2 is None:
77
+ return {"error": "No face detected in one or both images."}
78
 
79
  similarity = float(
80
+ np.dot(emb1, emb2) /
81
+ (np.linalg.norm(emb1) * np.linalg.norm(emb2))
82
  )
83
 
84
  return {
 
86
  "match": similarity > 0.55
87
  }
88
 
89
# -------------------- GRADIO UI --------------------
def gradio_ui(img1_text, img2_text):
    """Compare two face images given as base64 strings or URLs.

    Returns a human-readable result string: either an error message, or
    the cosine similarity plus whether it clears the 0.55 match threshold.
    """
    first = load_any(img1_text)
    second = load_any(img2_text)

    # Bail out early when either input could not be decoded/fetched.
    if first is None or second is None:
        return "Invalid base64 or URL."

    vec_a = get_embedding(first)
    vec_b = get_embedding(second)

    if vec_a is None or vec_b is None:
        return "Face not detected."

    # Cosine similarity between the two face embeddings.
    denom = np.linalg.norm(vec_a) * np.linalg.norm(vec_b)
    similarity = np.dot(vec_a, vec_b) / denom

    return f"Similarity: {similarity:.3f} | Match: {similarity > 0.55}"
108
 
109
gradio_ui_app = gr.Interface(
    fn=gradio_ui,
    inputs=[
        gr.Textbox(label="Image 1 (base64 or URL)"),
        gr.Textbox(label="Image 2 (base64 or URL)"),
    ],
    outputs="text",
    title="Face Match API (Text Input)",
)

# Mount the Gradio UI on the FastAPI app at "/".
# BUG FIX: the previous code called gradio_ui_app.launch(...) inside a
# GET handler — launch() tries to start a *second* web server on every
# request and does not return an HTML response, so "/" never rendered.
# gr.mount_gradio_app is the supported way to serve Gradio under FastAPI;
# Hugging Face Spaces picks up the resulting "app" ASGI object directly.
app = gr.mount_gradio_app(app, gradio_ui_app, path="/")