arshtech committed on
Commit
00dc7c2
·
verified ·
1 Parent(s): 11dd192

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +83 -18
app.py CHANGED
@@ -1,22 +1,87 @@
1
- import numpy as np
2
  import cv2
3
- import gradio as gr
 
4
  from PIL import Image
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5
 
6
- def detect_faces(image):
7
- image_np = np.array(image)
8
- gray_image = cv2.cvtColor(image_np, cv2.COLOR_RGB2GRAY)
9
- face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
10
- faces = face_cascade.detectMultiScale(gray_image, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))
11
- for (x, y, w, h) in faces:
12
- cv2.rectangle(image_np, (x, y), (x+w, y+h), (0, 255, 0), 2)
13
- return image_np
 
 
 
 
 
 
 
 
 
 
 
 
14
 
15
- iface= gr.Interface(
16
- fn = detect_faces,
17
- inputs ="image",
18
- outputs ="image",
19
- title ="Face Detection",
20
- description ="Upload an image, and the model will detect faces and draw bounding boxes around them.",
21
- )
22
- iface.launch()
 
 
1
  import cv2
2
+ import numpy as np
3
+ from flask import Flask, render_template, request, jsonify, send_file
4
  from PIL import Image
5
+ import io
6
+ import base64
7
# Flask application instance; the route handlers below register against it.
app = Flask(__name__)

# Initialize face detector.
# Haar cascade bundled with OpenCV, loaded once at import time so every
# request reuses the same classifier instead of re-reading the XML file.
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
13
def detect_faces(image_data, scale_factor=1.1):
    """Detect faces in a base64-encoded image and annotate them.

    Args:
        image_data: Image as a data URL ("data:image/...;base64,....") or a
            bare base64 string.
        scale_factor: Pyramid scale step for the Haar cascade, passed to
            ``detectMultiScale`` (must be > 1.0).

    Returns:
        Tuple of (annotated image as a JPEG data URL, list of per-face dicts
        with ``id``, ``age``, ``gender`` and ``position`` keys).

    Note:
        Age/gender values are random placeholders, not model predictions.
    """
    import random  # hoisted: was previously re-imported inside the per-face loop

    # Accept both a full data URL and a bare base64 payload;
    # split(',')[1] would raise IndexError when there is no comma.
    image_data = image_data.split(',', 1)[-1]
    image_bytes = base64.b64decode(image_data)

    # convert("RGB") normalises RGBA/palette/grayscale inputs; without it
    # cv2.COLOR_RGB2GRAY and the JPEG re-encode below fail on e.g. 4-channel PNGs.
    image = Image.open(io.BytesIO(image_bytes)).convert("RGB")
    image_np = np.array(image)

    # Detection runs on grayscale.
    gray_image = cv2.cvtColor(image_np, cv2.COLOR_RGB2GRAY)
    faces = face_cascade.detectMultiScale(
        gray_image,
        scaleFactor=scale_factor,
        minNeighbors=5,
        minSize=(30, 30)
    )

    # Draw a bounding box and label for each detection.
    for (x, y, w, h) in faces:
        cv2.rectangle(image_np, (x, y), (x+w, y+h), (0, 255, 0), 2)
        cv2.putText(image_np, "Face", (x, y-10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 0, 0), 2)

    # Re-encode the annotated frame as a JPEG data URL.
    result_image = Image.fromarray(image_np)
    buffered = io.BytesIO()
    result_image.save(buffered, format="JPEG")
    result_base64 = base64.b64encode(buffered.getvalue()).decode()

    # Placeholder age/gender estimates (mock data — replace with a real model).
    ages = ["20-25", "26-32", "33-40", "41-50", "51-60"]
    genders = ["Male", "Female"]
    results = [
        {
            'id': i + 1,
            'age': random.choice(ages),
            'gender': random.choice(genders),
            'position': {'x': int(x), 'y': int(y), 'width': int(w), 'height': int(h)},
        }
        for i, (x, y, w, h) in enumerate(faces)
    ]

    return f"data:image/jpeg;base64,{result_base64}", results
60
+
61
@app.route('/')
def index():
    """Serve the single-page front end."""
    page = 'index.html'
    return render_template(page)
64
 
65
@app.route('/detect', methods=['POST'])
def detect():
    """Run face detection on a JSON-posted base64 image.

    Expects a body of ``{"image": "<data URL>", "scale": <optional float>}``
    and returns ``{"success": true, "result_image": ..., "faces_detected": n,
    "face_data": [...]}`` on success, or ``{"success": false, "error": ...}``
    with an appropriate HTTP status on failure.
    """
    # silent=True yields None (instead of raising) on a missing or
    # malformed JSON body, so the client gets a clean 400 response.
    data = request.get_json(silent=True)
    if not data or 'image' not in data:
        return jsonify({
            'success': False,
            'error': "Missing 'image' in request body"
        }), 400

    try:
        scale_factor = float(data.get('scale', 1.1))
        result_image, face_data = detect_faces(data['image'], scale_factor)
    except Exception as e:
        # Decoding/detection failure: report the message without a traceback,
        # and signal a server-side error instead of a misleading 200.
        return jsonify({
            'success': False,
            'error': str(e)
        }), 500

    return jsonify({
        'success': True,
        'result_image': result_image,
        'faces_detected': len(face_data),
        'face_data': face_data
    })
85
 
86
if __name__ == '__main__':
    import os
    # SECURITY: Werkzeug's debugger allows remote code execution, so never
    # enable debug mode by default on a publicly bound host (0.0.0.0).
    # Opt in explicitly with FLASK_DEBUG=1 during local development.
    debug = os.environ.get('FLASK_DEBUG', '').lower() in ('1', 'true')
    app.run(debug=debug, host='0.0.0.0', port=5000)