EngAbod committed on
Commit
eab9944
·
1 Parent(s): e9b42e1

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +23 -334
app.py CHANGED
@@ -1,344 +1,33 @@
1
- from turn import get_ice_servers
2
-
3
  import cv2
4
- import mediapipe as mp
5
  import numpy as np
6
- import time
7
- import math
8
- import streamlit as st
9
- import av
10
-
11
- from tensorflow.keras.models import load_model
12
- from scipy.signal import convolve2d
13
- from skimage import color
14
- from skimage import io
15
- from sklearn.metrics import accuracy_score
16
-
17
- # VECTORIZATION the u factor
18
- import matplotlib.pyplot as plt
19
- import os
20
- import torch
21
- import torchvision.transforms as transforms
22
- import torchvision.models as models
23
- from PIL import Image
24
- from tensorflow.keras.models import Sequential
25
- from tensorflow.keras.layers import Dense, Conv1D, MaxPooling1D, Flatten, Dropout
26
- from tensorflow.keras.optimizers import Adam
27
- from streamlit_webrtc import webrtc_streamer
28
-
29
- num_bins = 256
30
-
31
- mp_face_mesh = mp.solutions.face_mesh
32
- face_mesh = mp_face_mesh.FaceMesh(min_detection_confidence=0.5, min_tracking_confidence=0.5)
33
- mp_drawing = mp.solutions.drawing_utils
34
- drawing_spec = mp_drawing.DrawingSpec(thickness=1, circle_radius=1)
35
-
36
- # Load the model
37
- model = load_model('best_model_HQ_v9.h5')
38
- # model2 = load_model('best_model_HQ_v9.h5')
39
- def u_sliding_factor(image_channel, P):
40
- result = np.zeros(image_channel.shape, np.float32)
41
-
42
- # Define the sliding window size
43
- window_size = (3, 3)
44
-
45
- # Create the convolution kernel
46
- kernel = np.ones(window_size, np.float32)
47
- kernel[1, 1] = 0
48
- kernel = kernel / (2 * P)
49
- kernal2 = np.zeros(window_size, np.float32)
50
- kernal2[1, 1] = 1
51
- kernal2 = kernal2 / 2
52
-
53
- # Perform the convolution using scipy's convolve2d
54
- convolution_matrix = cv2.filter2D(image_channel, -1, kernel) + cv2.filter2D(image_channel, -1, kernal2)
55
- result = convolution_matrix[1:-1, 1:-1]
56
-
57
- return result.astype(np.float32)
58
-
59
- def C_list_calculate(P):
60
- C = []
61
- for count in range(1, 9):
62
- c_value = ((P - count) * (count - 1)) / math.floor(((P - 1) / 2)**2)
63
- C.append(c_value)
64
- return C
65
-
66
- def ED_LBP_Sliding_Matrix(I, P):
67
- # Define the amount of padding
68
- padding_amount = 1
69
-
70
- # Pad the array with zeros
71
- I = np.pad(I, pad_width=padding_amount, mode='constant')
72
- K = (2**P) - 1
73
- C_list = C_list_calculate(8)
74
- u_fac_matrix = u_sliding_factor(I.astype(np.float32), P)
75
- slid_factor = np.zeros((u_fac_matrix.shape), np.float32)
76
- m, n = u_fac_matrix.shape
77
- ED_LBP = np.zeros(u_fac_matrix.shape, np.float32)
78
- ED_LBP_matrix = np.zeros((u_fac_matrix.shape), np.float32)
79
- K_matrix = np.ones(u_fac_matrix.shape).astype(np.float32) * K
80
- offsets = [(0, 1), (0, 2), (1, 2), (2, 2), (2, 1), (2, 0), (1, 0), (0, 0)]
81
- count = 1
82
-
83
- for offset in offsets:
84
- row_offset, col_offset = offset
85
- sliding_matrix = I[row_offset:row_offset + m, col_offset:col_offset + n].astype(np.float32) - u_fac_matrix.astype(np.float32)
86
- slid_factor = np.maximum(sliding_matrix, 0).astype(np.float32)
87
- k_norm = K_matrix.astype(np.float32) - u_fac_matrix.astype(np.float32)
88
- k_norm_nonzero = np.where(k_norm == 0, 1e-10, k_norm)
89
- A_factor = np.where(k_norm != 0, slid_factor / k_norm_nonzero, 0)
90
- ED_LBP_matrix = (A_factor.astype(np.float32) * C_list[count - 1]) + np.ones(A_factor.shape).astype(np.float32)
91
- ED_LBP = ED_LBP + np.where(sliding_matrix >= 0, 2**((count - 1) * ED_LBP_matrix.astype(np.float32)), 0)
92
- count = count + 1
93
-
94
- ED_LBP = np.where(ED_LBP > 255, 255, np.round(ED_LBP))
95
-
96
- return ED_LBP.astype(int)
97
-
98
- def compute_histogram(image, num_bins):
99
- hist = cv2.calcHist([image], [0], None, [num_bins], [0, num_bins])
100
- hist = hist / hist.sum() # Normalize the histogram
101
- return hist
102
-
103
- def spatial_pyramid(image, num_bins):
104
- ED_LBP_image = np.zeros((image.shape), np.int16)
105
- num_channels = image.shape[2]
106
- histograms = []
107
-
108
- for channel in range(num_channels):
109
- ED_LBP_image[:, :, channel] = ED_LBP_Sliding_Matrix(image[:, :, channel].astype(np.int16), 8)
110
-
111
- # Level 0: Compute histogram for the entire channel
112
- H1_channel = compute_histogram(ED_LBP_image[:, :, channel].astype(np.uint8), num_bins).ravel()
113
-
114
- # Level 2: Compute histograms for 4x4 grids
115
- grid_size = 4
116
- H2_channel = np.empty((grid_size, grid_size, num_bins))
117
- grid_height, grid_width = ED_LBP_image[:, :, channel].shape[0] // grid_size, ED_LBP_image[:, :, channel].shape[1] // grid_size
118
- for m in range(grid_size):
119
- for n in range(grid_size):
120
- grid_image = ED_LBP_image[m * grid_height: (m + 1) * grid_height,
121
- n * grid_width: (n + 1) * grid_width, channel]
122
- H2_channel[m, n] = compute_histogram(grid_image.astype(np.uint8), num_bins).ravel()
123
-
124
- H2_channel = H2_channel.reshape(-1)
125
-
126
- # Concatenate histograms from level 0 and level 2
127
- Hs_channel = np.concatenate((H1_channel, H2_channel))
128
- histograms.append(Hs_channel)
129
-
130
- # Concatenate histograms from all channels
131
- feature_vector = np.concatenate(histograms)
132
- return feature_vector
133
- def add_circle_image_to_black_image(image):
134
- # Convert the image to RGB format if it is not already
135
- if len(image.shape) == 2:
136
- image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
137
-
138
- # Get the height and width of the input image
139
- height, width, _ = image.shape
140
-
141
- # Create a black image with the same height and width as the input image
142
- black_image = np.zeros((height, width, 3), dtype=np.uint8)
143
 
144
- # Calculate the radius of the circle
145
- radius = int(min(height, width) / 2)
 
 
 
 
 
 
146
 
147
- # Create a mask for the circle
148
- mask = np.zeros((height, width), dtype=np.uint8)
149
- cv2.circle(mask, (width // 2, height // 2), radius, (255, 255, 255), -1)
150
 
151
- # Bitwise AND the input image with the mask to create a circular image
152
- circular_image = cv2.bitwise_and(image, image, mask=mask)
153
 
154
- # Add the circular image to the black image at the center
155
- black_image[height // 2 - radius: height // 2 + radius, width // 2 - radius: width // 2 + radius] = circular_image
 
156
 
157
- # Return the black image with the circular image in the center
158
- return black_image
159
- class VideoProcessor:
160
- num_bins = 256
161
- video_stopped = False
162
-
163
- def recv(self, frame):
164
- frm = frame.to_ndarray(format="bgr24")
165
- frm = cv2.flip(frm,1)
166
-
167
- gray_image = cv2.cvtColor(frm, cv2.COLOR_BGR2GRAY)
168
- average_brightness = cv2.mean(gray_image)[0]
169
- text3 = str(average_brightness)
170
- cv2.putText(frm, text3, (10, 90), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255))
171
- flag = 0
172
- # # Denoise the image using Gaussian blur (optional)
173
- # frm = cv2.GaussianBlur(frm, (5, 5), 0)
174
-
175
- # # Enhance image quality by increasing contrast and brightness
176
- # alpha = 1.5 # Contrast control (1.0 means no change)
177
- # beta = 30 # Brightness control (0 means no change)
178
- # enhanced_image = cv2.convertScaleAbs(frm, alpha=alpha, beta=beta)
179
- # frm = enhanced_image
180
 
181
- if average_brightness < 100:
182
- text = "Bad Light, increase the light"
183
- cv2.putText(frm, text, (20, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 0, 0))
184
- return av.VideoFrame.from_ndarray(frm, format='bgr24')
185
- else:
186
- rgb_frame = cv2.cvtColor(frm, cv2.COLOR_BGR2RGB)
187
- results = face_mesh.process(rgb_frame)
188
- img_h, img_w, img_c = frm.shape
189
- face_3d = []
190
- face_2d = []
191
-
192
- if results.multi_face_landmarks:
193
- for landmarks in results.multi_face_landmarks:
194
- text = "No Face"
195
- for idx, lm in enumerate(landmarks.landmark):
196
- if idx == 33 or idx == 263 or idx == 1 or idx == 61 or idx == 291 or idx == 199:
197
- if idx == 1:
198
- nose_2d = (lm.x * img_w, lm.y * img_h)
199
- nose_3d = (lm.x * img_w, lm.y * img_h, lm.z * 3000)
200
- x, y = int(lm.x * img_w), int(lm.y * img_h)
201
-
202
-
203
- # Get the 2d coordinate
204
- face_2d.append([x, y])
205
-
206
-
207
- # Get 3d coordinate
208
- face_3d.append([x, y, lm.z])
209
-
210
- # Convert to numpy array
211
- # Error from
212
- face_2d = np.array(face_2d, dtype=np.float32)
213
- face_3d = np.array(face_3d, dtype=np.float32)
214
-
215
- # The camera matrix
216
- focal_length = 1 * img_w
217
- cam_matrix = np.array([[focal_length, 0, img_h / 2],
218
- [0, focal_length, img_w / 2],
219
- [0, 0, 1]])
220
-
221
- # The distance matrix
222
- dist_matrix = np.zeros((4, 1), dtype=np.float64)
223
-
224
- #solve PnP
225
- success, rot_vec, trans_vec = cv2.solvePnP(face_3d, face_2d, cam_matrix, dist_matrix)
226
-
227
- #get rotational matrix
228
- rmat ,jac = cv2.Rodrigues(rot_vec)
229
-
230
- #Get angles1
231
- angles, mtxR, mtxQ, Qx, Qy, Qz = cv2.RQDecomp3x3(rmat)
232
-
233
- #get y rotation degree
234
- x = angles[0] * 360
235
- y = angles[1] * 360
236
- z = angles[2] * 360
237
- # see where the user's head tilting
238
- if y < -10:
239
- text = "Look Right"
240
- elif y > 10:
241
- text = "Look Left"
242
- elif x < -10:
243
- text = "Look Up"
244
- elif x > 10:
245
- text = "Look Down"
246
- else:
247
- features_list=[]
248
- features_list2=[]
249
- # Check if there are face landmarks detected
250
- gray = cv2.cvtColor(frm, cv2.COLOR_BGR2GRAY)
251
-
252
-
253
- # Detect faces using cascade classifier
254
- face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
255
- faces = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))
256
- expansion_factor = 1.5
257
- num_bins = 256
258
- biggest_face = None
259
- biggest_area = 0
260
- target_size = (512,512)
261
- for (x, y, w, h) in faces:
262
- # Calculate the expanded dimensions
263
- expanded_x = max(0, int(x - (w * (expansion_factor - 1) / 2)))
264
- expanded_y = max(0, int(y - (h * (expansion_factor - 1) / 2)))
265
- expanded_w = min(img_w, int(w * expansion_factor))
266
- expanded_h = min(img_h, int(h * expansion_factor))
267
-
268
- # Crop the expanded face region from the frame
269
- current_area = expanded_w * expanded_h
270
- if current_area > biggest_area:
271
- biggest_area = current_area
272
- biggest_face = frm[expanded_y:expanded_y + expanded_h, expanded_x:expanded_x + expanded_w]
273
- # biggest_face = frm[y:y + h, x:x + w]
274
- resized_face = cv2.resize(biggest_face, target_size)
275
- if biggest_face is not None:
276
-
277
- # Perform spatial pyramid feature extraction
278
- rgb_features = spatial_pyramid(cv2.cvtColor(resized_face, cv2.COLOR_BGR2RGB), num_bins)
279
- hsv_features = spatial_pyramid(cv2.cvtColor(resized_face, cv2.COLOR_BGR2HSV), num_bins)
280
- ycbcr_features = spatial_pyramid(cv2.cvtColor(resized_face, cv2.COLOR_BGR2YCrCb), num_bins)
281
-
282
-
283
- if rgb_features.size > 0 and hsv_features.size > 0 and ycbcr_features.size > 0:
284
- combined_features = np.concatenate((rgb_features, hsv_features, ycbcr_features))
285
- features_list.append(combined_features)
286
- if len(features_list) > 0:
287
- X_array = np.array(features_list)
288
- print(X_array.shape)
289
- X_test_array_reshaped = np.expand_dims(X_array, axis=-1)
290
- prediction = model.predict(X_test_array_reshaped)
291
- # predection2 = model2.predict(X_test_array_reshaped)
292
- if prediction >= 0.1:
293
- text = "Real Live Person"
294
- text2 = str(prediction[0])
295
- cv2.putText(frm, text2, (10, 70), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255))
296
- flag = 1
297
- # st.text("Real Live Person")
298
- # self.video_stopped = True
299
- #save current resized_face
300
- else:
301
- text= "Not Live Image"
302
- text2 = str(prediction[0])
303
- cv2.putText(frm, text2, (10, 70), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255))
304
- # st.text("Not Live Image")
305
- # self.video_stopped = True
306
- # else:
307
- # text = "Fake Image"
308
-
309
- # Display the nose direction
310
- nose_3d_projection, jacobian = cv2.projectPoints(nose_3d, rot_vec, trans_vec, cam_matrix, dist_matrix, dist_matrix)
311
-
312
- p1 = (int(nose_2d[0]), int(nose_2d[1]))
313
- p2 = (int(nose_2d[0] + y*10), int(nose_2d[1] - x * 10))
314
-
315
- cv2.line(frm, p1, p2, (255,0,0), 3)
316
-
317
- cv2.putText(frm, text, (20, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 255), thickness=3, lineType=cv2.LINE_AA)
318
- cv2.putText(frm, "x :" + str(np.round(x,2)), (500, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,0,255), 2)
319
- cv2.putText(frm, "y :" + str(np.round(x,2)), (500, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,0,255), 2)
320
- cv2.putText(frm, "z :" + str(np.round(x,2)), (500, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,0,255), 2)
321
-
322
-
323
- mp_drawing.draw_landmarks(
324
- image=frm,
325
- landmark_list=landmarks,
326
- connections=mp_face_mesh.FACEMESH_TESSELATION,
327
- landmark_drawing_spec=drawing_spec,
328
- connection_drawing_spec=drawing_spec,
329
- )
330
- else:
331
- text = "There is no Face"
332
- # Add the text to the image
333
- if flag == 1:
334
- cv2.putText(frm, text, (20, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0))
335
- else:
336
- cv2.putText(frm, text, (20, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 255))
337
- frm = add_circle_image_to_black_image(frm)
338
- frm = frm.to_ndarray(format="bgr24")
339
- return av.VideoFrame.from_ndarray(frm, format='bgr24')
340
- # Inside your Streamlit app
341
 
342
- st.title("التركيز على وسط الشاشة")
 
343
 
344
- webrtc_streamer(key="example", video_processor_factory=VideoProcessor,media_stream_constraints={"video": True, "audio": False},rtc_configuration={"iceServers": get_ice_servers()},)
 
1
+ import streamlit as st
 
2
  import cv2
 
3
  import numpy as np
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4
 
5
+ # Function to create a Blake-style image with a circular center
6
+ def create_blake_image(input_image):
7
+ # Create a circular mask
8
+ height, width, _ = input_image.shape
9
+ mask = np.zeros((height, width, 3), dtype=np.uint8)
10
+ center = (width // 2, height // 2)
11
+ radius = min(center)
12
+ cv2.circle(mask, center, radius, (255, 255, 255), thickness=-1)
13
 
14
+ # Apply the circular mask to the input image
15
+ circular_image = cv2.bitwise_and(input_image, mask)
 
16
 
17
+ return circular_image
 
18
 
19
+ # Streamlit app
20
+ st.title("Blake Image with Circular Center")
21
+ picture = st.file_uploader("Upload an image", type=["jpg", "jpeg", "png"])
22
 
23
+ if picture:
24
+ # Read the image
25
+ input_image = cv2.imdecode(np.fromstring(picture.read(), np.uint8), 1)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
26
 
27
+ # Create the Blake-style image
28
+ blake_image = create_blake_image(input_image)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
29
 
30
+ # Display the original and Blake-style images
31
+ st.image([input_image, blake_image], caption=["Original Image", "Blake Image"], use_container_width=True)
32
 
33
+ st.text("Upload an image to see the Blake-style transformation.")