EngAbod committed on
Commit
40bddee
·
1 Parent(s): 7c19723

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +238 -45
app.py CHANGED
@@ -1,53 +1,246 @@
1
- import streamlit as st
2
- import numpy as np
3
- import cv2
4
- from io import BytesIO
5
- from streamlit_webrtc import webrtc_streamer
6
  from turn import get_ice_servers
 
 
 
 
 
 
 
7
  import av
8
 
9
def create_blake_image(input_image):
    """Black out everything outside the largest circle centred in the image.

    Accepts either an already-decoded image array or a file-like object of
    encoded image bytes. (The original only handled the file-like case, but
    its caller ``VideoProcessor.recv`` passes a decoded BGR ndarray, which
    would fail on ``.read()``.)

    Parameters:
        input_image: ``np.ndarray`` of shape (H, W[, C]), or a file-like
            object whose ``.read()`` returns encoded image bytes.

    Returns:
        ``np.ndarray`` of the same shape and dtype with all pixels outside
        the centred inscribed circle set to 0.
    """
    if isinstance(input_image, np.ndarray):
        img = input_image
    else:
        # Read the image bytes from the file-like object and decode them.
        img = cv2.imdecode(np.frombuffer(input_image.read(), np.uint8), -1)

    height, width = img.shape[:2]
    center_x, center_y = width // 2, height // 2
    radius = min(width, height) // 2

    # Boolean mask of the inscribed circle, computed with numpy broadcasting
    # (no OpenCV needed for the masking step itself).
    yy, xx = np.ogrid[:height, :width]
    inside = (xx - center_x) ** 2 + (yy - center_y) ** 2 <= radius ** 2

    # Zero out pixels outside the circle, broadcasting over channels if any.
    mask = inside[..., None] if img.ndim == 3 else inside
    return np.where(mask, img, 0).astype(img.dtype)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
26
 
 
 
 
 
 
 
27
 
 
 
 
28
 
29
- # Continue with the rest of your processing
 
 
 
 
 
 
 
 
 
 
 
30
 
31
class VideoProcessor:
    """streamlit-webrtc processor: mirrors each camera frame, masks it to a
    centred circle via ``create_blake_image`` and overlays debug text."""

    # Histogram bin count; not referenced by recv() in this view.
    num_bins = 256
    # Stop flag; never written by this class in this view.
    video_stopped = False

    def recv(self, frame):
        """Process one incoming WebRTC frame and return the masked frame.

        Parameters:
            frame: incoming ``av.VideoFrame`` from the browser camera.

        Returns:
            ``av.VideoFrame`` in bgr24 format built from the masked image.
        """
        frm = frame.to_ndarray(format="bgr24")
        # Overlay the pixel dtype of the original image as debug text.
        text = str(frm.dtype)
        cv2.putText(frm, text, (20, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0))
        # Mirror horizontally so the preview matches the user's movements.
        frm = cv2.flip(frm,1)
        # NOTE(review): create_blake_image calls .read() on its argument, but
        # frm is an ndarray here — confirm this call does not raise at runtime.
        modified_frm = create_blake_image(frm)
        # Overlay the frame shape as a second debug string.
        text2 = str(frm.shape)
        cv2.putText(modified_frm, text2, (30, 70), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0))
        # NOTE(review): st.write from this callback runs outside the Streamlit
        # script thread and typically does not render — verify it has effect.
        st.write(f"Original Image Format: {modified_frm.dtype}")
        st.write(f"Original Image Shape: {modified_frm.shape}")

        return av.VideoFrame.from_ndarray(modified_frm , format='bgr24')
49
# Inside your Streamlit app

# Page title (Arabic: "Focus on the centre of the screen").
st.title("التركيز على وسط الشاشة")

# Start the WebRTC stream: video only, frames routed through VideoProcessor,
# with TURN/STUN servers supplied by the local turn helper module.
webrtc_streamer(key="example", video_processor_factory=VideoProcessor,media_stream_constraints={"video": True, "audio": False},rtc_configuration={"iceServers": get_ice_servers()},)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  from turn import get_ice_servers
2
+
3
+ import cv2
4
+ import mediapipe as mp
5
+ import numpy as np
6
+ import time
7
+ import math
8
+ import streamlit as st
9
  import av
10
 
11
+ from tensorflow.keras.models import load_model
12
+ from scipy.signal import convolve2d
13
+ from skimage import color
14
+ from skimage import io
15
+ from sklearn.metrics import accuracy_score
16
+
17
+ # VECTORIZATION the u factor
18
+ import matplotlib.pyplot as plt
19
+ import os
20
+ import torch
21
+ import torchvision.transforms as transforms
22
+ import torchvision.models as models
23
+ from PIL import Image
24
+ from tensorflow.keras.models import Sequential
25
+ from tensorflow.keras.layers import Dense, Conv1D, MaxPooling1D, Flatten, Dropout
26
+ from tensorflow.keras.optimizers import Adam
27
+ from streamlit_webrtc import webrtc_streamer
28
+
29
# Number of intensity bins for the ED-LBP histograms (one per 8-bit code).
num_bins = 256

# MediaPipe face-mesh utilities; initialised here but not referenced by the
# processing code visible below — presumably kept for drawing/debugging.
mp_face_mesh = mp.solutions.face_mesh
face_mesh = mp_face_mesh.FaceMesh(min_detection_confidence=0.5, min_tracking_confidence=0.5)
mp_drawing = mp.solutions.drawing_utils
drawing_spec = mp_drawing.DrawingSpec(thickness=1, circle_radius=1)

# Load the two pre-trained Keras anti-spoofing classifiers from disk.
model = load_model('best_model_HQ_v8.h5')
model2 = load_model('best_model_HQ_v9.h5')
39
def u_sliding_factor(image_channel, P):
    """Compute the per-pixel ED-LBP weighting factor u for every interior pixel.

    For each pixel, u = (sum of its 8 neighbours) / (2 * P) + (centre) / 2.

    Fixes over the original: the dead initial ``result`` array and the
    misspelled ``kernal2`` are gone, the two filter passes are merged into one
    combined kernel (filtering is linear), and the implementation now really
    uses scipy's ``convolve2d`` as the original comment claimed, instead of
    ``cv2.filter2D``. Cropping the 1-pixel border of a 'same' convolution is
    exactly ``mode='valid'``, and the border pixels that differed between the
    two border policies were discarded anyway.

    Parameters:
        image_channel: 2-D array, single (caller-padded) image channel.
        P: number of neighbours in the LBP neighbourhood (8 for a 3x3 window).

    Returns:
        float32 array of shape (H - 2, W - 2): the u factor per interior pixel.
    """
    # Combined 3x3 kernel: neighbours weighted 1/(2P), centre weighted 1/2.
    kernel = np.ones((3, 3), np.float32) / (2 * P)
    kernel[1, 1] = 0.5
    # The kernel is 180°-symmetric, so convolution equals correlation here.
    return convolve2d(image_channel, kernel, mode='valid').astype(np.float32)
58
 
59
def C_list_calculate(P):
    """Return the eight ED-LBP curvature coefficients C_1..C_8.

    C_k = (P - k) * (k - 1) / floor(((P - 1) / 2) ** 2) for k = 1..8.

    Parameters:
        P: number of neighbours in the LBP neighbourhood.

    Returns:
        list of 8 floats, symmetric around the middle of the range.
    """
    denominator = math.floor(((P - 1) / 2) ** 2)
    return [((P - k) * (k - 1)) / denominator for k in range(1, 9)]
65
 
66
def ED_LBP_Sliding_Matrix(I, P):
    """Compute the ED-LBP code image for one channel.

    Each pixel is compared against its local weighting factor u (from
    ``u_sliding_factor``); neighbours that are >= u contribute a weighted
    power of two, and the accumulated code is clipped to the 8-bit range.

    Fixes over the original: ``C_list_calculate`` now receives ``P`` instead
    of a hard-coded 8 (identical for the only call site, which passes P == 8),
    dead zero-initialisations of ``slid_factor``/``ED_LBP_matrix`` are gone,
    and the manual neighbour counter is replaced by ``enumerate``.

    Parameters:
        I: 2-D array, single image channel (integer intensities expected).
        P: neighbourhood size; the 8 fixed offsets below assume P == 8.

    Returns:
        int array of the same shape as the unpadded input, values 0..255.
    """
    # Zero-pad by one pixel so every original pixel has a full 3x3 window.
    I = np.pad(I, pad_width=1, mode='constant')
    K = (2 ** P) - 1  # maximum code value used for normalisation headroom
    C_list = C_list_calculate(P)
    u_fac_matrix = u_sliding_factor(I.astype(np.float32), P)
    m, n = u_fac_matrix.shape
    ED_LBP = np.zeros(u_fac_matrix.shape, np.float32)
    K_matrix = np.ones(u_fac_matrix.shape).astype(np.float32) * K
    # Top-left corners of the eight 3x3 neighbour windows, clockwise.
    offsets = [(0, 1), (0, 2), (1, 2), (2, 2), (2, 1), (2, 0), (1, 0), (0, 0)]

    for count, (row_offset, col_offset) in enumerate(offsets, start=1):
        # Signed difference between each neighbour and the local u factor.
        sliding_matrix = I[row_offset:row_offset + m, col_offset:col_offset + n].astype(np.float32) - u_fac_matrix
        slid_factor = np.maximum(sliding_matrix, 0).astype(np.float32)
        # Normalise by the remaining headroom K - u, guarding division by zero.
        k_norm = K_matrix - u_fac_matrix
        k_norm_nonzero = np.where(k_norm == 0, 1e-10, k_norm)
        A_factor = np.where(k_norm != 0, slid_factor / k_norm_nonzero, 0)
        ED_LBP_matrix = (A_factor.astype(np.float32) * C_list[count - 1]) + np.ones(A_factor.shape).astype(np.float32)
        # Accumulate 2^((count - 1) * weight) wherever the neighbour >= u.
        ED_LBP = ED_LBP + np.where(sliding_matrix >= 0, 2 ** ((count - 1) * ED_LBP_matrix.astype(np.float32)), 0)

    # Clip to the 8-bit range and round to the nearest integer code.
    ED_LBP = np.where(ED_LBP > 255, 255, np.round(ED_LBP))

    return ED_LBP.astype(int)
97
+
98
def compute_histogram(image, num_bins):
    """Normalised intensity histogram of a single-channel image.

    Equivalent to ``cv2.calcHist([image], [0], None, [num_bins],
    [0, num_bins])`` followed by normalisation, but implemented with
    ``np.histogram`` so it accepts any integer dtype (calcHist is restricted
    to uint8/uint16/float32, forcing casts at every call site).

    Parameters:
        image: 2-D array with intensities in [0, num_bins).
        num_bins: number of histogram bins (one per intensity value).

    Returns:
        float32 array of shape (num_bins, 1) summing to 1 — the same column
        shape calcHist produced, so existing ``.ravel()`` callers still work.
    """
    counts, _ = np.histogram(image.ravel(), bins=num_bins, range=(0, num_bins))
    hist = counts.astype(np.float32).reshape(-1, 1)
    return hist / hist.sum()  # Normalize the histogram
102
+
103
def spatial_pyramid(image, num_bins):
    """Two-level spatial-pyramid histogram of ED-LBP codes for all channels.

    Per channel: one histogram over the whole channel (level 0) plus 16
    histograms over a 4x4 grid of sub-regions (level 2), all concatenated,
    then concatenated across channels.

    Parameters:
        image: 3-D array (H, W, C), any colour space.
        num_bins: number of histogram bins per region.

    Returns:
        1-D float array of length C * (1 + 16) * num_bins.
    """
    grid = 4
    per_channel = []

    for c in range(image.shape[2]):
        # ED-LBP code image for this channel; codes fit in uint8 (0..255).
        codes = ED_LBP_Sliding_Matrix(image[:, :, c].astype(np.int16), 8).astype(np.uint8)

        # Level 0: single histogram over the entire channel.
        level0 = compute_histogram(codes, num_bins).ravel()

        # Level 2: one histogram per cell of the 4x4 grid, row-major order.
        cell_h = codes.shape[0] // grid
        cell_w = codes.shape[1] // grid
        cells = []
        for row in range(grid):
            for col in range(grid):
                patch = codes[row * cell_h:(row + 1) * cell_h,
                              col * cell_w:(col + 1) * cell_w]
                # float64 here mirrors the original's float64 grid buffer, so
                # the concatenated feature vector keeps the same dtype.
                cells.append(compute_histogram(patch, num_bins).ravel().astype(np.float64))

        per_channel.append(np.concatenate([level0] + cells))

    return np.concatenate(per_channel)
133
+
134
+
135
# Add custom CSS styles for the title, body text, buttons and centring.
st.markdown(
    """
    <style>
    .st-title {
        font-size: 24px; /* Larger font for the title */
        text-align: center;
    }
    .st-text {
        font-size: 16px; /* Smaller font for the text */
        text-align: center;
        margin: 10px 0;
    }
    .st-button {
        font-size: 20px;
    }
    .centered {
        display: flex;
        justify-content: center;
        align-items: center;
        flex-direction: column;
    }
    </style>
    """,
    unsafe_allow_html=True
)

# App title (Arabic: "Spoofing detection system").
st.markdown("<h1 class='st-title'>نظام كشف التزييف</h1>", unsafe_allow_html=True)
# Arabic: "Read the terms of use below before using the system."
st.markdown("<p class='st-text'>قم بقراءة شروط الاستخدام في الاسفل قبل استخدام النظام</p>", unsafe_allow_html=True)


picture = st.camera_input("Take a picture")

if picture:
    # Decode the captured image bytes into a BGR OpenCV array.
    bytes_data = picture.getvalue()
    frm = cv2.imdecode(np.frombuffer(bytes_data, np.uint8), cv2.IMREAD_COLOR)
    img_h, img_w, img_c = frm.shape
    # Mirror horizontally so the result matches the selfie preview.
    frm = cv2.flip(frm,1)
    features_list=[]
    features_list2=[]  # NOTE(review): never populated in this view
    # Reject dark captures: mean grayscale brightness below 100 (0-255 scale).
    gray = cv2.cvtColor(frm, cv2.COLOR_BGR2GRAY)
    average_brightness = cv2.mean(gray)[0]
    if average_brightness < 100:
        # Arabic: "Poor lighting" / "Move to a well-lit place".
        st.text("إضاءة غير جيدة")
        st.text("انتقل الى مكان جيد الإضاءة")
    else:

        # Detect faces using cascade classifier
        face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
        faces = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))
        expansion_factor = 1.5  # enlarge each detection box by 50%
        num_bins = 256
        biggest_face = None
        biggest_area = 0
        target_size = (512,512)
        for (x, y, w, h) in faces:
            # Calculate the expanded dimensions (clamped to the frame).
            expanded_x = max(0, int(x - (w * (expansion_factor - 1) / 2)))
            expanded_y = max(0, int(y - (h * (expansion_factor - 1) / 2)))
            expanded_w = min(img_w, int(w * expansion_factor))
            expanded_h = min(img_h, int(h * expansion_factor))

            # Keep only the largest expanded face region seen so far.
            current_area = expanded_w * expanded_h
            if current_area > biggest_area:
                biggest_area = current_area
                biggest_face = frm[expanded_y:expanded_y + expanded_h, expanded_x:expanded_x + expanded_w]
            # biggest_face = frm[y:y + h, x:x + w]
            # Resize the current biggest face to the model input size.
            resized_face = cv2.resize(biggest_face, target_size)
        if biggest_face is not None:

            # Spatial-pyramid ED-LBP features in three colour spaces.
            rgb_features = spatial_pyramid(cv2.cvtColor(resized_face, cv2.COLOR_BGR2RGB), num_bins)
            hsv_features = spatial_pyramid(cv2.cvtColor(resized_face, cv2.COLOR_BGR2HSV), num_bins)
            ycbcr_features = spatial_pyramid(cv2.cvtColor(resized_face, cv2.COLOR_BGR2YCrCb), num_bins)


            if rgb_features.size > 0 and hsv_features.size > 0 and ycbcr_features.size > 0:
                combined_features = np.concatenate((rgb_features, hsv_features, ycbcr_features))
                features_list.append(combined_features)
            if len(features_list) > 0:
                X_array = np.array(features_list)
                # print(X_array.shape)
                # Add a trailing channel axis for the Conv1D models.
                X_test_array_reshaped = np.expand_dims(X_array, axis=-1)
                prediction = model.predict(X_test_array_reshaped)
                prediction2 = model2.predict(X_test_array_reshaped)
                # NOTE(review): thresholds are near zero — effectively any
                # positive score classifies as real; confirm intended cut-offs.
                if prediction >= 0.00001 and prediction2 >= 0.000001:
                    st.text("صورة حقيقية")  # Arabic: "Real image"
                    # st.text(str(prediction[0]))
                    # st.text(str(prediction2[0]))
                else:
                    st.text("صورة مزيفة")  # Arabic: "Fake image"
                    # st.text(str(prediction[0]))
                    # st.text(str(prediction2[0]))
        else:
            st.text("لا يوجد وجه")  # Arabic: "No face found"
    # Provide guidance for users (Arabic: if you got a non-real result,
    # make sure the conditions below are met).
    st.markdown("<p class='st-text'>في حالة حصلت على نتيجة غير حقيقية احرص على تحقيق الشروط في الاسفل</p>", unsafe_allow_html=True)

    st.image(picture)
# Terms-of-use section title (Arabic: "Terms of use").
st.markdown("<h1 class='st-title'>شروط الاستخدام</h1>", unsafe_allow_html=True)

# Add informative text with centered alignment (usage conditions in Arabic:
# good lighting, decent phone camera, face straight-on, look at the camera,
# no reflective background such as glass, prefer a solid wall behind you).
st.markdown("<p class='st-text'>يجب توفر إضاءة جيدة</p>", unsafe_allow_html=True)
st.markdown("<p class='st-text'>يجب استخدام كاميرا هاتف بدفة جيدة</p>", unsafe_allow_html=True)
st.markdown("<p class='st-text'>يجب ان يكون الوجه مقابلا للشاشة بشكل مستقيم</p>", unsafe_allow_html=True)
st.markdown("<p class='st-text'>التركيز على مكان الكاميرا عند الالتقاط</p>", unsafe_allow_html=True)
st.markdown("<p class='st-text'>الحرص على ان لا يكون خلفك خلفية تعكس الضوء مثل الزجاج</p>", unsafe_allow_html=True)
st.markdown("<p class='st-text'>يفضل ان يكون خلفك خلفية صلدة مثل الجدار</p>", unsafe_allow_html=True)