EngAbod's picture
Update app.py
a3f864c verified
raw
history blame
11.4 kB
from turn import get_ice_servers
import cv2
import numpy as np
import time
import math
import streamlit as st
import av
from tensorflow.keras.models import load_model
from scipy.signal import convolve2d
from skimage import color
from skimage import io
from sklearn.metrics import accuracy_score
# Vectorised computation of the ED-LBP "u" factor is implemented below (see u_sliding_factor).
import matplotlib.pyplot as plt
import os
from PIL import Image
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv1D, MaxPooling1D, Flatten, Dropout
from tensorflow.keras.optimizers import Adam
from streamlit_webrtc import webrtc_streamer
import dlib
# Number of histogram bins used for every ED-LBP histogram (8-bit codes -> 256 bins).
num_bins = 256
# Load the trained anti-spoofing classifier (Keras model; presumably the
# Conv1D architecture imported above -- the feature vector is reshaped with a
# trailing channel axis before prediction further down).
model = load_model('best_model_HQ_enhanced_v12.h5')
# Earlier model versions kept for reference:
# model2 = load_model('best_model_HQ_v9.h5')
# model3 = load_model('best_model_HQ_enhanced_v12.h5')
def u_sliding_factor(image_channel, P):
    """Compute the per-pixel mean factor u used by ED-LBP.

    For every interior pixel the result is
        (sum of the 8 neighbours) / (2 * P) + (centre value) / 2,
    evaluated with two 3x3 convolutions.  The one-pixel outer border of the
    convolution output is discarded, so the result is 2 rows and 2 columns
    smaller than ``image_channel`` (callers pass an already zero-padded
    channel, so the output aligns with the original unpadded image).

    Parameters
    ----------
    image_channel : 2-D float array
        One (padded) colour channel of the input image.
    P : int
        Number of neighbours in the LBP neighbourhood (8 in this app).

    Returns
    -------
    2-D float32 array of shape (H - 2, W - 2).
    """
    window_size = (3, 3)
    # Neighbour kernel: all ones except the centre, scaled by 1 / (2 * P).
    neighbour_kernel = np.ones(window_size, np.float32)
    neighbour_kernel[1, 1] = 0
    neighbour_kernel = neighbour_kernel / (2 * P)
    # Centre kernel: the pixel itself, halved.
    centre_kernel = np.zeros(window_size, np.float32)
    centre_kernel[1, 1] = 1
    centre_kernel = centre_kernel / 2
    # Both kernels are symmetric, so convolution equals correlation; scipy's
    # convolve2d therefore matches the previous cv2.filter2D output on the
    # interior pixels (the border ring, where boundary handling differs
    # between the two libraries, is cropped away below).  This also makes the
    # code agree with the original comment, which already named convolve2d.
    convolution_matrix = (convolve2d(image_channel, neighbour_kernel, mode='same')
                          + convolve2d(image_channel, centre_kernel, mode='same'))
    # Drop the one-pixel border whose values depend on boundary handling.
    return convolution_matrix[1:-1, 1:-1].astype(np.float32)
def C_list_calculate(P):
    """Return the 8 ED-LBP weighting coefficients C_1..C_8.

    C_k = (P - k) * (k - 1) / floor(((P - 1) / 2) ** 2)

    Note the list length is fixed at 8 entries regardless of ``P``; only the
    coefficient values scale with ``P`` (the app always passes P = 8).

    Parameters
    ----------
    P : int
        Neighbourhood size parameter.

    Returns
    -------
    list of 8 floats; the first and last coefficients are always 0.
    """
    # The denominator is loop-invariant, so compute it once.
    denominator = math.floor(((P - 1) / 2) ** 2)
    return [((P - count) * (count - 1)) / denominator for count in range(1, 9)]
def ED_LBP_Sliding_Matrix(I, P):
    """Compute the ED-LBP code image for one image channel.

    The channel is zero-padded by one pixel, the per-pixel mean factor u is
    computed (u_sliding_factor), and the eight neighbours of every pixel are
    visited via shifted slices of the padded image.  For each neighbour whose
    value is >= u, a weighted power-of-two term is accumulated; the final sum
    is rounded and clipped to [0, 255].

    Parameters
    ----------
    I : 2-D array
        One image channel (callers pass int16 data).
    P : int
        Neighbourhood size parameter (the app always passes 8).

    Returns
    -------
    2-D int array, same shape as the input channel, values in [0, 255].
    """
    # Define the amount of padding (one pixel on every side).
    padding_amount = 1
    # Pad with zeros so every original pixel has a full 3x3 neighbourhood.
    I = np.pad(I, pad_width=padding_amount, mode='constant')
    # K is the maximum P-bit code value (255 for P = 8).
    K = (2**P) - 1
    # NOTE(review): the coefficients are always built for 8 neighbours even
    # though P is a parameter -- confirm this is intentional.
    C_list = C_list_calculate(8)
    # u has the shape of the *unpadded* channel (u_sliding_factor crops the border).
    u_fac_matrix = u_sliding_factor(I.astype(np.float32), P)
    slid_factor = np.zeros((u_fac_matrix.shape), np.float32)
    m, n = u_fac_matrix.shape
    ED_LBP = np.zeros(u_fac_matrix.shape, np.float32)
    ED_LBP_matrix = np.zeros((u_fac_matrix.shape), np.float32)
    K_matrix = np.ones(u_fac_matrix.shape).astype(np.float32) * K
    # Offsets of the 8 neighbours (clockwise from top-middle) into the padded
    # image; each slice below is an (m, n) view aligned with u_fac_matrix.
    offsets = [(0, 1), (0, 2), (1, 2), (2, 2), (2, 1), (2, 0), (1, 0), (0, 0)]
    count = 1
    for offset in offsets:
        row_offset, col_offset = offset
        # Signed difference between this neighbour plane and the mean factor u.
        sliding_matrix = I[row_offset:row_offset + m, col_offset:col_offset + n].astype(np.float32) - u_fac_matrix.astype(np.float32)
        # Keep only the non-negative part of the difference.
        slid_factor = np.maximum(sliding_matrix, 0).astype(np.float32)
        k_norm = K_matrix.astype(np.float32) - u_fac_matrix.astype(np.float32)
        # Guard against division by zero where u == K.
        k_norm_nonzero = np.where(k_norm == 0, 1e-10, k_norm)
        A_factor = np.where(k_norm != 0, slid_factor / k_norm_nonzero, 0)
        # Per-pixel exponent modifier: 1 + A * C_count.
        ED_LBP_matrix = (A_factor.astype(np.float32) * C_list[count - 1]) + np.ones(A_factor.shape).astype(np.float32)
        # Accumulate 2 ** ((count - 1) * modifier) wherever neighbour >= u.
        ED_LBP = ED_LBP + np.where(sliding_matrix >= 0, 2**((count - 1) * ED_LBP_matrix.astype(np.float32)), 0)
        count = count + 1
    # Clip to the 8-bit range and round to integer codes.
    ED_LBP = np.where(ED_LBP > 255, 255, np.round(ED_LBP))
    return ED_LBP.astype(int)
def compute_histogram(image, num_bins):
    """Return the L1-normalised grey-level histogram of ``image``.

    Uses OpenCV's calcHist on channel 0 with ``num_bins`` bins over the
    value range [0, num_bins); the counts are divided by their sum so the
    bins form a probability distribution.  Shape of the result follows
    calcHist's convention: (num_bins, 1).
    """
    counts = cv2.calcHist([image], [0], None, [num_bins], [0, num_bins])
    total = counts.sum()
    return counts / total
def spatial_pyramid(image, num_bins):
    """Build a spatial-pyramid ED-LBP feature vector for a 3-channel image.

    For each channel the ED-LBP code image is computed, then two histogram
    levels are concatenated:
      * level 0 -- one histogram over the whole channel;
      * level 2 -- one histogram per cell of a 4x4 grid.
    The per-channel vectors (num_bins + 16 * num_bins values each) are then
    concatenated across channels, giving C * 17 * num_bins features.

    Parameters
    ----------
    image : 3-D array (H, W, C)
        Input image in any colour space; channels are processed independently.
    num_bins : int
        Number of histogram bins (256 in this app).

    Returns
    -------
    1-D float array of length C * 17 * num_bins.
    """
    ED_LBP_image = np.zeros((image.shape), np.int16)
    num_channels = image.shape[2]
    histograms = []
    for channel in range(num_channels):
        # ED-LBP codes for this channel (values in [0, 255]).
        ED_LBP_image[:, :, channel] = ED_LBP_Sliding_Matrix(image[:, :, channel].astype(np.int16), 8)
        # Level 0: Compute histogram for the entire channel
        H1_channel = compute_histogram(ED_LBP_image[:, :, channel].astype(np.uint8), num_bins).ravel()
        # Level 2: Compute histograms for 4x4 grids
        grid_size = 4
        H2_channel = np.empty((grid_size, grid_size, num_bins))
        # NOTE: integer division truncates, so trailing rows/columns that do
        # not fit the 4x4 grid exactly are ignored.
        grid_height, grid_width = ED_LBP_image[:, :, channel].shape[0] // grid_size, ED_LBP_image[:, :, channel].shape[1] // grid_size
        for m in range(grid_size):
            for n in range(grid_size):
                grid_image = ED_LBP_image[m * grid_height: (m + 1) * grid_height,
                                          n * grid_width: (n + 1) * grid_width, channel]
                H2_channel[m, n] = compute_histogram(grid_image.astype(np.uint8), num_bins).ravel()
        H2_channel = H2_channel.reshape(-1)
        # Concatenate histograms from level 0 and level 2
        Hs_channel = np.concatenate((H1_channel, H2_channel))
        histograms.append(Hs_channel)
    # Concatenate histograms from all channels
    feature_vector = np.concatenate(histograms)
    return feature_vector
# Add custom CSS styles
st.markdown(
"""
<style>
.st-title {
font-size: 24px; /* Larger font for the title */
text-align: center;
}
.st-text {
font-size: 16px; /* Smaller font for the text */
text-align: center;
margin: 10px 0;
}
.st-button {
font-size: 20px;
}
.centered {
display: flex;
justify-content: center;
align-items: center;
flex-direction: column;
}
</style>
""",
unsafe_allow_html=True
)
# Define the app title
st.markdown("<h1 class='st-title'>نظام كشف التزييف</h1>", unsafe_allow_html=True)
st.markdown("<p class='st-text'>قم بقراءة شروط الاستخدام في الاسفل قبل استخدام النظام</p>", unsafe_allow_html=True)
# --- Capture a frame from the user's camera and classify real vs. fake ------
picture = st.camera_input("Take a picture")
if picture:
    # Decode the captured image bytes into a BGR OpenCV image, then mirror it
    # so it matches the preview the user saw.
    bytes_data = picture.getvalue()
    frm = cv2.imdecode(np.frombuffer(bytes_data, np.uint8), cv2.IMREAD_COLOR)
    frm = cv2.flip(frm, 1)
    features_list = []
    # Reject frames that are too dark for reliable face analysis.
    # (Messages below are Arabic: "bad lighting" / "move to a well-lit place".)
    gray = cv2.cvtColor(frm, cv2.COLOR_BGR2GRAY)
    average_brightness = cv2.mean(gray)[0]
    if average_brightness < 100:
        st.text("إضاءة غير جيدة")
        st.text("انتقل الى مكان جيد الإضاءة")
    else:
        # Detect faces with dlib's frontal face detector (0 = no upsampling).
        faceDetector = dlib.get_frontal_face_detector()
        faces = faceDetector(frm, 0)
        if len(faces) > 0:
            # Keep only the largest detected face (assumed to be the user).
            largest_face = max(
                faces,
                key=lambda f: (f.right() - f.left()) * (f.bottom() - f.top()),
            )
            x = largest_face.left()
            y = largest_face.top()
            w = largest_face.right() - x
            h = largest_face.bottom() - y
            # Crop the detected face box.  (An earlier revision computed a
            # 1.5x-expanded crop but never used it; that dead code is removed.)
            face_img = frm[y:y + h, x:x + w]
            if face_img.size != 0:
                # Normalise the crop size before feature extraction.
                target_size = (512, 512)
                resized_face = cv2.resize(face_img, target_size)
                # ED-LBP spatial-pyramid histograms in three colour spaces.
                rgb_features = spatial_pyramid(cv2.cvtColor(resized_face, cv2.COLOR_BGR2RGB), num_bins)
                hsv_features = spatial_pyramid(cv2.cvtColor(resized_face, cv2.COLOR_BGR2HSV), num_bins)
                ycbcr_features = spatial_pyramid(cv2.cvtColor(resized_face, cv2.COLOR_BGR2YCrCb), num_bins)
                if rgb_features.size > 0 and hsv_features.size > 0 and ycbcr_features.size > 0:
                    combined_features = np.concatenate((rgb_features, hsv_features, ycbcr_features))
                    features_list.append(combined_features)
            if len(features_list) > 0:
                X_array = np.array(features_list)
                # The model expects a trailing channel dimension (Conv1D input).
                X_test_array_reshaped = np.expand_dims(X_array, axis=-1)
                prediction3 = model.predict(X_test_array_reshaped)
                # NOTE(review): prediction3 is a single-sample array; the
                # comparison relies on numpy's single-element truthiness.
                if prediction3 >= 0.5:
                    st.text("Real")
                    st.text(str(prediction3[0]))
                else:
                    # Fixed typo in the user-facing label (was "Fack").
                    st.text("Fake")
                    st.text(str(prediction3[0]))
            else:
                # Arabic: "no face".
                st.text("لا يوجد وجه")
        else:
            # Arabic: "no face".
            st.text("لا يوجد وجه")
    # Guidance (Arabic): if the result says "not real", make sure the
    # conditions listed below are met.  Also echo the captured picture.
    st.markdown("<p class='st-text'>في حالة حصلت على نتيجة غير حقيقية احرص على تحقيق الشروط في الاسفل</p>", unsafe_allow_html=True)
    st.image(picture)
# Terms-of-use section (Arabic): header "Terms of use" followed by the
# conditions for a reliable result -- good lighting, a good-quality phone
# camera, face straight-on to the screen, focus on the camera when capturing,
# no light-reflecting background (e.g. glass), and preferably a solid
# background such as a wall.
st.markdown("<h1 class='st-title'>شروط الاستخدام</h1>", unsafe_allow_html=True)
# Add informative text with centered alignment
st.markdown("<p class='st-text'>يجب توفر إضاءة جيدة</p>", unsafe_allow_html=True)
st.markdown("<p class='st-text'>يجب استخدام كاميرا هاتف بدقة جيدة</p>", unsafe_allow_html=True)
st.markdown("<p class='st-text'>يجب ان يكون الوجه مقابلا للشاشة بشكل مستقيم</p>", unsafe_allow_html=True)
st.markdown("<p class='st-text'>التركيز على مكان الكاميرا عند الالتقاط</p>", unsafe_allow_html=True)
st.markdown("<p class='st-text'>الحرص على ان لا يكون خلفك خلفية تعكس الضوء مثل الزجاج</p>", unsafe_allow_html=True)
st.markdown("<p class='st-text'>يفضل ان يكون خلفك خلفية صلدة مثل الجدار</p>", unsafe_allow_html=True)