Spaces: Runtime error

Upload 25 files

- .gitattributes +36 -35
- Dockerfile +29 -0
- README.md +12 -11
- app/Hackathon_setup/__init__.py +0 -0
- app/Hackathon_setup/exp_recognition.py +71 -0
- app/Hackathon_setup/exp_recognition_model.py +31 -0
- app/Hackathon_setup/face_recognition.py +170 -0
- app/Hackathon_setup/face_recognition_model.py +129 -0
- app/Hackathon_setup/haarcascade_eye.xml +0 -0
- app/Hackathon_setup/haarcascade_frontalface_default.xml +0 -0
- app/Hackathon_setup/hackathon4a_face_similarity_and_recognition (1).py +818 -0
- app/Hackathon_setup/lbpcascade_frontalface.xml +1505 -0
- app/Hackathon_setup/siamese_model.t7 +3 -0
- app/__init__.py +1 -0
- app/config.py +25 -0
- app/main.py +148 -0
- app/static/Person1_1697805233.jpg +0 -0
- app/templates/expr_recognition.html +32 -0
- app/templates/face_recognition.html +32 -0
- app/templates/index.html +29 -0
- app/templates/predict_expr_recognition.html +37 -0
- app/templates/predict_face_recognition.html +37 -0
- app/templates/predict_similarity.html +38 -0
- app/templates/similarity.html +35 -0
- requirements.txt +19 -0
.gitattributes
CHANGED
@@ -1,35 +1,36 @@
 *.7z filter=lfs diff=lfs merge=lfs -text
 *.arrow filter=lfs diff=lfs merge=lfs -text
 *.bin filter=lfs diff=lfs merge=lfs -text
 *.bz2 filter=lfs diff=lfs merge=lfs -text
 *.ckpt filter=lfs diff=lfs merge=lfs -text
 *.ftz filter=lfs diff=lfs merge=lfs -text
 *.gz filter=lfs diff=lfs merge=lfs -text
 *.h5 filter=lfs diff=lfs merge=lfs -text
+*.t7 filter=lfs diff=lfs merge=lfs -text
 *.joblib filter=lfs diff=lfs merge=lfs -text
 *.lfs.* filter=lfs diff=lfs merge=lfs -text
 *.mlmodel filter=lfs diff=lfs merge=lfs -text
 *.model filter=lfs diff=lfs merge=lfs -text
 *.msgpack filter=lfs diff=lfs merge=lfs -text
 *.npy filter=lfs diff=lfs merge=lfs -text
 *.npz filter=lfs diff=lfs merge=lfs -text
 *.onnx filter=lfs diff=lfs merge=lfs -text
 *.ot filter=lfs diff=lfs merge=lfs -text
 *.parquet filter=lfs diff=lfs merge=lfs -text
 *.pb filter=lfs diff=lfs merge=lfs -text
 *.pickle filter=lfs diff=lfs merge=lfs -text
 *.pkl filter=lfs diff=lfs merge=lfs -text
 *.pt filter=lfs diff=lfs merge=lfs -text
 *.pth filter=lfs diff=lfs merge=lfs -text
 *.rar filter=lfs diff=lfs merge=lfs -text
 *.safetensors filter=lfs diff=lfs merge=lfs -text
 saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.tar.* filter=lfs diff=lfs merge=lfs -text
 *.tar filter=lfs diff=lfs merge=lfs -text
 *.tflite filter=lfs diff=lfs merge=lfs -text
 *.tgz filter=lfs diff=lfs merge=lfs -text
 *.wasm filter=lfs diff=lfs merge=lfs -text
 *.xz filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
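The only change here is the new `*.t7` rule, which makes Git LFS track Torch checkpoint files; without it, `app/Hackathon_setup/siamese_model.t7` could not be pushed as an LFS object. This is also why the file list above shows `siamese_model.t7 +3 -0`: LFS commits a three-line pointer file instead of the binary, of the form (hash and size elided):

version https://git-lfs.github.com/spec/v1
oid sha256:<hash>
size <bytes>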
Dockerfile
ADDED
@@ -0,0 +1,29 @@
FROM python:3.11-bookworm

COPY requirements.txt requirements.txt

RUN apt-get update && apt-get install -y --no-install-recommends \
    bzip2 \
    g++ \
    git \
    graphviz \
    libgl1-mesa-glx \
    libhdf5-dev \
    openmpi-bin \
    wget \
    python3-tk && \
    rm -rf /var/lib/apt/lists/*

RUN pip install --upgrade pip

RUN pip install --no-cache-dir -r requirements.txt

RUN useradd -m -u 1000 myuser

USER myuser

COPY --chown=myuser app app

EXPOSE 8001

CMD ["python", "app/main.py"]
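The Dockerfile exposes port 8001 and launches `app/main.py`, which is listed in the file summary above but whose diff is not shown in this section. Given the `Application startup complete. Uvicorn running on http://0.0.0.0:8001` message quoted in the notebook below, the entry point presumably ends in something like this hypothetical sketch (the module path and app name are assumptions, not confirmed by this diff):

# Hypothetical tail of app/main.py, inferred from EXPOSE 8001 and the Uvicorn
# startup message quoted in the notebook; not part of the visible diff.
import uvicorn

if __name__ == "__main__":
    uvicorn.run("app.main:app", host="0.0.0.0", port=8001)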
README.md
CHANGED
@@ -1,11 +1,12 @@
----
-title:
-emoji:
-colorFrom:
-colorTo:
-sdk: docker
-pinned: false
-license: mit
-
-
-
+---
+title: FaceSimilarity
+emoji: 🏢
+colorFrom: green
+colorTo: indigo
+sdk: docker
+pinned: false
+license: mit
+short_description: Face Similarity application
+---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
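Note that step 8 of the notebook included below asks you to "include `app_port: 8001`" in this README, but the committed front matter does not contain it. If the Space is to route requests to the port the Dockerfile exposes, the YAML header would also need the line:

app_port: 8001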
app/Hackathon_setup/__init__.py
ADDED
File without changes
app/Hackathon_setup/exp_recognition.py
ADDED
@@ -0,0 +1,71 @@
import numpy as np
import cv2
from matplotlib import pyplot as plt
import torch
# In the below line, remove '.' while working on your local system. However, make sure that '.' is present before exp_recognition_model while uploading to the server; do not remove it there.
from .exp_recognition_model import *
from PIL import Image
import base64
import io
import os
## Add more imports if required

#############################################################################################################################
# Caution: Don't change any of the filenames, function names and definitions                                                #
# Always use current_path + file_name for referring to any files; without it we cannot access files on the server           #
#############################################################################################################################

# current_path stores the absolute path of the directory this file runs from.
current_path = os.path.dirname(os.path.abspath(__file__))


#1) The below function is used to detect faces in the given image.
#2) It returns only one image, the one with the maximum area out of all the detected faces in the photo.
#3) If no face is detected, then it returns zero (0).

def detected_face(image):
    eye_haar = current_path + '/haarcascade_eye.xml'
    face_haar = current_path + '/haarcascade_frontalface_default.xml'
    face_cascade = cv2.CascadeClassifier(face_haar)
    eye_cascade = cv2.CascadeClassifier(eye_haar)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    face_areas = []
    images = []
    required_image = 0
    for i, (x, y, w, h) in enumerate(faces):
        face_cropped = gray[y:y+h, x:x+w]
        face_areas.append(w*h)
        images.append(face_cropped)
        required_image = images[np.argmax(face_areas)]
        required_image = Image.fromarray(required_image)
    return required_image


#1) The image captured from mobile is passed as a parameter to the below function in the API call. It returns the expression detected by your network.
#2) The image is passed to the function in base64 encoding; code for decoding the image is provided within the function.
#3) Define an object for your network here in the function, load the weights from the trained network, and set it to evaluation mode.
#4) Perform the necessary transformations on the input (the detected face from the above function); this should return the expression as a string, e.g. "Anger".
#5) For loading your model use current_path + '<your model file name>'; a detailed example is given in the comments inside the function.
##Caution: Don't change the definition or function name; for loading the model use current_path for the path, as in the example in the comments below.
def get_expression(img):
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    ##########################################################################################
    ##Example for loading a model using a weight state dictionary:                          ##
    ## face_det_net = facExpRec() #Example Network                                          ##
    ## model = torch.load(current_path + '/exp_recognition_net.t7', map_location=device)    ##
    ## face_det_net.load_state_dict(model['net_dict'])                                      ##
    ##                                                                                      ##
    ##current_path + '/<network_definition>' is the path of the saved model if present in   ##
    ##the same path as this file; we recommend putting it in the same directory             ##
    ##########################################################################################
    ##########################################################################################

    face = detected_face(img)
    if face == 0:
        face = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY))

    # YOUR CODE HERE, return expression using your model

    return "YET TO BE CODED"
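`get_expression()` is still the template stub and returns "YET TO BE CODED". A minimal sketch of a completion, following the loading pattern described in the function's own comment block (the `facExpRec` class and the `exp_recognition_net.t7` filename are the template's placeholders; no such checkpoint is present in this commit):

# Sketch only: assumes a trained facExpRec network saved as exp_recognition_net.t7
# in the {'net_dict': state_dict} format shown in the comment block above.
face_det_net = facExpRec()
model = torch.load(current_path + '/exp_recognition_net.t7', map_location=device)
face_det_net.load_state_dict(model['net_dict'])
face_det_net.eval()
with torch.no_grad():
    logits = face_det_net(trnscm(face).unsqueeze(0))
return classes[int(torch.argmax(logits, dim=1).item())]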
app/Hackathon_setup/exp_recognition_model.py
ADDED
@@ -0,0 +1,31 @@
import torch
import torchvision
import torch.nn as nn
from torchvision import transforms
## Add more imports if required

####################################################################################################################
# Define your model and transform and all necessary helper functions here                                          #
# They will be imported to the exp_recognition.py file                                                             #
####################################################################################################################

# Definition of classes as dictionary
classes = {0: 'ANGER', 1: 'DISGUST', 2: 'FEAR', 3: 'HAPPINESS', 4: 'NEUTRAL', 5: 'SADNESS', 6: 'SURPRISE'}

# Example Network
class facExpRec(torch.nn.Module):
    def __init__(self):
        pass # remove 'pass' once you have written your code
        #YOUR CODE HERE

    def forward(self, x):
        pass # remove 'pass' once you have written your code
        #YOUR CODE HERE

# Sample Helper function
def rgb2gray(image):
    return image.convert('L')

# Sample Transformation function
#YOUR CODE HERE for changing the Transformation values.
trnscm = transforms.Compose([rgb2gray, transforms.Resize((48,48)), transforms.ToTensor()])
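As defined, `trnscm` converts any PIL image to single-channel luminance, resizes it to 48×48, and converts it to a tensor, so the expression network receives a 1×48×48 float tensor. A quick check (the dummy image size below is just the AT&T sample size, chosen for illustration):

# Verifying the transform's output shape with a dummy image.
from PIL import Image
dummy = Image.new('RGB', (92, 112))
x = trnscm(dummy)
print(x.shape)  # torch.Size([1, 48, 48])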
app/Hackathon_setup/face_recognition.py
ADDED
@@ -0,0 +1,170 @@
import numpy as np
import cv2
from matplotlib import pyplot as plt
import torch
# In the below line, remove '.' while working on your local system. However, make sure that '.' is present before face_recognition_model while uploading to the server; do not remove it there.
from .face_recognition_model import *
from PIL import Image
import base64
import io
import os
import joblib
import pickle
# Add more imports if required



###########################################################################################################################################
# Caution: Don't change any of the filenames, function names and definitions                                                              #
# Always use current_path + file_name for referring to any files; without it we cannot access files on the server                         #
###########################################################################################################################################

# current_path stores the absolute path of the directory this file runs from.
current_path = os.path.dirname(os.path.abspath(__file__))
# --- GLOBAL SETUP: Must match your training transforms ---
# Define the transformation pipeline for inference
trnscm = transforms.Compose([
    transforms.Grayscale(num_output_channels=1),
    transforms.Resize((100, 100)),
    transforms.ToTensor()
])
CLASS_NAMES = ['Person0', 'Person1', 'Person2', 'Person3', 'Person4']  # ADJUST THIS!

# --- Model Filenames ---
SIAMESE_MODEL_PATH = current_path + '/siamese_model.t7'
KNN_CLASSIFIER_PATH = current_path + '/decision_tree_model.sav'
SCALER_PATH = current_path + '/face_recognition_scaler.sav'

#1) The below function is used to detect faces in the given image.
#2) It returns only one image, the one with the maximum area out of all the detected faces in the photo.
#3) If no face is detected, then it returns zero (0).

def detected_face(image):
    eye_haar = current_path + '/haarcascade_eye.xml'
    face_haar = current_path + '/haarcascade_frontalface_default.xml'
    face_cascade = cv2.CascadeClassifier(face_haar)
    eye_cascade = cv2.CascadeClassifier(eye_haar)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    face_areas = []
    images = []
    required_image = 0
    for i, (x, y, w, h) in enumerate(faces):
        face_cropped = gray[y:y+h, x:x+w]
        face_areas.append(w*h)
        images.append(face_cropped)
        required_image = images[np.argmax(face_areas)]
        required_image = Image.fromarray(required_image)
    return required_image


#1) The images captured from mobile are passed as parameters to the below function in the API call. It returns the similarity measure between the given images.
#2) The images are passed to the function in base64 encoding; code for decoding the images is provided within the function.
#3) Define an object for your siamese network here in the function, load the weights from the trained network, and set it to evaluation mode.
#4) Get the features for both faces from the network and return the similarity measure; Euclidean, cosine, etc. can be used, but choose the relevant measure.
#5) For loading your model use current_path + '<your model file name>'; a detailed example is given in the comments inside the function.
#Caution: Don't change the definition or function name; for loading the model use current_path for the path, as in the example in the comments below.
def get_similarity(img1, img2):
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    det_img1 = detected_face(img1)
    det_img2 = detected_face(img2)
    if(det_img1 == 0 or det_img2 == 0):
        det_img1 = Image.fromarray(cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY))
        det_img2 = Image.fromarray(cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY))
    face1 = trnscm(det_img1).unsqueeze(0)
    face2 = trnscm(det_img2).unsqueeze(0)
    ##########################################################################################
    ##Example for loading a model using a weight state dictionary:                          ##
    ## feature_net = light_cnn() #Example Network                                           ##
    ## model = torch.load(current_path + '/siamese_model.t7', map_location=device)          ##
    ## feature_net.load_state_dict(model['net_dict'])                                       ##
    ##                                                                                      ##
    ##current_path + '/<network_definition>' is the path of the saved model if present in   ##
    ##the same path as this file; we recommend putting it in the same directory             ##
    ##########################################################################################
    ##########################################################################################

    # YOUR CODE HERE, load the model

    # YOUR CODE HERE, return similarity measure using your model
    # 1. Initialize and Load Siamese Network
    try:
        # Assuming your Siamese Network class is named 'SiameseNetwork'
        siamese_net = SiameseNetwork().to(device)
        siamese_net.load_state_dict(torch.load(SIAMESE_MODEL_PATH, map_location=device))
        siamese_net.eval()
    except Exception as e:
        print(f"Error loading Siamese Model get_similarity: {e}")
        return -1  # Return error code

    # 2. Get Features (Embeddings)
    with torch.no_grad():
        # Get the feature vector from one tower/forward_once method
        # Ensure your SiameseNetwork class has a forward_once or get_embedding method
        embed1 = siamese_net.forward_once(face1).cpu().numpy()
        embed2 = siamese_net.forward_once(face2).cpu().numpy()

    # 3. Calculate Similarity Measure
    # The Euclidean distance is the fundamental metric used by the Triplet/Contrastive loss.
    # We return the NEGATIVE Euclidean distance or COSINE similarity, as a *higher* value usually means *more* similar.

    # Option A: Euclidean Distance (Lower is better) -> return NEGATIVE distance for API expectation
    # distance = euclidean_distances(embed1, embed2)[0][0]
    # similarity = -distance

    # Option B: Cosine Similarity (Higher is better) -> Recommended
    similarity = cosine_similarity(embed1, embed2)[0][0]

    return float(similarity)

#1) The image captured from mobile is passed as a parameter to this function in the API call. It returns the face class as a string, e.g. "Person1".
#2) The image is passed to the function in base64 encoding; code to decode the image is provided within the function.
#3) Define an object for your network here in the function, load the weights from the trained network, and set it to evaluation mode.
#4) Perform the necessary transformations on the input (the detected face from the above function).
#5) Along with the siamese network, you need the classifier as well, which is to be fine-tuned with the faces that you are training on.
##Caution: Don't change the definition or function name; for loading the model use current_path for the path, as in the example in the comments above.
def get_face_class(img1):
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    det_img1 = detected_face(img1)
    if(det_img1 == 0):
        det_img1 = Image.fromarray(cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY))
    ##YOUR CODE HERE, return face class here
    ##Hint: you need a classifier fine-tuned for your classes; it takes the o/p of the siamese network as its i/p
    ##Better Hint: the Siamese experiment is covered in one of the labs
    face1_tensor = trnscm(det_img1).unsqueeze(0).to(device)

    # 1. Load Siamese Network (Feature Extractor)
    try:
        siamese_net = SiameseNetwork().to(device)
        siamese_net.load_state_dict(torch.load(SIAMESE_MODEL_PATH, map_location=device))
        siamese_net.eval()
    except Exception as e:
        return f"Error loading Siamese Model get_face_class: {e}"

    # 2. Extract Embedding
    with torch.no_grad():
        embedding_np = siamese_net.forward_once(face1_tensor).cpu().numpy()

    # 3. Load Sklearn Scaler and Classifier (Joblib)
    try:
        knn_classifier = joblib.load(KNN_CLASSIFIER_PATH)
        scaler = joblib.load(SCALER_PATH)
    except Exception as e:
        return f"Error loading Sklearn models: {e}"

    # 4. Preprocess Embedding and Predict
    # The embedding must be reshaped to (1, N_features) for the scaler
    embedding_scaled = scaler.transform(embedding_np.reshape(1, -1))

    # Perform prediction (returns a NumPy array with the predicted label index)
    predicted_label_index = knn_classifier.predict(embedding_scaled)[0]

    # 5. Map index to Class Name
    if predicted_label_index < len(CLASS_NAMES):
        predicted_class_name = CLASS_NAMES[predicted_label_index]
    else:
        predicted_class_name = "UNKNOWN_CLASS"

    return predicted_class_name
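Two details are worth flagging in this file as committed, each with a hedged patch. First, `get_similarity()` calls `cosine_similarity` without importing it anywhere visible; the sklearn implementation appears to be intended. Second, the notebook below saves the checkpoint wrapped as `{'net_dict': net.state_dict()}`, so passing the raw `torch.load(...)` result to `load_state_dict()` would likely fail on the wrapper key:

# Likely-missing import (assuming the sklearn implementation is intended):
from sklearn.metrics.pairwise import cosine_similarity

# Unwrapping the checkpoint the notebook saves as {'net_dict': state_dict}:
ckpt = torch.load(SIAMESE_MODEL_PATH, map_location=device)
siamese_net.load_state_dict(ckpt['net_dict'])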
app/Hackathon_setup/face_recognition_model.py
ADDED
@@ -0,0 +1,129 @@
import math
import torch
import torchvision
import torch.nn as nn
import torch.nn.functional as F
from torchvision import transforms
# Add more imports if required

# Sample Transformation function
# YOUR CODE HERE for changing the Transformation values.
trnscm = transforms.Compose([transforms.Resize((100,100)), transforms.ToTensor()])

##Example Network
class Siamese(torch.nn.Module):
    def __init__(self):
        super(Siamese, self).__init__()
        #YOUR CODE HERE

    def forward(self, x):
        pass # remove 'pass' once you have written your code
        #YOUR CODE HERE

##########################################################################################################
## Sample classification network (Specify if you are using a pytorch classifier during the training)    ##
## classifier = nn.Sequential(nn.Linear(64, 64), nn.BatchNorm1d(64), nn.ReLU(), nn.Linear...)           ##
##########################################################################################################

# YOUR CODE HERE for pytorch classifier

# Definition of classes as a list
classes = ['person1','person2','person3','person4','person5','person6','person7']
def get_similarity(img1, img2):
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    det_img1 = detected_face(img1)
    det_img2 = detected_face(img2)
    if(det_img1 == 0 or det_img2 == 0):
        det_img1 = Image.fromarray(cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY))
        det_img2 = Image.fromarray(cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY))
    face1 = trnscm(det_img1).unsqueeze(0)
    face2 = trnscm(det_img2).unsqueeze(0)
    ##########################################################################################
    ##Example for loading a model using a weight state dictionary:                          ##
    ## feature_net = light_cnn() #Example Network                                           ##
    ## model = torch.load(current_path + '/siamese_model.t7', map_location=device)          ##
    ## feature_net.load_state_dict(model['net_dict'])                                       ##
    ##                                                                                      ##
    ##current_path + '/<network_definition>' is the path of the saved model if present in   ##
    ##the same path as this file; we recommend putting it in the same directory             ##
    ##########################################################################################
    ##########################################################################################

    # YOUR CODE HERE, load the model

    # YOUR CODE HERE, return similarity measure using your model
    # 1. Initialize and Load Siamese Network
    try:
        # Assuming your Siamese Network class is named 'SiameseNetwork'
        siamese_net = SiameseNetwork().to(device)
        siamese_net.load_state_dict(torch.load(SIAMESE_MODEL_PATH, map_location=device))
        siamese_net.eval()
    except Exception as e:
        print(f"Error loading Siamese Model: {e}")
        return -1  # Return error code

    # 2. Get Features (Embeddings)
    with torch.no_grad():
        # Get the feature vector from one tower/forward_once method
        # Ensure your SiameseNetwork class has a forward_once or get_embedding method
        embed1 = siamese_net.forward_once(face1).cpu().numpy()
        embed2 = siamese_net.forward_once(face2).cpu().numpy()

    # 3. Calculate Similarity Measure
    # The Euclidean distance is the fundamental metric used by the Triplet/Contrastive loss.
    # We return the NEGATIVE Euclidean distance or COSINE similarity, as a *higher* value usually means *more* similar.

    # Option A: Euclidean Distance (Lower is better) -> return NEGATIVE distance for API expectation
    # distance = euclidean_distances(embed1, embed2)[0][0]
    # similarity = -distance

    # Option B: Cosine Similarity (Higher is better) -> Recommended
    similarity = cosine_similarity(embed1, embed2)[0][0]

    return float(similarity)

def get_face_class(img1):
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    det_img1 = detected_face(img1)
    if(det_img1 == 0):
        det_img1 = Image.fromarray(cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY))
    ##YOUR CODE HERE, return face class here
    ##Hint: you need a classifier fine-tuned for your classes; it takes the o/p of the siamese network as its i/p
    ##Better Hint: the Siamese experiment is covered in one of the labs
    face1_tensor = trnscm(det_img1).unsqueeze(0).to(device)

    # 1. Load Siamese Network (Feature Extractor)
    try:
        siamese_net = SiameseNetwork().to(device)
        siamese_net.load_state_dict(torch.load(SIAMESE_MODEL_PATH, map_location=device))
        siamese_net.eval()
    except Exception as e:
        return f"Error loading Siamese Model get_face_class: {e}"

    # 2. Extract Embedding
    with torch.no_grad():
        embedding_np = siamese_net.forward_once(face1_tensor).cpu().numpy()

    # 3. Load Sklearn Scaler and Classifier (Joblib)
    try:
        knn_classifier = joblib.load(KNN_CLASSIFIER_PATH)
        scaler = joblib.load(SCALER_PATH)
    except Exception as e:
        return f"Error loading Sklearn models: {e}"

    # 4. Preprocess Embedding and Predict
    # The embedding must be reshaped to (1, N_features) for the scaler
    embedding_scaled = scaler.transform(embedding_np.reshape(1, -1))

    # Perform prediction (returns a NumPy array with the predicted label index)
    predicted_label_index = knn_classifier.predict(embedding_scaled)[0]

    # 5. Map index to Class Name
    if predicted_label_index < len(CLASS_NAMES):
        predicted_class_name = CLASS_NAMES[predicted_label_index]
    else:
        predicted_class_name = "UNKNOWN_CLASS"

    return predicted_class_name
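Both this file and `face_recognition.py` instantiate `SiameseNetwork()`, but the only class defined here is the empty `Siamese` placeholder; the matching `SiameseNetwork` definition (the architecture that produced `siamese_model.t7`) appears only in the notebook diff further down. For the Space to start, that class definition presumably needs to live in this module, since `face_recognition.py` imports everything from it via `from .face_recognition_model import *`. The duplicated `get_similarity()`/`get_face_class()` bodies here also reference `detected_face`, `Image`, `cv2`, `joblib`, `SIAMESE_MODEL_PATH`, and `CLASS_NAMES`, none of which are defined or imported in this module as committed.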
app/Hackathon_setup/haarcascade_eye.xml
ADDED
The diff for this file is too large to render. See raw diff.
app/Hackathon_setup/haarcascade_frontalface_default.xml
ADDED
The diff for this file is too large to render. See raw diff.
app/Hackathon_setup/hackathon4a_face_similarity_and_recognition (1).py
ADDED
@@ -0,0 +1,818 @@
# -*- coding: utf-8 -*-
"""Hackathon4a_Face_Similarity_and_Recognition.ipynb

Automatically generated by Colab.

Original file is located at
    https://colab.research.google.com/drive/1-Gc2gRNagx-DUpMULf7O-GMFdM8526qW

# Advanced Certification in AIML
## A Program by IIIT-H and TalentSprint

Face recognition is a method of identifying or verifying the identity of an individual using their face. Face recognition systems can be used to identify people in photos, video, or in real time. The recent major breakthrough in face recognition accuracy has come from advancements in Deep Learning and CNNs.

**Objectives:**

**Stage 1:** Build a Siamese Network and obtain the Siamese Representation for the AT&T faces dataset

**Stage 2 (10 Marks):** Use the same Siamese Model to deploy the application for Face Similarity on HuggingFace Spaces

**Stage 3 (10 Marks):** Get the Siamese Network Representation of the Team Data and build a classifier to perform Face Recognition

##**Stage 1: Build a Siamese Network**

---

* Define a Siamese network and obtain the Siamese Representation on the AT&T Faces Dataset (code given)

**Dataset download**

The [AT&T Faces Dataset](https://git-disl.github.io/GTDLBench/figures/faces.gif) contains 10 different images of each of 40 distinct persons. Images were taken with variations in time, lighting, facial expressions and facial details (e.g. glasses / no glasses).

Dataset Statistics: Color: Grayscale; Sample Size: 92x112; Total Samples: 400; Dataset Size: 4.5 MB (compressed in .tar.z)
"""

#@title #####Download the AT&T Dataset.

import os
import urllib.request
import zipfile

def setup():
    # Download AT&T dataset
    if not os.path.exists("data-20190607T005435Z-001.zip"):
        print("Downloading AT&T dataset...")
        urllib.request.urlretrieve("https://cdn.talentsprint.com/aiml/FaceRecogHackathon/Datasets/ATandT/data-20190607T005435Z-001.zip", "data-20190607T005435Z-001.zip")

    # Extract dataset
    if not os.path.exists("data"):
        print("Extracting dataset...")
        with zipfile.ZipFile("data-20190607T005435Z-001.zip", 'r') as zip_ref:
            zip_ref.extractall(".")

    # Download face cascade
    if not os.path.exists("lbpcascade_frontalface.xml"):
        print("Downloading face cascade...")
        urllib.request.urlretrieve("https://cdn.iisc.talentsprint.com/AIandMLOps/Datasets/lbpcascade_frontalface.xml", "lbpcascade_frontalface.xml")

    print("Setup completed successfully")
    return

setup()

import os
print("Current directory contents:")
for item in os.listdir('.'):
    print(item)

"""#### **Import Required Packages**

"""

# Commented out IPython magic to ensure Python compatibility.
# %matplotlib inline
import torchvision
import torchvision.datasets as dset
import torchvision.transforms as transforms
from torch.utils.data import DataLoader, Dataset
import matplotlib.pyplot as plt
import torchvision.utils
import numpy as np
import random
import cv2
from PIL import Image  # PIL (Pillow) is the Python Image Library. Used to cut and resize images, or do simple manipulation.
import torch
from torch.autograd import Variable
import PIL.ImageOps
import torch.nn as nn
from torch import optim
import torch.nn.functional as F

"""**Helper functions**"""

## The below function plots a given tensor image
def imshow(img, text=None, should_save=False):
    npimg = img.numpy()
    plt.axis("off")
    if text:
        plt.text(75, 8, text, style='italic', fontweight='bold', bbox={'facecolor':'white', 'alpha':0.8, 'pad':10})

    # In PyTorch, the order of dimensions is "channel*width*height" but in matplotlib it is "width*height*channel".
    # So a transpose is performed to correctly index the dimensions
    plt.imshow(np.transpose(npimg, (1, 2, 0)))
    plt.show()

## We will use the below method to plot the loss graph while training
def show_plot(iteration, loss):
    plt.plot(iteration, loss)
    plt.show()

"""**Configuration class**"""

# A simple class to manage all configurations
class Config():
    training_dir = "./data/faces/training/"
    testing_dir = "./data/faces/testing/"
    train_batch_size = 64
    train_number_epochs = 100

"""**Display a sample image**"""

import cv2
import matplotlib.pyplot as plt

# OpenCV's imread takes the full path of an image as input to read an image
im1 = cv2.imread('./data/faces/training/s1/10.pgm')
print(im1.shape)
plt.imshow(cv2.cvtColor(im1, cv2.COLOR_BGR2RGB))
plt.axis('off')
plt.show()

"""#### **Define a custom SiameseNetworkDataset class**

This dataset generates a pair of images: 0 for a genuine pair (similar) and 1 for an imposter pair (dissimilar)
"""

# Below is the custom dataset class which arranges images in pairs and gives the labels: if both are of the same person, the label will be 0, otherwise 1.
# As our images have the .pgm extension (portable gray map), we convert them to 'L' to store luminance, which is a single-channel image.

class SiameseNetworkDataset(Dataset):
    def __init__(self, imageFolderDataset, transform=None):
        self.imageFolderDataset = imageFolderDataset
        self.transform = transform

    # Overriding the data retriever (__getitem__) to provide a pair of images + similar/dissimilar label
    def __getitem__(self, index):
        img0_tuple = random.choice(self.imageFolderDataset.imgs)
        # we need to make sure approx 50% of images are in the same class
        should_get_same_class = random.randint(0, 1)
        if should_get_same_class:
            while True:
                # keep looping till the same class image is found
                img1_tuple = random.choice(self.imageFolderDataset.imgs)
                if img0_tuple[1] == img1_tuple[1]:
                    break
        else:
            while True:
                # keep looping till a different class image is found
                img1_tuple = random.choice(self.imageFolderDataset.imgs)
                if img0_tuple[1] != img1_tuple[1]:
                    break

        img0 = Image.open(img0_tuple[0])
        img1 = Image.open(img1_tuple[0])
        img0 = img0.convert("L")
        img1 = img1.convert("L")

        if self.transform is not None:
            img0 = self.transform(img0)
            img1 = self.transform(img1)

        return img0, img1, torch.from_numpy(np.array([int(img1_tuple[1] != img0_tuple[1])], dtype=np.float32))

    def __len__(self):
        return len(self.imageFolderDataset.imgs)

"""**Create an Image Folder Dataset**"""

folder_dataset = dset.ImageFolder(root=Config.training_dir)

"""**Create the Siamese Network Dataset**"""

# Create the object for the SiameseNetworkDataset class (defined earlier in this notebook);

# NOTE: the 'TRANSFORMS' HERE CONSIST OF
# a) resizing to 100*100
# b) converting to tensor.

# YOU HAVE TO APPLY THE SAME TRANSFORMS WHEN DEPLOYING THE MODEL ON THE SERVER!!

siamese_dataset = SiameseNetworkDataset(imageFolderDataset = folder_dataset, transform = transforms.Compose([transforms.Resize((100,100)), transforms.ToTensor()]))

"""**Exclude malformed images to avoid image loading errors** (Use Nonechucks)"""

# In cases where your dataloader gives an 'image not found' error, i.e.
# if an image is malformed, nonechucks helps in eliminating those images so that your dataloader doesn't fail.
# So it is good practice to use the code below

#!pip install Nonechucks
#import nonechucks as nc

## The below line will avoid loading the image files that are corrupted or unreadable.
#siamese_dataset = nc.SafeDataset(siamese_dataset)

"""**Visualising some of the data**
The top row and the bottom row of any column is one pair. The 0s and 1s correspond to the column of the image.
1 indicates dissimilar, and 0 indicates similar.
"""

vis_dataloader = DataLoader(siamese_dataset, shuffle=True, num_workers=8, batch_size=8)

dataiter = iter(vis_dataloader)

example_batch = next(dataiter)
concatenated = torch.cat((example_batch[0], example_batch[1]), 0)
imshow(torchvision.utils.make_grid(concatenated))
print(example_batch[2].numpy())

"""#### **Neural Net Definition**

We will use a standard convolutional neural network
"""

class SiameseNetwork(nn.Module):
    def __init__(self):
        super(SiameseNetwork, self).__init__()
        self.cnn1 = nn.Sequential(
            nn.ReflectionPad2d(1),  # Pads the input tensor using the reflection of the input boundary; similar to ordinary padding.
            nn.Conv2d(1, 4, kernel_size=3),
            nn.ReLU(inplace=True),
            nn.BatchNorm2d(4),

            nn.ReflectionPad2d(1),
            nn.Conv2d(4, 8, kernel_size=3),
            nn.ReLU(inplace=True),
            nn.BatchNorm2d(8),

            nn.ReflectionPad2d(1),
            nn.Conv2d(8, 8, kernel_size=3),
            nn.ReLU(inplace=True),
            nn.BatchNorm2d(8),
        )

        self.fc1 = nn.Sequential(
            nn.Linear(8*100*100, 500),
            nn.ReLU(inplace=True),

            nn.Linear(500, 500),
            nn.ReLU(inplace=True),

            nn.Linear(500, 5))

    # forward_once is for one image. This can be used while classifying the face images
    def forward_once(self, x):
        output = self.cnn1(x)
        output = output.view(output.size()[0], -1)
        output = self.fc1(output)
        return output

    def forward(self, input1, input2):
        output1 = self.forward_once(input1)
        output2 = self.forward_once(input2)
        return output1, output2

"""**Contrastive Loss**"""

class ContrastiveLoss(torch.nn.Module):
    """
    Contrastive loss function.
    Based on: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
    """

    def __init__(self, margin=2.0):
        super(ContrastiveLoss, self).__init__()
        self.margin = margin

    def forward(self, output1, output2, label):
        euclidean_distance = F.pairwise_distance(output1, output2, keepdim=True)
        loss_contrastive = torch.mean((1-label) * torch.pow(euclidean_distance, 2) +
                                      (label) * torch.pow(torch.clamp(self.margin - euclidean_distance, min=0.0), 2))

        return loss_contrastive

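# The loss implemented above is the contrastive loss of Hadsell, Chopra & LeCun
# (2006), cited in the class docstring:
#     L(d, y) = mean( (1 - y) * d^2 + y * max(0, margin - d)^2 )
# where d = ||f(x1) - f(x2)||_2 is the pairwise Euclidean distance, y = 0 marks
# a genuine pair and y = 1 an imposter pair (the labels produced by
# SiameseNetworkDataset), and margin defaults to 2.0.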
| 287 |
+
"""**Training the model**"""
|
| 288 |
+
|
| 289 |
+
train_dataloader = DataLoader(siamese_dataset, shuffle=True, num_workers=8, batch_size=Config.train_batch_size)
|
| 290 |
+
|
| 291 |
+
# Check if CUDA is available
|
| 292 |
+
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
|
| 293 |
+
print(f"Using device: {device}")
|
| 294 |
+
|
| 295 |
+
net = SiameseNetwork().to(device)
|
| 296 |
+
criterion = ContrastiveLoss()
|
| 297 |
+
optimizer = optim.Adam(net.parameters(), lr=0.0005)
|
| 298 |
+
|
| 299 |
+
counter = []
|
| 300 |
+
loss_history = []
|
| 301 |
+
iteration_number= 0
|
| 302 |
+
|
| 303 |
+
# Training loop
|
| 304 |
+
print("Starting training...")
|
| 305 |
+
for epoch in range(0, Config.train_number_epochs):
|
| 306 |
+
epoch_loss = 0
|
| 307 |
+
for i, data in enumerate(train_dataloader, 0):
|
| 308 |
+
img0, img1, label = data
|
| 309 |
+
img0, img1, label = img0.to(device), img1.to(device), label.to(device)
|
| 310 |
+
|
| 311 |
+
optimizer.zero_grad()
|
| 312 |
+
output1, output2 = net(img0, img1)
|
| 313 |
+
loss_contrastive = criterion(output1, output2, label)
|
| 314 |
+
loss_contrastive.backward()
|
| 315 |
+
optimizer.step()
|
| 316 |
+
|
| 317 |
+
epoch_loss += loss_contrastive.item()
|
| 318 |
+
|
| 319 |
+
if i % 10 == 0:
|
| 320 |
+
print("Epoch number {}\n Current loss {}\n".format(epoch, loss_contrastive.item()))
|
| 321 |
+
iteration_number += 10
|
| 322 |
+
counter.append(iteration_number)
|
| 323 |
+
loss_history.append(loss_contrastive.item())
|
| 324 |
+
|
| 325 |
+
avg_loss = epoch_loss / len(train_dataloader)
|
| 326 |
+
print(f"Epoch {epoch} completed. Average loss: {avg_loss:.4f}")
|
| 327 |
+
|
| 328 |
+
print("Training completed!")
|
| 329 |
+
|
| 330 |
+
# Plotting the loss graph using the function show_plot
|
| 331 |
+
show_plot(counter,loss_history)
|
| 332 |
+
|
| 333 |
+
"""**Saving and loading model**
|
| 334 |
+
|
| 335 |
+
https://pytorch.org/tutorials/beginner/saving_loading_models.html
|
| 336 |
+
"""
|
| 337 |
+
|
| 338 |
+
## Saving the model as a state dictionary
|
| 339 |
+
state = {'net_dict': net.state_dict()}
|
| 340 |
+
torch.save(state, './siamese_model.t7')
|
| 341 |
+
|
| 342 |
+
"""**Verifying the saved model with load_state_dict**"""
|
| 343 |
+
|
| 344 |
+
myModel = SiameseNetwork().to(device)
|
| 345 |
+
|
| 346 |
+
# Commented out IPython magic to ensure Python compatibility.
|
| 347 |
+
# %ls
|
| 348 |
+
|
| 349 |
+
ckpt = torch.load('./siamese_model.t7')
|
| 350 |
+
|
| 351 |
+
# Save the state dictionary of the Siamese network (use pytorch only), It will be useful in integrating to the mobile app
|
| 352 |
+
# A state_dict is simply a Python dictionary object that maps each layer of the network to its parameters (weights)
|
| 353 |
+
# As a Python dictionary it can be easily saved, updated, altered and restored, adding a great deal of modularity to PyTorch models
|
| 354 |
+
|
| 355 |
+
myModel.load_state_dict(ckpt['net_dict'])
|
| 356 |
+
|
| 357 |
+
"""##### Create the siamese dataset for the `testing_dir` and also the respective data loader. The Distance between each image pair denotes the degree of similarity the model found between the two images. Less means it found the images more similar, while higher values indicate it found them to be dissimilar."""
|
| 358 |
+
|
| 359 |
+
folder_dataset_test = dset.ImageFolder(root = Config.testing_dir) #testing_dir
|
| 360 |
+
siamese_dataset = SiameseNetworkDataset(imageFolderDataset = folder_dataset_test,
|
| 361 |
+
transform = transforms.Compose([transforms.Resize((100,100)), transforms.ToTensor()]))
|
| 362 |
+
|
| 363 |
+
##siamese_dataset = nc.SafeDataset(siamese_dataset)
|
| 364 |
+
|
| 365 |
+
test_dataloader = DataLoader(siamese_dataset,num_workers=6,batch_size=1,shuffle=True)
|
| 366 |
+
dataiter = iter(test_dataloader)
|
| 367 |
+
x0,_,_ = next(dataiter)
|
| 368 |
+
|
| 369 |
+
# Check the similarity for the first few images from the test data loader
|
| 370 |
+
for i in range(10):
|
| 371 |
+
_,x1,label2 = next(dataiter)
|
| 372 |
+
concatenated = torch.cat((x0,x1),0)
|
| 373 |
+
|
| 374 |
+
output1,output2 = myModel(Variable(x0).to(device),Variable(x1).to(device)) #using the loaded 'myModel'; even 'net' can be used
|
| 375 |
+
#..but using 'myModel' helps a quick test on if the save and load model is working fine. You will replicate the same
|
| 376 |
+
#..in the server.
|
| 377 |
+
|
| 378 |
+
euclidean_distance = F.pairwise_distance(output1, output2)
|
| 379 |
+
imshow(torchvision.utils.make_grid(concatenated),'Dissimilarity: {:.2f}'.format(euclidean_distance.item()))
|
| 380 |
+
|
| 381 |
+
"""##**Stage 2: Deploy your Face Similarity application on HuggingFace Spaces (10 Marks)**
|
| 382 |
+
|
| 383 |
+
|
| 384 |
+
---
|
| 385 |
+
|
| 386 |
+
1. [Sign up](https://huggingface.co/join) for a Hugging Face account
|
| 387 |
+
|
| 388 |
+
2. Once you have an account, create an access token for your account and save it:
|
| 389 |
+
|
| 390 |
+
- Go to your `Settings`, then click on the `Access Tokens` tab. Click on the `New token` button to create a new User Access Token.
|
| 391 |
+
- Select a role(`write`) and a name for your token
|
| 392 |
+
- Click Generate a token
|
| 393 |
+
|
| 394 |
+
To know more about Access Tokens, refer [here](https://huggingface.co/docs/hub/security-tokens).
|
| 395 |
+
|
| 396 |
+
3. Start a new Hugging Face Space by going to your profile and [clicking "New Space"](https://huggingface.co/new-space)
|
| 397 |
+
|
| 398 |
+
4. Provide details for your space:
|
| 399 |
+
- Space name
|
| 400 |
+
- License (eg. [MIT](https://opensource.org/licenses/MIT))
|
| 401 |
+
- For Space SDK (software development kit), select `Docker` (**It is mandatory to select *Docker* for this application to work.**)
|
| 402 |
+
- Choose a Docker template: `Blank`
|
| 403 |
+
- Space hardware (CPU basic)
|
| 404 |
+
- Choose whether your Space is public or private
|
| 405 |
+
- Click "Create Space"
|
| 406 |
+
- After Space creation, the files of your repo can be seen via `Files` tab
|
| 407 |
+
|
| 408 |
+
5. Clone your space repository on to your system
|
| 409 |
+
- Go to 3-dots icon beside Settings, then select `Clone Repository` option
|
| 410 |
+
- `git lfs install`
|
| 411 |
+
- `git clone <your-repo>`
|
| 412 |
+
- Run clone commands in your terminal (Provide your HuggingFace username and access token if prompted for authentication.)
|
| 413 |
+
|
| 414 |
+
6. Download the project folder **`recognition_api`**, shared along with this notebook, on to your system
|
| 415 |
+
|
| 416 |
+
7. Add below files from **`recognition_api`** folder to your cloned repository folder
|
| 417 |
+
- `app`, `requirements.txt`, `Dockerfile`, and `.gitattributes`
|
| 418 |
+
- You may need to replace the `.gitattributes` file if it is already present in repository
|
| 419 |
+
- **NOTE:** Do NOT commit and push changes yet.
|
| 420 |
+
|
| 421 |
+
8. Update files present in your cloned repository
|
| 422 |
+
- Download the Siamese model(trained in colab for face similarity) and place it within **`app/Hackathon_setup/`** folder
|
| 423 |
+
- Update the Siamese model architecture in the **`app/Hackathon_setup/face_recognition_model.py`** file and provide the code in the **`get_similarity()`** function of the **`app/Hackathon_setup/face_recognition.py`** file. (See Deployment related files)
|
| 424 |
+
- Update the **`README.md`** file and include `app_port: 8001`
|
| 425 |
+
|
| 426 |
+
9. Commit your changes and push to HuggingFace Space repository
|
| 427 |
+
|
| 428 |
+
10. Access the `App` tab of your repository to see the build progress (debug if error persists) [This step might take sometime.]
|
| 429 |
+
|
| 430 |
+
11. Once the app has built successfully, you should see below message
|
| 431 |
+
|
| 432 |
+
`Application startup complete. Uvicorn running on http://0.0.0.0:8001`
|
| 433 |
+
|
| 434 |
+
12. Test the Siamese model using the application running on your Space
|
| 435 |
+
|
| 436 |
+
Go to 3-dots icon beside Settings, then select `Embed this Space` option, and go to `Direct URL`
|
| 437 |
+
- Select the task, `Face Similarity` for now
|
| 438 |
+
- Select 'Send Anyway' when prompted
|
| 439 |
+
- Upload images and test
|
| 440 |
+
- **NOTE:** When using the Direct URL link via mobile, the camera option will also enable to capture images (Set 1:1 aspect ratio in camera settings before-hand)
|
| 441 |
+
|
| 442 |
+
## **Stage 3: Build Face Recognition Classification Model with Siamese Representation of Team Data and Deploy on HuggingFace Spaces (10 Marks)**

---

- Build a Face Recognition Classification Model on Team data
- Collect Team data by running the code cells provided below
- The collected images will be stored in the `captured_face_images` directory

- NOTE: *Since team members will be using separate Colab notebooks, each member can capture their own face images, then download and share them with the other members for model training.* A code cell is provided below to download the data.

- Train a classifier (any classifier may be used) on the features extracted from the above-trained Siamese network for your Team data
- Save the classification model with joblib (if an sklearn classification model is used)

- Deploy on your HuggingFace Space

- Download the trained models (Siamese model, classification model) and upload them to your HuggingFace Space repository within the **`app/Hackathon_setup/`** folder

- Update the Siamese model architecture in the **`app/Hackathon_setup/face_recognition_model.py`** file (if required)

- Update the code in the **`get_face_class()`** function of the **`app/Hackathon_setup/face_recognition.py`** file (see the deployment-related files, and the sketch at the end of this section)

- Access the `App` tab of your repository to see the build progress (debug if errors persist). [This step might take some time.]

- Once the app has built successfully, you should see the below message

`Application startup complete. Uvicorn running on http://0.0.0.0:8001`

- Test the model's Face Recognition functionality using the application running on your Space

Go to the 3-dots icon beside `Settings`, select the `Embed this Space` option, and go to the `Direct URL`
- Select the task, `Face Recognition`
- Select 'Send Anyway' when prompted
- Upload your image and test
- **NOTE:** When using the Direct URL link via mobile, a camera option is also enabled for capturing images (set a 1:1 aspect ratio in the camera settings beforehand)

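For reference, below is a minimal sketch of what `get_face_class()` could look like; it is an illustration, not the provided template. It makes the same assumptions as the `get_similarity()` sketch earlier in these instructions (a `SiameseNetwork` with a `forward_once()` embedding method and a `net_dict` checkpoint key), plus the joblib artifacts produced by the cells below; `face` is assumed to be a PIL image.

```python
import joblib
import torch
import torchvision.transforms as transforms
from .face_recognition_model import SiameseNetwork  # assumed module layout

trnscm = transforms.Compose([transforms.Grayscale(num_output_channels=1),
                             transforms.Resize((100, 100)),
                             transforms.ToTensor()])

def get_face_class(face):
    model = SiameseNetwork()
    ckpt = torch.load('app/Hackathon_setup/siamese_model.t7', map_location='cpu')
    model.load_state_dict(ckpt['net_dict'])
    model.eval()
    clf = joblib.load('app/Hackathon_setup/face_classifier_model.sav')
    class_names = joblib.load('app/Hackathon_setup/class_names.sav')
    with torch.no_grad():
        emb = model.forward_once(trnscm(face).unsqueeze(0)).cpu().numpy()
    # The sklearn classifier predicts an integer label; map it back to a name
    return class_names[int(clf.predict(emb)[0])]
```
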
"""

# @title Run this cell to Setup Image Capturing in Colab {display-mode: "form"}

from IPython.display import display, Javascript
from google.colab.output import eval_js
from base64 import b64decode
from PIL import Image
import imageio
import datetime
import pathlib
import cv2
import numpy as np
import matplotlib.pyplot as plt

# Minimum accepted face area in pixels (48 x 48)
AREA_THRESHOLD = 2304

def take_photo(filename='photo.jpg', quality=0.8):
    js = Javascript('''
      async function takePhoto(quality) {
        const div = document.createElement('div');
        const capture = document.createElement('button');
        capture.textContent = 'Capture';
        div.appendChild(capture);

        const video = document.createElement('video');
        video.style.display = 'block';
        const stream = await navigator.mediaDevices.getUserMedia({video: true});

        document.body.appendChild(div);
        div.appendChild(video);
        video.srcObject = stream;
        await video.play();

        // Resize the output to fit the video element.
        google.colab.output.setIframeHeight(document.documentElement.scrollHeight, true);

        // Wait for Capture to be clicked.
        await new Promise((resolve) => capture.onclick = resolve);

        const canvas = document.createElement('canvas');
        canvas.width = video.videoWidth;
        canvas.height = video.videoHeight;
        canvas.getContext('2d').drawImage(video, 0, 0);
        stream.getVideoTracks()[0].stop(); // Stop the video stream
        div.remove();
        return canvas.toDataURL('image/jpeg', quality);
      }
    ''')
    display(js)
    data = eval_js('takePhoto({})'.format(quality))
    binary = b64decode(data.split(',')[1])
    with open(filename, 'wb') as f:
        f.write(binary)

    # Crop the 640x480 capture to a centered 480x480 square (1:1 aspect ratio)
    im = Image.open(filename)
    im1 = im.crop((80, 0, 560, 480))
    im1.save(filename)

    return filename

def save_faces(miniframe, filepath):
    TRAINSET = "lbpcascade_frontalface.xml"
    classifier = cv2.CascadeClassifier(TRAINSET)
    faces = classifier.detectMultiScale(miniframe)
    image = get_large_face(miniframe, faces)
    if not isinstance(image, np.ndarray):
        return {"status": False}
    plt.imshow(image)
    plt.show()
    # imageio keeps the RGB channel order of the PIL-derived array
    # (cv2.imwrite would expect BGR)
    #cv2.imwrite(filepath, image)
    imageio.imwrite(filepath, image)
    return {"status": True}

def get_large_face(miniframe, faces):
    # Return the largest detected face crop, or 0 if no face is large enough;
    # the emptiness of face_areas is checked before np.argmax is ever called
    images = []
    face_areas = []
    for x, y, w, h in faces:
        face_cropped = miniframe[y:y+h, x:x+w]
        face_areas.append(w * h)
        images.append(face_cropped)
    if not face_areas:
        return 0
    if face_areas[np.argmax(face_areas)] < AREA_THRESHOLD:
        return 0
    return images[np.argmax(face_areas)]

def save_image(filename, class_name):
    base_path = "captured_face_images/"

    pathlib.Path(base_path + class_name).mkdir(parents=True, exist_ok=True)
    # Epoch-seconds timestamp keeps file names unique across captures
    file_name = class_name + "_" + datetime.datetime.now().strftime("%s") + ".jpg"
    filepath = base_path + class_name + "/" + file_name

    image = Image.open(filename)
    miniframe = np.asarray(image)
    status = save_faces(miniframe, filepath)
    if status['status']:
        print("Image saved in " + filepath, flush=True)
    else:
        print("Face not found!\nRetry!")

# @title Capture an Image for Person1 $ $ [Re-run this cell to capture another image] {display-mode: "form"}

class_name = "Person1"

from IPython.display import Image as IPyImage
import time

num_images_to_capture = 5  # You can change this number to capture more or fewer images

for i in range(num_images_to_capture):
    print(f"Capturing image {i+1}/{num_images_to_capture} for {class_name}...")
    try:
        filename = take_photo()
        save_image(filename, class_name)
    except Exception as err:
        # Errors will be thrown if the user does not have a webcam or if they
        # do not grant the page permission to access it.
        print(str(err))
    time.sleep(1)  # Add a small delay between captures

# @title Capture an Image for Person2 $ $ [Re-run this cell to capture another image] {display-mode: "form"}

class_name = "Person2"

from IPython.display import Image as IPyImage
try:
    filename = take_photo()
    #print('Saved to {}'.format(filename))
    # Show the image which was just taken.
    #display(IPyImage(filename))
    # Saving belongs inside the try block: if the capture fails, filename is
    # undefined and save_image() must not run.
    save_image(filename, class_name)
except Exception as err:
    # Errors will be thrown if the user does not have a webcam or if they do not
    # grant the page permission to access it.
    print(str(err))

# @title Capture an Image for Person3 $ $ [Re-run this cell to capture another image] {display-mode: "form"}

class_name = "Person3"

from IPython.display import Image as IPyImage
try:
    filename = take_photo()
    #print('Saved to {}'.format(filename))
    # Show the image which was just taken.
    #display(IPyImage(filename))
    save_image(filename, class_name)
except Exception as err:
    # Errors will be thrown if the user does not have a webcam or if they do not
    # grant the page permission to access it.
    print(str(err))

# @title Capture an Image for Person4 $ $ [Re-run this cell to capture another image] {display-mode: "form"}

class_name = "Person4"

from IPython.display import Image as IPyImage
try:
    filename = take_photo()
    #print('Saved to {}'.format(filename))
    # Show the image which was just taken.
    #display(IPyImage(filename))
    save_image(filename, class_name)
except Exception as err:
    # Errors will be thrown if the user does not have a webcam or if they do not
    # grant the page permission to access it.
    print(str(err))

# @title Capture an Image for Person5 $ $ [Re-run this cell to capture another image] {display-mode: "form"}

class_name = "Person5"

from IPython.display import Image as IPyImage
try:
    filename = take_photo()
    #print('Saved to {}'.format(filename))
    # Show the image which was just taken.
    #display(IPyImage(filename))
    save_image(filename, class_name)
except Exception as err:
    # Errors will be thrown if the user does not have a webcam or if they do not
    # grant the page permission to access it.
    print(str(err))

# @title Capture an Image for Person6 $ $ [Re-run this cell to capture another image] {display-mode: "form"}

class_name = "Person6"

from IPython.display import Image as IPyImage
try:
    filename = take_photo()
    #print('Saved to {}'.format(filename))
    # Show the image which was just taken.
    #display(IPyImage(filename))
    save_image(filename, class_name)
except Exception as err:
    # Errors will be thrown if the user does not have a webcam or if they do not
    # grant the page permission to access it.
    print(str(err))

# @title Capture an Image for Person7 $ $ [Re-run this cell to capture another image] {display-mode: "form"}

class_name = "Person7"

from IPython.display import Image as IPyImage
try:
    filename = take_photo()
    #print('Saved to {}'.format(filename))
    # Show the image which was just taken.
    #display(IPyImage(filename))
    save_image(filename, class_name)
except Exception as err:
    # Errors will be thrown if the user does not have a webcam or if they do not
    # grant the page permission to access it.
    print(str(err))

# @title Download Collected Images $ $ [OPTIONAL] {display-mode: "form"}

from google.colab import files
!zip -r "captured_face_images.zip" "captured_face_images"
files.download('captured_face_images.zip')
print("Downloaded captured_face_images.zip !!")

!cd captured_face_images && ls -a

"""**While uploading team images manually to `captured_face_images`, a hidden `.ipynb_checkpoints` folder may get created and break the `ImageFolder` dataset below. Run the following cell to delete it.**"""

!rm -rf `find -type d -name .ipynb_checkpoints`

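# A pure-Python alternative to the shell one-liner above (a sketch; it removes
# any .ipynb_checkpoints folders nested under captured_face_images):
import pathlib
import shutil

for ckpt_dir in pathlib.Path('captured_face_images').rglob('.ipynb_checkpoints'):
    shutil.rmtree(ckpt_dir)
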
# Here a simple ImageFolder is enough; we don't need SiameseDataSet
finalClassifierDset = dset.ImageFolder(root='./captured_face_images',
                                       transform=transforms.Compose([transforms.Grayscale(num_output_channels=1),
                                                                     transforms.Resize((100, 100)),
                                                                     transforms.ToTensor()]))

# Dataloader using the dataset created above.
representation_dataloader = DataLoader(finalClassifierDset, shuffle=False, num_workers=8, batch_size=100)

# Load the state dict of the siamese model
siamese_model = SiameseNetwork().to(device)
ckpt = torch.load('./siamese_model.t7', map_location=device)
siamese_model.load_state_dict(ckpt['net_dict'])
siamese_model.eval()
print("Siamese model loaded successfully!")

# Get a siamese representation of each of your data points, i.e. for each of your team images.
siamese_features = []
labels = []

print("Extracting features from team images...")
with torch.no_grad():
    for batch_idx, (data, target) in enumerate(representation_dataloader):
        data = data.to(device)
        # Get features using the siamese network
        features = siamese_model.forward_once(data)
        siamese_features.append(features.cpu().numpy())
        labels.extend(target.numpy())

# Convert to numpy arrays
X = np.vstack(siamese_features)
y = np.array(labels)

print(f"Feature shape: {X.shape}")
print(f"Labels shape: {y.shape}")
print(f"Unique labels: {np.unique(y)}")

"""**Train a Classifier for Face Recognition**

You can use any classifier with the features extracted from the Siamese network trained above on your team data. If required, convert the torch tensors to numpy arrays before using sklearn.
"""

# Train a classifier using sklearn
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, classification_report

# Split the data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Train Random Forest classifier
classifier = RandomForestClassifier(n_estimators=100, random_state=42)
classifier.fit(X_train, y_train)

# Make predictions
y_pred = classifier.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)

print(f"Classification Accuracy: {accuracy:.4f}")
print("\nClassification Report:")
print(classification_report(y_test, y_pred))

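# With only a handful of images per person, a plain random split can leave a
# class out of the test set entirely. A stratified split (same sklearn API,
# sketched below) keeps the per-person proportions:
#   X_train, X_test, y_train, y_test = train_test_split(
#       X, y, test_size=0.2, random_state=42, stratify=y)
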
"""**Save your classification model**

* Save your sklearn models using joblib
"""

# Save the classification model using joblib
import joblib

# Save the classifier
joblib.dump(classifier, 'face_classifier_model.sav')
print("Classification model saved as 'face_classifier_model.sav'")

# Also save the class names for reference; ImageFolder assigns integer labels
# in alphabetical directory order, so this list matches the labels in y
class_names = ['Person1', 'Person2', 'Person3', 'Person4', 'Person5', 'Person6', 'Person7']
joblib.dump(class_names, 'class_names.sav')
print("Class names saved as 'class_names.sav'")

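# Optional sanity check (a sketch using objects defined in the cells above):
# reload the saved artifacts and classify a single image from the dataset.
clf_loaded = joblib.load('face_classifier_model.sav')
names_loaded = joblib.load('class_names.sav')
sample, sample_label = finalClassifierDset[0]
with torch.no_grad():
    emb = siamese_model.forward_once(sample.unsqueeze(0).to(device)).cpu().numpy()
print("Predicted:", names_loaded[int(clf_loaded.predict(emb)[0])],
      "| Actual:", finalClassifierDset.classes[sample_label])
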
"""**Download your trained model using the code below**
* Given the path of a model file, the following code downloads it through the browser
"""

# Download the trained models
import os

# Check that the model files exist before downloading them
model_files = ['siamese_model.t7', 'face_classifier_model.sav', 'class_names.sav']

for file in model_files:
    if os.path.exists(file):
        print(f"Model file '{file}' is ready for download")
    else:
        print(f"Warning: Model file '{file}' not found")

# Note: in a local environment, you can manually copy these files.
# In Google Colab, you would use:
# from google.colab import files
# files.download('siamese_model.t7')
# files.download('face_classifier_model.sav')
# files.download('class_names.sav')

app/Hackathon_setup/lbpcascade_frontalface.xml
ADDED
@@ -0,0 +1,1505 @@
<?xml version="1.0"?>
<!--
    number of positive samples 3000
    number of negative samples 1500
-->
<opencv_storage>
<cascade type_id="opencv-cascade-classifier">
  <stageType>BOOST</stageType>
  <featureType>LBP</featureType>
  <height>24</height>
  <width>24</width>
  <stageParams>
    <boostType>GAB</boostType>
    <minHitRate>0.9950000047683716</minHitRate>
    <maxFalseAlarm>0.5000000000000000</maxFalseAlarm>
    <weightTrimRate>0.9500000000000000</weightTrimRate>
    <maxDepth>1</maxDepth>
    <maxWeakCount>100</maxWeakCount></stageParams>
  <featureParams>
    <maxCatCount>256</maxCatCount></featureParams>
  <stageNum>20</stageNum>
  <stages>
    <!-- 20 boosted stages of LBP weak classifiers (stage thresholds, internal
         nodes and leaf values); the full numeric tree data is omitted from
         this excerpt -->
|
| 827 |
+
<leafValues>
|
| 828 |
+
-0.3787897229194641 0.4532003402709961</leafValues></_>
|
| 829 |
+
<!-- tree 2 -->
|
| 830 |
+
<_>
|
| 831 |
+
<internalNodes>
|
| 832 |
+
0 -1 58 -1335245632 1968917183 1940861695 536816369
|
| 833 |
+
-1226071367 -570908176 457026619 1000020667</internalNodes>
|
| 834 |
+
<leafValues>
|
| 835 |
+
-0.4258328974246979 0.4202791750431061</leafValues></_>
|
| 836 |
+
<!-- tree 3 -->
|
| 837 |
+
<_>
|
| 838 |
+
<internalNodes>
|
| 839 |
+
0 -1 94 -1360318719 -1979797897 -50435249 -18646473
|
| 840 |
+
-608879292 -805306691 -269304244 -17840167</internalNodes>
|
| 841 |
+
<leafValues>
|
| 842 |
+
-0.4561023116111755 0.4002747833728790</leafValues></_>
|
| 843 |
+
<!-- tree 4 -->
|
| 844 |
+
<_>
|
| 845 |
+
<internalNodes>
|
| 846 |
+
0 -1 87 2062765935 -16449 -1275080721 -16406 45764335
|
| 847 |
+
-1090552065 -772846337 -570464322</internalNodes>
|
| 848 |
+
<leafValues>
|
| 849 |
+
-0.4314672648906708 0.4086346626281738</leafValues></_>
|
| 850 |
+
<!-- tree 5 -->
|
| 851 |
+
<_>
|
| 852 |
+
<internalNodes>
|
| 853 |
+
0 -1 127 -536896021 1080817663 -738234288 -965478709
|
| 854 |
+
-2082767969 1290855887 1993822934 -990381609</internalNodes>
|
| 855 |
+
<leafValues>
|
| 856 |
+
-0.4174543321132660 0.4249868988990784</leafValues></_>
|
| 857 |
+
<!-- tree 6 -->
|
| 858 |
+
<_>
|
| 859 |
+
<internalNodes>
|
| 860 |
+
0 -1 3 -818943025 168730891 -293610428 -79249354 669224671
|
| 861 |
+
621166734 1086506807 1473768907</internalNodes>
|
| 862 |
+
<leafValues>
|
| 863 |
+
-0.4321364760398865 0.4090838730335236</leafValues></_>
|
| 864 |
+
<!-- tree 7 -->
|
| 865 |
+
<_>
|
| 866 |
+
<internalNodes>
|
| 867 |
+
0 -1 79 -68895696 -67107736 -1414315879 -841676168
|
| 868 |
+
-619843344 -1180610531 -1081990469 1043203389</internalNodes>
|
| 869 |
+
<leafValues>
|
| 870 |
+
-0.5018386244773865 0.3702533841133118</leafValues></_>
|
| 871 |
+
<!-- tree 8 -->
|
| 872 |
+
<_>
|
| 873 |
+
<internalNodes>
|
| 874 |
+
0 -1 116 -54002134 -543485719 -2124882422 -1437445858
|
| 875 |
+
-115617074 -1195787391 -1096024366 -2140472445</internalNodes>
|
| 876 |
+
<leafValues>
|
| 877 |
+
-0.5037505626678467 0.3564981222152710</leafValues></_></weakClassifiers></_>
|
| 878 |
+
<!-- stage 17 -->
|
| 879 |
+
<_>
|
| 880 |
+
<maxWeakCount>9</maxWeakCount>
|
| 881 |
+
<stageThreshold>-0.4892596900463104</stageThreshold>
|
| 882 |
+
<weakClassifiers>
|
| 883 |
+
<!-- tree 0 -->
|
| 884 |
+
<_>
|
| 885 |
+
<internalNodes>
|
| 886 |
+
0 -1 132 -67113211 2003808111 1862135111 846461923 -2752
|
| 887 |
+
2002237273 -273154752 1937223539</internalNodes>
|
| 888 |
+
<leafValues>
|
| 889 |
+
-0.2448196411132813 0.5689709186553955</leafValues></_>
|
| 890 |
+
<!-- tree 1 -->
|
| 891 |
+
<_>
|
| 892 |
+
<internalNodes>
|
| 893 |
+
0 -1 62 1179423888 -78064940 -611839555 -539167899
|
| 894 |
+
-1289358360 -1650810108 -892540499 -1432827684</internalNodes>
|
| 895 |
+
<leafValues>
|
| 896 |
+
-0.4633283913135529 0.3587929606437683</leafValues></_>
|
| 897 |
+
<!-- tree 2 -->
|
| 898 |
+
<_>
|
| 899 |
+
<internalNodes>
|
| 900 |
+
0 -1 23 -285212705 -78450761 -656212031 -264050110 -27787425
|
| 901 |
+
-1334349961 -547662981 -135796924</internalNodes>
|
| 902 |
+
<leafValues>
|
| 903 |
+
-0.3731099069118500 0.4290455579757690</leafValues></_>
|
| 904 |
+
<!-- tree 3 -->
|
| 905 |
+
<_>
|
| 906 |
+
<internalNodes>
|
| 907 |
+
0 -1 77 341863476 403702016 -550588417 1600194541
|
| 908 |
+
-1080690735 951127993 -1388580949 -1153717473</internalNodes>
|
| 909 |
+
<leafValues>
|
| 910 |
+
-0.3658909499645233 0.4556473195552826</leafValues></_>
|
| 911 |
+
<!-- tree 4 -->
|
| 912 |
+
<_>
|
| 913 |
+
<internalNodes>
|
| 914 |
+
0 -1 22 -586880702 -204831512 -100644596 -39319550
|
| 915 |
+
-1191150794 705692513 457203315 -75806957</internalNodes>
|
| 916 |
+
<leafValues>
|
| 917 |
+
-0.5214384198188782 0.3221037387847900</leafValues></_>
|
| 918 |
+
<!-- tree 5 -->
|
| 919 |
+
<_>
|
| 920 |
+
<internalNodes>
|
| 921 |
+
0 -1 72 -416546870 545911370 -673716192 -775559454
|
| 922 |
+
-264113598 139424 -183369982 -204474641</internalNodes>
|
| 923 |
+
<leafValues>
|
| 924 |
+
-0.4289036989212036 0.4004956185817719</leafValues></_>
|
| 925 |
+
<!-- tree 6 -->
|
| 926 |
+
<_>
|
| 927 |
+
<internalNodes>
|
| 928 |
+
0 -1 50 -1026505020 -589692154 -1740499937 -1563770497
|
| 929 |
+
1348491006 -60710713 -1109853489 -633909413</internalNodes>
|
| 930 |
+
<leafValues>
|
| 931 |
+
-0.4621542394161224 0.3832748532295227</leafValues></_>
|
| 932 |
+
<!-- tree 7 -->
|
| 933 |
+
<_>
|
| 934 |
+
<internalNodes>
|
| 935 |
+
0 -1 108 -1448872304 -477895040 -1778390608 -772418127
|
| 936 |
+
-1789923416 -1612057181 -805306693 -1415842113</internalNodes>
|
| 937 |
+
<leafValues>
|
| 938 |
+
-0.3711548447608948 0.4612701535224915</leafValues></_>
|
| 939 |
+
<!-- tree 8 -->
|
| 940 |
+
<_>
|
| 941 |
+
<internalNodes>
|
| 942 |
+
0 -1 92 407905424 -582449988 52654751 -1294472 -285103725
|
| 943 |
+
-74633006 1871559083 1057955850</internalNodes>
|
| 944 |
+
<leafValues>
|
| 945 |
+
-0.5180652141571045 0.3205870389938355</leafValues></_></weakClassifiers></_>
|
| 946 |
+
<!-- stage 18 -->
|
| 947 |
+
<_>
|
| 948 |
+
<maxWeakCount>10</maxWeakCount>
|
| 949 |
+
<stageThreshold>-0.5911940932273865</stageThreshold>
|
| 950 |
+
<weakClassifiers>
|
| 951 |
+
<!-- tree 0 -->
|
| 952 |
+
<_>
|
| 953 |
+
<internalNodes>
|
| 954 |
+
0 -1 81 4112 -1259563825 -846671428 -100902460 1838164148
|
| 955 |
+
-74153752 -90653988 -1074263896</internalNodes>
|
| 956 |
+
<leafValues>
|
| 957 |
+
-0.2592592537403107 0.5873016119003296</leafValues></_>
|
| 958 |
+
<!-- tree 1 -->
|
| 959 |
+
<_>
|
| 960 |
+
<internalNodes>
|
| 961 |
+
0 -1 1 -285216785 -823206977 -1085589 -1081346 1207959293
|
| 962 |
+
1157103471 2097133565 -2097169</internalNodes>
|
| 963 |
+
<leafValues>
|
| 964 |
+
-0.3801195919513702 0.4718827307224274</leafValues></_>
|
| 965 |
+
<!-- tree 2 -->
|
| 966 |
+
<_>
|
| 967 |
+
<internalNodes>
|
| 968 |
+
0 -1 121 -12465 -536875169 2147478367 2130706303 -37765492
|
| 969 |
+
-866124467 -318782328 -1392509185</internalNodes>
|
| 970 |
+
<leafValues>
|
| 971 |
+
-0.3509117066860199 0.5094807147979736</leafValues></_>
|
| 972 |
+
<!-- tree 3 -->
|
| 973 |
+
<_>
|
| 974 |
+
<internalNodes>
|
| 975 |
+
0 -1 38 2147449663 -20741 -16794757 1945873146 -16710 -1
|
| 976 |
+
-8406341 -67663041</internalNodes>
|
| 977 |
+
<leafValues>
|
| 978 |
+
-0.4068757295608521 0.4130136370658875</leafValues></_>
|
| 979 |
+
<!-- tree 4 -->
|
| 980 |
+
<_>
|
| 981 |
+
<internalNodes>
|
| 982 |
+
0 -1 17 -155191713 866117231 1651407483 548272812 -479201468
|
| 983 |
+
-447742449 1354229504 -261884429</internalNodes>
|
| 984 |
+
<leafValues>
|
| 985 |
+
-0.4557141065597534 0.3539792001247406</leafValues></_>
|
| 986 |
+
<!-- tree 5 -->
|
| 987 |
+
<_>
|
| 988 |
+
<internalNodes>
|
| 989 |
+
0 -1 100 -225319378 -251682065 -492783986 -792341777
|
| 990 |
+
-1287261695 1393643841 -11274182 -213909521</internalNodes>
|
| 991 |
+
<leafValues>
|
| 992 |
+
-0.4117803275585175 0.4118592441082001</leafValues></_>
|
| 993 |
+
<!-- tree 6 -->
|
| 994 |
+
<_>
|
| 995 |
+
<internalNodes>
|
| 996 |
+
0 -1 63 -382220122 -2002072729 -51404800 -371201558
|
| 997 |
+
-923011069 -2135301457 -2066104743 -1042557441</internalNodes>
|
| 998 |
+
<leafValues>
|
| 999 |
+
-0.4008397758007050 0.4034757018089294</leafValues></_>
|
| 1000 |
+
<!-- tree 7 -->
|
| 1001 |
+
<_>
|
| 1002 |
+
<internalNodes>
|
| 1003 |
+
0 -1 101 -627353764 -48295149 1581203952 -436258614
|
| 1004 |
+
-105268268 -1435893445 -638126888 -1061107126</internalNodes>
|
| 1005 |
+
<leafValues>
|
| 1006 |
+
-0.5694189667701721 0.2964762747287750</leafValues></_>
|
| 1007 |
+
<!-- tree 8 -->
|
| 1008 |
+
<_>
|
| 1009 |
+
<internalNodes>
|
| 1010 |
+
0 -1 118 -8399181 1058107691 -621022752 -251003468 -12582915
|
| 1011 |
+
-574619739 -994397789 -1648362021</internalNodes>
|
| 1012 |
+
<leafValues>
|
| 1013 |
+
-0.3195341229438782 0.5294018983840942</leafValues></_>
|
| 1014 |
+
<!-- tree 9 -->
|
| 1015 |
+
<_>
|
| 1016 |
+
<internalNodes>
|
| 1017 |
+
0 -1 92 -348343812 -1078389516 1717960437 364735981
|
| 1018 |
+
-1783841602 -4883137 -457572354 -1076950384</internalNodes>
|
| 1019 |
+
<leafValues>
|
| 1020 |
+
-0.3365339040756226 0.5067458748817444</leafValues></_></weakClassifiers></_>
|
| 1021 |
+
<!-- stage 19 -->
|
| 1022 |
+
<_>
|
| 1023 |
+
<maxWeakCount>10</maxWeakCount>
|
| 1024 |
+
<stageThreshold>-0.7612916231155396</stageThreshold>
|
| 1025 |
+
<weakClassifiers>
|
| 1026 |
+
<!-- tree 0 -->
|
| 1027 |
+
<_>
|
| 1028 |
+
<internalNodes>
|
| 1029 |
+
0 -1 10 -1976661318 -287957604 -1659497122 -782068 43591089
|
| 1030 |
+
-453637880 1435470000 -1077438561</internalNodes>
|
| 1031 |
+
<leafValues>
|
| 1032 |
+
-0.4204545319080353 0.5165745615959168</leafValues></_>
|
| 1033 |
+
<!-- tree 1 -->
|
| 1034 |
+
<_>
|
| 1035 |
+
<internalNodes>
|
| 1036 |
+
0 -1 131 -67110925 14874979 -142633168 -1338923040
|
| 1037 |
+
2046713291 -2067933195 1473503712 -789579837</internalNodes>
|
| 1038 |
+
<leafValues>
|
| 1039 |
+
-0.3762553930282593 0.4075302779674530</leafValues></_>
|
| 1040 |
+
<!-- tree 2 -->
|
| 1041 |
+
<_>
|
| 1042 |
+
<internalNodes>
|
| 1043 |
+
0 -1 83 -272814301 -1577073 -1118685 -305156120 -1052289
|
| 1044 |
+
-1073813756 -538971154 -355523038</internalNodes>
|
| 1045 |
+
<leafValues>
|
| 1046 |
+
-0.4253497421741486 0.3728055357933044</leafValues></_>
|
| 1047 |
+
<!-- tree 3 -->
|
| 1048 |
+
<_>
|
| 1049 |
+
<internalNodes>
|
| 1050 |
+
0 -1 135 -2233 -214486242 -538514758 573747007 -159390971
|
| 1051 |
+
1994225489 -973738098 -203424005</internalNodes>
|
| 1052 |
+
<leafValues>
|
| 1053 |
+
-0.3601998090744019 0.4563256204128265</leafValues></_>
|
| 1054 |
+
<!-- tree 4 -->
|
| 1055 |
+
<_>
|
| 1056 |
+
<internalNodes>
|
| 1057 |
+
0 -1 115 -261031688 -1330369299 -641860609 1029570301
|
| 1058 |
+
-1306461192 -1196149518 -1529767778 683139823</internalNodes>
|
| 1059 |
+
<leafValues>
|
| 1060 |
+
-0.4034293889999390 0.4160816967487335</leafValues></_>
|
| 1061 |
+
<!-- tree 5 -->
|
| 1062 |
+
<_>
|
| 1063 |
+
<internalNodes>
|
| 1064 |
+
0 -1 64 -572993608 -34042628 -417865 -111109 -1433365268
|
| 1065 |
+
-19869715 -1920939864 -1279457063</internalNodes>
|
| 1066 |
+
<leafValues>
|
| 1067 |
+
-0.3620899617671967 0.4594142735004425</leafValues></_>
|
| 1068 |
+
<!-- tree 6 -->
|
| 1069 |
+
<_>
|
| 1070 |
+
<internalNodes>
|
| 1071 |
+
0 -1 36 -626275097 -615256993 1651946018 805366393
|
| 1072 |
+
2016559730 -430780849 -799868165 -16580645</internalNodes>
|
| 1073 |
+
<leafValues>
|
| 1074 |
+
-0.3903816640377045 0.4381459355354309</leafValues></_>
|
| 1075 |
+
<!-- tree 7 -->
|
| 1076 |
+
<_>
|
| 1077 |
+
<internalNodes>
|
| 1078 |
+
0 -1 93 1354797300 -1090957603 1976418270 -1342502178
|
| 1079 |
+
-1851873892 -1194637077 -1153521668 -1108399474</internalNodes>
|
| 1080 |
+
<leafValues>
|
| 1081 |
+
-0.3591445386409760 0.4624078869819641</leafValues></_>
|
| 1082 |
+
<!-- tree 8 -->
|
| 1083 |
+
<_>
|
| 1084 |
+
<internalNodes>
|
| 1085 |
+
0 -1 91 68157712 1211368313 -304759523 1063017136 798797750
|
| 1086 |
+
-275513546 648167355 -1145357350</internalNodes>
|
| 1087 |
+
<leafValues>
|
| 1088 |
+
-0.4297670423984528 0.4023293554782867</leafValues></_>
|
| 1089 |
+
<!-- tree 9 -->
|
| 1090 |
+
<_>
|
| 1091 |
+
<internalNodes>
|
| 1092 |
+
0 -1 107 -546318240 -1628569602 -163577944 -537002306
|
| 1093 |
+
-545456389 -1325465645 -380446736 -1058473386</internalNodes>
|
| 1094 |
+
<leafValues>
|
| 1095 |
+
-0.5727006793022156 0.2995934784412384</leafValues></_></weakClassifiers></_></stages>
|
| 1096 |
+
<features>
|
| 1097 |
+
<_>
|
| 1098 |
+
<rect>
|
| 1099 |
+
0 0 3 5</rect></_>
|
| 1100 |
+
<_>
|
| 1101 |
+
<rect>
|
| 1102 |
+
0 0 4 2</rect></_>
|
| 1103 |
+
<_>
|
| 1104 |
+
<rect>
|
| 1105 |
+
0 0 6 3</rect></_>
|
| 1106 |
+
<_>
|
| 1107 |
+
<rect>
|
| 1108 |
+
0 1 2 3</rect></_>
|
| 1109 |
+
<_>
|
| 1110 |
+
<rect>
|
| 1111 |
+
0 1 3 3</rect></_>
|
| 1112 |
+
<_>
|
| 1113 |
+
<rect>
|
| 1114 |
+
0 1 3 7</rect></_>
|
| 1115 |
+
<_>
|
| 1116 |
+
<rect>
|
| 1117 |
+
0 4 3 3</rect></_>
|
| 1118 |
+
<_>
|
| 1119 |
+
<rect>
|
| 1120 |
+
0 11 3 4</rect></_>
|
| 1121 |
+
<_>
|
| 1122 |
+
<rect>
|
| 1123 |
+
0 12 8 4</rect></_>
|
| 1124 |
+
<_>
|
| 1125 |
+
<rect>
|
| 1126 |
+
0 14 4 3</rect></_>
|
| 1127 |
+
<_>
|
| 1128 |
+
<rect>
|
| 1129 |
+
1 0 5 3</rect></_>
|
| 1130 |
+
<_>
|
| 1131 |
+
<rect>
|
| 1132 |
+
1 1 2 2</rect></_>
|
| 1133 |
+
<_>
|
| 1134 |
+
<rect>
|
| 1135 |
+
1 3 3 1</rect></_>
|
| 1136 |
+
<_>
|
| 1137 |
+
<rect>
|
| 1138 |
+
1 7 4 4</rect></_>
|
| 1139 |
+
<_>
|
| 1140 |
+
<rect>
|
| 1141 |
+
1 12 2 2</rect></_>
|
| 1142 |
+
<_>
|
| 1143 |
+
<rect>
|
| 1144 |
+
1 13 4 1</rect></_>
|
| 1145 |
+
<_>
|
| 1146 |
+
<rect>
|
| 1147 |
+
1 14 4 3</rect></_>
|
| 1148 |
+
<_>
|
| 1149 |
+
<rect>
|
| 1150 |
+
1 17 3 2</rect></_>
|
| 1151 |
+
<_>
|
| 1152 |
+
<rect>
|
| 1153 |
+
2 0 2 3</rect></_>
|
| 1154 |
+
<_>
|
| 1155 |
+
<rect>
|
| 1156 |
+
2 1 2 2</rect></_>
|
| 1157 |
+
<_>
|
| 1158 |
+
<rect>
|
| 1159 |
+
2 2 4 6</rect></_>
|
| 1160 |
+
<_>
|
| 1161 |
+
<rect>
|
| 1162 |
+
2 3 4 4</rect></_>
|
| 1163 |
+
<_>
|
| 1164 |
+
<rect>
|
| 1165 |
+
2 7 2 1</rect></_>
|
| 1166 |
+
<_>
|
| 1167 |
+
<rect>
|
| 1168 |
+
2 11 2 3</rect></_>
|
| 1169 |
+
<_>
|
| 1170 |
+
<rect>
|
| 1171 |
+
2 17 3 2</rect></_>
|
| 1172 |
+
<_>
|
| 1173 |
+
<rect>
|
| 1174 |
+
3 0 2 2</rect></_>
|
| 1175 |
+
<_>
|
| 1176 |
+
<rect>
|
| 1177 |
+
3 1 7 3</rect></_>
|
| 1178 |
+
<_>
|
| 1179 |
+
<rect>
|
| 1180 |
+
3 7 2 1</rect></_>
|
| 1181 |
+
<_>
|
| 1182 |
+
<rect>
|
| 1183 |
+
3 7 2 4</rect></_>
|
| 1184 |
+
<_>
|
| 1185 |
+
<rect>
|
| 1186 |
+
3 18 2 2</rect></_>
|
| 1187 |
+
<_>
|
| 1188 |
+
<rect>
|
| 1189 |
+
4 0 2 3</rect></_>
|
| 1190 |
+
<_>
|
| 1191 |
+
<rect>
|
| 1192 |
+
4 3 2 1</rect></_>
|
| 1193 |
+
<_>
|
| 1194 |
+
<rect>
|
| 1195 |
+
4 6 2 1</rect></_>
|
| 1196 |
+
<_>
|
| 1197 |
+
<rect>
|
| 1198 |
+
4 6 2 5</rect></_>
|
| 1199 |
+
<_>
|
| 1200 |
+
<rect>
|
| 1201 |
+
4 7 5 2</rect></_>
|
| 1202 |
+
<_>
|
| 1203 |
+
<rect>
|
| 1204 |
+
4 8 4 3</rect></_>
|
| 1205 |
+
<_>
|
| 1206 |
+
<rect>
|
| 1207 |
+
4 18 2 2</rect></_>
|
| 1208 |
+
<_>
|
| 1209 |
+
<rect>
|
| 1210 |
+
5 0 2 2</rect></_>
|
| 1211 |
+
<_>
|
| 1212 |
+
<rect>
|
| 1213 |
+
5 3 4 4</rect></_>
|
| 1214 |
+
<_>
|
| 1215 |
+
<rect>
|
| 1216 |
+
5 6 2 5</rect></_>
|
| 1217 |
+
<_>
|
| 1218 |
+
<rect>
|
| 1219 |
+
5 9 2 2</rect></_>
|
| 1220 |
+
<_>
|
| 1221 |
+
<rect>
|
| 1222 |
+
5 10 2 2</rect></_>
|
| 1223 |
+
<_>
|
| 1224 |
+
<rect>
|
| 1225 |
+
6 3 4 4</rect></_>
|
| 1226 |
+
<_>
|
| 1227 |
+
<rect>
|
| 1228 |
+
6 4 4 3</rect></_>
|
| 1229 |
+
<_>
|
| 1230 |
+
<rect>
|
| 1231 |
+
6 5 2 3</rect></_>
|
| 1232 |
+
<_>
|
| 1233 |
+
<rect>
|
| 1234 |
+
6 5 2 5</rect></_>
|
| 1235 |
+
<_>
|
| 1236 |
+
<rect>
|
| 1237 |
+
6 5 4 3</rect></_>
|
| 1238 |
+
<_>
|
| 1239 |
+
<rect>
|
| 1240 |
+
6 6 4 2</rect></_>
|
| 1241 |
+
<_>
|
| 1242 |
+
<rect>
|
| 1243 |
+
6 6 4 4</rect></_>
|
| 1244 |
+
<_>
|
| 1245 |
+
<rect>
|
| 1246 |
+
6 18 1 2</rect></_>
|
| 1247 |
+
<_>
|
| 1248 |
+
<rect>
|
| 1249 |
+
6 21 2 1</rect></_>
|
| 1250 |
+
<_>
|
| 1251 |
+
<rect>
|
| 1252 |
+
7 0 3 7</rect></_>
|
| 1253 |
+
<_>
|
| 1254 |
+
<rect>
|
| 1255 |
+
7 4 2 3</rect></_>
|
| 1256 |
+
<_>
|
| 1257 |
+
<rect>
|
| 1258 |
+
7 9 5 1</rect></_>
|
| 1259 |
+
<_>
|
| 1260 |
+
<rect>
|
| 1261 |
+
7 21 2 1</rect></_>
|
| 1262 |
+
<_>
|
| 1263 |
+
<rect>
|
| 1264 |
+
8 0 1 4</rect></_>
|
| 1265 |
+
<_>
|
| 1266 |
+
<rect>
|
| 1267 |
+
8 5 2 2</rect></_>
|
| 1268 |
+
<_>
|
| 1269 |
+
<rect>
|
| 1270 |
+
8 5 3 2</rect></_>
|
| 1271 |
+
<_>
|
| 1272 |
+
<rect>
|
| 1273 |
+
8 17 3 1</rect></_>
|
| 1274 |
+
<_>
|
| 1275 |
+
<rect>
|
| 1276 |
+
8 18 1 2</rect></_>
|
| 1277 |
+
<_>
|
| 1278 |
+
<rect>
|
| 1279 |
+
9 0 5 3</rect></_>
|
| 1280 |
+
<_>
|
| 1281 |
+
<rect>
|
| 1282 |
+
9 2 2 6</rect></_>
|
| 1283 |
+
<_>
|
| 1284 |
+
<rect>
|
| 1285 |
+
9 5 1 1</rect></_>
|
| 1286 |
+
<_>
|
| 1287 |
+
<rect>
|
| 1288 |
+
9 11 1 1</rect></_>
|
| 1289 |
+
<_>
|
| 1290 |
+
<rect>
|
| 1291 |
+
9 16 1 1</rect></_>
|
| 1292 |
+
<_>
|
| 1293 |
+
<rect>
|
| 1294 |
+
9 16 2 1</rect></_>
|
| 1295 |
+
<_>
|
| 1296 |
+
<rect>
|
| 1297 |
+
9 17 1 1</rect></_>
|
| 1298 |
+
<_>
|
| 1299 |
+
<rect>
|
| 1300 |
+
9 18 1 1</rect></_>
|
| 1301 |
+
<_>
|
| 1302 |
+
<rect>
|
| 1303 |
+
10 5 1 2</rect></_>
|
| 1304 |
+
<_>
|
| 1305 |
+
<rect>
|
| 1306 |
+
10 5 3 3</rect></_>
|
| 1307 |
+
<_>
|
| 1308 |
+
<rect>
|
| 1309 |
+
10 7 1 5</rect></_>
|
| 1310 |
+
<_>
|
| 1311 |
+
<rect>
|
| 1312 |
+
10 8 1 1</rect></_>
|
| 1313 |
+
<_>
|
| 1314 |
+
<rect>
|
| 1315 |
+
10 9 1 1</rect></_>
|
| 1316 |
+
<_>
|
| 1317 |
+
<rect>
|
| 1318 |
+
10 10 1 1</rect></_>
|
| 1319 |
+
<_>
|
| 1320 |
+
<rect>
|
| 1321 |
+
10 10 1 2</rect></_>
|
| 1322 |
+
<_>
|
| 1323 |
+
<rect>
|
| 1324 |
+
10 14 3 3</rect></_>
|
| 1325 |
+
<_>
|
| 1326 |
+
<rect>
|
| 1327 |
+
10 15 1 1</rect></_>
|
| 1328 |
+
<_>
|
| 1329 |
+
<rect>
|
| 1330 |
+
10 15 2 1</rect></_>
|
| 1331 |
+
<_>
|
| 1332 |
+
<rect>
|
| 1333 |
+
10 16 1 1</rect></_>
|
| 1334 |
+
<_>
|
| 1335 |
+
<rect>
|
| 1336 |
+
10 16 2 1</rect></_>
|
| 1337 |
+
<_>
|
| 1338 |
+
<rect>
|
| 1339 |
+
10 17 1 1</rect></_>
|
| 1340 |
+
<_>
|
| 1341 |
+
<rect>
|
| 1342 |
+
10 21 1 1</rect></_>
|
| 1343 |
+
<_>
|
| 1344 |
+
<rect>
|
| 1345 |
+
11 3 2 2</rect></_>
|
| 1346 |
+
<_>
|
| 1347 |
+
<rect>
|
| 1348 |
+
11 5 1 2</rect></_>
|
| 1349 |
+
<_>
|
| 1350 |
+
<rect>
|
| 1351 |
+
11 5 3 3</rect></_>
|
| 1352 |
+
<_>
|
| 1353 |
+
<rect>
|
| 1354 |
+
11 5 4 6</rect></_>
|
| 1355 |
+
<_>
|
| 1356 |
+
<rect>
|
| 1357 |
+
11 6 1 1</rect></_>
|
| 1358 |
+
<_>
|
| 1359 |
+
<rect>
|
| 1360 |
+
11 7 2 2</rect></_>
|
| 1361 |
+
<_>
|
| 1362 |
+
<rect>
|
| 1363 |
+
11 8 1 2</rect></_>
|
| 1364 |
+
<_>
|
| 1365 |
+
<rect>
|
| 1366 |
+
11 10 1 1</rect></_>
|
| 1367 |
+
<_>
|
| 1368 |
+
<rect>
|
| 1369 |
+
11 10 1 2</rect></_>
|
| 1370 |
+
<_>
|
| 1371 |
+
<rect>
|
| 1372 |
+
11 15 1 1</rect></_>
|
| 1373 |
+
<_>
|
| 1374 |
+
<rect>
|
| 1375 |
+
11 17 1 1</rect></_>
|
| 1376 |
+
<_>
|
| 1377 |
+
<rect>
|
| 1378 |
+
11 18 1 1</rect></_>
|
| 1379 |
+
<_>
|
| 1380 |
+
<rect>
|
| 1381 |
+
12 0 2 2</rect></_>
|
| 1382 |
+
<_>
|
| 1383 |
+
<rect>
|
| 1384 |
+
12 1 2 5</rect></_>
|
| 1385 |
+
<_>
|
| 1386 |
+
<rect>
|
| 1387 |
+
12 2 4 1</rect></_>
|
| 1388 |
+
<_>
|
| 1389 |
+
<rect>
|
| 1390 |
+
12 3 1 3</rect></_>
|
| 1391 |
+
<_>
|
| 1392 |
+
<rect>
|
| 1393 |
+
12 7 3 4</rect></_>
|
| 1394 |
+
<_>
|
| 1395 |
+
<rect>
|
| 1396 |
+
12 10 3 2</rect></_>
|
| 1397 |
+
<_>
|
| 1398 |
+
<rect>
|
| 1399 |
+
12 11 1 1</rect></_>
|
| 1400 |
+
<_>
|
| 1401 |
+
<rect>
|
| 1402 |
+
12 12 3 2</rect></_>
|
| 1403 |
+
<_>
|
| 1404 |
+
<rect>
|
| 1405 |
+
12 14 4 3</rect></_>
|
| 1406 |
+
<_>
|
| 1407 |
+
<rect>
|
| 1408 |
+
12 17 1 1</rect></_>
|
| 1409 |
+
<_>
|
| 1410 |
+
<rect>
|
| 1411 |
+
12 21 2 1</rect></_>
|
| 1412 |
+
<_>
|
| 1413 |
+
<rect>
|
| 1414 |
+
13 6 2 5</rect></_>
|
| 1415 |
+
<_>
|
| 1416 |
+
<rect>
|
| 1417 |
+
13 7 3 5</rect></_>
|
| 1418 |
+
<_>
|
| 1419 |
+
<rect>
|
| 1420 |
+
13 11 3 2</rect></_>
|
| 1421 |
+
<_>
|
| 1422 |
+
<rect>
|
| 1423 |
+
13 17 2 2</rect></_>
|
| 1424 |
+
<_>
|
| 1425 |
+
<rect>
|
| 1426 |
+
13 17 3 2</rect></_>
|
| 1427 |
+
<_>
|
| 1428 |
+
<rect>
|
| 1429 |
+
13 18 1 2</rect></_>
|
| 1430 |
+
<_>
|
| 1431 |
+
<rect>
|
| 1432 |
+
13 18 2 2</rect></_>
|
| 1433 |
+
<_>
|
| 1434 |
+
<rect>
|
| 1435 |
+
14 0 2 2</rect></_>
|
| 1436 |
+
<_>
|
| 1437 |
+
<rect>
|
| 1438 |
+
14 1 1 3</rect></_>
|
| 1439 |
+
<_>
|
| 1440 |
+
<rect>
|
| 1441 |
+
14 2 3 2</rect></_>
|
| 1442 |
+
<_>
|
| 1443 |
+
<rect>
|
| 1444 |
+
14 7 2 1</rect></_>
|
| 1445 |
+
<_>
|
| 1446 |
+
<rect>
|
| 1447 |
+
14 13 2 1</rect></_>
|
| 1448 |
+
<_>
|
| 1449 |
+
<rect>
|
| 1450 |
+
14 13 3 3</rect></_>
|
| 1451 |
+
<_>
|
| 1452 |
+
<rect>
|
| 1453 |
+
14 17 2 2</rect></_>
|
| 1454 |
+
<_>
|
| 1455 |
+
<rect>
|
| 1456 |
+
15 0 2 2</rect></_>
|
| 1457 |
+
<_>
|
| 1458 |
+
<rect>
|
| 1459 |
+
15 0 2 3</rect></_>
|
| 1460 |
+
<_>
|
| 1461 |
+
<rect>
|
| 1462 |
+
15 4 3 2</rect></_>
|
| 1463 |
+
<_>
|
| 1464 |
+
<rect>
|
| 1465 |
+
15 4 3 6</rect></_>
|
| 1466 |
+
<_>
|
| 1467 |
+
<rect>
|
| 1468 |
+
15 6 3 2</rect></_>
|
| 1469 |
+
<_>
|
| 1470 |
+
<rect>
|
| 1471 |
+
15 11 3 4</rect></_>
|
| 1472 |
+
<_>
|
| 1473 |
+
<rect>
|
| 1474 |
+
15 13 3 2</rect></_>
|
| 1475 |
+
<_>
|
| 1476 |
+
<rect>
|
| 1477 |
+
15 17 2 2</rect></_>
|
| 1478 |
+
<_>
|
| 1479 |
+
<rect>
|
| 1480 |
+
15 17 3 2</rect></_>
|
| 1481 |
+
<_>
|
| 1482 |
+
<rect>
|
| 1483 |
+
16 1 2 3</rect></_>
|
| 1484 |
+
<_>
|
| 1485 |
+
<rect>
|
| 1486 |
+
16 3 2 4</rect></_>
|
| 1487 |
+
<_>
|
| 1488 |
+
<rect>
|
| 1489 |
+
16 6 1 1</rect></_>
|
| 1490 |
+
<_>
|
| 1491 |
+
<rect>
|
| 1492 |
+
16 16 2 2</rect></_>
|
| 1493 |
+
<_>
|
| 1494 |
+
<rect>
|
| 1495 |
+
17 1 2 2</rect></_>
|
| 1496 |
+
<_>
|
| 1497 |
+
<rect>
|
| 1498 |
+
17 1 2 5</rect></_>
|
| 1499 |
+
<_>
|
| 1500 |
+
<rect>
|
| 1501 |
+
17 12 2 2</rect></_>
|
| 1502 |
+
<_>
|
| 1503 |
+
<rect>
|
| 1504 |
+
18 0 2 2</rect></_></features></cascade>
|
| 1505 |
+
</opencv_storage>
|
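The block above is the tail of app/Hackathon_setup/lbpcascade_frontalface.xml: the final boosted stages (each a <stageThreshold> plus decision-stump <weakClassifiers>) followed by the <features> list of LBP sampling rectangles that the internalNodes indices refer to. OpenCV consumes this XML directly through cv2.CascadeClassifier; a minimal usage sketch (the path layout and input image name here are illustrative):

    import cv2

    # Load the LBP cascade shipped with this Space
    cascade = cv2.CascadeClassifier("app/Hackathon_setup/lbpcascade_frontalface.xml")

    img = cv2.imread("face.jpg")                    # illustrative input image
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)    # cascades operate on grayscale

    # Each detection is an (x, y, w, h) box; the parameters are common defaults
    faces = cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5)
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)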
app/Hackathon_setup/siamese_model.t7
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:51f94087f49e39d1fa0f093e4d1469a4c8807a8b1d20abcf4b70b5f7fcecd27a
+size 161027032
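These three lines are a Git LFS pointer, not the network weights themselves: oid is the SHA-256 of the real checkpoint (about 161 MB) that LFS fetches on checkout. Despite the Lua-Torch-style .t7 extension, a checkpoint in a PyTorch code base like this one is normally written with torch.save; assuming that is the case here, a hedged loading sketch:

    import torch

    # Assumption: the file was produced by torch.save, so torch.load can read it.
    state = torch.load("app/Hackathon_setup/siamese_model.t7",
                       map_location=torch.device("cpu"))  # keep it CPU-safe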
app/__init__.py
ADDED
@@ -0,0 +1 @@
+__version__ = "0.0.1"
app/config.py
ADDED
@@ -0,0 +1,25 @@
+import sys
+from typing import List
+
+from pydantic import AnyHttpUrl, BaseSettings
+
+class Settings(BaseSettings):
+    API_V1_STR: str = "/api/v1"
+
+    # Meta
+
+    # BACKEND_CORS_ORIGINS is a comma-separated list of origins
+    # e.g: http://localhost,http://localhost:4200,http://localhost:3000
+    BACKEND_CORS_ORIGINS: List[AnyHttpUrl] = [
+        "http://localhost:3000",  # type: ignore
+        "http://localhost:8000",  # type: ignore
+        "https://localhost:3000",  # type: ignore
+        "https://localhost:8000",  # type: ignore
+    ]
+
+    PROJECT_NAME: str = "Recognition API"
+
+    class Config:
+        case_sensitive = True
+
+settings = Settings()
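Because Settings subclasses pydantic's BaseSettings (the v1 API, matching the pydantic==1.10.10 pin in requirements.txt), every field can be overridden through an environment variable at startup, and case_sensitive = True means the variable name must match the field name exactly. A small illustration of that behaviour:

    import os

    os.environ["PROJECT_NAME"] = "Recognition API (staging)"  # exact-case match required

    from app.config import Settings
    print(Settings().PROJECT_NAME)  # -> Recognition API (staging)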
app/main.py
ADDED
@@ -0,0 +1,148 @@
+import sys
+from pathlib import Path
+sys.path.append(str(Path(__file__).resolve().parent.parent))
+#print(sys.path)
+from typing import Any
+
+from fastapi import FastAPI, Request, APIRouter, File, UploadFile
+from fastapi.staticfiles import StaticFiles
+from fastapi.templating import Jinja2Templates
+from fastapi.middleware.cors import CORSMiddleware
+from app.config import settings
+from app import __version__
+from app.Hackathon_setup import face_recognition, exp_recognition
+
+import numpy as np
+from PIL import Image
+
+
+app = FastAPI(
+    title=settings.PROJECT_NAME, openapi_url=f"{settings.API_V1_STR}/openapi.json"
+)
+
+# To store files uploaded by users
+app.mount("/static", StaticFiles(directory="app/static"), name="static")
+
+# To access Templates directory
+templates = Jinja2Templates(directory="app/templates")
+
+simi_filename1 = None
+simi_filename2 = None
+face_rec_filename = None
+expr_rec_filename = None
+
+
+#################################### Home Page endpoints #################################################
+@app.get("/")
+async def root(request: Request):
+    return templates.TemplateResponse("index.html", {'request': request})
+
+
+#################################### Face Similarity endpoints #################################################
+@app.get("/similarity/")
+async def similarity_root(request: Request):
+    return templates.TemplateResponse("similarity.html", {'request': request})
+
+
+@app.post("/predict_similarity/")
+async def predict_similarity(request: Request, file1: UploadFile = File(...), file2: UploadFile = File(...)):
+    global simi_filename1
+    global simi_filename2
+
+    if 'image' in file1.content_type:
+        contents = await file1.read()
+        simi_filename1 = 'app/static/' + file1.filename
+        with open(simi_filename1, 'wb') as f:
+            f.write(contents)
+
+    if 'image' in file2.content_type:
+        contents = await file2.read()
+        simi_filename2 = 'app/static/' + file2.filename
+        with open(simi_filename2, 'wb') as f:
+            f.write(contents)
+
+    img1 = Image.open(simi_filename1)
+    img1 = np.array(img1.convert('RGB'), dtype=np.uint8)  # RGB uint8 array; convert() also handles RGBA/grayscale uploads
+
+    img2 = Image.open(simi_filename2)
+    img2 = np.array(img2.convert('RGB'), dtype=np.uint8)
+
+    result = face_recognition.get_similarity(img1, img2)
+    #print(result)
+
+    return templates.TemplateResponse("predict_similarity.html", {"request": request,
+                                                                  "result": np.round(result, 3),
+                                                                  "simi_filename1": '../static/' + file1.filename,
+                                                                  "simi_filename2": '../static/' + file2.filename})
+
+
+#################################### Face Recognition endpoints #################################################
+@app.get("/face_recognition/")
+async def face_recognition_root(request: Request):
+    return templates.TemplateResponse("face_recognition.html", {'request': request})
+
+
+@app.post("/predict_face_recognition/")
+async def predict_face_recognition(request: Request, file3: UploadFile = File(...)):
+    global face_rec_filename
+
+    if 'image' in file3.content_type:
+        contents = await file3.read()
+        face_rec_filename = 'app/static/' + file3.filename
+        with open(face_rec_filename, 'wb') as f:
+            f.write(contents)
+
+    img1 = Image.open(face_rec_filename)
+    img1 = np.array(img1.convert('RGB'), dtype=np.uint8)
+
+    result = face_recognition.get_face_class(img1)
+    print(result)
+
+    return templates.TemplateResponse("predict_face_recognition.html", {"request": request,
+                                                                        "result": result,
+                                                                        "face_rec_filename": '../static/' + file3.filename})
+
+
+#################################### Expression Recognition endpoints #################################################
+@app.get("/expr_recognition/")
+async def expr_recognition_root(request: Request):
+    return templates.TemplateResponse("expr_recognition.html", {'request': request})
+
+
+@app.post("/predict_expr_recognition/")
+async def predict_expr_recognition(request: Request, file4: UploadFile = File(...)):
+    global expr_rec_filename
+
+    if 'image' in file4.content_type:
+        contents = await file4.read()
+        expr_rec_filename = 'app/static/' + file4.filename
+        with open(expr_rec_filename, 'wb') as f:
+            f.write(contents)
+
+    img1 = Image.open(expr_rec_filename)
+    img1 = np.array(img1.convert('RGB'), dtype=np.uint8)
+
+    result = exp_recognition.get_expression(img1)
+    print(result)
+
+    return templates.TemplateResponse("predict_expr_recognition.html", {"request": request,
+                                                                        "result": result,
+                                                                        "expr_rec_filename": '../static/' + file4.filename})
+
+
+
+# Set all CORS enabled origins
+if settings.BACKEND_CORS_ORIGINS:
+    app.add_middleware(
+        CORSMiddleware,
+        allow_origins=[str(origin) for origin in settings.BACKEND_CORS_ORIGINS],
+        allow_credentials=True,
+        allow_methods=["*"],
+        allow_headers=["*"],
+    )
+
+
+# Start app
+if __name__ == "__main__":
+    import uvicorn
+    uvicorn.run(app, host="0.0.0.0", port=8001)
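Each POST endpoint above reads its multipart file field(s) (file1/file2 for similarity, file3 for face recognition, file4 for expression recognition), saves the upload under app/static/, runs the corresponding model, and renders an HTML result page. A quick way to exercise one of them from a client, sketched with the requests library (host, port, and image names are illustrative):

    import requests

    BASE = "http://localhost:8001"  # the port used by uvicorn.run() above

    # Face similarity expects two images under the exact field names file1 and file2
    with open("a.jpg", "rb") as f1, open("b.jpg", "rb") as f2:
        r = requests.post(f"{BASE}/predict_similarity/", files={"file1": f1, "file2": f2})

    print(r.status_code)  # the response body is the rendered predict_similarity.html page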
app/static/Person1_1697805233.jpg
ADDED
app/templates/expr_recognition.html
ADDED
@@ -0,0 +1,32 @@
+<!DOCTYPE html>
+<html lang="en">
+<head>
+    <title>Expression Recognition</title>
+</head>
+<body>
+<div>
+    <h1 style="background-color:LightGray;">
+        <center>Expression Recognition</center>
+    </h1>
+</div>
+<div>
+    <fieldset>
+        <ul>
+            <li>
+                <br>
+                <form action="/predict_expr_recognition/" enctype="multipart/form-data" method="post">
+                    <span style="font-weight:bold;font-family:sans-serif">Upload Image:</span> <br><br>
+                    <input name="file4" type="file" />
+                    <br><br><br>
+                    <button type="submit">Recognize Expression</button>
+                </form>
+            </li>
+            <br><br>
+            <form action="/" method="get">
+                <button type="submit">Home</button>
+            </form>
+        </ul>
+    </fieldset>
+</div>
+</body>
+</html>
app/templates/face_recognition.html
ADDED
@@ -0,0 +1,32 @@
+<!DOCTYPE html>
+<html lang="en">
+<head>
+    <title>Face Recognition</title>
+</head>
+<body>
+<div>
+    <h1 style="background-color:LightGray;">
+        <center>Face Recognition</center>
+    </h1>
+</div>
+<div>
+    <fieldset>
+        <ul>
+            <li>
+                <br>
+                <form action="/predict_face_recognition/" enctype="multipart/form-data" method="post">
+                    <span style="font-weight:bold;font-family:sans-serif">Upload Image:</span> <br><br>
+                    <input name="file3" type="file" />
+                    <br><br><br>
+                    <button type="submit">Recognize Face</button>
+                </form>
+            </li>
+            <br><br>
+            <form action="/" method="get">
+                <button type="submit">Home</button>
+            </form>
+        </ul>
+    </fieldset>
+</div>
+</body>
+</html>
app/templates/index.html
ADDED
@@ -0,0 +1,29 @@
+<!DOCTYPE html>
+<html lang="en">
+<head>
+    <title>Index</title>
+</head>
+<body>
+<div>
+    <h1 style="background-color:LightGray;">
+        <center>Recognition Application</center>
+    </h1>
+</div>
+<div>
+    <fieldset>
+        <ul>
+            <li><span style="font-weight:bold;font-family:sans-serif">Select a task:</span>
+                <br><br><br>
+                <form action="{{ url_for('similarity_root') }}"><button>Face Similarity</button></form>
+                <br><br>
+                <form action="{{ url_for('face_recognition_root') }}"><button>Face Recognition</button></form>
+                <br><br>
+                <form action="{{ url_for('expr_recognition_root') }}"><button>Expression Recognition</button></form>
+                <br>
+            </li>
+            <br>
+        </ul>
+    </fieldset>
+</div>
+</body>
+</html>
app/templates/predict_expr_recognition.html
ADDED
@@ -0,0 +1,37 @@
+<!DOCTYPE html>
+<html lang="en">
+<head>
+    <title>Predict</title>
+</head>
+<body>
+<div>
+    <h1 style="background-color:LightGray;">
+        <center>Expression Recognition</center>
+    </h1>
+</div>
+<div>
+    <fieldset>
+        <h2>
+            <center>
+                <span style="font-weight:bold;font-family:sans-serif">Prediction: </span>
+                <span style="font-weight:bold;color:blue"> {{result}}</span>
+            </center>
+        </h2>
+        <h3><center><span style="font-weight:bold;font-family:sans-serif">Input image:</span></center></h3>
+        <p>
+            <center>
+                <img src="{{expr_rec_filename}}" alt="{{expr_rec_filename}}" width='150' height='150'>
+            </center>
+        </p>
+        <br>
+        <form action="/expr_recognition/" method="get">
+            <center><button type="submit">Check Another Input</button></center>
+        </form>
+        <br>
+        <form action="/" method="get">
+            <center><button type="submit">Home</button></center>
+        </form>
+    </fieldset>
+</div>
+</body>
+</html>
app/templates/predict_face_recognition.html
ADDED
@@ -0,0 +1,37 @@
+<!DOCTYPE html>
+<html lang="en">
+<head>
+    <title>Predict</title>
+</head>
+<body>
+<div>
+    <h1 style="background-color:LightGray;">
+        <center>Face Recognition</center>
+    </h1>
+</div>
+<div>
+    <fieldset>
+        <h2>
+            <center>
+                <span style="font-weight:bold;font-family:sans-serif">Prediction: </span>
+                <span style="font-weight:bold;color:blue"> {{result}}</span>
+            </center>
+        </h2>
+        <h3><center><span style="font-weight:bold;font-family:sans-serif">Input image:</span></center></h3>
+        <p>
+            <center>
+                <img src="{{face_rec_filename}}" alt="{{face_rec_filename}}" width='150' height='150'>
+            </center>
+        </p>
+        <br>
+        <form action="/face_recognition/" method="get">
+            <center><button type="submit">Check Another Input</button></center>
+        </form>
+        <br>
+        <form action="/" method="get">
+            <center><button type="submit">Home</button></center>
+        </form>
+    </fieldset>
+</div>
+</body>
+</html>
app/templates/predict_similarity.html
ADDED
@@ -0,0 +1,38 @@
+<!DOCTYPE html>
+<html lang="en">
+<head>
+    <title>Predict</title>
+</head>
+<body>
+<div>
+    <h1 style="background-color:LightGray;">
+        <center>Face Similarity</center>
+    </h1>
+</div>
+<div>
+    <fieldset>
+        <h2>
+            <center>
+                <span style="font-weight:bold;font-family:sans-serif">Dissimilarity:</span>
+                <span style="font-weight:bold;color:blue"> {{result}}</span>
+            </center>
+        </h2>
+        <h3><center><span style="font-weight:bold;font-family:sans-serif">Input images:</span></center></h3>
+        <p>
+            <center>
+                <img src="{{simi_filename1}}" alt="{{simi_filename1}}" width='150' height='150'>
+                <img src="{{simi_filename2}}" alt="{{simi_filename2}}" width='150' height='150'>
+            </center>
+        </p>
+        <br>
+        <form action="/similarity/" method="get">
+            <center><button type="submit">Check Another Input</button></center>
+        </form>
+        <br>
+        <form action="/" method="get">
+            <center><button type="submit">Home</button></center>
+        </form>
+    </fieldset>
+</div>
+</body>
+</html>
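The value this page labels "Dissimilarity" is whatever face_recognition.get_similarity() returns, rounded to three decimals in app/main.py; for a siamese model that is conventionally an embedding distance, so lower means more alike. Turning it into a yes/no match would need a calibrated cutoff, roughly as in this sketch (the threshold value is purely illustrative):

    THRESHOLD = 0.8  # illustrative; would have to be tuned on validation pairs

    def same_person(distance: float) -> bool:
        # Distances below the cutoff are treated as the same identity.
        return distance < THRESHOLD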
app/templates/similarity.html
ADDED
@@ -0,0 +1,35 @@
+<!DOCTYPE html>
+<html lang="en">
+<head>
+    <title>Face Similarity</title>
+</head>
+<body>
+<div>
+    <h1 style="background-color:LightGray;">
+        <center>Face Similarity</center>
+    </h1>
+</div>
+<div>
+    <fieldset>
+        <ul>
+            <li>
+                <br>
+                <form action="/predict_similarity/" enctype="multipart/form-data" method="post">
+                    <span style="font-weight:bold;font-family:sans-serif">Upload First Image:</span> <br><br>
+                    <input name="file1" type="file" />
+                    <br><br><br>
+                    <span style="font-weight:bold;font-family:sans-serif">Upload Second Image:</span> <br><br>
+                    <input name="file2" type="file" />
+                    <br><br><br><br>
+                    <button type="submit">Check Similarity</button>
+                </form>
+            </li>
+            <br><br>
+            <form action="/" method="get">
+                <button type="submit">Home</button>
+            </form>
+        </ul>
+    </fieldset>
+</div>
+</body>
+</html>
requirements.txt
ADDED
@@ -0,0 +1,19 @@
+uvicorn==0.17.6
+fastapi==0.99.1
+pydantic==1.10.10
+requests==2.23.0
+jinja2==3.1.2
+python-multipart==0.0.6
+
+scikit-learn==1.2.2
+joblib==1.3.2
+Pillow==9.4.0
+torch==2.1.0
+torchvision==0.16.0
+matplotlib==3.7.1
+
+#opencv-python==4.5.5.64
+numpy==1.26.4
+pandas==2.2.2
+# Prefer headless on servers (no GUI deps); 4.9 works well with numpy 1.26.x
+opencv-python-headless==4.9.0.80
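A quick sanity check that the pinned stack above resolves together (the headless OpenCV build avoids pulling GUI libraries into the container):

    import cv2, numpy, torch
    # Expect roughly: 4.9.0, 1.26.4, 2.1.0 given the pins above
    print(cv2.__version__, numpy.__version__, torch.__version__)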