majorSeaweed commited on
Commit
4794a48
·
verified ·
1 Parent(s): d164d27

Upload 6 files

Browse files
Files changed (6) hide show
  1. LICENSE.md +21 -0
  2. README.md +107 -13
  3. app.py +54 -0
  4. detection.py +132 -0
  5. packages.txt +2 -0
  6. requirements.txt +10 -0
LICENSE.md ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ MIT License
2
+
3
+ Copyright (c) 2024 Aditya Singh
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
README.md CHANGED
@@ -1,13 +1,107 @@
1
- ---
2
- title: Face Recognition App
3
- emoji: 🐠
4
- colorFrom: gray
5
- colorTo: red
6
- sdk: streamlit
7
- sdk_version: 1.41.1
8
- app_file: app.py
9
- pinned: false
10
- license: mit
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ # Face Recognition Using MTCNN, VGG-Face2 and Pinecone DB
3
+
4
+ An implementation of Siamese neural networks for one-shot face recognition tasks, utilising MTCNN, FaceNet and Pinecone DB to build an interactive and easy-to-use application that stores and detects faces accurately from both images and camera input.
5
+
6
+
7
+ ## Deployment
8
+
9
+ To use this project application run
10
+
11
+ ```bash
12
+ https://face-recognition-tf-1.onrender.com
13
+ ```
14
+ Note : Loading may take some time due to limited server processing capability.
15
+
16
+
17
+
18
+ ## Outputs
19
+ <img width="956" alt="356581473-3f98e226-d9a5-4036-9c35-06481f292c8a" src="https://github.com/user-attachments/assets/60c7ea7f-1604-421b-8265-5b171da0edaa">
20
+ <img width="956" alt="image" src="https://github.com/user-attachments/assets/1bc6a9c0-2698-483c-b478-80bf8d29f442">
21
+
22
+
23
+ ## Key Features
24
+
25
+ __1)__ __Face detection using MTCNN__ :
26
+ Used MTCNN for accurate face detection on images.
27
+
28
+ __2)__ __Embedding extraction using FaceNet__ :
29
+ Created embedding vectors of shape (512, 1) using a TensorFlow model pretrained on face datasets.
30
+
31
+ __3)__ __Pinecone Database for efficient storage and retrieval__:
32
+ Created a Pinecone DB index for storing embeddings by creating relevant metadata and upserting it to the index with cosine similarity as the search parameter.
33
+
34
+ __4)__ __Siamese Network Architecture__:
35
+ Implemented a Siamese Network-like architecture to achieve one-shot learning for face recognition using a combination of face detection and face recognition.
36
+
37
+ __5)__ __Streamlit application__:
38
+ Created and deployed an interactive streamlit application to interact with the project. Deployment was done on render.
39
+
40
+ __6)__ __Multiface Detection capabilities__:
41
+ The model is able to detect and recognise multiple faces in an image; however, to create a new database entry for a person, an individual image is required to ensure the integrity of the data in the database.
42
+
43
+ __7)__ __Modularity of code__:
44
+ The project is created so that the individual blocks can be swapped to suit the detection needs, e.g. MTCNN can be replaced with YOLO detection for faster results in applications such as ANPR, after replacing FaceNet with a suitably trained model to generate embeddings.
45
+
46
+
47
+ ## Pinecone API Reference
48
+
49
+ ##### Refer to Pinecone Documentation at : [pc docs](https://docs.pinecone.io/reference/api/introduction)
50
+
51
+ #### Pinecone env variables used :
52
+
53
+ | Parameter | Type | Description |
54
+ | :-------- | :------- | :------------------------- |
55
+ | `Pinecone key` | `string` | <Your API key>|
56
+ | `Pinecone index` | `string` | Name of Index |
57
+
58
+
59
+ #### Installing pinecone client
60
+ ```bash
61
+ pip install --upgrade pinecone
62
+ ```
63
+ #### Connecting to index and upserting
64
+
65
+ ```python
66
+ from pinecone import Pinecone
67
+ pc = Pinecone(os.environ["PINECONE_API_KEY"])
68
+ index = pc.Index(os.environ["PINECONE_IDX"])
69
+ ```
70
+ ```python
71
+ vectors = 'Create Vectors'
72
+ index.upsert(vectors)
73
+ ```
74
+
75
+ #### Query top k vectors
76
+ ```python
77
+ out = index.query(
78
+ vector = vectors.tolist(),
79
+ top_k = k,
80
+ include_metadata = True
81
+ )
82
+ ```
83
+
84
+
85
+ ## Dependencies
86
+ - OpenCV
87
+ - Tensorflow
88
+ - MTCNN
89
+ - FaceNet
90
+ - Pinecone
91
+ - streamlit
92
+ - os
93
+ - dotenv
94
+ - numpy
95
+ #### Installation :
96
+ After pulling this repo, run:
97
+ ```bash
98
+ pip install -r requirements.txt
99
+ ```
100
+ #### env file setup :
101
+ setup an env file to store api keys and index information as the template :
102
+ ```bash
103
+ pinecone_key = "your_api_key"
104
+ pinecone_index = "your_index_name"
105
+ ```
106
+
107
+
app.py ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import streamlit as st
import cv2
import base64
from detection import *

# CSS injected once to paint a full-page background image behind the app.
background_image = """
<style>
[data-testid="stAppViewContainer"] > .main {
background-image: url("https://images.pexels.com/photos/1054218/pexels-photo-1054218.jpeg?auto=compress&cs=tinysrgb&w=1260&h=750&dpr=2");
background-size: cover;
background-position: center;
background-repeat: no-repeat;
}
</style>
"""

st.markdown(background_image, unsafe_allow_html=True)

# Fixed tag nesting: original emitted "</h1></center>".
st.html("<h1><center>Face Detection Web App</center></h1>")

# vis is True in recognition mode, False in enrollment ("add to DB") mode.
vis = True
db_mode = st.checkbox("Add to data base")
if db_mode:
    vis = False
if not vis:
    st.html("<h3><center>Storing Image to Database</center></h3>")

# Image source: camera capture or file upload.
input_method = st.checkbox("Camera Mode")
cam_inp, file_inp = None, None
if input_method:
    cam_inp = st.camera_input(label="Say Cheese!")
else:
    file_inp = st.file_uploader("Upload a Photo!")

# Was `cam_inp != None`; identity comparison is the correct idiom for None.
file = cam_inp if cam_inp is not None else file_inp

if file and vis:
    # Recognition mode: detect faces and look them up in the Pinecone index.
    min_conf = st.number_input("Enter the desired confidence value:", min_value=0.0, max_value=1.0, step=0.10, disabled=not vis, value=0.65)
    if min_conf:
        img, labels = detect_and_fetch(file, min_confidence=min_conf)
        st.image(img, caption='Detections', use_column_width=True)
        st.header('Detected:')
        # De-duplicate labels. Original used `if i is not "Unknown"` — an
        # identity comparison against a string literal (a bug; CPython only
        # made it "work" via string interning). Use equality.
        for i in set(labels):
            if i != "Unknown":
                st.text(i)
else:
    # Enrollment mode: store the face embedding under the entered name.
    name = st.text_input("Write the Name of Person", disabled=vis)
    if name:
        if file is None:
            # Original crashed inside detection when no image was supplied.
            st.text("Please provide an image first.")
        else:
            process = write_and_upsert(file, name)
            if process:
                st.text("Uploaded Successfully!")  # fixed "Sucessfully" typo
54
+
detection.py ADDED
@@ -0,0 +1,132 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import cv2
2
+ from pinecone import Pinecone
3
+ import os
4
+ from dotenv import load_dotenv , dotenv_values
5
+ import numpy as np
6
+ from mtcnn import MTCNN
7
+ import matplotlib.pyplot as plt
8
+ import scipy # for MTCNN Dependencies
9
+ from keras_facenet import FaceNet
10
+
11
# Module-level singletons: models and the Pinecone connection are created once
# at import time so Streamlit reruns reuse them instead of reloading weights.
detector = MTCNN()    # face detector (bounding boxes + landmarks)
embedder = FaceNet()  # face embedding model; embeddings() used below returns 512-d vectors
load_dotenv()         # loads pinecone_key / pinecone_index from the .env file
pc = Pinecone(os.getenv("pinecone_key"))
# NOTE(review): os.getenv returns None if the .env keys are missing, which
# would fail here with an unhelpful error — verify env setup is documented.
index = pc.Index(os.getenv("pinecone_index"))
+
17
def get_embedding(file):
    """Detect exactly one face in `file` and return its FaceNet embedding.

    Args:
        file: image path or file-like object readable by `plt.imread`.

    Returns:
        The embedding vector of the single detected face, or None when zero
        faces, multiple faces, or a detection error occur.
    """
    img = plt.imread(file)
    # NOTE(review): plt.imread yields RGB, so this conversion actually swaps
    # to BGR before MTCNN; kept as-is for consistency with embeddings already
    # stored in the index — confirm the intended channel order.
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    try:
        faces = detector.detect_faces(img)
        if len(faces) > 1:
            # Enrollment requires exactly one face so the DB label is unambiguous.
            # (Original did `return print(...)`, which returns None as a side effect.)
            print("Multiple Faces")
            return None
        face = faces[0]  # IndexError when no face found — handled below
        x1, y1, width, height = face['box']
        x1, y1 = abs(x1), abs(y1)  # MTCNN can emit slightly negative coords
        crop = img[y1:y1 + height, x1:x1 + width]
        crop = cv2.resize(crop, (224, 224))
        crop = np.expand_dims(crop, axis=0)  # add batch dimension
        print(f"Detected Face in {file}")
        return embedder.embeddings(crop)[0]
    except Exception:  # was a bare `except:`; keep best-effort semantics
        print(f"Failed to detect faces in {file}")
        return None
36
+
37
def make_embeddings(file, name):
    """Build a {name: embedding} mapping for a single enrollment image.

    Returns an empty dict when no usable face embedding could be extracted
    from `file`.
    """
    result = {}
    vec = get_embedding(file)
    if vec is not None:
        result[name] = vec
    return result
43
+
44
def make_meta(file, i, name):
    """Return the Pinecone metadata dict for one face record.

    `file` is accepted for interface symmetry with the other helpers but is
    not stored in the metadata. `i` becomes the human-readable image id.
    """
    return {'image_id': f"Image_{i}", 'label': name}
48
+
49
def upsert_embeddings(face_embeddings, metadata):
    """Upsert face embeddings with their metadata into the Pinecone index.

    Vector ids continue from the index's current vector count so new records
    never collide with existing ones.

    Args:
        face_embeddings: sequence of embedding vectors (numpy arrays).
        metadata: sequence of metadata dicts, one per embedding.

    Returns:
        True on success.

    Raises:
        AssertionError: on None inputs or length/dimension mismatches.
    """
    # Validate None first — the original checked lengths first, so len(None)
    # would raise TypeError before the clear assertion message fired.
    assert face_embeddings is not None and metadata is not None, 'Input vectors cannot be None'
    assert len(face_embeddings) == len(metadata), (
        f"Dimension mismatch, got embeddings len {len(face_embeddings)} "
        f"and metadata length {len(metadata)}")
    # One network call instead of two (hoisted describe_index_stats).
    stats = index.describe_index_stats()
    assert face_embeddings[0].shape[0] == stats['dimension'], 'Dimension mismatch'
    start = stats['total_vector_count'] + 1  # ids continue from last vector in database
    upsert_data = [(str(start + i), emb.tolist(), meta)
                   for i, (emb, meta) in enumerate(zip(face_embeddings, metadata))]
    index.upsert(vectors=upsert_data)
    return True
57
+
58
def fetch_embeddings(embeddings):
    """Query the Pinecone index for the closest stored face.

    Args:
        embeddings: a single embedding vector (numpy array).

    Returns:
        (label, score) of the single best match, where score is the cosine
        similarity reported by Pinecone.

    Raises:
        AssertionError: on None input or dimension mismatch.
        IndexError: if the index returns no matches (empty index).
    """
    # None check must precede the .shape access (original order crashed with
    # AttributeError before the assertion message could fire).
    assert embeddings is not None, 'Input vectors cannot be None'
    expected = index.describe_index_stats()['dimension']
    # Original message had expected/actual swapped.
    assert embeddings.shape[0] == expected, (
        f"Expected {expected}, got {embeddings.shape[0]} instead")

    out = index.query(
        vector=embeddings.tolist(),
        top_k=1,
        include_metadata=True
    )
    best = out['matches'][0]
    return best['metadata']['label'], best['score']
69
+
70
def multiple_faces(file):
    """Detect every face in `file` and embed each one.

    Args:
        file: image path or file-like object readable by `plt.imread`.
        Note: unlike get_embedding, no BGR/RGB conversion is applied here
        (the original had it commented out) — image is used as read.

    Returns:
        (embeddings, boxes) where boxes are [x1, y1, width, height] lists.
        Returns (None, None) on failure so callers that tuple-unpack the
        result do not crash on a bare None (the original's failure path).
    """
    imga = plt.imread(file)
    try:
        faces = detector.detect_faces(imga)
        embs, boxes = [], []
        for face in faces:
            x1, y1, width, height = face['box']
            x1, y1 = abs(x1), abs(y1)  # guard against negative MTCNN coords
            crop = imga[y1:y1 + height, x1:x1 + width]
            crop = cv2.resize(crop, (224, 224))
            crop = np.expand_dims(crop, axis=0)  # batch dimension for FaceNet
            embs.append(embedder.embeddings(crop)[0])
            boxes.append([x1, y1, width, height])
        return embs, boxes
    except Exception as e:
        print(f"Failed to detect faces in {file} due to {e}")
        return None, None
89
+
90
def draw_boxes(file, boxes, labels, probs):
    """Render bounding boxes and labels onto the image read from `file`.

    Recognised faces are drawn in green with their similarity score appended;
    'Unknown' faces get a red label with no score. Returns the annotated
    image as a numpy array (the source file is never modified).
    """
    canvas = np.copy(plt.imread(file))
    for (x1, y1, width, height), label, prob in zip(boxes, labels, probs):
        canvas = cv2.rectangle(canvas, (x1, y1), (x1 + width, y1 + height),
                               (0, 255, 0), 1)
        if label == 'Unknown':
            text, colour = f"{label}", (0, 0, 255)
        else:
            text, colour = f"{label} {np.round(prob , decimals = 2)}", (0, 255, 0)
        canvas = cv2.putText(canvas, text, (x1, y1 - 10),
                             cv2.FONT_HERSHEY_SIMPLEX, 0.3, colour, 1)
    return canvas
103
+
104
def write_and_upsert(dir, Name, upsert = True):
    """Enroll one person: embed the single face in image `dir` and upsert it.

    Args:
        dir: image path or file-like object for one enrollment photo.
        Name: label stored alongside the embedding.
        upsert: when False, only compute embeddings without writing to the DB.

    Returns:
        True on success; False when no single-face embedding could be
        extracted (the original crashed with a length-mismatch assertion
        inside upsert_embeddings in that case).
    """
    embeddings = make_embeddings(dir, Name)
    if not embeddings:
        # get_embedding found zero or multiple faces; nothing to store.
        print(f"No usable face found in {dir}; nothing upserted")
        return False
    metadata = [make_meta(dir, 1, Name)]
    if upsert:
        upsert_embeddings(list(embeddings.values()), metadata)
    return True
110
+
111
def detect_and_fetch(dir, directory = False, min_confidence = 0):
    """Detect all faces in image `dir`, label each via the index, draw boxes.

    Args:
        dir: image path or file-like object readable by `plt.imread`.
        directory: unused; kept for backward compatibility with callers.
        min_confidence: minimum similarity score to accept a match; faces
            scoring at or below it are labelled 'Unknown'.

    Returns:
        (annotated image, list of labels). On failure returns (None, [])
        instead of raising NameError on unbound locals as the original did.
    """
    img, labels = None, []
    try:
        # Original wrapped `dir` in a one-element list and looped — removed.
        embs, boxes = multiple_faces(dir)
        probs = []
        for emb in embs:  # TypeError if detection failed (embs is None) — caught below
            name, confidence = fetch_embeddings(emb)
            if confidence > min_confidence:
                labels.append(name)
                probs.append(confidence)
            else:
                labels.append('Unknown')
                probs.append('unk')  # placeholder; draw_boxes skips the score for Unknown
        img = draw_boxes(dir, boxes, labels, probs)
    except Exception as e:
        print(f"Failed to detect faces in {dir} due to {e}")
    return img, labels
131
+
132
+
packages.txt ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ libgl1-mesa-glx
2
+ libglib2.0-0
requirements.txt ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ tensorflow
2
+ streamlit
3
+ matplotlib
4
+ opencv-python
5
+ pinecone
6
+ scipy
7
+ mtcnn
8
+ keras-facenet
9
+ python-dotenv
10
+ scikit-learn