JaipalReddy committed on
Commit
f519063
·
verified ·
1 Parent(s): 0870ed2

Upload 6 files

Browse files
.gitattributes CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ app/Hackathon_setup/expression_model.t7 filter=lfs diff=lfs merge=lfs -text
37
+ app/Hackathon_setup/siamese_model.t7 filter=lfs diff=lfs merge=lfs -text
app/Hackathon_setup/exp_recognition.py ADDED
@@ -0,0 +1,69 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import cv2
3
+ from matplotlib import pyplot as plt
4
+ import torch
5
+ # In the below line, remove '.' while working on your local system. However, make sure that '.' is present before exp_recognition_model while uploading to the server; do not remove it.
6
+ from .exp_recognition_model import *
7
+ from PIL import Image
8
+ import base64
9
+ import io
10
+ import os
11
+ ## Add more imports if required
12
+
13
+ #############################################################################################################################
14
+ # Caution: Don't change any of the filenames, function names and definitions #
15
+ # Always use the current_path + file_name for refering any files, without it we cannot access files on the server #
16
+ #############################################################################################################################
17
+
18
+ # Current_path stores absolute path of the file from where it runs.
19
+ current_path = os.path.dirname(os.path.abspath(__file__))
20
+
21
+
22
#1) The below function is used to detect faces in the given image.
#2) It returns only one image which has maximum area out of all the detected faces in the photo.
#3) If no face is detected, then it returns zero(0).

def detected_face(image):
    """Detect faces in a BGR image and return the largest one.

    Runs the OpenCV Haar-cascade frontal-face detector on a grayscale
    copy of *image* and returns the crop with the maximum bounding-box
    area as a PIL Image.

    Parameters
    ----------
    image : numpy.ndarray
        BGR image as produced by OpenCV.

    Returns
    -------
    PIL.Image.Image or int
        Largest detected face crop (grayscale), or the integer 0 when no
        face is detected — callers compare the result against 0.
    """
    eye_haar = current_path + '/haarcascade_eye.xml'
    face_haar = current_path + '/haarcascade_frontalface_default.xml'
    face_cascade = cv2.CascadeClassifier(face_haar)
    # Eye cascade is loaded to mirror the original template; it is not
    # used by the selection logic below.
    eye_cascade = cv2.CascadeClassifier(eye_haar)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    if len(faces) == 0:
        # Contract: 0 signals "no face found" to the callers.
        return 0
    # Select the detection with the largest area once, after collecting
    # all candidates (the original template recomputed argmax on every
    # loop iteration).
    face_areas = [w * h for (x, y, w, h) in faces]
    images = [gray[y:y + h, x:x + w] for (x, y, w, h) in faces]
    required_image = images[int(np.argmax(face_areas))]
    return Image.fromarray(required_image)
43
+
44
+
45
#1) Define an object to your network here in the function and load the weight from the trained network, set it in evaluation mode.
#2) Perform necessary transformations to the input (detected face using the above function); this should return the Expression in string form, e.g. "Anger".
#3) For loading your model use the current_path + 'your model file name'; a detailed example is given in comments to the function.
##Caution: Don't change the definition or function name; for loading the model use the current_path (an example is given in comments to the function).
def get_expression(img):
    """Classify the facial expression visible in a BGR image.

    Detects the largest face via detected_face(); when no face is found
    (detected_face returns 0), the whole grayscale frame is used instead.

    NOTE(review): model loading and inference are still TODO in this
    template, so the function currently returns a placeholder string.
    """
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    ##########################################################################################
    ##Example for loading a model using weight state dictionary:                            ##
    ## face_det_net = facExpRec() #Example Network                                          ##
    ## model = torch.load(current_path + '/exp_recognition_net.t7', map_location=device)    ##
    ## face_det_net.load_state_dict(model['net_dict'])                                      ##
    ##                                                                                      ##
    ##current_path + '/<network_definition>' is path of the saved model if present in       ##
    ##the same path as this file, we recommend to put in the same directory                 ##
    ##########################################################################################
    ##########################################################################################

    face = detected_face(img)
    if face == 0:
        # No face detected: fall back to the full grayscale frame.
        face = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY))

    # YOUR CODE HERE, return expression using your model

    return "YET TO BE CODED"
app/Hackathon_setup/exp_recognition_model.py ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import torchvision
3
+ import torch.nn as nn
4
+ from torchvision import transforms
5
+ ## Add more imports if required
6
+
7
+ ####################################################################################################################
8
+ # Define your model and transform and all necessary helper functions here #
9
+ # They will be imported to the exp_recognition.py file #
10
+ ####################################################################################################################
11
+
12
# Mapping from classifier output index to expression label.
classes = dict(enumerate(('ANGER', 'DISGUST', 'FEAR', 'HAPPINESS', 'NEUTRAL', 'SADNESS', 'SURPRISE')))
14
+
15
# Example Network
class facExpRec(torch.nn.Module):
    """Facial-expression recognition network (template).

    Layers and the forward pass are left for the participant to
    implement; exp_recognition.py imports this class to run inference.
    """

    def __init__(self):
        # Required: initialise nn.Module internals before any layers are
        # registered. The original template omitted this call, which
        # makes assigning any sub-module raise AttributeError; the
        # Siamese template in this repo already does it correctly.
        super(facExpRec, self).__init__()
        #YOUR CODE HERE

    def forward(self, x):
        # Stub: should map a face tensor to expression logits.
        pass # remove 'pass' once you have written your code
        #YOUR CODE HERE
24
+
25
# Sample Helper function
def rgb2gray(image):
    """Convert a PIL image to single-channel ("L") grayscale."""
    grayscale_mode = 'L'
    return image.convert(grayscale_mode)
28
+
29
# Sample Transformation function
#YOUR CODE HERE for changing the Transformation values.
# Pipeline applied to each face before inference:
# grayscale (via rgb2gray) -> 48x48 resize -> float tensor in [0, 1].
_expression_steps = [rgb2gray, transforms.Resize((48, 48)), transforms.ToTensor()]
trnscm = transforms.Compose(_expression_steps)
app/Hackathon_setup/expression_model.t7 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:12c2daf67685fd96a1b39cd1518a1a18f307b269fc2401d3b59e07e17f5c8f41
3
+ size 45333465
app/Hackathon_setup/face_recognition.py ADDED
@@ -0,0 +1,92 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import cv2
3
+ from matplotlib import pyplot as plt
4
+ import torch
5
+ # In the below line,remove '.' while working on your local system. However Make sure that '.' is present before face_recognition_model while uploading to the server, Do not remove it.
6
+ from .face_recognition_model import *
7
+ from PIL import Image
8
+ import base64
9
+ import io
10
+ import os
11
+ import joblib
12
+ import pickle
13
+ # Add more imports if required
14
+
15
+
16
+
17
+ ###########################################################################################################################################
18
+ # Caution: Don't change any of the filenames, function names and definitions #
19
+ # Always use the current_path + file_name for refering any files, without it we cannot access files on the server #
20
+ ###########################################################################################################################################
21
+
22
+ # Current_path stores absolute path of the file from where it runs.
23
+ current_path = os.path.dirname(os.path.abspath(__file__))
24
+
25
#1) The below function is used to detect faces in the given image.
#2) It returns only one image which has maximum area out of all the detected faces in the photo.
#3) If no face is detected, then it returns zero(0).

def detected_face(image):
    """Detect faces in a BGR image and return the largest one.

    Runs the OpenCV Haar-cascade frontal-face detector on a grayscale
    copy of *image* and returns the crop with the maximum bounding-box
    area as a PIL Image.

    Parameters
    ----------
    image : numpy.ndarray
        BGR image as produced by OpenCV.

    Returns
    -------
    PIL.Image.Image or int
        Largest detected face crop (grayscale), or the integer 0 when no
        face is detected — callers compare the result against 0.
    """
    eye_haar = current_path + '/haarcascade_eye.xml'
    face_haar = current_path + '/haarcascade_frontalface_default.xml'
    face_cascade = cv2.CascadeClassifier(face_haar)
    # Eye cascade is loaded to mirror the original template; it is not
    # used by the selection logic below.
    eye_cascade = cv2.CascadeClassifier(eye_haar)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    if len(faces) == 0:
        # Contract: 0 signals "no face found" to the callers.
        return 0
    # Select the detection with the largest area once, after collecting
    # all candidates (the original template recomputed argmax on every
    # loop iteration).
    face_areas = [w * h for (x, y, w, h) in faces]
    images = [gray[y:y + h, x:x + w] for (x, y, w, h) in faces]
    required_image = images[int(np.argmax(face_areas))]
    return Image.fromarray(required_image)
46
+
47
+
48
#1) Define an object to your siamese network here in the function and load the weight from the trained network, set it in evaluation mode.
#2) Get the features for both the faces from the network and return the similarity measure; Euclidean, cosine, etc. can be used — choose the relevant measure.
#3) For loading your model use the current_path + 'your model file name'; a detailed example is given in comments to the function.
#Caution: Don't change the definition or function name; for loading the model use the current_path (an example is given in comments to the function).
def get_similarity(img1, img2):
    """Return a similarity score between the faces in two BGR images.

    Each image is reduced to its largest detected face; when detection
    fails for either image, BOTH fall back to the full grayscale frames
    so the two inputs stay comparable. The faces are then transformed
    with `trnscm` (from face_recognition_model) and batched.

    NOTE(review): model loading and the actual similarity computation
    are still TODO in this template, so the function currently returns 0.
    """
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    det_img1 = detected_face(img1)
    det_img2 = detected_face(img2)
    # detected_face returns 0 when no face is found.
    if(det_img1 == 0 or det_img2 == 0):
        det_img1 = Image.fromarray(cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY))
        det_img2 = Image.fromarray(cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY))
    # unsqueeze(0) adds the batch dimension expected by the network.
    face1 = trnscm(det_img1).unsqueeze(0)
    face2 = trnscm(det_img2).unsqueeze(0)
    ##########################################################################################
    ##Example for loading a model using weight state dictionary:                            ##
    ## feature_net = light_cnn() #Example Network                                           ##
    ## model = torch.load(current_path + '/siamese_model.t7', map_location=device)          ##
    ## feature_net.load_state_dict(model['net_dict'])                                       ##
    ##                                                                                      ##
    ##current_path + '/<network_definition>' is path of the saved model if present in       ##
    ##the same path as this file, we recommend to put in the same directory                 ##
    ##########################################################################################
    ##########################################################################################

    # YOUR CODE HERE, load the model

    # YOUR CODE HERE, return similarity measure using your model

    return 0
78
+
79
#1) Define an object to your network here in the function and load the weight from the trained network, set it in evaluation mode
#2) Perform necessary transformations to the input (detected face using the above function).
#3) Along with the siamese, you need the classifier as well, which is to be finetuned with the faces that you are training
##Caution: Don't change the definition or function name; for loading the model use the current_path (an example is given in comments to the function)
def get_face_class(img1):
    """Return the identity class for the face found in a BGR image.

    Uses detected_face() to isolate the largest face; when no face is
    found the whole grayscale frame is used instead. The siamese
    embedding plus a finetuned classifier should then produce the class.
    """
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    face = detected_face(img1)
    if face == 0:
        # No face detected: fall back to the full grayscale frame.
        face = Image.fromarray(cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY))
    ##YOUR CODE HERE, return face class here
    ##Hint: you need a classifier finetuned for your classes, it takes o/p of siamese as i/p to it
    ##Better Hint: Siamese experiment is covered in one of the labs
    return "YET TO BE CODED"
app/Hackathon_setup/face_recognition_model.py ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import math
2
+ import torch
3
+ import torchvision
4
+ import torch.nn as nn
5
+ import torch.nn.functional as F
6
+ from torchvision import transforms
7
+ # Add more imports if required
8
+
9
# Sample Transformation function
# YOUR CODE HERE for changing the Transformation values.
# Pipeline applied to each face before the siamese network:
# 100x100 resize -> float tensor in [0, 1].
_siamese_steps = [transforms.Resize((100, 100)), transforms.ToTensor()]
trnscm = transforms.Compose(_siamese_steps)
12
+
13
##Example Network
class Siamese(torch.nn.Module):
    """Siamese feature-extraction network (template).

    Layers and the forward pass are left for the participant to fill in;
    the embeddings it produces feed the similarity and classification
    code in face_recognition.py.
    """
    def __init__(self):
        super(Siamese, self).__init__()
        #YOUR CODE HERE

    def forward(self, x):
        # Stub: should map an input batch to its embedding.
        pass # remove 'pass' once you have written your code
        #YOUR CODE HERE
+
23
+ ##########################################################################################################
24
+ ## Sample classification network (Specify if you are using a pytorch classifier during the training) ##
25
+ ## classifier = nn.Sequential(nn.Linear(64, 64), nn.BatchNorm1d(64), nn.ReLU(), nn.Linear...) ##
26
+ ##########################################################################################################
27
+
28
+ # YOUR CODE HERE for pytorch classifier
29
+
30
# Identity class names, indexed by classifier output position.
# NOTE(review): despite the original "dictionary" comment, this is a list.
classes = ['person{}'.format(i) for i in range(1, 8)]
app/Hackathon_setup/siamese_model.t7 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d351b6ab270b5289cccf0218825514066ac1ef7f2811de7fb58acfaddf83368f
3
+ size 161027032