datasciencedojo committed on
Commit
43dd17a
·
1 Parent(s): a443b39

Create new file

Browse files
Files changed (1) hide show
  1. app.py +193 -0
app.py ADDED
@@ -0,0 +1,193 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import cv2
2
+ import time
3
+ import os
4
+ import mediapipe as mp
5
+ import gradio as gr
6
+ import pyttsx3
7
+ from threading import Thread
8
+ #from cvzone.HandTrackingModule import HandDetector
9
+ example_flag = False
10
+
11
class handDetector():
    """Detect hands in images with MediaPipe Hands.

    A lightweight stand-in for cvzone's HandDetector (see the commented
    import above): locates 21 hand landmarks per hand, and reports pixel
    coordinates, bounding boxes, centers and Left/Right hand type.
    """

    def __init__(self, mode=True, modelComplexity=1, maxHands=2, detectionCon=0.5, trackCon=0.5):
        """
        :param mode: forwarded as MediaPipe's static_image_mode; True treats
                     every frame as an independent image (no inter-frame
                     tracking), which suits single uploaded photos.
        :param modelComplexity: landmark model complexity (0 or 1).
        :param maxHands: maximum number of hands to detect.
        :param detectionCon: minimum detection confidence threshold.
        :param trackCon: minimum tracking confidence threshold.
        """
        self.mode = mode
        self.maxHands = maxHands
        self.detectionCon = detectionCon
        self.modelComplex = modelComplexity
        self.trackCon = trackCon
        self.mpHands = mp.solutions.hands
        # Use keyword arguments: the positional order of Hands() changed
        # between MediaPipe releases (model_complexity was inserted into
        # the middle of the signature), so positional calls are fragile.
        self.hands = self.mpHands.Hands(
            static_image_mode=self.mode,
            max_num_hands=self.maxHands,
            model_complexity=self.modelComplex,
            min_detection_confidence=self.detectionCon,
            min_tracking_confidence=self.trackCon,
        )
        self.mpDraw = mp.solutions.drawing_utils

    def findHands(self, img, draw=True, flipType=True):
        """
        Finds hands in a BGR image.

        :param img: BGR image to find the hands in (modified in place
                    when draw is True).
        :param draw: flag to draw landmarks and bounding boxes on the image.
        :param flipType: swap the Left/Right label — use when the input
                         image is mirrored (e.g. a selfie-view webcam).
        :return: (allHands, img) when draw is True, otherwise allHands.
                 Each hand is a dict with keys "lmList" ([x, y, z] pixel
                 landmarks), "bbox" (x, y, w, h), "center" and "type".
        """
        imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        # Results are kept on self so findPosition can reuse them.
        self.results = self.hands.process(imgRGB)
        allHands = []
        h, w, c = img.shape
        if self.results.multi_hand_landmarks:
            for handType, handLms in zip(self.results.multi_handedness,
                                         self.results.multi_hand_landmarks):
                myHand = {}
                ## lmList: landmarks converted to pixel coordinates
                mylmList = []
                xList = []
                yList = []
                for id, lm in enumerate(handLms.landmark):
                    px, py, pz = int(lm.x * w), int(lm.y * h), int(lm.z * w)
                    mylmList.append([px, py, pz])
                    xList.append(px)
                    yList.append(py)

                ## bbox: tight box around all landmarks
                xmin, xmax = min(xList), max(xList)
                ymin, ymax = min(yList), max(yList)
                boxW, boxH = xmax - xmin, ymax - ymin
                bbox = xmin, ymin, boxW, boxH
                cx, cy = bbox[0] + (bbox[2] // 2), \
                         bbox[1] + (bbox[3] // 2)

                myHand["lmList"] = mylmList
                myHand["bbox"] = bbox
                myHand["center"] = (cx, cy)

                if flipType:
                    # MediaPipe's label assumes an unmirrored image; swap
                    # it when the caller provides mirrored input.
                    if handType.classification[0].label == "Right":
                        myHand["type"] = "Left"
                    else:
                        myHand["type"] = "Right"
                else:
                    myHand["type"] = handType.classification[0].label
                allHands.append(myHand)

                ## draw landmarks plus a padded magenta bounding box
                if draw:
                    self.mpDraw.draw_landmarks(img, handLms,
                                               self.mpHands.HAND_CONNECTIONS)
                    cv2.rectangle(img, (bbox[0] - 20, bbox[1] - 20),
                                  (bbox[0] + bbox[2] + 20, bbox[1] + bbox[3] + 20),
                                  (255, 0, 255), 2)
                    #cv2.putText(img, myHand["type"], (bbox[0] - 30, bbox[1] - 30), cv2.FONT_HERSHEY_PLAIN,2, (255, 0, 255), 2)
        if draw:
            return allHands, img
        else:
            return allHands

    def findPosition(self, img, handNo=0, draw=True, flipType=False):
        """
        Return one detected hand's landmarks as [id, x, y] pixel entries.

        Must be called after findHands on the same detector (it reads
        self.results set there).

        :param img: image the landmarks were detected on.
        :param handNo: index of the hand to read.
        :param draw: draw a filled circle at every landmark.
        :param flipType: unused; kept for interface compatibility.
        :return: list of [id, x, y] entries (empty when no hand detected).
        """
        lmList = []
        if self.results.multi_hand_landmarks:
            myHand = self.results.multi_hand_landmarks[handNo]
            # Image size is loop-invariant: compute it once, not per landmark.
            h, w, c = img.shape
            for id, lm in enumerate(myHand.landmark):
                cx, cy = int(lm.x * w), int(lm.y * h)
                lmList.append([id, cx, cy])
                if draw:
                    cv2.circle(img, (cx, cy), 15, (255, 0, 255), cv2.FILLED)

        return lmList
95
+
96
+
97
def speak(text):
    """Speak *text* aloud with pyttsx3 (blocking TTS call; callers run it
    on a worker Thread so the request handler is not blocked)."""
    pyttsx3.speak(text)
99
+
100
def set_example_image(example: list) -> dict:
    """Copy a clicked Dataset example into the input image component.

    :param example: dataset sample; element 0 is the image (file path)
                    to load into the input component.
    :return: a gradio update payload for the image component.
    """
    # gr.update is the Blocks-era API; the legacy gr.inputs.Image.update
    # namespace is deprecated and removed in newer gradio releases.
    return gr.update(value=example[0])
102
+
103
+
104
def count(im):
    """Count raised fingers in an RGB image and render the result.

    Pipeline: load the overlay images from ./Count, detect one hand with
    MediaPipe, count extended fingers by comparing fingertip landmarks to
    lower joints, then paste the matching overlay and the count onto the
    frame. Speaks the result on a background thread so the handler
    returns immediately.

    :param im: RGB image (gradio supplies RGB; it is reversed to BGR for
               OpenCV below).
    :return: annotated RGB image.
    """
    folderPath = "Count"
    myList = os.listdir(folderPath)
    overlayList = []
    # Sorted so overlay index N-1 corresponds to a count of N
    # (presumably files are named in count order — verify folder contents).
    for imPath in sorted(myList):
        image = cv2.imread(f'{folderPath}/{imPath}')
        # print(f'{folderPath}/{imPath}')
        overlayList.append(image)

    #print(len(overlayList))
    # MediaPipe landmark ids of the five fingertips (thumb..pinky).
    tipIds = [4, 8, 12, 16, 20]
    detector = handDetector(detectionCon=0.75)

    #img = cv2.imread('test.jpg')
    # [:,:,::-1] converts RGB->BGR for OpenCV; flip(…, 1) mirrors the
    # image so the thumb test below works for a right hand.
    allhands,img = detector.findHands(cv2.flip(im[:,:,::-1], 1))
    # NOTE(review): debug artifact — writes the annotated frame to disk
    # on every request.
    cv2.imwrite('test3.png',img)

    lmList = detector.findPosition(img, draw=False,)
    # print(lmList)

    if len(lmList) != 0:
        fingers = []

        # Thumb: compare x of tip vs the joint below it (mirrored image,
        # so this assumes a right hand — matches the message below).
        if lmList[tipIds[0]][1] > lmList[tipIds[0] - 1][1]:
            fingers.append(1)
        else:
            fingers.append(0)

        # 4 Fingers: a finger is "up" when its tip is above (smaller y
        # than) the joint two landmarks below.
        for id in range(1, 5):
            if lmList[tipIds[id]][2] < lmList[tipIds[id] - 2][2]:
                fingers.append(1)
            else:
                fingers.append(0)

        # print(fingers)
        totalFingers = fingers.count(1)
        #print(totalFingers)
        text = f"Total finger count is {totalFingers}!"

        # Paste the matching overlay into the top-left corner. NOTE(review):
        # a count of 0 indexes overlayList[-1] (last image) — confirm the
        # Count folder is laid out to make that intentional.
        h, w, c = overlayList[totalFingers - 1].shape
        # Flip back so the output is un-mirrored before drawing text.
        img = cv2.flip(img,1)
        img[0:h, 0:w] = overlayList[totalFingers - 1]


        cv2.rectangle(img, (20, 225), (170, 425), (0, 255, 0), cv2.FILLED)
        cv2.putText(img, str(totalFingers), (45, 375), cv2.FONT_HERSHEY_PLAIN,
                    10, (255, 0, 0), 25)
        # Speak asynchronously; pyttsx3 blocks otherwise.
        p1=Thread(target=speak,args=(text,))
        p1.start()
        # BGR->RGB for gradio.
        return img[:,:,::-1]
    else:
        text = f"No Hand detected, please use your right hand for correct output!"
        p1=Thread(target=speak,args=(text,))
        p1.start()
        # Un-mirror and convert back to RGB before returning.
        return cv2.flip(img[:,:,::-1],1)
161
+
162
# Gradio UI: two tabs (file upload and webcam), each feeding count().
with gr.Blocks() as demo:
    with gr.Tabs():
        with gr.TabItem('Upload'):
            with gr.Row():
                with gr.Column():
                    img_input = gr.Image(shape=(640,480))
                    image_button = gr.Button("Submit")

                with gr.Column():
                    output = gr.Image()
            with gr.Row():
                # Clicking an example loads it into img_input via
                # set_example_image.
                example_images = gr.Dataset(components=[img_input],samples=[["ex2.jpg"]])

        with gr.TabItem('Webcam'):
            with gr.Row():
                with gr.Column():
                    img_input2 = gr.Webcam()
                    image_button2 = gr.Button("Submit")

                with gr.Column():
                    # gr.Image, not the legacy gr.outputs.Image: consistent
                    # with the Upload tab and required by newer gradio.
                    output2 = gr.Image()

    image_button.click(fn=count,
                       inputs = img_input,
                       outputs = output)
    image_button2.click(fn=count,
                        inputs = img_input2,
                        outputs = output2)
    example_images.click(fn=set_example_image,inputs=[example_images],outputs=[img_input])


demo.launch(debug=True)