Commit
·
f600821
1
Parent(s):
b060bfa
Rename main.py to Hands.py (virtual mouse); add realtime face/hand landmark detection and gesture volume control scripts
Browse files- main.py → Hands.py +24 -10
- Realtime_Face_Landmark_Detection.py +71 -0
- VolumeControl.py +76 -0
- tempCodeRunnerFile.py +42 -0
main.py → Hands.py
RENAMED
|
@@ -1,11 +1,14 @@
|
|
| 1 |
import cv2
|
| 2 |
import mediapipe as mp
|
| 3 |
import pyautogui
|
| 4 |
-
|
| 5 |
-
|
|
|
|
|
|
|
| 6 |
drawing_utils = mp.solutions.drawing_utils
|
| 7 |
screen_width, screen_height = pyautogui.size()
|
| 8 |
index_y = 0
|
|
|
|
| 9 |
while True:
|
| 10 |
_, frame = cap.read()
|
| 11 |
frame = cv2.flip(frame, 1)
|
|
@@ -13,6 +16,7 @@ while True:
|
|
| 13 |
rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
|
| 14 |
output = hand_detector.process(rgb_frame)
|
| 15 |
hands = output.multi_hand_landmarks
|
|
|
|
| 16 |
if hands:
|
| 17 |
for hand in hands:
|
| 18 |
drawing_utils.draw_landmarks(frame, hand)
|
|
@@ -20,20 +24,30 @@ while True:
|
|
| 20 |
for id, landmark in enumerate(landmarks):
|
| 21 |
x = int(landmark.x*frame_width)
|
| 22 |
y = int(landmark.y*frame_height)
|
| 23 |
-
|
| 24 |
-
|
|
|
|
| 25 |
index_x = screen_width/frame_width*x
|
| 26 |
index_y = screen_height/frame_height*y
|
| 27 |
|
| 28 |
-
if id ==
|
| 29 |
-
cv2.circle(
|
| 30 |
thumb_x = screen_width/frame_width*x
|
| 31 |
thumb_y = screen_height/frame_height*y
|
|
|
|
| 32 |
print('outside', abs(index_y - thumb_y))
|
| 33 |
-
|
|
|
|
| 34 |
pyautogui.click()
|
| 35 |
-
pyautogui.sleep(0.
|
| 36 |
-
|
| 37 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 38 |
cv2.imshow('Virtual Mouse', frame)
|
| 39 |
cv2.waitKey(1)
|
|
|
|
import cv2
import mediapipe as mp
import pyautogui

"""Virtual mouse: map a tracked hand landmark to the screen cursor via pyautogui.

Landmark 4 (thumb tip) and landmark 6 (index-finger PIP joint) are projected
from frame coordinates to screen coordinates; a small vertical gap between
them triggers a click, a moderate gap moves the cursor.
"""

cap = cv2.VideoCapture(0) # Open Camera
mpHands = mp.solutions.hands
hand_detector = mpHands.Hands(False)  # static_image_mode=False: track across frames
drawing_utils = mp.solutions.drawing_utils
screen_width, screen_height = pyautogui.size()
index_y = 0

while True:
    success, frame = cap.read()
    # Fix: the read status was discarded; a failed read yields frame=None and
    # cv2.flip(None, 1) raises. Stop cleanly instead.
    if not success:
        break
    frame = cv2.flip(frame, 1)  # mirror so cursor motion matches the user's hand
    # NOTE(review): the next line is an unchanged context line omitted by the
    # diff rendering — reconstructed from the frame_width/frame_height usage
    # below; confirm against the repository.
    frame_height, frame_width, _ = frame.shape
    rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)  # MediaPipe expects RGB
    output = hand_detector.process(rgb_frame)
    hands = output.multi_hand_landmarks

    if hands:
        for hand in hands:
            drawing_utils.draw_landmarks(frame, hand)
            # NOTE(review): omitted context line, reconstructed from the
            # enumerate() below — confirm against the repository.
            landmarks = hand.landmark
            for id, landmark in enumerate(landmarks):
                # Landmarks are normalized [0,1]; convert to pixel coordinates.
                x = int(landmark.x*frame_width)
                y = int(landmark.y*frame_height)

                # NOTE(review): MediaPipe landmark 4 is the THUMB tip, yet it
                # feeds the index_* variables (and 6 is the index-finger PIP
                # joint feeding thumb_*). Names kept to preserve behavior, but
                # they are swapped relative to the anatomy.
                if id == 4:
                    cv2.circle(frame,(x, y), 15, (255,255,0), cv2.FILLED)
                    index_x = screen_width/frame_width*x
                    index_y = screen_height/frame_height*y

                if id == 6:
                    cv2.circle(frame,(x, y), 15, (255,255,0), cv2.FILLED)
                    thumb_x = screen_width/frame_width*x
                    thumb_y = screen_height/frame_height*y

            print('outside', abs(index_y - thumb_y))

            # Pinch (small vertical gap) -> click, then debounce briefly.
            if abs(index_y - thumb_y) < 40:
                pyautogui.click()
                pyautogui.sleep(0.1)

            # if(abs(index_y - thumb_y) < 50):
            #     print('outside', abs(index_y - thumb_y))

            # Moderate gap -> just move the cursor.
            elif abs(index_y - thumb_y) < 250:
                pyautogui.moveTo(thumb_x, thumb_y)

            drawing_utils.draw_landmarks(frame, hand, mpHands.HAND_CONNECTIONS)

    cv2.imshow('Virtual Mouse', frame)
    cv2.waitKey(1)
|
Realtime_Face_Landmark_Detection.py
ADDED
|
@@ -0,0 +1,71 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import cv2
import mediapipe as mp
import time

"""Realtime face and hand landmark overlay using MediaPipe Holistic.

Draws face-mesh contours plus left/right hand connections on the webcam feed
and shows the instantaneous FPS. Press 'q' to quit.
"""

# model = 'model.task'
capture = cv2.VideoCapture(0)

previousTime = 0
currentTime = 0
mp_holistic = mp.solutions.holistic
holistic_model = mp_holistic.Holistic(
    min_detection_confidence=0.5,
    min_tracking_confidence=0.5
)

mp_drawing = mp.solutions.drawing_utils

while capture.isOpened():
    ret, frame = capture.read()
    # Fix: ret was ignored; at end of stream frame is None and
    # cv2.resize(None, ...) raises. Exit the loop cleanly instead.
    if not ret:
        break

    frame = cv2.resize(frame, (800, 600))
    image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)  # Holistic expects RGB
    # Mark read-only while MediaPipe processes (lets it avoid a copy).
    image.flags.writeable = False

    results = holistic_model.process(image)
    image.flags.writeable = True

    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)  # back to BGR for OpenCV drawing

    # Face mesh: magenta landmark dots, cyan connection lines.
    mp_drawing.draw_landmarks(
        image,
        results.face_landmarks,
        mp_holistic.FACEMESH_CONTOURS,
        mp_drawing.DrawingSpec(
            color=(255,0,255),
            thickness=1,
            circle_radius=1
        ),
        mp_drawing.DrawingSpec(
            color=(0,255,255),
            thickness=1,
            circle_radius=1
        )
    )

    # draw_landmarks is a no-op when the hand was not detected (landmarks None).
    mp_drawing.draw_landmarks(
        image,
        results.right_hand_landmarks,
        mp_holistic.HAND_CONNECTIONS
    )

    mp_drawing.draw_landmarks(
        image,
        results.left_hand_landmarks,
        mp_holistic.HAND_CONNECTIONS
    )

    # FPS = 1 / time elapsed since the previous frame.
    currentTime = time.time()
    fps = 1 / (currentTime-previousTime)
    previousTime = currentTime

    cv2.putText(image, str(int(fps))+" FPS", (10, 70), cv2.FONT_HERSHEY_COMPLEX, 1, (0,255,0), 2)

    cv2.imshow("Facial and Hand Landmarks", image)
    if cv2.waitKey(5) & 0xFF == ord('q'):
        break

capture.release()
cv2.destroyAllWindows()
|
VolumeControl.py
ADDED
|
@@ -0,0 +1,76 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import cv2
import mediapipe as mp
import time
from ctypes import cast, POINTER
from comtypes import CLSCTX_ALL
from pycaw.pycaw import AudioUtilities, IAudioEndpointVolume
import numpy as np

"""Gesture volume control (Windows): the pixel distance between the thumb tip
(landmark 4) and index fingertip (landmark 8) is mapped to the system master
volume via pycaw, with an on-screen volume bar and FPS readout.
"""

cap = cv2.VideoCapture(0)

mpHands = mp.solutions.hands
hands = mpHands.Hands(False)  # static_image_mode=False: track across frames

mpDraw = mp.solutions.drawing_utils

# Initialize volume control
devices = AudioUtilities.GetSpeakers()
interface = devices.Activate(IAudioEndpointVolume._iid_, CLSCTX_ALL, None)
volume = cast(interface, POINTER(IAudioEndpointVolume))
volRange = volume.GetVolumeRange()
minVol = volRange[0]  # lowest level in dB (device-dependent, ~ -65)
maxVol = volRange[1]  # highest level in dB (typically 0)

pTime = 0
cTime = 0

while True:
    success, img = cap.read()
    # Fix: the read status was discarded; a failed read yields img=None and
    # cv2.flip(None, 1) raises. Stop cleanly instead.
    if not success:
        break
    img = cv2.flip(img, 1)  # mirror so motion matches the user's hand
    imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # MediaPipe expects RGB
    results = hands.process(imgRGB)

    if results.multi_hand_landmarks:
        for handLms in results.multi_hand_landmarks:
            lmList = []
            for id, lm in enumerate(handLms.landmark):
                h, w, c = img.shape
                # Landmarks are normalized [0,1]; convert to pixel coordinates.
                cx, cy = int(lm.x * w), int(lm.y * h)
                lmList.append([id, cx, cy])

                if id == 4:  # thumb tip
                    cv2.circle(img, (cx, cy), 15, (255, 0, 255), cv2.FILLED)

                if id == 8:  # index fingertip
                    cv2.circle(img, (cx, cy), 15, (255, 0, 255), cv2.FILLED)

            mpDraw.draw_landmarks(img, handLms, mpHands.HAND_CONNECTIONS)

            if len(lmList) != 0:
                x1, y1 = lmList[4][1], lmList[4][2]  # thumb tip
                x2, y2 = lmList[8][1], lmList[8][2]  # index fingertip

                cv2.line(img, (x1, y1), (x2, y2), (255, 0, 0), 3)
                length = np.hypot(x2 - x1, y2 - y1)  # pinch distance in pixels

                # Hand range 50 - 300
                # Volume range -65 - 0
                vol = np.interp(length, [50, 300], [minVol, maxVol])
                volume.SetMasterVolumeLevel(vol, None)

                # Show the volume level
                volBar = np.interp(length, [50, 300], [400, 150])
                volPer = np.interp(length, [50, 300], [0, 100])

                cv2.rectangle(img, (50, 150), (85, 400), (0, 255, 0), 3)
                cv2.rectangle(img, (50, int(volBar)), (85, 400), (0, 255, 0), cv2.FILLED)
                cv2.putText(img, f'{int(volPer)} %', (40, 450), cv2.FONT_HERSHEY_PLAIN, 2, (255, 0, 0), 2)

    # FPS = 1 / time elapsed since the previous frame.
    cTime = time.time()
    fps = 1 / (cTime - pTime)
    pTime = cTime

    cv2.putText(img, str(int(fps)), (10, 78), cv2.FONT_HERSHEY_PLAIN, 3, (255, 255, 0), 3)

    cv2.imshow("Image", img)
    cv2.waitKey(1)
|
tempCodeRunnerFile.py
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import cv2
import mediapipe as mp
import time

"""Hand-tracking scratch script (VS Code Code Runner temp file).

Draws MediaPipe hand landmarks on the webcam feed, highlights the thumb tip
(landmark 4) and index fingertip (landmark 8), and overlays the FPS.
NOTE(review): tempCodeRunnerFile.py is editor debris — consider removing it
from version control / adding it to .gitignore.
"""

cap = cv2.VideoCapture(0)

mpHands = mp.solutions.hands
hands = mpHands.Hands(False)  # static_image_mode=False: track across frames

mpDraw = mp.solutions.drawing_utils

pTime = 0
cTime = 0

while True:
    success, img = cap.read()
    # Fix: the read status was discarded; a failed read yields img=None and
    # cv2.flip(None, 1) raises. Stop cleanly instead.
    if not success:
        break
    img = cv2.flip(img, 1)
    imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # MediaPipe expects RGB
    results = hands.process(imgRGB)

    if results.multi_hand_landmarks:
        for handLms in results.multi_hand_landmarks:
            for id, lm in enumerate(handLms.landmark):
                h, w, c = img.shape
                # Landmarks are normalized [0,1]; convert to pixel coordinates.
                cx, cy = int(lm.x*w), int(lm.y*h)

                if id == 4:  # thumb tip
                    cv2.circle(img,(cx, cy), 15, (255,0,255), cv2.FILLED)

                if id == 8:  # index fingertip
                    cv2.circle(img,(cx, cy), 15, (255,0,255), cv2.FILLED)

            mpDraw.draw_landmarks(img, handLms, mpHands.HAND_CONNECTIONS)

    # FPS = 1 / time elapsed since the previous frame.
    cTime = time.time()
    fps = 1/(cTime - pTime)
    pTime = cTime

    cv2.putText(img, str(int(fps)), (10,78), cv2.FONT_HERSHEY_PLAIN, 3, (255,255,0), 3)

    cv2.imshow("Image",img)
    cv2.waitKey(1)
|