# Source: SAIcgr's Hugging Face Space — app.py
# Commit d35a22a (verified), 9.77 kB
# (Scrape header converted to comments so the file parses as Python.)
import cv2
import mediapipe as mp
import pyautogui
import math
import time
class HandMouse:
    """Control the system mouse with hand gestures tracked via MediaPipe.

    Gestures (MediaPipe landmark ids: 0 = wrist/palm base, 4 = thumb tip,
    8 = index tip, 12 = middle tip):
      * index fingertip position        -> cursor movement (smoothed)
      * thumb-index pinch               -> left click
      * index-middle pinch              -> right click
      * middle tip height vs. palm base -> scroll up/down
      * thumb+index+middle pinch        -> Ctrl+C (copy)
    """

    def __init__(self):
        # MediaPipe hand detector; 0.7 thresholds favor stable detections.
        self.hands = mp.solutions.hands.Hands(min_detection_confidence=0.7, min_tracking_confidence=0.7)
        self.mp_draw = mp.solutions.drawing_utils
        # Previous smoothed cursor position (exponential-smoothing state).
        self.prev_x, self.prev_y = 0, 0
        self.smooth_factor = 5
        # Debounce timestamps so a held gesture does not re-fire every frame.
        # Replaces the original time.sleep(0.5) calls, which froze the whole
        # capture loop (video feed and cursor) for half a second per trigger.
        self.last_click_time = 0
        self.last_right_click_time = 0
        self.last_copy_time = 0
        self.last_scroll_time = 0

    def find_hands(self, img):
        """Run MediaPipe hand detection on a BGR frame; return raw results."""
        img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # MediaPipe expects RGB
        return self.hands.process(img_rgb)

    def get_landmarks(self, img, results):
        """Return {landmark_id: (px, py)} in pixel coordinates; annotate img.

        NOTE(review): if multiple hands are detected, entries from the last
        hand overwrite earlier ones — assumed single-hand usage.
        """
        lm_list = {}
        if results.multi_hand_landmarks:
            h, w, _ = img.shape  # loop-invariant; hoisted out of the per-landmark loop
            for hand_landmarks in results.multi_hand_landmarks:
                for lm_id, lm in enumerate(hand_landmarks.landmark):
                    # Landmark coords are normalized [0, 1]; scale to pixels.
                    lm_list[lm_id] = (int(lm.x * w), int(lm.y * h))
                self.mp_draw.draw_landmarks(img, hand_landmarks, mp.solutions.hands.HAND_CONNECTIONS)
        return lm_list

    def move_mouse(self, img, lm_list):
        """Move the OS cursor to track the index fingertip (landmark 8)."""
        if 8 in lm_list:  # Index finger tip
            index_tip = lm_list[8]
            screen_width, screen_height = pyautogui.size()
            img_width, img_height = img.shape[1], img.shape[0]
            x, y = index_tip
            # Map camera-frame coordinates to full screen resolution.
            screen_x = int((x / img_width) * screen_width)
            screen_y = int((y / img_height) * screen_height)
            # Exponential smoothing: step 1/smooth_factor toward the target.
            smoothed_x = self.prev_x + (screen_x - self.prev_x) / self.smooth_factor
            smoothed_y = self.prev_y + (screen_y - self.prev_y) / self.smooth_factor
            pyautogui.moveTo(smoothed_x, smoothed_y)
            self.prev_x, self.prev_y = smoothed_x, smoothed_y

    def left_click(self, lm_list):
        """Left-click when thumb tip (4) and index tip (8) pinch together."""
        if 4 in lm_list and 8 in lm_list:  # Thumb and index finger
            thumb_tip = lm_list[4]
            index_tip = lm_list[8]
            distance = math.hypot(thumb_tip[0] - index_tip[0], thumb_tip[1] - index_tip[1])
            if distance < 30:  # Pinch threshold (pixels)
                current_time = time.time()
                if current_time - self.last_click_time > 0.5:  # debounce
                    pyautogui.click()
                    self.last_click_time = current_time

    def right_click(self, lm_list):
        """Right-click when index tip (8) and middle tip (12) pinch together."""
        if 8 in lm_list and 12 in lm_list:  # Index and middle finger
            index_tip = lm_list[8]
            middle_tip = lm_list[12]
            distance = math.hypot(index_tip[0] - middle_tip[0], index_tip[1] - middle_tip[1])
            if distance < 50:  # Right-click threshold (pixels)
                current_time = time.time()
                # Timestamp debounce instead of time.sleep(0.5): sleeping here
                # blocked frame capture and cursor movement for half a second.
                if current_time - self.last_right_click_time > 0.5:
                    pyautogui.click(button='right')
                    self.last_right_click_time = current_time

    def scroll(self, lm_list):
        """Scroll based on middle tip (12) height relative to palm base (0)."""
        if 12 in lm_list and 0 in lm_list:  # Middle finger and palm base
            middle_tip = lm_list[12]
            palm_base = lm_list[0]
            # Vertical distance; positive = fingertip below palm (image coords).
            scroll_distance = middle_tip[1] - palm_base[1]
            current_time = time.time()
            if abs(scroll_distance) > 30 and current_time - self.last_scroll_time > 0.1:
                if scroll_distance > 0:
                    pyautogui.scroll(-5)  # Scroll down
                else:
                    pyautogui.scroll(5)  # Scroll up
                self.last_scroll_time = current_time

    def copy_content(self, lm_list):
        """Send Ctrl+C when thumb (4), index (8), and middle (12) all pinch."""
        if 4 in lm_list and 8 in lm_list and 12 in lm_list:
            thumb_tip = lm_list[4]
            index_tip = lm_list[8]
            middle_tip = lm_list[12]
            distance_thumb_index = math.hypot(thumb_tip[0] - index_tip[0], thumb_tip[1] - index_tip[1])
            distance_index_middle = math.hypot(index_tip[0] - middle_tip[0], index_tip[1] - middle_tip[1])
            if distance_thumb_index < 30 and distance_index_middle < 30:
                current_time = time.time()
                # Timestamp debounce instead of time.sleep(0.5) (see right_click).
                if current_time - self.last_copy_time > 0.5:
                    pyautogui.hotkey('ctrl', 'c')  # Copy content
                    self.last_copy_time = current_time

    def detect_gestures(self, img, lm_list):
        """Run every gesture handler against the current landmark set.

        NOTE(review): thresholds overlap — a three-finger pinch also satisfies
        the left- and right-click conditions, so gestures can co-trigger.
        """
        self.move_mouse(img, lm_list)
        self.left_click(lm_list)
        self.right_click(lm_list)
        self.scroll(lm_list)
        self.copy_content(lm_list)
def main():
    """Open the default webcam and run the gesture-mouse loop until 'q'."""
    cap = cv2.VideoCapture(0)
    # Robustness: bail out cleanly when no camera is available instead of
    # spinning on failed reads.
    if not cap.isOpened():
        print("Error: could not open webcam.")
        return
    hand_mouse = HandMouse()
    try:
        while True:
            success, img = cap.read()
            if not success:
                break
            results = hand_mouse.find_hands(img)
            lm_list = hand_mouse.get_landmarks(img, results)
            if lm_list:
                hand_mouse.detect_gestures(img, lm_list)
            # Show video feed; 'q' quits.
            cv2.imshow("Hand Tracking Mouse", img)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
    finally:
        # Always free the camera and window, even on an unexpected exception.
        cap.release()
        cv2.destroyAllWindows()


if __name__ == "__main__":
    main()
import cv2
######## desktop mouse (second, revised version of the same program)
import mediapipe as mp
import pyautogui
import math
import time
class HandMouse:
    """Gesture-driven virtual mouse built on MediaPipe hand tracking.

    The cursor follows the index fingertip; a thumb-index pinch left-clicks,
    a thumb held far from the palm base right-clicks, and the middle
    fingertip's height relative to the palm drives scrolling.
    """

    def __init__(self):
        # Hand detector with matching detection/tracking confidence thresholds.
        self.hands = mp.solutions.hands.Hands(min_detection_confidence=0.7, min_tracking_confidence=0.7)
        self.mp_draw = mp.solutions.drawing_utils
        # State for exponential smoothing of the cursor position.
        self.prev_x, self.prev_y = 0, 0
        self.smooth_factor = 5
        # Debounce timestamps guarding against repeated triggers.
        self.last_click_time = 0
        self.last_right_click_time = 0
        self.last_scroll_time = 0

    def find_hands(self, img):
        """Run the hand detector on a BGR frame and return its raw results."""
        return self.hands.process(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))

    def get_landmarks(self, img, results):
        """Map each landmark id to its pixel position and annotate the frame."""
        positions = {}
        detected = results.multi_hand_landmarks
        if detected:
            for hand in detected:
                for idx, point in enumerate(hand.landmark):
                    h, w, _ = img.shape
                    positions[idx] = (int(point.x * w), int(point.y * h))
                self.mp_draw.draw_landmarks(img, hand, mp.solutions.hands.HAND_CONNECTIONS)
        return positions

    def move_mouse(self, img, lm_list):
        """Drive the OS cursor from the index fingertip (landmark 8)."""
        if 8 not in lm_list:
            return
        x, y = lm_list[8]
        screen_w, screen_h = pyautogui.size()
        frame_w, frame_h = img.shape[1], img.shape[0]
        # Scale camera-frame coordinates up to the screen resolution.
        target_x = int((x / frame_w) * screen_w)
        target_y = int((y / frame_h) * screen_h)
        # Exponential smoothing: step a fraction of the way toward the target.
        new_x = self.prev_x + (target_x - self.prev_x) / self.smooth_factor
        new_y = self.prev_y + (target_y - self.prev_y) / self.smooth_factor
        pyautogui.moveTo(new_x, new_y)
        self.prev_x, self.prev_y = new_x, new_y

    def left_click(self, lm_list):
        """Left-click when thumb tip (4) and index tip (8) pinch together."""
        if 4 not in lm_list or 8 not in lm_list:
            return
        tx, ty = lm_list[4]
        ix, iy = lm_list[8]
        if math.hypot(tx - ix, ty - iy) >= 30:  # not pinched
            return
        now = time.time()
        if now - self.last_click_time > 0.5:  # debounce repeated pinches
            pyautogui.click()
            self.last_click_time = now

    def right_click(self, lm_list):
        """Right-click when the thumb tip (4) is far from the palm base (0)."""
        if 4 not in lm_list or 0 not in lm_list:
            return
        tx, ty = lm_list[4]
        px, py = lm_list[0]
        if math.hypot(tx - px, ty - py) > 50:  # thumb extended away from palm
            now = time.time()
            if now - self.last_right_click_time > 0.5:  # debounce
                pyautogui.click(button='right')
                self.last_right_click_time = now

    def scroll(self, lm_list):
        """Scroll by the middle fingertip's height relative to the palm base."""
        if 12 not in lm_list or 0 not in lm_list:
            return
        offset = lm_list[12][1] - lm_list[0][1]
        now = time.time()
        if abs(offset) > 30 and now - self.last_scroll_time > 0.1:
            # Positive offset = fingertip below palm (image coords) -> scroll down.
            pyautogui.scroll(-5 if offset > 0 else 5)
            self.last_scroll_time = now

    def detect_gestures(self, img, lm_list):
        """Run every gesture handler against the current landmark set."""
        self.move_mouse(img, lm_list)
        for handler in (self.left_click, self.right_click, self.scroll):
            handler(lm_list)
def main():
    """Capture webcam frames and translate hand gestures into mouse actions."""
    camera = cv2.VideoCapture(0)
    controller = HandMouse()
    while True:
        ok, frame = camera.read()
        if not ok:
            break
        detection = controller.find_hands(frame)
        landmarks = controller.get_landmarks(frame, detection)
        if landmarks:
            controller.detect_gestures(frame, landmarks)
        # Display the annotated feed; press 'q' to quit.
        cv2.imshow("Hand Tracking Mouse", frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    camera.release()
    cv2.destroyAllWindows()


if __name__ == "__main__":
    main()