import os
import time

import cv2
import gradio as gr
import matplotlib.pyplot as plt
import mediapipe as mp
import numpy as np
import torch
from PIL import Image, ImageDraw, ImageFont
def v_capture(cap):
    # NOTE: the incoming argument is ignored; this function always opens the
    # default local webcam itself. cv2.imshow below also needs a display, so
    # this loop only works when run locally, not on a headless Space.
    cap = cv2.VideoCapture(0)
    mp_drawing = mp.solutions.drawing_utils
    mp_hands = mp.solutions.hands
    mp_drawing_styles = mp.solutions.drawing_styles
    capture = None  # most recent raw BGR frame; returned to the caller
    with mp_hands.Hands(
            min_detection_confidence=0.5,
            min_tracking_confidence=0.5) as hands:
        while cap.isOpened():
            success, image = cap.read()
            if not success:
                print("Ignoring empty camera frame.")
                # If loading a video, use 'break' instead of 'continue'.
                continue
            # Flip the image horizontally for a later selfie-view display,
            # and convert the BGR image to RGB.
            image = cv2.cvtColor(cv2.flip(image, 1), cv2.COLOR_BGR2RGB)
            # To improve performance, optionally mark the image as not
            # writeable to pass by reference.
            image.flags.writeable = False
            results = hands.process(image)
            # Draw the hand annotations on the image.
            image.flags.writeable = True
            image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
            image_height, image_width, _ = image.shape
            if results.multi_hand_landmarks:
                for hand_landmarks in results.multi_hand_landmarks:
                    # Check the joint ordering of the thumb and the four other
                    # fingers to flag which fingers are extended: a finger is
                    # "up" when its joints rise monotonically toward the tip
                    # (y grows downward, so smaller y is higher in the image).
                    # A compact helper for this is sketched after this function.
                    thumb_finger_state = 0
                    if (hand_landmarks.landmark[mp_hands.HandLandmark.THUMB_CMC].y
                            >= hand_landmarks.landmark[mp_hands.HandLandmark.THUMB_MCP].y
                            >= hand_landmarks.landmark[mp_hands.HandLandmark.THUMB_IP].y
                            >= hand_landmarks.landmark[mp_hands.HandLandmark.THUMB_TIP].y):
                        thumb_finger_state = 1
                    index_finger_state = 0
                    if (hand_landmarks.landmark[mp_hands.HandLandmark.INDEX_FINGER_MCP].y
                            >= hand_landmarks.landmark[mp_hands.HandLandmark.INDEX_FINGER_PIP].y
                            >= hand_landmarks.landmark[mp_hands.HandLandmark.INDEX_FINGER_DIP].y
                            >= hand_landmarks.landmark[mp_hands.HandLandmark.INDEX_FINGER_TIP].y):
                        index_finger_state = 1
                    middle_finger_state = 0
                    if (hand_landmarks.landmark[mp_hands.HandLandmark.MIDDLE_FINGER_MCP].y
                            >= hand_landmarks.landmark[mp_hands.HandLandmark.MIDDLE_FINGER_PIP].y
                            >= hand_landmarks.landmark[mp_hands.HandLandmark.MIDDLE_FINGER_DIP].y
                            >= hand_landmarks.landmark[mp_hands.HandLandmark.MIDDLE_FINGER_TIP].y):
                        middle_finger_state = 1
                    ring_finger_state = 0
                    if (hand_landmarks.landmark[mp_hands.HandLandmark.RING_FINGER_MCP].y
                            >= hand_landmarks.landmark[mp_hands.HandLandmark.RING_FINGER_PIP].y
                            >= hand_landmarks.landmark[mp_hands.HandLandmark.RING_FINGER_DIP].y
                            >= hand_landmarks.landmark[mp_hands.HandLandmark.RING_FINGER_TIP].y):
                        ring_finger_state = 1
                    pinky_finger_state = 0
                    if (hand_landmarks.landmark[mp_hands.HandLandmark.PINKY_MCP].y
                            >= hand_landmarks.landmark[mp_hands.HandLandmark.PINKY_PIP].y
                            >= hand_landmarks.landmark[mp_hands.HandLandmark.PINKY_DIP].y
                            >= hand_landmarks.landmark[mp_hands.HandLandmark.PINKY_TIP].y):
                        pinky_finger_state = 1
                    # Use the finger states to label the gesture as rock,
                    # scissors, or paper. Labels are drawn in Korean, hence
                    # the Korean-capable gulim font.
                    font = ImageFont.truetype("fonts/gulim.ttc", 60)
                    capture = image  # keep the raw BGR frame for saving/return
                    image = Image.fromarray(image)
                    draw = ImageDraw.Draw(image)
                    text = ""
                    if middle_finger_state == 1 and ring_finger_state == 0 and pinky_finger_state == 0:
                        text = "fuck you"
                    if index_finger_state == 1 and middle_finger_state == 1:
                        text = "๊ฐ์"  # "scissors"
                        time.sleep(0.2)
                        cv2.imwrite('frame.png', capture)
                    if thumb_finger_state == 1 and index_finger_state == 1 and middle_finger_state == 1 and ring_finger_state == 1 and pinky_finger_state == 1:
                        text = "๋ณด"  # "paper"
                    if index_finger_state == 0 and middle_finger_state == 0 and ring_finger_state == 0 and pinky_finger_state == 0:
                        text = "์ฃผ๋จน"  # "rock"
                    # Draw the label in white on a black background box.
                    l, t, r, b = font.getbbox(text)
                    w, h = r - l, b - t
                    x = 50
                    y = 50
                    draw.rectangle((x, y, x + w, y + h), fill='black')
                    draw.text((x, y), text, font=font, fill=(255, 255, 255))
                    image = np.array(image)
                    mp_drawing.draw_landmarks(
                        image,
                        hand_landmarks,
                        mp_hands.HAND_CONNECTIONS,
                        mp_drawing_styles.get_default_hand_landmarks_style(),
                        mp_drawing_styles.get_default_hand_connections_style())
            cv2.imshow('MediaPipe Hands', image)
            if cv2.waitKey(5) & 0xFF == 27:  # exit on Esc
                break
    return capture
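# A minimal sketch of a compact equivalent to the per-finger checks above; it
# is not wired into v_capture and the name finger_extended is my own. A finger
# counts as extended when its joints' y-coordinates are non-increasing from
# base to tip.
def finger_extended(hand_landmarks, joints):
    # joints: landmark indices for one finger, ordered base -> tip.
    ys = [hand_landmarks.landmark[j].y for j in joints]
    return all(ys[i] >= ys[i + 1] for i in range(len(ys) - 1))
# Example use inside the landmark loop:
# index_finger_state = int(finger_extended(hand_landmarks, [
#     mp_hands.HandLandmark.INDEX_FINGER_MCP,
#     mp_hands.HandLandmark.INDEX_FINGER_PIP,
#     mp_hands.HandLandmark.INDEX_FINGER_DIP,
#     mp_hands.HandLandmark.INDEX_FINGER_TIP,
# ]))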
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# Read the Hugging Face token from the environment (it is not used below);
# never hardcode access tokens in source.
token = os.environ.get('HF_TOKEN')
model1 = torch.hub.load('bryandlee/animegan2-pytorch:main', 'generator', pretrained='face_paint_512_v1', device=device)
model2 = torch.hub.load('bryandlee/animegan2-pytorch:main', 'generator', pretrained='face_paint_512_v2', device=device)
model3 = torch.hub.load('bryandlee/animegan2-pytorch:main', 'generator', pretrained='celeba_distill', device=device)
model4 = torch.hub.load('bryandlee/animegan2-pytorch:main', 'generator', pretrained='paprika', device=device)
face2paint = torch.hub.load(
    'bryandlee/animegan2-pytorch:main', 'face2paint',
    size=512, device=device, side_by_side=False
)
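# Sketch: the same generators can also be run on a still image from disk.
# 'input.png' / 'output.png' are placeholder paths, not files in this Space.
# out_img = face2paint(model2, Image.open('input.png').convert('RGB'))
# out_img.save('output.png')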
def inference(img, ver):
    # Gradio delivers the webcam snapshot as a NumPy array; convert it to PIL.
    img = Image.fromarray(img)
    if ver == 'version 1':
        return face2paint(model1, img)
    elif ver == 'version 2':
        return face2paint(model2, img)
    elif ver == 'version 3':
        return face2paint(model3, img)
    elif ver == 'version 4':
        return face2paint(model4, img)
    # Fall back to version 2 when no radio option has been selected yet,
    # instead of returning None.
    return face2paint(model2, img)
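# A minimal alternative sketch: the if/elif chain above as a lookup table.
# Assumed equivalent; models_by_version and inference_by_lookup are my own
# names and are not wired into the UI below.
models_by_version = {
    'version 1': model1,
    'version 2': model2,
    'version 3': model3,
    'version 4': model4,
}
def inference_by_lookup(img, ver):
    # Default to model2 when ver is None (no radio option selected).
    return face2paint(models_by_version.get(ver, model2), Image.fromarray(img))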
with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column():
            image = gr.Image(label="Input Image", source="webcam")
            ver = gr.Radio(['version 1', 'version 2', 'version 3', 'version 4'], label='version')
        with gr.Column():
            out = gr.Image(label='Output Image')
            run = gr.Button("Run")
    run.click(inference, inputs=[image, ver], outputs=out)
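# Sketch: a roughly equivalent single-call UI via gr.Interface (not used by
# this Space; shown only for comparison with the Blocks layout above).
# gr.Interface(fn=inference,
#              inputs=[gr.Image(source="webcam"),
#                      gr.Radio(['version 1', 'version 2', 'version 3', 'version 4'])],
#              outputs=gr.Image()).launch()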
# Alternative demo (kept commented out): drive the local webcam loop in
# v_capture above instead of a single snapshot.
# with gr.Blocks() as demo:
#     with gr.Row():
#         with gr.Column():
#             image = gr.Image(label="Input Image", source="webcam")
#             # ver = gr.Radio(['version 1', 'version 2', 'version 3', 'version 4'], label='version')
#         with gr.Column():
#             out = gr.Image(label='Output Image')
#             run = gr.Button("Run")
#     run.click(v_capture, inputs=image, outputs=out)
demo.launch()
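# When debugging outside Spaces, a temporary public link can be requested
# instead (share=True is a standard Gradio launch option):
# demo.launch(share=True)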