"""Webcam effects demo: segments a person from the frame (cvzone SelfiSegmentation)
and applies a chosen filter to the person, the background, or the whole frame,
exposed through a Gradio UI."""

import os
import time

import cv2
import gradio as gr
import numpy as np
from cvzone.SelfiSegmentationModule import SelfiSegmentation

# Single shared segmenter instance — the underlying model load is expensive,
# so it is created once at module import time.
segmen = SelfiSegmentation()


def save_image_to_desktop(image):
    """Save *image* (a BGR ndarray) as a timestamped JPEG under ~/Desktop/project.

    Creates the folder if needed and returns the full path of the written file.
    (Currently unreferenced by the UI, but kept as a utility.)
    """
    desktop_path = os.path.expanduser("~/Desktop")
    project_folder = os.path.join(desktop_path, "project")
    os.makedirs(project_folder, exist_ok=True)
    timestamp = int(time.time())
    file_name = f"image_{timestamp}.jpg"
    file_path = os.path.join(project_folder, file_name)
    cv2.imwrite(file_path, image)
    return file_path


def apply_sepia_filter(frame):
    """Return *frame* (BGR) with a sepia tone, clipped to uint8."""
    # Standard sepia matrix with rows/columns flipped for BGR channel order.
    sepia_kernel = np.array([[0.131, 0.534, 0.272],
                             [0.168, 0.686, 0.349],
                             [0.189, 0.769, 0.393]])
    sepia_image = cv2.transform(frame, sepia_kernel)
    return np.clip(sepia_image, 0, 255).astype(np.uint8)


def apply_emboss_filter(image):
    """Return *image* convolved with a 3x3 emboss kernel, clipped to uint8."""
    emboss_kernel = np.array([[-2, -1, 0],
                              [-1, 1, 1],
                              [0, 1, 2]])
    emboss_image = cv2.filter2D(image, -1, emboss_kernel)
    return np.clip(emboss_image, 0, 255).astype(np.uint8)


def pixelate(image):
    """Return a pixelated copy of *image* using 10x10 blocks.

    Downscales with nearest-neighbour interpolation and scales back up,
    which produces the blocky "mosaic" look.
    """
    pixel_size = 10
    height, width = image.shape[:2]
    # max(1, ...) guards against a zero-sized intermediate image (cv2.resize
    # raises on a 0 dimension) when the input is smaller than pixel_size.
    temp_image = cv2.resize(
        image,
        (max(1, width // pixel_size), max(1, height // pixel_size)),
        interpolation=cv2.INTER_NEAREST,
    )
    return cv2.resize(temp_image, (width, height), interpolation=cv2.INTER_NEAREST)


def apply_edge_enhance(image):
    """Return *image* sharpened with an edge-enhance kernel, clipped to uint8.

    NOTE(review): the kernel sums to 2 (centre weight 10), so output is
    brightened as well as sharpened — presumably intentional for visual punch.
    """
    enhanced_edge_kernel = np.array([[-1, -1, -1],
                                     [-1, 10, -1],
                                     [-1, -1, -1]])
    enhanced_edge_image = cv2.filter2D(image, -1, enhanced_edge_kernel)
    return np.clip(enhanced_edge_image, 0, 255).astype(np.uint8)


def brightness_control(image, value):
    """Return *image* with *value* added to every channel, clipped to [0, 255].

    *value* may be negative to darken. Works in float32 to avoid uint8
    wrap-around before the clip.
    """
    img_float = np.float32(image)
    img_float += value
    img_float = np.clip(img_float, 0, 255)
    return np.uint8(img_float)


def _apply_effect(image, code):
    """Apply one of the numbered filters (1=blur, 2=sepia, 3=pixelate,
    4=emboss, 5=edge enhance) to *image*; unknown codes return it unchanged."""
    if code == 1:
        return cv2.GaussianBlur(image, (15, 15), 0)
    if code == 2:
        return apply_sepia_filter(image)
    if code == 3:
        return pixelate(image)
    if code == 4:
        return apply_emboss_filter(image)
    if code == 5:
        return apply_edge_enhance(image)
    return image


def final(option1, option2, frame):
    """Apply the selected effect to the selected region of *frame* (BGR).

    option1: "Person" | "Background" | "Whole Frame" (unknown -> "Person")
    option2: effect label from the UI radio (unknown -> "Blur")
    Returns the processed BGR frame.

    Brightness semantics (preserved from the original):
      * On "Whole Frame", Increase/Decrease adjusts the frame by +/-50.
      * On "Person"/"Background", "Increase Brightness" dims the *other*
        region by 30 (relative brightening); "Decrease Brightness" dims the
        targeted region by 30.
    """
    option1_map = {"Person": 1, "Background": 2, "Whole Frame": 3}
    option2_map = {
        "Blur": 1,
        "Sepia": 2,
        "Pixelate": 3,
        "Emboss": 4,
        "Edge Enhance": 5,
        "Increase Brightness": 'i',
        "Decrease Brightness": 'd',
    }
    option1 = option1_map.get(option1, 1)
    option2 = option2_map.get(option2, 1)

    if option1 == 3:
        # Whole frame: brightness is applied directly, other effects via dispatch.
        if option2 == 'i':
            return brightness_control(frame, 50)
        if option2 == 'd':
            return brightness_control(frame, -50)
        return _apply_effect(frame, option2)

    # Split the frame: person on black, background = frame minus person.
    person = segmen.removeBG(frame, (0, 0, 0), cutThreshold=0.8)
    background = cv2.subtract(frame, person)

    if option1 == 1:  # target the person
        if option2 == 'i':
            # Relative brightening: dim the background instead of the person.
            background = brightness_control(background, -30)
        elif option2 == 'd':
            person = brightness_control(person, -30)
        else:
            person = _apply_effect(person, option2)
    else:  # option1 == 2, target the background
        if option2 == 'i':
            # Relative brightening: dim the person instead of the background.
            person = brightness_control(person, -30)
        elif option2 == 'd':
            background = brightness_control(background, -30)
        else:
            background = _apply_effect(background, option2)

    # Recombine the (possibly modified) regions into a single frame.
    return cv2.add(person, background)


def process_image(img, option1, option2):
    """Gradio callback: RGB in -> apply effect in BGR -> RGB out."""
    frame = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
    output_frame = final(option1, option2, frame)
    return cv2.cvtColor(output_frame, cv2.COLOR_BGR2RGB)


# --- UI ---------------------------------------------------------------------
# NOTE(review): the original source was whitespace-collapsed, so the exact
# component nesting inside gr.Row() is not recoverable; input/output images
# are placed side by side here — confirm against the intended layout.
with gr.Blocks() as demo:
    with gr.Row():
        img = gr.Image(source="webcam")
        io = gr.Image()
    r1 = gr.Radio(["Person", "Background", "Whole Frame"])
    r2 = gr.Radio([
        "Blur", "Sepia", "Pixelate", "Emboss", "Edge Enhance",
        "Increase Brightness", "Decrease Brightness",
    ])
    btn = gr.Button(value="Submit")
    btn.click(process_image, inputs=[img, r1, r2], outputs=[io])

demo.launch()