Non-playing-Character committed on
Commit
bc77d4b
·
verified ·
1 Parent(s): 9346aa2

Upload 7 files

Browse files
Files changed (7) hide show
  1. .gitattributes +36 -35
  2. README.md +13 -12
  3. app.py +72 -0
  4. image_resizer.py +95 -0
  5. input.mp4 +3 -0
  6. requirements.txt +8 -0
  7. temp.py +80 -0
.gitattributes CHANGED
@@ -1,35 +1,36 @@
1
- *.7z filter=lfs diff=lfs merge=lfs -text
2
- *.arrow filter=lfs diff=lfs merge=lfs -text
3
- *.bin filter=lfs diff=lfs merge=lfs -text
4
- *.bz2 filter=lfs diff=lfs merge=lfs -text
5
- *.ckpt filter=lfs diff=lfs merge=lfs -text
6
- *.ftz filter=lfs diff=lfs merge=lfs -text
7
- *.gz filter=lfs diff=lfs merge=lfs -text
8
- *.h5 filter=lfs diff=lfs merge=lfs -text
9
- *.joblib filter=lfs diff=lfs merge=lfs -text
10
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
- *.model filter=lfs diff=lfs merge=lfs -text
13
- *.msgpack filter=lfs diff=lfs merge=lfs -text
14
- *.npy filter=lfs diff=lfs merge=lfs -text
15
- *.npz filter=lfs diff=lfs merge=lfs -text
16
- *.onnx filter=lfs diff=lfs merge=lfs -text
17
- *.ot filter=lfs diff=lfs merge=lfs -text
18
- *.parquet filter=lfs diff=lfs merge=lfs -text
19
- *.pb filter=lfs diff=lfs merge=lfs -text
20
- *.pickle filter=lfs diff=lfs merge=lfs -text
21
- *.pkl filter=lfs diff=lfs merge=lfs -text
22
- *.pt filter=lfs diff=lfs merge=lfs -text
23
- *.pth filter=lfs diff=lfs merge=lfs -text
24
- *.rar filter=lfs diff=lfs merge=lfs -text
25
- *.safetensors filter=lfs diff=lfs merge=lfs -text
26
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
- *.tar.* filter=lfs diff=lfs merge=lfs -text
28
- *.tar filter=lfs diff=lfs merge=lfs -text
29
- *.tflite filter=lfs diff=lfs merge=lfs -text
30
- *.tgz filter=lfs diff=lfs merge=lfs -text
31
- *.wasm filter=lfs diff=lfs merge=lfs -text
32
- *.xz filter=lfs diff=lfs merge=lfs -text
33
- *.zip filter=lfs diff=lfs merge=lfs -text
34
- *.zst filter=lfs diff=lfs merge=lfs -text
35
- *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar filter=lfs diff=lfs merge=lfs -text
29
+ *.tflite filter=lfs diff=lfs merge=lfs -text
30
+ *.tgz filter=lfs diff=lfs merge=lfs -text
31
+ *.wasm filter=lfs diff=lfs merge=lfs -text
32
+ *.xz filter=lfs diff=lfs merge=lfs -text
33
+ *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zst filter=lfs diff=lfs merge=lfs -text
35
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ input.mp4 filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,12 +1,13 @@
1
- ---
2
- title: Background Remover
3
- emoji: 😻
4
- colorFrom: green
5
- colorTo: blue
6
- sdk: gradio
7
- sdk_version: 5.46.0
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
1
+ ---
2
+ title: Remove Video Background
3
+ emoji: 🎞️
4
+ colorFrom: purple
5
+ colorTo: gray
6
+ sdk: gradio
7
+ sdk_version: 5.1.0
8
+ app_file: app.py
9
+ pinned: true
10
+ short_description: Easily remove your video's background!
11
+ ---
12
+
13
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,72 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import cv2
3
+ import numpy as np
4
+ import time
5
+ import random
6
+ from PIL import Image
7
+ import torch
8
+ import re
9
+
10
+ torch.jit.script = lambda f: f
11
+
12
+ from transparent_background import Remover
13
+
14
def doo(video, color, mode, progress=gr.Progress()):
    """Replace the background of every frame of *video* with a solid color.

    Args:
        video: Path to the input video file (as supplied by the gr.Video input).
        color: Value from gr.ColorPicker — either a hex string ("#RRGGBB")
            or an "rgba(r, g, b, a)" string.
        mode: 'Fast' selects the faster/less accurate model; any other value
            uses the default (accurate) model.
        progress: Gradio progress tracker (injected automatically).

    Returns:
        Path of the generated .mp4 file (a random numeric name in the CWD).
    """
    print(str(color))
    # Normalize the picker value to the "[r, g, b]" string form that
    # transparent_background's Remover.process(type=...) expects.
    if str(color).startswith('#'):
        color = color.lstrip('#')
        rgb = tuple(int(color[i:i + 2], 16) for i in (0, 2, 4))
        color = str(list(rgb))
    elif str(color).startswith('rgba'):
        rgba_match = re.match(r'rgba\(([\d.]+), ([\d.]+), ([\d.]+), [\d.]+\)', color)
        if rgba_match:
            r, g, b = rgba_match.groups()  # Extract r, g, b values
            color = str([int(float(r)), int(float(g)), int(float(b))])
    print(color)

    remover = Remover(mode='fast') if mode == 'Fast' else Remover()

    cap = cv2.VideoCapture(video)
    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))  # Get total frames
    writer = None
    tmpname = random.randint(111111111, 999999999)
    processed_frames = 0

    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break

        # OpenCV decodes frames as BGR; PIL and the remover work in RGB.
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        img = Image.fromarray(frame).convert('RGB')

        # Open the writer lazily, once the frame size is known.
        if writer is None:
            writer = cv2.VideoWriter(str(tmpname) + '.mp4',
                                     cv2.VideoWriter_fourcc(*'mp4v'),
                                     cap.get(cv2.CAP_PROP_FPS), img.size)

        processed_frames += 1
        print(f"Processing frame {processed_frames}")
        progress(processed_frames / total_frames,
                 desc=f"Processing frame {processed_frames}/{total_frames}")
        out = remover.process(img, type=color)
        # Swap back to BGR channel order for cv2.VideoWriter.
        writer.write(cv2.cvtColor(np.array(out), cv2.COLOR_BGR2RGB))

    cap.release()
    # BUGFIX: if the video could not be read at all, writer is still None;
    # the original unconditional writer.release() raised AttributeError.
    if writer is not None:
        writer.release()
    return str(tmpname) + '.mp4'
58
+
59
# --- Gradio UI wiring -------------------------------------------------
title = "🎞️ Video Background Removal Tool 🎥"
# BUGFIX: user-facing typo — "processing break" -> "processing breaks".
description = """*Please note that if your video file is long (has a high number of frames), there is a chance that processing breaks due to GPU timeout. In this case, consider trying Fast mode."""

examples = [['./input.mp4']]

iface = gr.Interface(
    fn=doo,
    inputs=[
        "video",
        gr.ColorPicker(label="Background color", value="#00FF00"),
        gr.components.Radio(
            ['Normal', 'Fast'],
            label='Select mode',
            value='Normal',
            info='Normal is more accurate, but takes longer. | Fast has lower accuracy so the process will be faster.',
        ),
    ],
    outputs="video",
    examples=examples,
    title=title,
    description=description,
)
iface.launch()
image_resizer.py ADDED
@@ -0,0 +1,95 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from PIL import Image, ImageOps
3
+ import numpy as np
4
+ import cv2
5
+
6
+
7
def get_new_size_and_padding(old_w, old_h, target_ratio):
    """Compute the canvas size and per-side padding that bring an
    old_w x old_h image to *target_ratio* (width / height) without
    scaling the original pixels.

    Returns:
        ((new_w, new_h), (pad_left, pad_top, pad_right, pad_bottom))
    """
    if old_w / old_h > target_ratio:
        # Wider than the target ratio: keep the width, grow the height.
        new_w, new_h = old_w, int(old_w / target_ratio)
        extra = new_h - old_h
        pad_left, pad_top = 0, extra // 2
        pad_right, pad_bottom = 0, extra - extra // 2
    else:
        # Taller than (or equal to) the target: keep the height, grow the width.
        new_w, new_h = int(old_h * target_ratio), old_h
        extra = new_w - old_w
        pad_left, pad_top = extra // 2, 0
        pad_right, pad_bottom = extra - extra // 2, 0
    return (new_w, new_h), (pad_left, pad_top, pad_right, pad_bottom)
24
+
25
+
26
def image_filler(img, padding):
    """Pad *img* on the sides given by *padding*, filling the border with a
    single color sampled from a Gaussian-blurred copy of the image.

    NOTE(review): despite the surrounding comments in the original, this does
    NOT paint the blurred image into the border — only the blurred top-left
    pixel (blurred[0, 0]) is used as a flat fill color.

    Args:
        img: Input PIL image (RGB assumed — TODO confirm against callers).
        padding: (pad_left, pad_top, pad_right, pad_bottom) in pixels.

    Returns:
        A new PIL image of size (w + left + right, h + top + bottom) with the
        original pasted in the middle.
    """
    np_img = np.array(img)
    h, w = np_img.shape[:2]
    pad_left, pad_top, pad_right, pad_bottom = padding

    # Blur once; only its [0, 0] pixel is sampled as the fill color below.
    blurred = cv2.GaussianBlur(np_img, (51, 51), 0)

    new_h = h + pad_top + pad_bottom
    new_w = w + pad_left + pad_right
    result = np.zeros((new_h, new_w, 3), dtype=np.uint8)
    # BUGFIX: the original assigned blurred[0, 0] to the whole canvas twice
    # in a row; the duplicate dead assignment has been removed.
    result[:, :] = blurred[0, 0]
    result[pad_top : pad_top + h, pad_left : pad_left + w] = np_img

    return Image.fromarray(result)
48
+
49
+
50
def resize_with_border(image, aspect_ratio, border_fill):
    """Pad *image* to the requested aspect ratio without scaling its pixels.

    Args:
        image: Input PIL image (converted to RGB internally).
        aspect_ratio: "W:H" string, e.g. "16:9".
        border_fill: "White" for a plain white border; any other value uses
            image_filler (flat sampled-color border).

    Returns:
        A new RGB PIL image whose width/height ratio matches *aspect_ratio*.
    """
    # Parse the "W:H" aspect-ratio string.
    w_ratio, h_ratio = map(int, aspect_ratio.split(":"))
    target_ratio = w_ratio / h_ratio

    # Ensure image is RGB.
    image = image.convert("RGB")
    old_w, old_h = image.size

    (new_w, new_h), padding = get_new_size_and_padding(old_w, old_h, target_ratio)

    if border_fill == "White":
        # ImageOps.expand accepts a (left, top, right, bottom) border tuple.
        result = ImageOps.expand(image, border=padding, fill=(255, 255, 255))
    else:
        result = image_filler(image, padding)

    # BUGFIX: the original re-resized `result` to (new_w, new_h) "in case of
    # rounding", but both fill paths already produce exactly that size, so
    # the extra LANCZOS pass was a no-op and has been removed.
    return result
71
+
72
+
73
# Choices offered in the aspect-ratio dropdown.
aspect_ratios = ["1:1", "4:3", "16:9", "3:2", "21:9"]

with gr.Blocks() as demo:
    gr.Markdown("# Image Resizer with Border Fill")
    with gr.Row():
        with gr.Column():
            image_input = gr.Image(type="pil", label="Upload Image")
            aspect_input = gr.Dropdown(
                aspect_ratios,
                value="1:1",
                label="Target Aspect Ratio",
            )
            fill_input = gr.Radio(
                ["White", "Image Filler"],
                value="White",
                label="Border Fill",
            )
            submit_btn = gr.Button("Resize Image")
        with gr.Column():
            image_output = gr.Image(type="pil", label="Resized Image")
    # Wire the button to the resize handler.
    submit_btn.click(
        resize_with_border,
        inputs=[image_input, aspect_input, fill_input],
        outputs=image_output,
    )

demo.launch()
input.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4b83e1d4b264e50e945abf630cca5c6d0e387d84435a14fa1ca679a6a50b4608
3
+ size 266619
requirements.txt ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ torch
2
+ torchvision
3
+ opencv-python
4
+ timm
5
+ tqdm
6
+ kornia
7
+ gdown
8
+ transparent-background
temp.py ADDED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import spaces
2
+ import gradio as gr
3
+ import cv2
4
+ import numpy as np
5
+ import time
6
+ import random
7
+ from PIL import Image
8
+ import torch
9
+ import re
10
+
11
+ torch.jit.script = lambda f: f
12
+
13
+ from transparent_background import Remover
14
+
15
@spaces.GPU(duration=90)
def doo(video, color, mode, progress=gr.Progress()):
    """GPU-allocated variant of the background remover: replaces each frame's
    background with *color*, bailing out early before the ZeroGPU wall-clock
    budget is exhausted.

    Args:
        video: Path to the input video file (as supplied by the gr.Video input).
        color: Value from gr.ColorPicker — hex "#RRGGBB" or "rgba(r, g, b, a)".
        mode: 'Fast' selects the faster/less accurate model; any other value
            uses the default (accurate) model.
        progress: Gradio progress tracker (injected automatically).

    Returns:
        Path of the generated .mp4 file (possibly truncated on timeout).
    """
    print(str(color))
    # Normalize the picker value to the "[r, g, b]" string form that
    # transparent_background's Remover.process(type=...) expects.
    if str(color).startswith('#'):
        color = color.lstrip('#')
        rgb = tuple(int(color[i:i + 2], 16) for i in (0, 2, 4))
        color = str(list(rgb))
    elif str(color).startswith('rgba'):
        rgba_match = re.match(r'rgba\(([\d.]+), ([\d.]+), ([\d.]+), [\d.]+\)', color)
        if rgba_match:
            r, g, b = rgba_match.groups()  # Extract r, g, b values
            color = str([int(float(r)), int(float(g)), int(float(b))])
    print(color)

    remover = Remover(mode='fast') if mode == 'Fast' else Remover()

    cap = cv2.VideoCapture(video)
    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))  # Get total frames
    writer = None
    tmpname = random.randint(111111111, 999999999)
    processed_frames = 0
    start_time = time.time()

    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break

        # Stop 5 s before the 20-minute GPU budget and return what we have.
        if time.time() - start_time >= 20 * 60 - 5:
            print("GPU Timeout is coming")
            cap.release()
            # BUGFIX: guard — writer is None if no frame was processed yet.
            if writer is not None:
                writer.release()
            return str(tmpname) + '.mp4'

        # OpenCV decodes frames as BGR; PIL and the remover work in RGB.
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        img = Image.fromarray(frame).convert('RGB')

        # Open the writer lazily, once the frame size is known.
        if writer is None:
            writer = cv2.VideoWriter(str(tmpname) + '.mp4',
                                     cv2.VideoWriter_fourcc(*'mp4v'),
                                     cap.get(cv2.CAP_PROP_FPS), img.size)

        processed_frames += 1
        print(f"Processing frame {processed_frames}")
        progress(processed_frames / total_frames,
                 desc=f"Processing frame {processed_frames}/{total_frames}")
        # BUGFIX: the computed `color` was ignored (hard-coded "overlay"),
        # so the user's picked background color had no effect; pass it
        # through, consistent with the app.py version of this function.
        out = remover.process(img, type=color)
        # Swap back to BGR channel order for cv2.VideoWriter.
        writer.write(cv2.cvtColor(np.array(out), cv2.COLOR_BGR2RGB))

    cap.release()
    # BUGFIX: same guard for the normal exit — writer may never have opened.
    if writer is not None:
        writer.release()
    return str(tmpname) + '.mp4'
66
+
67
# --- Gradio UI wiring (duplicate of app.py, with public share link) ---
title = "🎞️ Video Background Removal Tool 🎥"
# BUGFIX: user-facing typo — "processing break" -> "processing breaks".
description = """*Please note that if your video file is long (has a high number of frames), there is a chance that processing breaks due to GPU timeout. In this case, consider trying Fast mode."""

examples = [['./input.mp4']]

iface = gr.Interface(
    fn=doo,
    inputs=[
        "video",
        gr.ColorPicker(label="Background color", value="#00FF00"),
        gr.components.Radio(
            ['Normal', 'Fast'],
            label='Select mode',
            value='Normal',
            info='Normal is more accurate, but takes longer. | Fast has lower accuracy so the process will be faster.',
        ),
    ],
    outputs="video",
    examples=examples,
    title=title,
    description=description,
)
iface.launch(share=True)