gexu13 commited on
Commit
f74ae4b
·
verified ·
1 Parent(s): 680da51

Upload 16 files

Browse files
.gitattributes CHANGED
@@ -33,3 +33,10 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ person/person5.png filter=lfs diff=lfs merge=lfs -text
37
+ Side_By_Side_3D_Images/sbs_backyard.png filter=lfs diff=lfs merge=lfs -text
38
+ Side_By_Side_3D_Images/sbs_campus.png filter=lfs diff=lfs merge=lfs -text
39
+ Side_By_Side_3D_Images/sbs_downtown.png filter=lfs diff=lfs merge=lfs -text
40
+ Side_By_Side_3D_Images/sbs_neu.png filter=lfs diff=lfs merge=lfs -text
41
+ Side_By_Side_3D_Images/sbs_steam_clock.png filter=lfs diff=lfs merge=lfs -text
42
+ Side_By_Side_3D_Images/sbs_trail.png filter=lfs diff=lfs merge=lfs -text
Side_By_Side_3D_Images/sbs_backyard.png ADDED

Git LFS Details

  • SHA256: 8df7e1094cd44df271fd3cad9a362df5f853aa886c7f4bbfc4ef3a88c537dd80
  • Pointer size: 132 Bytes
  • Size of remote file: 2.4 MB
Side_By_Side_3D_Images/sbs_campus.png ADDED

Git LFS Details

  • SHA256: aaa90b773df0eb80161d0ccc2129a5f8babf93f31c6791b60d9bc3f8bd1c9041
  • Pointer size: 132 Bytes
  • Size of remote file: 1.51 MB
Side_By_Side_3D_Images/sbs_downtown.png ADDED

Git LFS Details

  • SHA256: d13d947b3b2a462bd79c145958bb5a44b00c3d0463c4c55871041fc25aabc792
  • Pointer size: 132 Bytes
  • Size of remote file: 2.15 MB
Side_By_Side_3D_Images/sbs_neu.png ADDED

Git LFS Details

  • SHA256: fa090e1b20ce0b683b5d0f9cdc5b5c1c0f93127cec0a06fc408341860d8c57e9
  • Pointer size: 131 Bytes
  • Size of remote file: 825 kB
Side_By_Side_3D_Images/sbs_steam_clock.png ADDED

Git LFS Details

  • SHA256: 7a5192340b654f5184ba669d65c46a2736e28b2017fcc5b19c10877762c690ec
  • Pointer size: 132 Bytes
  • Size of remote file: 1.97 MB
Side_By_Side_3D_Images/sbs_trail.png ADDED

Git LFS Details

  • SHA256: 04da0cb1972d716f7f4f90698944b2a8b51ae75775414ec8b01cd8dfab36a6c4
  • Pointer size: 132 Bytes
  • Size of remote file: 2.96 MB
app.py ADDED
@@ -0,0 +1,98 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from PIL import Image
3
+ import os
4
+
5
+ from image_segmentation_mask_rcnn import segment_person
6
+ from insert_person_into_stereo import insert_person_from_combined_stereo
7
+ from create_anaglyph import create_anaglyph
8
+
9
# Predefined sample files (make sure these exist in your project directory)
# UI label → path of a bundled side-by-side stereo background image.
DEFAULT_BACKGROUNDS = {
    "Backyard": "Side_By_Side_3D_Images/sbs_backyard.png",
    "Campus": "Side_By_Side_3D_Images/sbs_campus.png",
    "Downtown": "Side_By_Side_3D_Images/sbs_downtown.png",
    "NEU": "Side_By_Side_3D_Images/sbs_neu.png",
    "STEAM_CLOCK": "Side_By_Side_3D_Images/sbs_steam_clock.png",
    "Trail": "Side_By_Side_3D_Images/sbs_trail.png"
}

# UI label → path of a bundled sample person photo.
DEFAULT_PEOPLE = {
    "PERSON1": "person/person1.jpg",
    "PERSON2": "person/person2.png",
    "PERSON3": "person/person3.png",
    "PERSON4": "person/person4.png",
    "PERSON5": "person/person5.png",
}
26
+
27
def pipeline(person_image, stereo_image, depth, x, y):
    """Run the full person → stereo → anaglyph composition pipeline.

    Segments the person out of *person_image*, inserts the cutout into both
    halves of the side-by-side *stereo_image* at ``(x, y)`` with the chosen
    *depth*, and returns the resulting red/cyan anaglyph PIL image.
    """
    person_cutout = segment_person(person_image)

    left_view, right_view, _ = insert_person_from_combined_stereo(
        stereo_image=stereo_image,
        segmented_person=person_cutout,
        depth=depth,
        position=(x, y),
    )

    return create_anaglyph(left_view, right_view)
39
+
40
def get_image_dimensions(stereo_image, person_image):
    """Recompute the X/Y position-slider ranges from the current image sizes.

    Returns a pair of ``gr.update`` objects for the X and Y sliders; when
    either image is missing, both sliders are left untouched.
    """
    if stereo_image is None or person_image is None:
        return gr.update(), gr.update()

    bg_width, bg_height = stereo_image.size
    person_width, _ = person_image.size

    # Keep the maxima strictly positive so the sliders stay usable.
    x_limit = max(10, bg_width // 2 - person_width // 2)
    y_limit = max(10, bg_height)

    x_update = gr.update(minimum=0, maximum=x_limit, value=x_limit // 2)
    y_update = gr.update(minimum=0, maximum=y_limit, value=int(bg_height * 0.9))
    return x_update, y_update
51
+
52
def main():
    """Build and launch the Gradio UI for the 3D anaglyph composer."""
    with gr.Blocks() as demo:
        gr.Markdown("# 🧍‍➡️ 3D Anaglyph Composer")

        # Upload slots for the two source images.
        with gr.Row():
            person_input = gr.Image(type="pil", label="Person Image")
            stereo_input = gr.Image(type="pil", label="Stereo Background")

        # Sample selectors
        with gr.Row():
            with gr.Column():
                gr.Markdown("### Sample People")
                for label, path in DEFAULT_PEOPLE.items():
                    with gr.Row():
                        preview = gr.Image(value=path, label=label, interactive=False, show_label=False, width=128, height=128)
                        use_btn = gr.Button(f"Use {label}")
                        # `p=path` binds the current path as a default arg so each
                        # button loads its own file (avoids the late-binding closure pitfall).
                        use_btn.click(lambda p=path: Image.open(p), outputs=person_input)

            with gr.Column():
                gr.Markdown("### Sample Backgrounds")
                for label, path in DEFAULT_BACKGROUNDS.items():
                    with gr.Row():
                        preview = gr.Image(value=path, label=label, interactive=False, show_label=False)
                        use_btn = gr.Button(f"Use {label}")
                        use_btn.click(lambda p=path: Image.open(p), outputs=stereo_input)

        # Composition controls; slider ranges are replaced dynamically below.
        depth_input = gr.Dropdown(["close", "medium", "far"], value="medium", label="Depth")
        x_slider = gr.Slider(0, 2000, value=1000, label="Person X Position")
        y_slider = gr.Slider(0, 2000, value=500, label="Person Y Position")

        generate_btn = gr.Button("Generate 3D Anaglyph")
        output_img = gr.Image(type="pil", label="Anaglyph 3D Image")

        # Dynamically update position sliders when images are uploaded
        person_input.change(get_image_dimensions, inputs=[stereo_input, person_input], outputs=[x_slider, y_slider])
        stereo_input.change(get_image_dimensions, inputs=[stereo_input, person_input], outputs=[x_slider, y_slider])

        generate_btn.click(
            fn=pipeline,
            inputs=[person_input, stereo_input, depth_input, x_slider, y_slider],
            outputs=output_img
        )

    demo.launch()
96
+
97
# Launch the Gradio app when executed as a script.
if __name__ == "__main__":
    main()
create_anaglyph.py ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from PIL import Image
2
+ import numpy as np
3
+
4
def create_anaglyph(left_img, right_img, output_path=""):
    """Combine a stereo pair into a red/cyan anaglyph image.

    The red channel is taken from the left view and the green/blue channels
    from the right view.  *left_img* is resized to match *right_img*.  When
    *output_path* is non-empty the result is also written to disk.

    Raises FileNotFoundError when either input image is None.
    """
    if left_img is None or right_img is None:
        raise FileNotFoundError("Left or right image not found.")

    # Match sizes, then work on raw RGB pixel arrays.
    matched_left = left_img.resize(right_img.size)
    left_pixels = np.array(matched_left.convert("RGB"))
    right_pixels = np.array(right_img.convert("RGB"))

    # Red from the left eye; green and blue from the right eye.
    channels = (
        left_pixels[:, :, 0],
        right_pixels[:, :, 1],
        right_pixels[:, :, 2],
    )
    anaglyph_img = Image.fromarray(np.stack(channels, axis=2).astype(np.uint8))

    # Save output (optional)
    if output_path:
        anaglyph_img.save(output_path)
        print(f"Anaglyph image saved to: {output_path}")

    return anaglyph_img
30
+
31
+
32
if __name__ == "__main__":
    # Demo: build an anaglyph from a previously generated stereo pair.
    from PIL import Image

    left_view = Image.open("stereo_close_left_with_person.png").convert("RGB")
    right_view = Image.open("stereo_close_right_with_person.png").convert("RGB")

    create_anaglyph(
        left_img=left_view,
        right_img=right_view,
        output_path="anaglyph_with_person.png",
    )
image_segmentation_mask_rcnn.py ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ from torchvision.models.detection import maskrcnn_resnet50_fpn
3
+ from torchvision.transforms import functional as F
4
+ import numpy as np
5
+ from PIL import Image
6
+
7
# Load the pre-trained Mask R-CNN model
def load_model():
    """Return a COCO-pretrained Mask R-CNN detector in evaluation mode."""
    detector = maskrcnn_resnet50_fpn(pretrained=True)
    detector.eval()
    return detector
12
+
13
# Get the mask for the person class
def extract_person_mask(model, image_pil, score_threshold=0.8):
    """Return a binary (0/255) uint8 mask for the first confident person.

    COCO label 1 is "person".  Detections are scanned in the order the model
    returns them; the first with a score above *score_threshold* wins.
    Returns None when no sufficiently confident person is found.
    """
    with torch.no_grad():
        predictions = model([F.to_tensor(image_pil)])[0]

    for idx, label in enumerate(predictions['labels']):
        confident = predictions['scores'][idx].item() > score_threshold
        if label.item() == 1 and confident:
            soft_mask = predictions['masks'][idx, 0].cpu().numpy()
            # Binarize the soft mask into an 8-bit alpha mask.
            return (soft_mask > 0.5).astype(np.uint8) * 255

    return None
26
+
27
# Apply the mask to the image and convert to transparent PNG
def apply_mask_to_image(image_pil, mask):
    """Use *mask* as the alpha channel, making the background transparent."""
    rgba_pixels = np.array(image_pil.convert("RGBA"))
    rgba_pixels[:, :, 3] = mask  # 255 keeps the person, 0 hides the rest
    return Image.fromarray(rgba_pixels)
33
+
34
# Save the image
def save_segmented_person(output_image, output_path):
    """Write the segmented RGBA image to *output_path* and log the location."""
    output_image.save(output_path)
    print(f"Segmented person saved to: {output_path}")
38
+
39
# Main function to run everything
def segment_person(image_pil, output_path=""):
    """Segment the most confident person out of *image_pil*.

    Returns an RGBA PIL image with a transparent background, optionally
    saving it to *output_path*.  Returns None (after printing a note) when
    no person is detected with high enough confidence.
    """
    detector = load_model()
    mask = extract_person_mask(detector, image_pil)

    # Guard clause: nothing to cut out.
    if mask is None:
        print("No person detected with high enough confidence.")
        return None

    segmented_image = apply_mask_to_image(image_pil, mask)
    if output_path:
        save_segmented_person(segmented_image, output_path)
    return segmented_image
52
+
53
# Example usage
if __name__ == "__main__":
    source_path = "./person/person1.jpg"
    result_path = "segmented_person.png"
    source = Image.open(source_path).convert("RGB")
    segment_person(source, result_path)
insert_person_into_stereo.py ADDED
@@ -0,0 +1,136 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from PIL import Image
2
+ import numpy as np
3
+
4
# Depth → disparity & scaling factor
# Horizontal pixel offset between the left and right views for each named
# depth; larger disparity makes the person appear closer to the viewer.
disparity_map = {
    "close": 60,
    "medium": 30,
    "far": 5
}

# Relative size of the person at each named depth (closer → larger).
scale_map = {
    "close": 1.2,
    "medium": 0.8,
    "far": 0.4
}
+ }
16
+
17
def clamp_large_person_image(image, max_dim=800):
    """Shrink *image* so its longest side is at most *max_dim* pixels.

    Aspect ratio is preserved; images already within the limit are returned
    unchanged.
    """
    width, height = image.size
    longest = max(width, height)
    if longest <= max_dim:
        return image

    shrink = max_dim / longest
    target = (int(width * shrink), int(height * shrink))
    resized = image.resize(target, Image.Resampling.LANCZOS)
    print(f"⚠️ Person image auto-resized from ({width}, {height}) to {target} before scaling.")
    return resized
26
+
27
def resize_person(person_img, scale_factor):
    """Return *person_img* uniformly scaled by *scale_factor* (Lanczos)."""
    width, height = person_img.size
    scaled_size = (int(width * scale_factor), int(height * scale_factor))
    return person_img.resize(scaled_size, Image.Resampling.LANCZOS)
31
+
32
def overlay_image_auto_scale(background, overlay_rgba, x, y):
    """Alpha-composite *overlay_rgba* onto a copy of *background* at (x, y).

    The overlay is cropped rather than shifted when it extends past any edge
    of the background, so (x, y) always addresses the same background pixel.
    If cropping leaves less than 20 px in either dimension the overlay is
    skipped and the original *background* is returned unchanged.
    """
    bg_w, bg_h = background.size
    ov_w, ov_h = overlay_rgba.size

    # Clamp overlay position and crop overlay if needed
    if x < 0:
        # Drop the columns that would fall left of the background.
        overlay_rgba = overlay_rgba.crop((-x, 0, ov_w, ov_h))
        ov_w += x  # x is negative here, so this shrinks the width
        x = 0
    if y < 0:
        # Drop the rows that would fall above the background.
        overlay_rgba = overlay_rgba.crop((0, -y, ov_w, ov_h))
        ov_h += y  # y is negative here, so this shrinks the height
        y = 0
    if x + ov_w > bg_w:
        # Trim the right edge that would overflow the background.
        overlay_rgba = overlay_rgba.crop((0, 0, bg_w - x, ov_h))
        ov_w = bg_w - x
    if y + ov_h > bg_h:
        # Trim the bottom edge that would overflow the background.
        overlay_rgba = overlay_rgba.crop((0, 0, ov_w, bg_h - y))
        ov_h = bg_h - y

    if ov_w < 20 or ov_h < 20:
        print("⚠️ Person fully clipped or too small, skipped.")
        return background

    # Paste with transparency (the RGBA image doubles as its own mask).
    background = background.copy()
    background.paste(overlay_rgba, (x, y), overlay_rgba)
    return background
60
+
61
def insert_person_from_combined_stereo(
    stereo_image,
    segmented_person,
    depth="medium",
    position=(100, 100),
    scale=None,
    save_output=False
):
    """Insert a segmented person into both halves of a side-by-side stereo image.

    Args:
        stereo_image: PIL RGB image holding left|right views side by side.
        segmented_person: RGBA PIL image of the cut-out person.
        depth: "close", "medium" or "far"; selects disparity and scale.
        position: (x, y) insertion point — x is the horizontal centre of the
            person, y its bottom edge (bottom-aligned).
        scale: optional explicit scale factor overriding the depth-based one.
        save_output: when True, also save left/right/combined images to disk.

    Returns:
        (left_with_person, right_with_person, combined_side_by_side) PIL images.

    Raises:
        FileNotFoundError: if *stereo_image* is None.
        ValueError: if *segmented_person* is None or lacks an alpha channel.
    """
    # Validate BEFORE touching .size: previously the size prints ran first and
    # raised AttributeError on None input instead of the intended exceptions.
    if stereo_image is None:
        raise FileNotFoundError("Stereo image not found.")
    if segmented_person is None or segmented_person.mode != "RGBA":
        raise ValueError("Segmented person image must be RGBA with an alpha channel.")

    print(f"Stereo image size: {stereo_image.size}")
    print(f"Segmented person size: {segmented_person.size}")

    # Clamp large image
    segmented_person = clamp_large_person_image(segmented_person)

    # Split the side-by-side image into its left/right halves.
    w, h = stereo_image.size
    half_w = w // 2
    left_image = stereo_image.crop((0, 0, half_w, h))
    right_image = stereo_image.crop((half_w, 0, w, h))

    # Depth drives both apparent size and the left/right horizontal shift.
    # Unknown depth names fall back to the "medium" settings consistently
    # (the disparity fallback was 10, which matched no entry in disparity_map).
    scale_factor = scale if scale is not None else scale_map.get(depth, 0.8)
    disparity = disparity_map.get(depth, 30)

    # Use user-specified position
    x_base, y_base = position

    # Resize person
    person_resized = resize_person(segmented_person, scale_factor)
    print(f"Resized person size: {person_resized.size}")

    # Opposite half-disparity shifts in each eye create the depth illusion.
    x_left = x_base - disparity // 2
    x_right = x_base + disparity // 2

    ov_w, ov_h = person_resized.size
    x_left_adj = x_left - ov_w // 2    # centre the person horizontally on x
    x_right_adj = x_right - ov_w // 2
    y_adj = y_base - ov_h  # bottom-aligned

    # Overlay onto L/R views
    left_with_person = overlay_image_auto_scale(left_image, person_resized, x_left_adj, y_adj)
    right_with_person = overlay_image_auto_scale(right_image, person_resized, x_right_adj, y_adj)

    # Merge back into one side-by-side image
    combined_output = Image.new("RGB", (w, h))
    combined_output.paste(left_with_person, (0, 0))
    combined_output.paste(right_with_person, (half_w, 0))

    # Optionally save
    if save_output:
        left_with_person.save(f"stereo_{depth}_left_with_person.png")
        right_with_person.save(f"stereo_{depth}_right_with_person.png")
        combined_output.save(f"stereo_{depth}_combined_with_person.png")

    print(f"✅ Step 2 complete: Person inserted into stereo image (depth: {depth})")
    return left_with_person, right_with_person, combined_output
123
+
124
+
125
if __name__ == "__main__":
    # Demo: composite a previously segmented person into a sample background.
    sbs_background = Image.open("./Side_By_Side_3D_Images/sbs_downtown.png").convert("RGB")
    person_cutout = Image.open("segmented_person.png").convert("RGBA")

    insert_person_from_combined_stereo(
        stereo_image=sbs_background,
        segmented_person=person_cutout,
        depth="close",
        position=(500, 1000),
        save_output=True,
    )
136
+
person/person1.jpg ADDED
person/person2.png ADDED
person/person3.png ADDED
person/person4.png ADDED
person/person5.png ADDED

Git LFS Details

  • SHA256: a244cffcb063488054e8cf282b6d3d6161ca1a92d480ca28438ddc1babb58bac
  • Pointer size: 131 Bytes
  • Size of remote file: 114 kB
requirements.txt ADDED
@@ -0,0 +1,207 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ absl-py==2.1.0
2
+ aiofiles==23.2.1
3
+ aiohappyeyeballs==2.5.0
4
+ aiohttp==3.11.13
5
+ aiosignal==1.3.2
6
+ annotated-types==0.7.0
7
+ anyio==4.8.0
8
+ appnope==0.1.4
9
+ asttokens==3.0.0
10
+ astunparse==1.6.3
11
+ attrs==24.2.0
12
+ Automat==22.10.0
13
+ autopep8==2.3.1
14
+ av==14.1.0
15
+ backcall==0.2.0
16
+ beautifulsoup4==4.12.3
17
+ bleach==6.2.0
18
+ bs4==0.0.2
19
+ certifi==2025.1.31
20
+ cffi==1.17.0
21
+ charset-normalizer==3.3.2
22
+ click==8.1.8
23
+ comm==0.2.2
24
+ constantly==23.10.4
25
+ contourpy==1.3.1
26
+ coverage==7.6.4
27
+ cryptography==43.0.0
28
+ cssselect==1.2.0
29
+ cycler==0.12.1
30
+ datasets==3.3.2
31
+ debugpy==1.8.9
32
+ decorator==5.1.1
33
+ defusedxml==0.7.1
34
+ dill==0.3.8
35
+ distro==1.9.0
36
+ docopt==0.6.2
37
+ dotenv==0.9.9
38
+ executing==2.1.0
39
+ faiss-cpu==1.10.0
40
+ fastapi==0.115.8
41
+ fastjsonschema==2.21.1
42
+ ffmpy==0.5.0
43
+ filelock==3.15.4
44
+ flatbuffers==25.2.10
45
+ fonttools==4.55.2
46
+ frozenlist==1.5.0
47
+ fsspec==2024.12.0
48
+ fuzzywuzzy==0.18.0
49
+ gast==0.6.0
50
+ git-filter-repo==2.47.0
51
+ google-pasta==0.2.0
52
+ gradio==5.15.0
53
+ gradio_client==1.7.0
54
+ grpcio==1.71.0
55
+ h11==0.14.0
56
+ h5py==3.13.0
57
+ httpcore==1.0.7
58
+ httpx==0.28.1
59
+ huggingface-hub==0.28.1
60
+ hyperlink==21.0.0
61
+ idna==3.7
62
+ imageio==2.37.0
63
+ incremental==24.7.2
64
+ iniconfig==2.0.0
65
+ ipykernel==6.29.5
66
+ ipython==8.12.3
67
+ itemadapter==0.9.0
68
+ itemloaders==1.3.1
69
+ jedi==0.19.2
70
+ Jinja2==3.1.5
71
+ jiter==0.8.2
72
+ jmespath==1.0.1
73
+ joblib==1.4.2
74
+ jsonschema==4.23.0
75
+ jsonschema-specifications==2024.10.1
76
+ jupyter_client==8.6.3
77
+ jupyter_core==5.7.2
78
+ jupyterlab_pygments==0.3.0
79
+ keras==3.9.0
80
+ kiwisolver==1.4.7
81
+ lazy_loader==0.4
82
+ libclang==18.1.1
83
+ lxml==5.3.0
84
+ Markdown==3.7
85
+ markdown-it-py==3.0.0
86
+ MarkupSafe==2.1.5
87
+ matplotlib==3.9.3
88
+ matplotlib-inline==0.1.7
89
+ mdurl==0.1.2
90
+ mistune==3.1.2
91
+ ml_dtypes==0.5.1
92
+ mpmath==1.3.0
93
+ multidict==6.1.0
94
+ multiprocess==0.70.16
95
+ namex==0.0.8
96
+ nbclient==0.10.2
97
+ nbconvert==7.16.6
98
+ nbformat==5.10.4
99
+ nest-asyncio==1.6.0
100
+ networkx==3.4.2
101
+ numpy==2.1.3
102
+ openai==1.61.1
103
+ opencv-python==4.11.0.86
104
+ opt_einsum==3.4.0
105
+ optree==0.14.1
106
+ orjson==3.10.15
107
+ packaging==24.1
108
+ pandas==2.2.3
109
+ pandocfilters==1.5.1
110
+ parsel==1.9.1
111
+ parso==0.8.4
112
+ pedal==2.6.4
113
+ pexpect==4.9.0
114
+ pickleshare==0.7.5
115
+ pillow==11.0.0
116
+ pipreqs==0.5.0
117
+ platformdirs==4.3.6
118
+ pluggy==1.5.0
119
+ prompt_toolkit==3.0.48
120
+ propcache==0.3.0
121
+ Protego==0.3.1
122
+ protobuf==5.29.3
123
+ psutil==6.1.0
124
+ ptyprocess==0.7.0
125
+ pure_eval==0.2.3
126
+ pyarrow==19.0.1
127
+ pyasn1==0.6.0
128
+ pyasn1_modules==0.4.0
129
+ pycodestyle==2.12.1
130
+ pycparser==2.22
131
+ pydantic==2.10.6
132
+ pydantic_core==2.27.2
133
+ PyDispatcher==2.0.7
134
+ pydub==0.25.1
135
+ Pygments==2.18.0
136
+ pyOpenSSL==24.2.1
137
+ pyparsing==3.2.0
138
+ pytest==8.3.3
139
+ python-dateutil==2.9.0.post0
140
+ python-dotenv==1.0.1
141
+ python-multipart==0.0.20
142
+ pytz==2024.2
143
+ PyYAML==6.0.2
144
+ pyzmq==26.2.0
145
+ queuelib==1.7.0
146
+ RapidFuzz==3.10.1
147
+ referencing==0.36.2
148
+ regex==2024.11.6
149
+ requests==2.32.3
150
+ requests-file==2.1.0
151
+ rich==13.9.4
152
+ rpds-py==0.23.0
153
+ ruff==0.9.4
154
+ safehttpx==0.1.6
155
+ safetensors==0.5.2
156
+ scikit-image==0.25.1
157
+ scikit-learn==1.6.1
158
+ scipy==1.15.1
159
+ Scrapy==2.11.2
160
+ semantic-version==2.10.0
161
+ sentence-transformers==3.4.1
162
+ service-identity==24.1.0
163
+ setuptools==72.2.0
164
+ shapely==2.0.6
165
+ shellingham==1.5.4
166
+ six==1.16.0
167
+ sniffio==1.3.1
168
+ soupsieve==2.6
169
+ stack-data==0.6.3
170
+ starlette==0.45.3
171
+ sympy==1.13.1
172
+ tabulate==0.9.0
173
+ tensorboard==2.19.0
174
+ tensorboard-data-server==0.7.2
175
+ tensorflow==2.19.0
176
+ termcolor==2.4.0
177
+ tf_keras==2.19.0
178
+ threadpoolctl==3.5.0
179
+ tifffile==2025.1.10
180
+ tinycss2==1.4.0
181
+ tldextract==5.1.2
182
+ tokenizers==0.21.0
183
+ tomlkit==0.13.2
184
+ torch==2.6.0
185
+ torchaudio==2.6.0
186
+ torchvision==0.21.0
187
+ tornado==6.4.2
188
+ tqdm==4.67.1
189
+ traitlets==5.14.3
190
+ transformers==4.49.0
191
+ Twisted==24.7.0
192
+ typer==0.15.1
193
+ typing_extensions==4.12.2
194
+ tzdata==2024.2
195
+ urllib3==2.2.2
196
+ uvicorn==0.34.0
197
+ w3lib==2.2.1
198
+ wcwidth==0.2.13
199
+ webencodings==0.5.1
200
+ websockets==14.2
201
+ Werkzeug==3.1.3
202
+ wheel==0.45.1
203
+ wrapt==1.17.2
204
+ xxhash==3.5.0
205
+ yarg==0.1.9
206
+ yarl==1.18.3
207
+ zope.interface==7.0.1