HowardZhangdqs commited on
Commit
eb1792c
·
verified ·
1 Parent(s): 9536ca2

Upload 1033 files

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +1 -0
  2. auto_batch_process.py +213 -0
  3. batch_process_images.py +289 -0
  4. extract_pages.py +53 -0
  5. extracted_pages/page_0064.png +3 -0
  6. extracted_pages/page_0065.png +3 -0
  7. extracted_pages/page_0066.png +3 -0
  8. extracted_pages/page_0067.png +3 -0
  9. extracted_pages/page_0068.png +3 -0
  10. extracted_pages/page_0069.png +3 -0
  11. extracted_pages/page_0070.png +3 -0
  12. extracted_pages/page_0071.png +3 -0
  13. extracted_pages/page_0072.png +3 -0
  14. extracted_pages/page_0073.png +3 -0
  15. extracted_pages/page_0074.png +3 -0
  16. extracted_pages/page_0075.png +3 -0
  17. extracted_pages/page_0076.png +3 -0
  18. extracted_pages/page_0077.png +3 -0
  19. extracted_pages/page_0078.png +3 -0
  20. extracted_pages/page_0079.png +3 -0
  21. extracted_pages/page_0080.png +3 -0
  22. extracted_pages/page_0081.png +3 -0
  23. extracted_pages/page_0082.png +3 -0
  24. extracted_pages/page_0083.png +3 -0
  25. extracted_pages/page_0084.png +3 -0
  26. extracted_pages/page_0085.png +3 -0
  27. extracted_pages/page_0086.png +3 -0
  28. extracted_pages/page_0087.png +3 -0
  29. extracted_pages/page_0088.png +3 -0
  30. extracted_pages/page_0089.png +3 -0
  31. extracted_pages/page_0090.png +3 -0
  32. extracted_pages/page_0091.png +3 -0
  33. extracted_pages/page_0092.png +3 -0
  34. extracted_pages/page_0093.png +3 -0
  35. extracted_pages/page_0094.png +3 -0
  36. extracted_pages/page_0095.png +3 -0
  37. extracted_pages/page_0096.png +3 -0
  38. extracted_pages/page_0097.png +3 -0
  39. extracted_pages/page_0098.png +3 -0
  40. extracted_pages/page_0099.png +3 -0
  41. extracted_pages/page_0100.png +3 -0
  42. extracted_pages/page_0101.png +3 -0
  43. extracted_pages/page_0102.png +3 -0
  44. extracted_pages/page_0103.png +3 -0
  45. extracted_pages/page_0104.png +3 -0
  46. extracted_pages/page_0105.png +3 -0
  47. extracted_pages/page_0106.png +3 -0
  48. extracted_pages/page_0107.png +3 -0
  49. extracted_pages/page_0108.png +3 -0
  50. extracted_pages/page_0109.png +3 -0
.gitattributes CHANGED
@@ -57,3 +57,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
57
  # Video files - compressed
58
  *.mp4 filter=lfs diff=lfs merge=lfs -text
59
  *.webm filter=lfs diff=lfs merge=lfs -text
 
 
57
  # Video files - compressed
58
  *.mp4 filter=lfs diff=lfs merge=lfs -text
59
  *.webm filter=lfs diff=lfs merge=lfs -text
60
+ 中国手语.pdf filter=lfs diff=lfs merge=lfs -text
auto_batch_process.py ADDED
@@ -0,0 +1,213 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import cv2
2
+ import numpy as np
3
+ import os
4
+ from pathlib import Path
5
+ import glob
6
+
7
def process_single_image(image_path, row_threshold_ratio=0.7, min_line_distance=300):
    """
    Segment a page image into horizontal strips separated by near-full-width rows.

    The image is binarized with inversion (so dark ink becomes foreground),
    dilated to close small gaps, then scanned row by row: a row whose
    foreground pixel count exceeds ``row_threshold_ratio`` of the image width
    is a candidate cut line. Candidate rows are grouped into runs, each run is
    collapsed to its middle row, and the resulting lines are filtered so that
    accepted lines are at least ``min_line_distance`` pixels apart.

    Args:
        image_path (str): Path to the input image.
        row_threshold_ratio (float): Fraction of the width a row's foreground
            pixels must cover to count as a cut line. Defaults to 0.7.
        min_line_distance (int): Minimum vertical spacing, in pixels, between
            two accepted separation lines. Defaults to 300 — the value the
            original code enforced (its comments incorrectly said 600).

    Returns:
        tuple: ``(segments, visualization)`` where ``segments`` is a list of
        ``(start_y, end_y)`` row ranges and ``visualization`` is a BGR copy of
        the input with cut lines drawn in red and a segment count overlaid.
        Returns ``(None, None)`` if the image cannot be read.
    """
    img = cv2.imread(image_path)
    if img is None:
        print(f"Error: Could not read image from {image_path}")
        return None, None

    print(f"Processing: {Path(image_path).name}")

    # Binarize with inversion so dark content (ink) becomes foreground (255).
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    _, binary = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY_INV)

    # Dilate 10px in all directions to thicken strokes, then 40px horizontally
    # to bridge gaps in broken separator lines.
    dilated = cv2.dilate(binary, np.ones((10, 10), np.uint8), iterations=1)
    dilated_horizontal = cv2.dilate(dilated, np.ones((1, 40), np.uint8), iterations=1)

    height, width = dilated_horizontal.shape

    # Rows whose foreground coverage exceeds the threshold are cut candidates.
    threshold = width * row_threshold_ratio
    cut_lines = [y for y in range(height)
                 if np.sum(dilated_horizontal[y, :] > 0) >= threshold]

    # Collapse each run of nearby candidate rows (gaps <= 5px stay in one run)
    # to its middle row, then enforce the minimum spacing between accepted
    # lines.  NOTE: the original comments claimed a 600px minimum; the code
    # has always enforced min_line_distance (300px by default).
    separation_lines = []
    if cut_lines:
        grouped = []
        current_group = [cut_lines[0]]
        for y in cut_lines[1:]:
            if y - current_group[-1] <= 5:
                current_group.append(y)
            else:
                grouped.append(current_group[len(current_group) // 2])
                current_group = [y]
        grouped.append(current_group[len(current_group) // 2])

        for line_y in grouped:
            # Accept only lines far enough from every previously accepted one.
            if all(abs(line_y - prev) >= min_line_distance
                   for prev in separation_lines):
                separation_lines.append(line_y)

    print(f"Found {len(separation_lines)} separation lines")

    # Convert cut lines to (start_y, end_y) segments, skipping slivers < 20px.
    segments = []
    start_y = 0
    for line_y in separation_lines:
        if line_y > start_y + 20:
            segments.append((start_y, line_y))
        start_y = line_y + 1
    # Add the trailing segment below the last cut line, if tall enough.
    if start_y < height - 20:
        segments.append((start_y, height))

    # Draw the accepted cut lines (red) and a segment count for visual review.
    visualization = img.copy()
    for line_y in separation_lines:
        cv2.line(visualization, (0, line_y), (width - 1, line_y), (0, 0, 255), 3)
    cv2.putText(visualization, f'Segments: {len(segments)}', (10, 30),
                cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)

    return segments, visualization
116
+
117
def save_segments(image_path, segments, output_dir):
    """
    Write each (start_y, end_y) segment of the source image as its own PNG.

    Args:
        image_path (str): Path to the original image.
        segments (list): List of (start_y, end_y) row ranges.
        output_dir (str): Directory the segment files are written to
            (created if it does not exist).
    """
    source = cv2.imread(image_path)
    stem = Path(image_path).stem

    os.makedirs(output_dir, exist_ok=True)

    for index, (top, bottom) in enumerate(segments):
        strip = source[top:bottom, :]
        cv2.imwrite(os.path.join(output_dir, f"{stem}_segment_{index}.png"), strip)

    print(f"Saved {len(segments)} segments")
138
+
139
def auto_batch_process_images(input_dir, output_dir="segmented_images"):
    """
    Batch-process every PNG in ``input_dir`` without manual approval.

    Each image is segmented with ``process_single_image`` and the resulting
    strips are written via ``save_segments``; a summary of successes,
    failures, and total segments is printed at the end.

    Args:
        input_dir (str): Directory containing input PNG images.
        output_dir (str): Directory to save output segments.
    """
    # Sort so pages are processed in filename order.
    image_files = sorted(glob.glob(os.path.join(input_dir, "*.png")))

    if not image_files:
        print(f"No PNG files found in {input_dir}")
        return

    print(f"Found {len(image_files)} images to process")
    print("Processing automatically without manual approval...")
    print("=" * 50)

    # Statistics for the final summary.
    processed_count = 0
    failed_count = 0
    total_segments = 0

    for i, image_path in enumerate(image_files, 1):
        filename = Path(image_path).name
        # BUG FIX: the progress line printed a literal "(unknown)" placeholder
        # while the computed filename was never used.
        print(f"\nProcessing {i}/{len(image_files)}: {filename}")

        segments, visualization = process_single_image(image_path)

        if segments is None:
            print(f"Failed to process {image_path}")
            failed_count += 1
            continue

        if len(segments) == 0:
            print(f"No segments found in {image_path}")
            failed_count += 1
            continue

        # Save segments without asking for confirmation.
        save_segments(image_path, segments, output_dir)
        processed_count += 1
        total_segments += len(segments)

        print(f"Successfully processed with {len(segments)} segments")

    print("\n" + "=" * 50)
    print("Automatic batch processing complete!")
    print(f"Total images: {len(image_files)}")
    print(f"Successfully processed: {processed_count}")
    print(f"Failed: {failed_count}")
    print(f"Total segments created: {total_segments}")
    print(f"Output directory: {output_dir}")
196
+
197
def main():
    """Entry point: run the automatic batch segmentation over extracted pages."""
    pages_dir = "/data/scientific_research/sign_language/extracted_pages"
    segments_dir = "segmented_images"

    # Guard clause: nothing to do without the input pages.
    if not os.path.exists(pages_dir):
        print(f"Error: Input directory {pages_dir} not found")
        return

    for line in (
        "Starting automatic batch image processing...",
        f"Input directory: {pages_dir}",
        f"Output directory: {segments_dir}",
    ):
        print(line)

    auto_batch_process_images(pages_dir, segments_dir)


if __name__ == "__main__":
    main()
batch_process_images.py ADDED
@@ -0,0 +1,289 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import cv2
2
+ import numpy as np
3
+ import os
4
+ from pathlib import Path
5
+ import glob
6
+
7
def process_single_image(image_path, row_threshold_ratio=0.7, min_line_distance=300):
    """
    Segment a page image into horizontal strips separated by near-full-width rows.

    The image is binarized with inversion (so dark ink becomes foreground),
    dilated to close small gaps, then scanned row by row: a row whose
    foreground pixel count exceeds ``row_threshold_ratio`` of the image width
    is a candidate cut line. Candidate rows are grouped into runs, each run is
    collapsed to its middle row, and the resulting lines are filtered so that
    accepted lines are at least ``min_line_distance`` pixels apart.

    Args:
        image_path (str): Path to the input image.
        row_threshold_ratio (float): Fraction of the width a row's foreground
            pixels must cover to count as a cut line. Defaults to 0.7.
        min_line_distance (int): Minimum vertical spacing, in pixels, between
            two accepted separation lines. Defaults to 300 — the value the
            original code enforced (its comments incorrectly said 600).

    Returns:
        tuple: ``(segments, visualization)`` where ``segments`` is a list of
        ``(start_y, end_y)`` row ranges and ``visualization`` is a BGR copy of
        the input with cut lines drawn in red and a segment count overlaid.
        Returns ``(None, None)`` if the image cannot be read.
    """
    img = cv2.imread(image_path)
    if img is None:
        print(f"Error: Could not read image from {image_path}")
        return None, None

    print(f"Processing: {Path(image_path).name}")

    # Binarize with inversion so dark content (ink) becomes foreground (255).
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    _, binary = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY_INV)

    # Dilate 10px in all directions to thicken strokes, then 40px horizontally
    # to bridge gaps in broken separator lines.
    dilated = cv2.dilate(binary, np.ones((10, 10), np.uint8), iterations=1)
    dilated_horizontal = cv2.dilate(dilated, np.ones((1, 40), np.uint8), iterations=1)

    height, width = dilated_horizontal.shape

    # Rows whose foreground coverage exceeds the threshold are cut candidates.
    threshold = width * row_threshold_ratio
    cut_lines = [y for y in range(height)
                 if np.sum(dilated_horizontal[y, :] > 0) >= threshold]

    # Collapse each run of nearby candidate rows (gaps <= 5px stay in one run)
    # to its middle row, then enforce the minimum spacing between accepted
    # lines.  NOTE: the original comments claimed a 600px minimum; the code
    # has always enforced min_line_distance (300px by default).
    separation_lines = []
    if cut_lines:
        grouped = []
        current_group = [cut_lines[0]]
        for y in cut_lines[1:]:
            if y - current_group[-1] <= 5:
                current_group.append(y)
            else:
                grouped.append(current_group[len(current_group) // 2])
                current_group = [y]
        grouped.append(current_group[len(current_group) // 2])

        for line_y in grouped:
            # Accept only lines far enough from every previously accepted one.
            if all(abs(line_y - prev) >= min_line_distance
                   for prev in separation_lines):
                separation_lines.append(line_y)

    print(f"Found {len(separation_lines)} separation lines")

    # Convert cut lines to (start_y, end_y) segments, skipping slivers < 20px.
    segments = []
    start_y = 0
    for line_y in separation_lines:
        if line_y > start_y + 20:
            segments.append((start_y, line_y))
        start_y = line_y + 1
    # Add the trailing segment below the last cut line, if tall enough.
    if start_y < height - 20:
        segments.append((start_y, height))

    # Draw the accepted cut lines (red) and a segment count for visual review.
    visualization = img.copy()
    for line_y in separation_lines:
        cv2.line(visualization, (0, line_y), (width - 1, line_y), (0, 0, 255), 3)
    cv2.putText(visualization, f'Segments: {len(segments)}', (10, 30),
                cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)

    return segments, visualization
116
+
117
def save_segments(image_path, segments, output_dir, add_error_suffix=False):
    """
    Save image segments as individual PNG files.

    Args:
        image_path (str): Original image path.
        segments (list): List of (start_y, end_y) tuples.
        output_dir (str): Output directory (created if missing).
        add_error_suffix (bool): Append '_err' to each segment filename,
            marking the page as needing manual review.
    """
    img = cv2.imread(image_path)
    # ROBUSTNESS FIX: the original assumed imread always succeeds; slicing
    # None would raise a cryptic TypeError if the file vanished or was
    # unreadable between segmentation and saving.
    if img is None:
        print(f"Error: Could not read image from {image_path}")
        return

    base_name = Path(image_path).stem

    os.makedirs(output_dir, exist_ok=True)

    # Build the suffix once; "" reproduces the original non-error filenames.
    suffix = "_err" if add_error_suffix else ""

    for i, (start_y, end_y) in enumerate(segments):
        segment = img[start_y:end_y, :]
        output_path = os.path.join(output_dir, f"{base_name}_segment_{i}{suffix}.png")
        cv2.imwrite(output_path, segment)

    print(f"Saved {len(segments)} segments with suffix '{suffix}'")
145
+
146
def batch_process_images(input_dir, output_dir="batch_segmented_images"):
    """
    Interactively review and save segments for every PNG in ``input_dir``.

    Each image is segmented with ``process_single_image``, the proposed cut
    lines are shown in an OpenCV window, and a keypress decides the outcome:

        SPACE       save segments normally (accepted)
        other keys  save segments with an '_err' filename suffix
        UP / DOWN   navigate to the previous / next image
        ESC         exit the review loop

    Args:
        input_dir (str): Directory containing input images.
        output_dir (str): Directory to save output segments.
    """
    # Sort so pages are reviewed in filename order.
    image_files = sorted(glob.glob(os.path.join(input_dir, "*.png")))

    if not image_files:
        print(f"No PNG files found in {input_dir}")
        return

    print(f"Found {len(image_files)} images to process")
    print("Controls:")
    print(" SPACE: Accept and save segments normally")
    print(" Any other key: Save segments with '_err' suffix")
    print(" UP ARROW: Go to previous image")
    print(" DOWN ARROW: Go to next image")
    print(" ESC: Exit")
    print("=" * 50)

    # Cache segmentation results so navigating back does not re-run them.
    processed_cache = {}

    # Per-image outcome: image_path -> "accepted" | "error"
    results = {}

    current_index = 0

    while current_index < len(image_files):
        image_path = image_files[current_index]
        filename = Path(image_path).name

        # BUG FIX: the progress line printed a literal "(unknown)" placeholder
        # while the computed filename was never used.
        print(f"\nViewing {current_index + 1}/{len(image_files)}: {filename}")

        if image_path in processed_cache:
            segments, visualization = processed_cache[image_path]
        else:
            segments, visualization = process_single_image(image_path)

            if segments is None:
                print(f"Failed to process {image_path}")
                current_index += 1
                continue

            if len(segments) == 0:
                print(f"No segments found in {image_path}")
                current_index += 1
                continue

            processed_cache[image_path] = (segments, visualization)

        # Shrink tall pages so the review window fits on screen.
        display_viz = visualization.copy()
        height, width = display_viz.shape[:2]
        if height > 800:
            scale = 800 / height
            new_width = int(width * scale)
            display_viz = cv2.resize(display_viz, (new_width, 800))

        # Overlay navigation position and any previously recorded outcome.
        # BUG FIX: this overlay also showed "(unknown)" instead of filename.
        nav_text = f"[{current_index + 1}/{len(image_files)}] {filename}"
        status_text = ""
        if image_path in results:
            if results[image_path] == "accepted":
                status_text = " [ACCEPTED]"
            elif results[image_path] == "error":
                status_text = " [ERROR]"
            elif results[image_path] == "skipped":
                status_text = " [SKIPPED]"

        cv2.putText(display_viz, nav_text + status_text, (10, height - 20 if height <= 800 else 780),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 2)

        window_title = 'SPACE=Accept | Other=Error | UP/DOWN=Navigate | ESC=Exit'
        cv2.imshow(window_title, display_viz)

        # Block until a key is pressed; mask to the low byte for portability.
        key = cv2.waitKey(0) & 0xFF

        if key == 27:  # ESC - exit
            print("Exiting...")
            break
        elif key == 82 or key == 0:  # UP arrow (82 on Linux, 0 on some systems)
            if current_index > 0:
                current_index -= 1
                print("Going to previous image")
            else:
                print("Already at first image")
        elif key == 84 or key == 1:  # DOWN arrow (84 on Linux, 1 on some systems)
            current_index += 1
            print("Going to next image")
        elif key == 32:  # SPACE - accept
            print("Accepted - saving normal segments")
            save_segments(image_path, segments, output_dir, add_error_suffix=False)
            results[image_path] = "accepted"
            current_index += 1
        else:
            print(f"Marked as error - saving with '_err' suffix (key code: {key})")
            save_segments(image_path, segments, output_dir, add_error_suffix=True)
            results[image_path] = "error"
            current_index += 1

    cv2.destroyAllWindows()

    # Final tallies; anything never decided (failed, empty, or exited early)
    # counts as skipped.
    accepted_count = sum(1 for v in results.values() if v == "accepted")
    error_count = sum(1 for v in results.values() if v == "error")
    skipped_count = len(image_files) - len(results)

    print("\n" + "=" * 50)
    print("Batch processing complete!")
    print(f"Total images: {len(image_files)}")
    print(f"Accepted: {accepted_count}")
    print(f"Marked as error: {error_count}")
    print(f"Skipped: {skipped_count}")
    print(f"Output directory: {output_dir}")
272
+
273
def main():
    """Entry point: run the interactive batch segmentation over extracted pages."""
    pages_dir = "/data/scientific_research/sign_language/extracted_pages"
    segments_dir = "batch_segmented_images"

    # Guard clause: nothing to do without the input pages.
    if not os.path.exists(pages_dir):
        print(f"Error: Input directory {pages_dir} not found")
        return

    for line in (
        "Starting batch image processing...",
        f"Input directory: {pages_dir}",
        f"Output directory: {segments_dir}",
    ):
        print(line)

    batch_process_images(pages_dir, segments_dir)


if __name__ == "__main__":
    main()
extract_pages.py ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ import os
3
+ from pdf2image import convert_from_path
4
+
5
def extract_pages_to_png(pdf_path, start_page, end_page, output_dir="extracted_pages"):
    """
    Extract a page range from a PDF and save each page as a 300-DPI PNG.

    Pages are converted in batches of 50 to bound memory usage; a batch that
    fails is reported and skipped rather than aborting the whole run.

    Args:
        pdf_path (str): Path to the source PDF.
        start_page (int): First page to extract (1-based, inclusive).
        end_page (int): Last page to extract (inclusive).
        output_dir (str): Directory for page_NNNN.png files (created if
            missing).
    """
    os.makedirs(output_dir, exist_ok=True)

    print(f"Extracting pages {start_page}-{end_page} from {pdf_path}")
    print(f"Output directory: {output_dir}")

    batch_size = 50
    # BUG FIX: the original reported `end_page - start_page + 1` at the end
    # even when batches failed; count the pages actually written instead.
    saved_pages = 0

    for batch_start in range(start_page, end_page + 1, batch_size):
        batch_end = min(batch_start + batch_size - 1, end_page)

        print(f"Processing batch: pages {batch_start}-{batch_end}")

        try:
            # Convert this batch at 300 DPI for high quality.
            images = convert_from_path(
                pdf_path,
                dpi=300,
                first_page=batch_start,
                last_page=batch_end,
                fmt='PNG'
            )

            # Save each page, numbering from the batch's first page.
            for i, image in enumerate(images):
                page_num = batch_start + i
                output_path = os.path.join(output_dir, f"page_{page_num:04d}.png")
                image.save(output_path, "PNG", optimize=True)
                print(f"Saved: {output_path}")
                saved_pages += 1

        except Exception as e:
            # Best-effort: report and move on to the next batch.
            print(f"Error processing batch {batch_start}-{batch_end}: {e}")
            continue

    print(f"Extraction complete! Total pages processed: {saved_pages}")
47
+
48
if __name__ == "__main__":
    # Extract the entry pages (665-1089) of the Chinese Sign Language PDF.
    extract_pages_to_png("中国手语.pdf", 665, 1089)
extracted_pages/page_0064.png ADDED

Git LFS Details

  • SHA256: e5c1e3d1582af76427a00594d0d4b52aecda9acc45b0a24b302393348ca7dbbb
  • Pointer size: 131 Bytes
  • Size of remote file: 249 kB
extracted_pages/page_0065.png ADDED

Git LFS Details

  • SHA256: c8dd3205b1420d38ad283a1c9166547683a0904b815a60a755825d703edc6b0e
  • Pointer size: 131 Bytes
  • Size of remote file: 276 kB
extracted_pages/page_0066.png ADDED

Git LFS Details

  • SHA256: 8edda3e637aaff49f1207d666bb0987dea672a31a275f6958b64b7907f130b5d
  • Pointer size: 131 Bytes
  • Size of remote file: 304 kB
extracted_pages/page_0067.png ADDED

Git LFS Details

  • SHA256: db685109b0973a7e9f9fc134f45c7535cdfc1e69c2777506acc7e0d4873dc45b
  • Pointer size: 131 Bytes
  • Size of remote file: 253 kB
extracted_pages/page_0068.png ADDED

Git LFS Details

  • SHA256: bd7987a32c9cd899ad3469f561088d06368d025b2fc5438c9c5cf8c822404390
  • Pointer size: 131 Bytes
  • Size of remote file: 316 kB
extracted_pages/page_0069.png ADDED

Git LFS Details

  • SHA256: 04bff0b4ec8d3698d597e9c4b1e6bd6a72ca9fa9f179e3ddc6aaf8e8e42ad560
  • Pointer size: 131 Bytes
  • Size of remote file: 298 kB
extracted_pages/page_0070.png ADDED

Git LFS Details

  • SHA256: e1314379f0d1324bf87b4e6539ea452b11e20b9bf24fbedee870369d5fe66d61
  • Pointer size: 131 Bytes
  • Size of remote file: 307 kB
extracted_pages/page_0071.png ADDED

Git LFS Details

  • SHA256: 3f136610e45a000ea7172121ff4b465afd8d696e88f9402633481e2aac4a19af
  • Pointer size: 131 Bytes
  • Size of remote file: 384 kB
extracted_pages/page_0072.png ADDED

Git LFS Details

  • SHA256: 278aa455b700ee33d54579b04f545e3a53fc87e9358b8870301147210ab9c0f7
  • Pointer size: 131 Bytes
  • Size of remote file: 292 kB
extracted_pages/page_0073.png ADDED

Git LFS Details

  • SHA256: 4134544044c0cea83cbd91f24384e6b093d6f8a69caa7caf0d3f48a2e1361b62
  • Pointer size: 131 Bytes
  • Size of remote file: 368 kB
extracted_pages/page_0074.png ADDED

Git LFS Details

  • SHA256: cc9e183d1f7bee1f0b15d4ebf56df1c7f11854193cb2ddb50c5785ab202a78e8
  • Pointer size: 131 Bytes
  • Size of remote file: 376 kB
extracted_pages/page_0075.png ADDED

Git LFS Details

  • SHA256: 36b74fcf50dfdca83867335150a40081ca76976c1ce2417db3dfb3a0d988aea3
  • Pointer size: 131 Bytes
  • Size of remote file: 313 kB
extracted_pages/page_0076.png ADDED

Git LFS Details

  • SHA256: fb0e029a57ead0ebbd722ab04c0cbe9c4b7f6bdf2faf5462187620cf589fb8d8
  • Pointer size: 131 Bytes
  • Size of remote file: 380 kB
extracted_pages/page_0077.png ADDED

Git LFS Details

  • SHA256: 820bc979f931e1656d2d698f02a8304918b521a158de1de4bab06277a9a2d783
  • Pointer size: 131 Bytes
  • Size of remote file: 398 kB
extracted_pages/page_0078.png ADDED

Git LFS Details

  • SHA256: e154f460be0d1ed22aeebfe0ab82bfc8e61fc3db66fe040f48c2147e1025078e
  • Pointer size: 131 Bytes
  • Size of remote file: 460 kB
extracted_pages/page_0079.png ADDED

Git LFS Details

  • SHA256: 58ea828fdfd2cb0c87426f29e16ec8445af11a1914ff4c1427a6f1abc654a5b2
  • Pointer size: 131 Bytes
  • Size of remote file: 381 kB
extracted_pages/page_0080.png ADDED

Git LFS Details

  • SHA256: 9b0d9f4ed9bc5eab7116d04da58395ac13ca310b77098fa557551446e01e13ad
  • Pointer size: 131 Bytes
  • Size of remote file: 429 kB
extracted_pages/page_0081.png ADDED

Git LFS Details

  • SHA256: a839f169bfc5535efe6b655eb8a0a0b3c3b9354c8a21cd84523177af5fa372e0
  • Pointer size: 131 Bytes
  • Size of remote file: 391 kB
extracted_pages/page_0082.png ADDED

Git LFS Details

  • SHA256: 04e0aa7e8e5db374bf0a0447d26c7983488dbdd13aa702ae216c4eb377d4eeee
  • Pointer size: 131 Bytes
  • Size of remote file: 391 kB
extracted_pages/page_0083.png ADDED

Git LFS Details

  • SHA256: cc68239265260b02773532067cf5c1dcf6c2d8016ec71cd9b19cb6da96a65580
  • Pointer size: 131 Bytes
  • Size of remote file: 400 kB
extracted_pages/page_0084.png ADDED

Git LFS Details

  • SHA256: bec07e945b39282883fcccb3fa023adee53c844c4a2385f563ba9f79575efbe3
  • Pointer size: 131 Bytes
  • Size of remote file: 376 kB
extracted_pages/page_0085.png ADDED

Git LFS Details

  • SHA256: ba82fe70b9c4906f59180a8e65fd44f5c4772876cdf0e3fe7cb86bae89349cf3
  • Pointer size: 131 Bytes
  • Size of remote file: 427 kB
extracted_pages/page_0086.png ADDED

Git LFS Details

  • SHA256: ebd5fe39004fcc75a7e9392dc981919df2bea88f12e0bba76cd1d171786eea7a
  • Pointer size: 131 Bytes
  • Size of remote file: 359 kB
extracted_pages/page_0087.png ADDED

Git LFS Details

  • SHA256: 3ed03a8b38db82422b5909d9b2eee688543f038b7be27480f0c8af55d48c91e5
  • Pointer size: 131 Bytes
  • Size of remote file: 471 kB
extracted_pages/page_0088.png ADDED

Git LFS Details

  • SHA256: e6a79551104369e2fce6446084e0859b28e1ad412050ea1268f110674c39ee0c
  • Pointer size: 131 Bytes
  • Size of remote file: 423 kB
extracted_pages/page_0089.png ADDED

Git LFS Details

  • SHA256: 136f4a15fded2c937683d7ac817cc0ad18636ab4941c9b4881d38ef0547d6cbf
  • Pointer size: 131 Bytes
  • Size of remote file: 504 kB
extracted_pages/page_0090.png ADDED

Git LFS Details

  • SHA256: 84ae7e1b6d87fec54d6bc825dd1a12fbe444873be41e7815038f0288a55446e9
  • Pointer size: 131 Bytes
  • Size of remote file: 396 kB
extracted_pages/page_0091.png ADDED

Git LFS Details

  • SHA256: d55cb5a26fc784af911d6db0b20ba98689dea25f39fda81b6a2b292919ef8187
  • Pointer size: 131 Bytes
  • Size of remote file: 439 kB
extracted_pages/page_0092.png ADDED

Git LFS Details

  • SHA256: 74dbece885a65165f6f2d3f01c24e89d0f6cbac2a50c0f099b28427c33a889c9
  • Pointer size: 131 Bytes
  • Size of remote file: 392 kB
extracted_pages/page_0093.png ADDED

Git LFS Details

  • SHA256: b9c130d41907d94ad8eb8a8ee6e839efa15af694ec638dbe6612aeda908308be
  • Pointer size: 131 Bytes
  • Size of remote file: 456 kB
extracted_pages/page_0094.png ADDED

Git LFS Details

  • SHA256: 829e4b238f6208941602b8fee7c16b36d8b463f8522d76d3cafd7492c59514f1
  • Pointer size: 131 Bytes
  • Size of remote file: 391 kB
extracted_pages/page_0095.png ADDED

Git LFS Details

  • SHA256: b55612b64763db533f5344a72c3d00eab87d5d2a75dae271c95fb11d776d1443
  • Pointer size: 131 Bytes
  • Size of remote file: 445 kB
extracted_pages/page_0096.png ADDED

Git LFS Details

  • SHA256: 319f7741f9e6f0ffd2d9275b296e7c45d52ba425e103b58b0e48c6134bceb76c
  • Pointer size: 131 Bytes
  • Size of remote file: 311 kB
extracted_pages/page_0097.png ADDED

Git LFS Details

  • SHA256: c1336bbb02c32f2e0418339d7b94d37d27ac8600b5be8a2820a925febd710885
  • Pointer size: 131 Bytes
  • Size of remote file: 480 kB
extracted_pages/page_0098.png ADDED

Git LFS Details

  • SHA256: c23df4113bf9e15a603255d40b1f6998d2675d4ca8a5304a8a6df49b024b3b56
  • Pointer size: 131 Bytes
  • Size of remote file: 416 kB
extracted_pages/page_0099.png ADDED

Git LFS Details

  • SHA256: a9dbc3b991096fec284afc7aa40ef84bda3ff93c29149f52b1223ca52df46e23
  • Pointer size: 131 Bytes
  • Size of remote file: 397 kB
extracted_pages/page_0100.png ADDED

Git LFS Details

  • SHA256: 2491dca4cb256fc10c83f0e712e7b3e80c28fd1d6a1e40947cd64445f3f5d3e1
  • Pointer size: 131 Bytes
  • Size of remote file: 411 kB
extracted_pages/page_0101.png ADDED

Git LFS Details

  • SHA256: 3e78d8e7380f32cb5a931a05ec52211b2d61415b1f2bdb39f568a634a2447740
  • Pointer size: 131 Bytes
  • Size of remote file: 459 kB
extracted_pages/page_0102.png ADDED

Git LFS Details

  • SHA256: 9a72861f76148aef6300a1ae79a652cc40e88ac40a0ca3784088d53bc2182a39
  • Pointer size: 131 Bytes
  • Size of remote file: 372 kB
extracted_pages/page_0103.png ADDED

Git LFS Details

  • SHA256: 692702b785b850bf7b441c62eed3c26269d4e18e175510ad88e21a2bee66a754
  • Pointer size: 131 Bytes
  • Size of remote file: 469 kB
extracted_pages/page_0104.png ADDED

Git LFS Details

  • SHA256: e47c95946085088ae963bb28f64f0f0134274399ba684528cb15102aa78c38c4
  • Pointer size: 131 Bytes
  • Size of remote file: 321 kB
extracted_pages/page_0105.png ADDED

Git LFS Details

  • SHA256: 0ebc5372c2bd3e6891e6381fc4867249c9a9fcaaa01c4bd9bb41fa35c7b40331
  • Pointer size: 131 Bytes
  • Size of remote file: 408 kB
extracted_pages/page_0106.png ADDED

Git LFS Details

  • SHA256: 6d5b235283e1759244673991cc16abe3a8e6cbb2a587dcc5ceaf1393b843b8ba
  • Pointer size: 131 Bytes
  • Size of remote file: 328 kB
extracted_pages/page_0107.png ADDED

Git LFS Details

  • SHA256: 231958b342a3b9d1e24df9c0b685f6a6b65065e238d04a33d19e4e6b3c216a81
  • Pointer size: 131 Bytes
  • Size of remote file: 441 kB
extracted_pages/page_0108.png ADDED

Git LFS Details

  • SHA256: 51e00ceab89b1f7efc3139090375f0dcf7d04f797403a07eb81e7ccf3402c9fd
  • Pointer size: 131 Bytes
  • Size of remote file: 407 kB
extracted_pages/page_0109.png ADDED

Git LFS Details

  • SHA256: 7f75ab775c0718cd5e21654b469f4076bbee4816987bf24163a7eb84fd76a3d6
  • Pointer size: 131 Bytes
  • Size of remote file: 427 kB