sreepathi-ravikumar committed on
Commit
68b396b
·
verified ·
1 Parent(s): e0ae544

Update rust_highlight/src/lib.rs

Browse files
Files changed (1) hide show
  1. rust_highlight/src/lib.rs +120 -147
rust_highlight/src/lib.rs CHANGED
@@ -1,10 +1,7 @@
1
  // rust_highlight/src/lib.rs
2
-
3
-
4
-
5
  use pyo3::prelude::*;
6
  use pyo3::types::PyModule;
7
- use opencv::core::{Mat, Point, Scalar, CV_8UC3};
8
  use opencv::imgproc::{circle, get_text_size, line, put_text, HersheyFonts, LineTypes};
9
  use opencv::prelude::*;
10
  use std::process::{Command, Stdio};
@@ -12,6 +9,7 @@ use std::io::Write;
12
  use std::time::Instant;
13
  use std::path::Path;
14
  use std::f64::consts::PI;
 
15
 
16
  #[pyfunction]
17
  fn generate_video_clip(id: usize, text: String, audio_path: String, duration: f64, clips_dir: String) -> PyResult<Option<String>> {
@@ -21,7 +19,7 @@ fn generate_video_clip(id: usize, text: String, audio_path: String, duration: f6
21
 
22
  let skip_spaces = false;
23
  let fps: f64 = 30.0;
24
- let animation_frames_per_char: usize = 2;
25
  let width: i32 = 1280;
26
  let height: i32 = 720;
27
  let margin_x: i32 = 40;
@@ -32,21 +30,22 @@ fn generate_video_clip(id: usize, text: String, audio_path: String, duration: f6
32
  let header_font_scale: f64 = 2.0;
33
  let default_thickness: i32 = 2;
34
  let header_thickness: i32 = 3;
35
- let default_text_color = Scalar::new(0.0, 0.0, 0.0, 0.0); // BGR Black
36
- let header_text_color = Scalar::new(255.0, 0.0, 0.0, 0.0); // BGR Blue
37
- let bg_color = Scalar::new(255.0, 255.0, 255.0, 0.0); // BGR White
38
  let ffmpeg_preset = "ultrafast";
39
  let crf = "28";
40
- // Pen settings
41
- let pen_color = Scalar::new(0.0, 0.0, 255.0, 0.0); // BGR Red
42
  let pen_tip_radius: i32 = 5;
43
  let pen_length: i32 = 20;
44
  let pen_thickness: i32 = 2;
45
  let pen_base_angle: i32 = 45;
46
  let pen_movement_amplitude: i32 = 10;
47
 
48
- let silent_video_name = format!("silent_video{}.mp4", id);
49
- let silent_video_path = format!("{}/{}", clips_dir, silent_video_name);
 
 
50
  let final_video_name = format!("clip{}.mp4", id);
51
  let final_video_path = format!("{}/{}", clips_dir, final_video_name);
52
 
@@ -67,26 +66,19 @@ fn generate_video_clip(id: usize, text: String, audio_path: String, duration: f6
67
  };
68
 
69
  let total_glyphs = visible_indices.len();
70
- println!("Wrapped lines: {} lines, total glyphs (counted): {}", wrapped_lines.len(), total_glyphs);
71
  if total_glyphs == 0 {
72
  println!("No text to animate.");
73
  return Ok(None);
74
  }
75
 
76
- // Calculate animation duration (1/4 of audio duration)
77
  let animation_duration = duration / 4.0;
78
  let static_duration = duration - animation_duration;
79
 
80
- // Calculate total frames needed
81
- let animation_frames = total_glyphs * animation_frames_per_char;
82
- let animation_total_frames = (animation_duration * fps) as usize;
83
- let static_frames = (static_duration * fps) as usize;
84
-
85
- println!("Animation duration: {:.3}s ({} frames), Static duration: {:.3}s ({} frames)",
86
- animation_duration, animation_total_frames, static_duration, static_frames);
87
 
88
  // Pre-calc line heights and y_positions
89
- let mut line_heights: Vec<i32> = Vec::new();
90
  let mut y_positions: Vec<i32> = Vec::new();
91
  let mut y = margin_y;
92
  for (i, line) in wrapped_lines.iter().enumerate() {
@@ -98,43 +90,21 @@ fn generate_video_clip(id: usize, text: String, audio_path: String, duration: f6
98
  let size = get_text_size(&line_for_size, font, font_scale, thickness, &mut base_line).unwrap();
99
  let h = size.height;
100
  let lh = h + base_line + line_spacing;
101
- line_heights.push(lh);
102
  y_positions.push(y);
103
  y += lh;
104
  }
105
 
106
- // FFmpeg for silent video
107
- let mut child = Command::new("ffmpeg")
108
- .arg("-y")
109
- .arg("-f").arg("rawvideo")
110
- .arg("-pix_fmt").arg("bgr24")
111
- .arg("-s").arg(format!("{}x{}", width, height))
112
- .arg("-r").arg(fps.to_string())
113
- .arg("-i").arg("-")
114
- .arg("-an")
115
- .arg("-c:v").arg("libx264")
116
- .arg("-preset").arg(ffmpeg_preset)
117
- .arg("-crf").arg(crf)
118
- .arg("-pix_fmt").arg("yuv420p")
119
- .arg(&silent_video_path)
120
- .stdin(Stdio::piped())
121
- .spawn()
122
- .map_err(|e| pyo3::exceptions::PyRuntimeError::new_err(format!("Failed to spawn FFmpeg: {}", e)))?;
123
-
124
- let mut stdin = child.stdin.take().unwrap();
125
-
126
  let t0 = Instant::now();
127
- let mut frames_sent: usize = 0;
128
- let mut prev_visible_sub = String::new();
129
 
130
- // Pre-render base frame with full text (optimization)
131
- let base_frame = render_frame(
 
132
  &full_text,
133
  -1,
134
  0,
135
  0.0,
136
- width as i32,
137
- height as i32,
138
  &line_styles,
139
  &y_positions,
140
  margin_x,
@@ -154,13 +124,20 @@ fn generate_video_clip(id: usize, text: String, audio_path: String, duration: f6
154
  pen_movement_amplitude,
155
  )?;
156
 
157
- // Buffer for batch writing
158
- let frame_size = (width * height * 3) as usize;
159
- let mut buffer: Vec<u8> = Vec::with_capacity(frame_size * 100);
 
 
 
 
 
 
 
 
160
 
161
- // PHASE 1: Animation phase (first 1/4 of duration)
162
  for &idx_in_full in visible_indices.iter() {
163
- let visible_sub = &full_text[0..=idx_in_full];
164
  if visible_sub != prev_visible_sub {
165
  let lines: Vec<&str> = visible_sub.split('\n').collect();
166
  let last_line = lines.last().unwrap();
@@ -176,121 +153,123 @@ fn generate_video_clip(id: usize, text: String, audio_path: String, duration: f6
176
  let pen_y = y_positions[line_idx] + h / 2;
177
 
178
  for anim_step in 0..animation_frames_per_char {
179
- let anim_offset = (anim_step as f64) / (animation_frames_per_char as f64);
180
- let frame_img = render_frame(
181
- visible_sub,
182
- pen_x,
183
- pen_y,
184
- anim_offset,
185
- width as i32,
186
- height as i32,
187
- &line_styles,
188
- &y_positions,
189
- margin_x,
190
- font,
191
- default_font_scale,
192
- header_font_scale,
193
- default_thickness,
194
- header_thickness,
195
- default_text_color,
196
- header_text_color,
197
- bg_color,
198
- pen_color,
199
- pen_tip_radius,
200
- pen_length,
201
- pen_thickness,
202
- pen_base_angle,
203
- pen_movement_amplitude,
204
- )?;
205
-
206
- buffer.extend_from_slice(frame_img.data_bytes().unwrap());
207
- frames_sent += 1;
208
-
209
- // Flush buffer every 100 frames
210
- if buffer.len() >= frame_size * 100 {
211
- stdin.write_all(&buffer)
212
- .map_err(|e| pyo3::exceptions::PyIOError::new_err(format!("Failed to write frames: {}", e)))?;
213
- buffer.clear();
214
- }
215
  }
216
- prev_visible_sub = visible_sub.to_string();
217
  }
218
  }
219
 
220
- // Flush remaining animation frames
221
- if !buffer.is_empty() {
222
- stdin.write_all(&buffer)
223
- .map_err(|e| pyo3::exceptions::PyIOError::new_err(format!("Failed to write frames: {}", e)))?;
224
- buffer.clear();
225
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
226
 
227
- // PHASE 2: Static phase (remaining 3/4 of duration)
228
- // Reuse the pre-rendered base frame
229
- println!("Animation complete. Rendering {} static frames...", static_frames);
230
-
231
- let base_frame_bytes = base_frame.data_bytes().unwrap();
232
-
233
- // Write static frames in batches
234
- let batch_size = 300;
235
- let full_batches = static_frames / batch_size;
236
- let remainder = static_frames % batch_size;
237
-
238
- for _ in 0..full_batches {
239
- for _ in 0..batch_size {
240
- buffer.extend_from_slice(base_frame_bytes);
241
- }
242
- stdin.write_all(&buffer)
243
- .map_err(|e| pyo3::exceptions::PyIOError::new_err(format!("Failed to write static frames: {}", e)))?;
244
- buffer.clear();
245
- frames_sent += batch_size;
246
  }
247
 
248
- // Write remaining frames
249
- if remainder > 0 {
250
- for _ in 0..remainder {
251
- buffer.extend_from_slice(base_frame_bytes);
252
- }
253
- stdin.write_all(&buffer)
254
- .map_err(|e| pyo3::exceptions::PyIOError::new_err(format!("Failed to write static frames: {}", e)))?;
255
- frames_sent += remainder;
256
- }
257
 
258
  drop(stdin);
259
- child.wait().map_err(|e| pyo3::exceptions::PyRuntimeError::new_err(format!("FFmpeg failed: {}", e)))?;
260
 
261
- let elapsed = t0.elapsed().as_secs_f64();
262
- println!("Total frames sent: {}, elapsed time: {:.3} seconds", frames_sent, elapsed);
263
 
264
- if !Path::new(&silent_video_path).exists() {
265
- println!("Silent video generation failed.");
266
- return Ok(None);
267
- }
268
 
269
- let rendered_duration = frames_sent as f64 / fps;
270
- println!("Rendered video duration: {:.3}s, Target audio duration: {:.3}s", rendered_duration, duration);
 
 
 
 
 
271
 
272
- // Combine with audio (no speed adjustment needed as we matched the duration)
273
  let mut combine_child = Command::new("ffmpeg")
274
  .arg("-y")
275
- .arg("-i").arg(&silent_video_path)
 
 
276
  .arg("-i").arg(&audio_path)
 
 
 
277
  .arg("-c:v").arg("libx264")
278
  .arg("-preset").arg("ultrafast")
279
  .arg("-crf").arg("28")
280
  .arg("-pix_fmt").arg("yuv420p")
281
  .arg("-c:a").arg("aac")
282
  .arg("-shortest")
283
- .arg("-map").arg("0:v:0")
284
- .arg("-map").arg("1:a:0")
285
  .arg(&final_video_path)
286
  .spawn()
287
  .map_err(|e| pyo3::exceptions::PyRuntimeError::new_err(format!("Failed to spawn FFmpeg for combine: {}", e)))?;
288
 
289
  combine_child.wait().map_err(|e| pyo3::exceptions::PyRuntimeError::new_err(format!("FFmpeg combine failed: {}", e)))?;
290
 
291
- // Clean up
292
- std::fs::remove_file(&silent_video_path)
293
- .map_err(|e| pyo3::exceptions::PyIOError::new_err(format!("Failed to remove silent video: {}", e)))?;
 
 
 
294
 
295
  Ok(Some(final_video_path))
296
  }
@@ -402,7 +381,6 @@ fn render_frame(
402
  }
403
  }
404
 
405
- // Only draw pen if pen_x > 0
406
  if pen_x > 0 {
407
  let offset_y = (pen_movement_amplitude as f64 * (anim_offset * PI).sin()) as i32;
408
  let pen_tip_y = pen_y + offset_y;
@@ -422,9 +400,4 @@ fn render_frame(
422
  fn rust_highlight(_py: Python<'_>, m: &Bound<'_, PyModule>) -> PyResult<()> {
423
  m.add_function(wrap_pyfunction!(generate_video_clip, m)?)?;
424
  Ok(())
425
-
426
-
427
-
428
-
429
-
430
  }
 
1
  // rust_highlight/src/lib.rs
 
 
 
2
  use pyo3::prelude::*;
3
  use pyo3::types::PyModule;
4
+ use opencv::core::{Mat, Point, Scalar, CV_8UC3, Vector};
5
  use opencv::imgproc::{circle, get_text_size, line, put_text, HersheyFonts, LineTypes};
6
  use opencv::prelude::*;
7
  use std::process::{Command, Stdio};
 
9
  use std::time::Instant;
10
  use std::path::Path;
11
  use std::f64::consts::PI;
12
+ use rayon::prelude::*;
13
 
14
  #[pyfunction]
15
  fn generate_video_clip(id: usize, text: String, audio_path: String, duration: f64, clips_dir: String) -> PyResult<Option<String>> {
 
19
 
20
  let skip_spaces = false;
21
  let fps: f64 = 30.0;
22
+ let animation_frames_per_char: usize = 1; // Reduced from 2 for speed
23
  let width: i32 = 1280;
24
  let height: i32 = 720;
25
  let margin_x: i32 = 40;
 
30
  let header_font_scale: f64 = 2.0;
31
  let default_thickness: i32 = 2;
32
  let header_thickness: i32 = 3;
33
+ let default_text_color = Scalar::new(0.0, 0.0, 0.0, 0.0);
34
+ let header_text_color = Scalar::new(255.0, 0.0, 0.0, 0.0);
35
+ let bg_color = Scalar::new(255.0, 255.0, 255.0, 0.0);
36
  let ffmpeg_preset = "ultrafast";
37
  let crf = "28";
38
+ let pen_color = Scalar::new(0.0, 0.0, 255.0, 0.0);
 
39
  let pen_tip_radius: i32 = 5;
40
  let pen_length: i32 = 20;
41
  let pen_thickness: i32 = 2;
42
  let pen_base_angle: i32 = 45;
43
  let pen_movement_amplitude: i32 = 10;
44
 
45
+ let animation_video_name = format!("anim_video{}.mp4", id);
46
+ let animation_video_path = format!("{}/{}", clips_dir, animation_video_name);
47
+ let static_frame_name = format!("static_{}.png", id);
48
+ let static_frame_path = format!("{}/{}", clips_dir, static_frame_name);
49
  let final_video_name = format!("clip{}.mp4", id);
50
  let final_video_path = format!("{}/{}", clips_dir, final_video_name);
51
 
 
66
  };
67
 
68
  let total_glyphs = visible_indices.len();
69
+ println!("Wrapped lines: {} lines, total glyphs: {}", wrapped_lines.len(), total_glyphs);
70
  if total_glyphs == 0 {
71
  println!("No text to animate.");
72
  return Ok(None);
73
  }
74
 
75
+ // Calculate durations
76
  let animation_duration = duration / 4.0;
77
  let static_duration = duration - animation_duration;
78
 
79
+ println!("Animation duration: {:.3}s, Static duration: {:.3}s", animation_duration, static_duration);
 
 
 
 
 
 
80
 
81
  // Pre-calc line heights and y_positions
 
82
  let mut y_positions: Vec<i32> = Vec::new();
83
  let mut y = margin_y;
84
  for (i, line) in wrapped_lines.iter().enumerate() {
 
90
  let size = get_text_size(&line_for_size, font, font_scale, thickness, &mut base_line).unwrap();
91
  let h = size.height;
92
  let lh = h + base_line + line_spacing;
 
93
  y_positions.push(y);
94
  y += lh;
95
  }
96
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
97
  let t0 = Instant::now();
 
 
98
 
99
+ // STEP 1: Pre-render the final static frame (used for Phase 2)
100
+ println!("Rendering static frame...");
101
+ let static_frame = render_frame(
102
  &full_text,
103
  -1,
104
  0,
105
  0.0,
106
+ width,
107
+ height,
108
  &line_styles,
109
  &y_positions,
110
  margin_x,
 
124
  pen_movement_amplitude,
125
  )?;
126
 
127
+ // Save static frame as PNG
128
+ let mut params = Vector::new();
129
+ opencv::imgcodecs::imwrite(&static_frame_path, &static_frame, &params)
130
+ .map_err(|e| pyo3::exceptions::PyRuntimeError::new_err(format!("Failed to save static frame: {}", e)))?;
131
+
132
+ // STEP 2: Render animation frames in parallel (HUGE speedup)
133
+ println!("Rendering animation frames in parallel...");
134
+
135
+ // Collect frame data for parallel processing
136
+ let mut frame_specs: Vec<(String, i32, i32, usize)> = Vec::new();
137
+ let mut prev_visible_sub = String::new();
138
 
 
139
  for &idx_in_full in visible_indices.iter() {
140
+ let visible_sub = full_text[0..=idx_in_full].to_string();
141
  if visible_sub != prev_visible_sub {
142
  let lines: Vec<&str> = visible_sub.split('\n').collect();
143
  let last_line = lines.last().unwrap();
 
153
  let pen_y = y_positions[line_idx] + h / 2;
154
 
155
  for anim_step in 0..animation_frames_per_char {
156
+ frame_specs.push((visible_sub.clone(), pen_x, pen_y, anim_step));
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
157
  }
158
+ prev_visible_sub = visible_sub;
159
  }
160
  }
161
 
162
+ println!("Total animation frames to render: {}", frame_specs.len());
163
+
164
+ // Parallel rendering using rayon
165
+ let animation_frames: Vec<Vec<u8>> = frame_specs
166
+ .par_iter()
167
+ .map(|(visible_sub, pen_x, pen_y, anim_step)| {
168
+ let anim_offset = (*anim_step as f64) / (animation_frames_per_char as f64);
169
+ let frame = render_frame(
170
+ visible_sub,
171
+ *pen_x,
172
+ *pen_y,
173
+ anim_offset,
174
+ width,
175
+ height,
176
+ &line_styles,
177
+ &y_positions,
178
+ margin_x,
179
+ font,
180
+ default_font_scale,
181
+ header_font_scale,
182
+ default_thickness,
183
+ header_thickness,
184
+ default_text_color,
185
+ header_text_color,
186
+ bg_color,
187
+ pen_color,
188
+ pen_tip_radius,
189
+ pen_length,
190
+ pen_thickness,
191
+ pen_base_angle,
192
+ pen_movement_amplitude,
193
+ ).unwrap();
194
+ frame.data_bytes().unwrap().to_vec()
195
+ })
196
+ .collect();
197
+
198
+ println!("Animation frames rendered in {:.3}s", t0.elapsed().as_secs_f64());
199
+
200
+ // STEP 3: Write animation frames to FFmpeg
201
+ let mut child = Command::new("ffmpeg")
202
+ .arg("-y")
203
+ .arg("-f").arg("rawvideo")
204
+ .arg("-pix_fmt").arg("bgr24")
205
+ .arg("-s").arg(format!("{}x{}", width, height))
206
+ .arg("-r").arg(fps.to_string())
207
+ .arg("-i").arg("-")
208
+ .arg("-an")
209
+ .arg("-c:v").arg("libx264")
210
+ .arg("-preset").arg(ffmpeg_preset)
211
+ .arg("-crf").arg(crf)
212
+ .arg("-pix_fmt").arg("yuv420p")
213
+ .arg(&animation_video_path)
214
+ .stdin(Stdio::piped())
215
+ .spawn()
216
+ .map_err(|e| pyo3::exceptions::PyRuntimeError::new_err(format!("Failed to spawn FFmpeg: {}", e)))?;
217
 
218
+ let mut stdin = child.stdin.take().unwrap();
219
+
220
+ // Write all animation frames in one large batch
221
+ let mut buffer: Vec<u8> = Vec::with_capacity(animation_frames.len() * width as usize * height as usize * 3);
222
+ for frame_data in animation_frames {
223
+ buffer.extend_from_slice(&frame_data);
 
 
 
 
 
 
 
 
 
 
 
 
 
224
  }
225
 
226
+ stdin.write_all(&buffer)
227
+ .map_err(|e| pyo3::exceptions::PyIOError::new_err(format!("Failed to write animation frames: {}", e)))?;
 
 
 
 
 
 
 
228
 
229
  drop(stdin);
230
+ child.wait().map_err(|e| pyo3::exceptions::PyRuntimeError::new_err(format!("FFmpeg animation failed: {}", e)))?;
231
 
232
+ println!("Animation video created in {:.3}s", t0.elapsed().as_secs_f64());
 
233
 
234
+ // STEP 4: Combine animation + static frame + audio using FFmpeg filters
235
+ let animation_actual_duration = frame_specs.len() as f64 / fps;
236
+ let speed_multiplier = animation_duration / animation_actual_duration;
 
237
 
238
+ println!("Combining videos with FFmpeg filters...");
239
+
240
+ let filter_complex = format!(
241
+ "[0:v]setpts={}*PTS[v0];[1:v]loop=loop=-1:size=1:start=0,trim=duration={}[v1];[v0][v1]concat=n=2:v=1:a=0[outv]",
242
+ speed_multiplier,
243
+ static_duration
244
+ );
245
 
 
246
  let mut combine_child = Command::new("ffmpeg")
247
  .arg("-y")
248
+ .arg("-i").arg(&animation_video_path)
249
+ .arg("-loop").arg("1")
250
+ .arg("-i").arg(&static_frame_path)
251
  .arg("-i").arg(&audio_path)
252
+ .arg("-filter_complex").arg(&filter_complex)
253
+ .arg("-map").arg("[outv]")
254
+ .arg("-map").arg("2:a:0")
255
  .arg("-c:v").arg("libx264")
256
  .arg("-preset").arg("ultrafast")
257
  .arg("-crf").arg("28")
258
  .arg("-pix_fmt").arg("yuv420p")
259
  .arg("-c:a").arg("aac")
260
  .arg("-shortest")
 
 
261
  .arg(&final_video_path)
262
  .spawn()
263
  .map_err(|e| pyo3::exceptions::PyRuntimeError::new_err(format!("Failed to spawn FFmpeg for combine: {}", e)))?;
264
 
265
  combine_child.wait().map_err(|e| pyo3::exceptions::PyRuntimeError::new_err(format!("FFmpeg combine failed: {}", e)))?;
266
 
267
+ let elapsed = t0.elapsed().as_secs_f64();
268
+ println!("Total processing time: {:.3}s", elapsed);
269
+
270
+ // Clean up temporary files
271
+ let _ = std::fs::remove_file(&animation_video_path);
272
+ let _ = std::fs::remove_file(&static_frame_path);
273
 
274
  Ok(Some(final_video_path))
275
  }
 
381
  }
382
  }
383
 
 
384
  if pen_x > 0 {
385
  let offset_y = (pen_movement_amplitude as f64 * (anim_offset * PI).sin()) as i32;
386
  let pen_tip_y = pen_y + offset_y;
 
400
  fn rust_highlight(_py: Python<'_>, m: &Bound<'_, PyModule>) -> PyResult<()> {
401
  m.add_function(wrap_pyfunction!(generate_video_clip, m)?)?;
402
  Ok(())
 
 
 
 
 
403
  }