File size: 15,861 Bytes
26bdd3c 04a1f81 24033ff 68b396b 26bdd3c 24033ff 26bdd3c 1f2ab31 26bdd3c 68b396b f05a886 04a1f81 26bdd3c 04a1f81 26bdd3c 31b0c24 26bdd3c 68b396b 26bdd3c 68b396b 26bdd3c 68b396b 26bdd3c 24033ff 26bdd3c 31b0c24 26bdd3c 141a650 26bdd3c 68b396b 26bdd3c fbb769b 31b0c24 1194818 68b396b 1194818 26bdd3c 7b12459 26bdd3c 31b0c24 68b396b 42926fa 68b396b 42926fa 68b396b 31b0c24 68b396b 42926fa 26bdd3c 68b396b 26bdd3c 0d0de9b 26bdd3c 68b396b 26bdd3c 68b396b 26bdd3c 68b396b 31b0c24 9c7c1c2 31b0c24 68b396b 31b0c24 68b396b 9c7c1c2 42926fa 1194818 31b0c24 1194818 26bdd3c 31b0c24 26bdd3c 68b396b 26bdd3c 31b0c24 68b396b 26bdd3c 31b0c24 68b396b 31b0c24 68b396b 26bdd3c 31b0c24 26bdd3c 68b396b 26bdd3c 68b396b 26bdd3c 31b0c24 706d234 31b0c24 26bdd3c 68b396b 26bdd3c 24033ff 26bdd3c 24033ff 26bdd3c 24033ff 26bdd3c 1194818 26bdd3c 1194818 26bdd3c 1194818 26bdd3c 1194818 26bdd3c 1194818 26bdd3c 1194818 26bdd3c 5b29fc9 26bdd3c f05a886 1194818 26bdd3c f05a886 706d234 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 |
// rust_highlight/src/lib.rs
use pyo3::prelude::*;
use pyo3::types::PyModule;
use opencv::core::{Mat, Point, Scalar, CV_8UC3, Vector};
use opencv::imgproc::{circle, get_text_size, line, put_text, HersheyFonts, LineTypes};
use opencv::prelude::*;
use std::process::{Command, Stdio};
use std::io::Write;
use std::time::Instant;
use std::path::Path;
use std::f64::consts::PI;
use rayon::prelude::*;
/// Build `clips_dir/clip{id}.mp4`: an animated "hand-writing" rendering of
/// `text`, muxed with the audio file at `audio_path`.
///
/// The clip has two phases: the first quarter of `duration` animates the
/// glyphs appearing one by one with a pen overlay; the remainder holds the
/// fully rendered static frame. Animation frames are rendered in parallel
/// with rayon and piped to FFmpeg as raw BGR24 video.
///
/// Returns `Ok(Some(final_path))` on success, `Ok(None)` when the wrapped
/// text is empty, or an error when the audio file is missing, a frame
/// cannot be produced, or an FFmpeg invocation fails.
#[pyfunction]
fn generate_video_clip(id: usize, text: String, audio_path: String, duration: f64, clips_dir: String) -> PyResult<Option<String>> {
    if !Path::new(&audio_path).exists() {
        return Err(pyo3::exceptions::PyFileNotFoundError::new_err(format!("Audio not found: {}", audio_path)));
    }
    // --- rendering / layout constants -----------------------------------
    let skip_spaces = false; // animate spaces too, so pen motion stays continuous
    let fps: f64 = 30.0;
    let animation_frames_per_char: usize = 1; // reduced from 2 for speed
    let width: i32 = 1280;
    let height: i32 = 720;
    let margin_x: i32 = 40;
    let margin_y: i32 = 60;
    let line_spacing: i32 = 8;
    let font = HersheyFonts::FONT_HERSHEY_SIMPLEX as i32;
    let default_font_scale: f64 = 1.5;
    let header_font_scale: f64 = 2.0;
    let default_thickness: i32 = 2;
    let header_thickness: i32 = 3;
    // Colors are BGR (OpenCV convention); the 4th channel is unused.
    let default_text_color = Scalar::new(0.0, 0.0, 0.0, 0.0);
    let header_text_color = Scalar::new(255.0, 0.0, 0.0, 0.0);
    let bg_color = Scalar::new(255.0, 255.0, 255.0, 0.0);
    let ffmpeg_preset = "ultrafast";
    let crf = "28";
    let pen_color = Scalar::new(0.0, 0.0, 255.0, 0.0);
    let pen_tip_radius: i32 = 5;
    let pen_length: i32 = 20;
    let pen_thickness: i32 = 2;
    let pen_base_angle: i32 = 45;
    let pen_movement_amplitude: i32 = 10;
    // Intermediate and final output paths inside `clips_dir`.
    let animation_video_path = format!("{}/anim_video{}.mp4", clips_dir, id);
    let static_frame_path = format!("{}/static_{}.png", clips_dir, id);
    let final_video_path = format!("{}/clip{}.mp4", clips_dir, id);
    // Wrap text to the drawable width; `line_styles` flags header lines.
    let text_area_width = width - 2 * margin_x;
    let (wrapped_lines, line_styles) = wrap_text_cv(&text, font, default_font_scale, default_thickness, text_area_width, header_font_scale, header_thickness);
    let full_text = wrapped_lines.join("\n");
    if full_text.is_empty() {
        println!("No text to animate.");
        return Ok(None);
    }
    // Byte offset just PAST each animated glyph. Using char-boundary end
    // offsets (instead of raw byte positions 0..len) keeps the prefix
    // slices below panic-free on multi-byte UTF-8 text, which previously
    // panicked on non-ASCII input.
    let prefix_ends: Vec<usize> = full_text
        .char_indices()
        .filter(|&(_, ch)| !skip_spaces || (ch != ' ' && ch != '\n' && ch != '\t'))
        .map(|(i, ch)| i + ch.len_utf8())
        .collect();
    let total_glyphs = prefix_ends.len();
    println!("Wrapped lines: {} lines, total glyphs: {}", wrapped_lines.len(), total_glyphs);
    if total_glyphs == 0 {
        println!("No text to animate.");
        return Ok(None);
    }
    // First quarter of the clip animates; the rest shows the finished frame.
    let animation_duration = duration / 4.0;
    let static_duration = duration - animation_duration;
    println!("Animation duration: {:.3}s, Static duration: {:.3}s", animation_duration, static_duration);
    // Pre-compute the top y coordinate of every wrapped line from measured heights.
    let mut y_positions: Vec<i32> = Vec::with_capacity(wrapped_lines.len());
    let mut y = margin_y;
    for (i, line) in wrapped_lines.iter().enumerate() {
        let is_header = line_styles[i];
        let font_scale = if is_header { header_font_scale } else { default_font_scale };
        let thickness = if is_header { header_thickness } else { default_thickness };
        // Measure "Ay" for blank lines so they still occupy one line height.
        let line_for_size = if line.is_empty() { "Ay".to_string() } else { line.clone() };
        let mut base_line = 0;
        let size = get_text_size(&line_for_size, font, font_scale, thickness, &mut base_line).unwrap();
        y_positions.push(y);
        y += size.height + base_line + line_spacing;
    }
    let t0 = Instant::now();
    // STEP 1: render the final static frame (held during phase 2).
    println!("Rendering static frame...");
    let static_frame = render_frame(
        &full_text,
        -1, // pen_x <= 0 suppresses the pen overlay
        0,
        0.0,
        width,
        height,
        &line_styles,
        &y_positions,
        margin_x,
        font,
        default_font_scale,
        header_font_scale,
        default_thickness,
        header_thickness,
        default_text_color,
        header_text_color,
        bg_color,
        pen_color,
        pen_tip_radius,
        pen_length,
        pen_thickness,
        pen_base_angle,
        pen_movement_amplitude,
    )?;
    // Save the static frame as PNG (default encoder parameters).
    let params = Vector::new();
    opencv::imgcodecs::imwrite(&static_frame_path, &static_frame, &params)
        .map_err(|e| pyo3::exceptions::PyRuntimeError::new_err(format!("Failed to save static frame: {}", e)))?;
    // STEP 2: describe every animation frame (text prefix + pen position).
    println!("Rendering animation frames in parallel...");
    let mut frame_specs: Vec<(String, i32, i32, usize)> = Vec::with_capacity(total_glyphs * animation_frames_per_char);
    for &end in &prefix_ends {
        // Prefix ends are strictly increasing, so every prefix is distinct.
        let visible_sub = &full_text[..end];
        let lines: Vec<&str> = visible_sub.split('\n').collect();
        let last_line = lines.last().unwrap();
        let line_idx = lines.len() - 1;
        let is_header = line_styles[line_idx];
        let font_scale = if is_header { header_font_scale } else { default_font_scale };
        let thickness = if is_header { header_thickness } else { default_thickness };
        let mut base_line = 0;
        let size = get_text_size(last_line, font, font_scale, thickness, &mut base_line).unwrap();
        // Pen tip sits just right of the last drawn glyph, centred on the line.
        let pen_x = margin_x + size.width + 5;
        let pen_y = y_positions[line_idx] + size.height / 2;
        for anim_step in 0..animation_frames_per_char {
            frame_specs.push((visible_sub.to_string(), pen_x, pen_y, anim_step));
        }
    }
    println!("Total animation frames to render: {}", frame_specs.len());
    // STEP 3: start the FFmpeg encoder reading raw BGR24 frames on stdin.
    let mut child = Command::new("ffmpeg")
        .arg("-y")
        .arg("-f").arg("rawvideo")
        .arg("-pix_fmt").arg("bgr24")
        .arg("-s").arg(format!("{}x{}", width, height))
        .arg("-r").arg(fps.to_string())
        .arg("-i").arg("-")
        .arg("-an")
        .arg("-c:v").arg("libx264")
        .arg("-preset").arg(ffmpeg_preset)
        .arg("-crf").arg(crf)
        .arg("-pix_fmt").arg("yuv420p")
        .arg(&animation_video_path)
        .stdin(Stdio::piped())
        .spawn()
        .map_err(|e| pyo3::exceptions::PyRuntimeError::new_err(format!("Failed to spawn FFmpeg: {}", e)))?;
    let mut stdin = child.stdin.take().unwrap();
    // Render every frame in parallel with rayon; `collect` preserves order.
    let animation_frames: Vec<Vec<u8>> = frame_specs
        .par_iter()
        .map(|(visible_sub, pen_x, pen_y, anim_step)| {
            let anim_offset = (*anim_step as f64) / (animation_frames_per_char as f64);
            let frame = render_frame(
                visible_sub,
                *pen_x,
                *pen_y,
                anim_offset,
                width,
                height,
                &line_styles,
                &y_positions,
                margin_x,
                font,
                default_font_scale,
                header_font_scale,
                default_thickness,
                header_thickness,
                default_text_color,
                header_text_color,
                bg_color,
                pen_color,
                pen_tip_radius,
                pen_length,
                pen_thickness,
                pen_base_angle,
                pen_movement_amplitude,
            ).unwrap();
            frame.data_bytes().unwrap().to_vec()
        })
        .collect();
    println!("Animation frames rendered in {:.3}s", t0.elapsed().as_secs_f64());
    // Stream frames to FFmpeg one at a time; this avoids duplicating every
    // frame into a second contiguous multi-hundred-MB buffer.
    for frame_data in &animation_frames {
        stdin.write_all(frame_data)
            .map_err(|e| pyo3::exceptions::PyIOError::new_err(format!("Failed to write animation frames: {}", e)))?;
    }
    drop(stdin); // close the pipe so FFmpeg sees EOF and finalizes the file
    let encode_status = child.wait()
        .map_err(|e| pyo3::exceptions::PyRuntimeError::new_err(format!("FFmpeg animation failed: {}", e)))?;
    // `wait()` succeeding only means the process ran; check its exit code too.
    if !encode_status.success() {
        return Err(pyo3::exceptions::PyRuntimeError::new_err(format!("FFmpeg animation encode exited with status {}", encode_status)));
    }
    println!("Animation video created in {:.3}s", t0.elapsed().as_secs_f64());
    // STEP 4: retime the animation to `animation_duration`, append the held
    // static frame, and mux in the audio track.
    let animation_actual_duration = frame_specs.len() as f64 / fps;
    let speed_multiplier = animation_duration / animation_actual_duration;
    println!("Combining videos with FFmpeg filters...");
    let filter_complex = format!(
        "[0:v]setpts={}*PTS[v0];[1:v]loop=loop=-1:size=1:start=0,trim=duration={}[v1];[v0][v1]concat=n=2:v=1:a=0[outv]",
        speed_multiplier,
        static_duration
    );
    let mut combine_child = Command::new("ffmpeg")
        .arg("-y")
        .arg("-i").arg(&animation_video_path)
        .arg("-loop").arg("1")
        .arg("-i").arg(&static_frame_path)
        .arg("-i").arg(&audio_path)
        .arg("-filter_complex").arg(&filter_complex)
        .arg("-map").arg("[outv]")
        .arg("-map").arg("2:a:0")
        .arg("-c:v").arg("libx264")
        .arg("-preset").arg(ffmpeg_preset)
        .arg("-crf").arg(crf)
        .arg("-pix_fmt").arg("yuv420p")
        .arg("-c:a").arg("aac")
        .arg("-shortest")
        .arg(&final_video_path)
        .spawn()
        .map_err(|e| pyo3::exceptions::PyRuntimeError::new_err(format!("Failed to spawn FFmpeg for combine: {}", e)))?;
    let combine_status = combine_child.wait()
        .map_err(|e| pyo3::exceptions::PyRuntimeError::new_err(format!("FFmpeg combine failed: {}", e)))?;
    if !combine_status.success() {
        return Err(pyo3::exceptions::PyRuntimeError::new_err(format!("FFmpeg combine exited with status {}", combine_status)));
    }
    println!("Total processing time: {:.3}s", t0.elapsed().as_secs_f64());
    // Best-effort removal of intermediates; failure here is not fatal.
    let _ = std::fs::remove_file(&animation_video_path);
    let _ = std::fs::remove_file(&static_frame_path);
    Ok(Some(final_video_path))
}
/// Word-wrap `text` to `max_width` pixels using OpenCV text metrics.
///
/// Paragraphs whose first non-whitespace characters are "###" are headers:
/// the marker is stripped and the paragraph is measured with the header
/// font scale/thickness. Words wider than `max_width` on their own are
/// hard-broken at character boundaries.
///
/// Returns the wrapped lines together with a parallel `Vec<bool>` marking
/// which output lines are headers.
fn wrap_text_cv(text: &str, font: i32, default_font_scale: f64, default_thickness: i32, max_width: i32, header_font_scale: f64, header_thickness: i32) -> (Vec<String>, Vec<bool>) {
    let mut wrapped_lines: Vec<String> = Vec::new();
    let mut styles: Vec<bool> = Vec::new();
    for para in text.lines() {
        let trimmed = para.trim_start();
        let is_header = trimmed.starts_with("###");
        // Headers drop the "###" marker and surrounding whitespace.
        let para_str = if is_header { trimmed[3..].trim().to_string() } else { para.to_string() };
        let font_scale = if is_header { header_font_scale } else { default_font_scale };
        let thickness = if is_header { header_thickness } else { default_thickness };
        if para_str.is_empty() {
            // Preserve blank paragraphs as empty, non-header lines.
            wrapped_lines.push(String::new());
            styles.push(false);
            continue;
        }
        let mut cur = String::new();
        for w in para_str.split_whitespace() {
            let candidate = if cur.is_empty() { w.to_string() } else { format!("{} {}", cur, w) };
            let mut base_line = 0;
            let size = get_text_size(&candidate, font, font_scale, thickness, &mut base_line).unwrap();
            if size.width <= max_width {
                cur = candidate;
                continue;
            }
            // Candidate overflows: flush the current line, then place `w`.
            if !cur.is_empty() {
                wrapped_lines.push(std::mem::take(&mut cur));
                styles.push(is_header);
            }
            let mut base_line_single = 0;
            let size_single = get_text_size(w, font, font_scale, thickness, &mut base_line_single).unwrap();
            if size_single.width > max_width {
                // The word alone is too wide: hard-break it char by char.
                let mut chunk = String::new();
                for ch in w.chars() {
                    let cand2 = format!("{}{}", chunk, ch);
                    let mut base_line_ch = 0;
                    let size_ch = get_text_size(&cand2, font, font_scale, thickness, &mut base_line_ch).unwrap();
                    if size_ch.width <= max_width {
                        chunk = cand2;
                    } else {
                        // Only emit non-empty chunks; previously a single glyph
                        // wider than max_width produced a spurious empty line.
                        if !chunk.is_empty() {
                            wrapped_lines.push(std::mem::take(&mut chunk));
                            styles.push(is_header);
                        }
                        chunk = ch.to_string();
                    }
                }
                cur = chunk;
            } else {
                cur = w.to_string();
            }
        }
        if !cur.is_empty() {
            wrapped_lines.push(cur);
            styles.push(is_header);
        }
    }
    (wrapped_lines, styles)
}
/// Draw one video frame: solid background, all visible text lines, and
/// (when `pen_x > 0`) the pen overlay at the current writing position.
///
/// `visible_text` is a '\n'-separated prefix of the wrapped text.
/// `line_styles` / `y_positions` are per-wrapped-line header flags and
/// top-y coordinates, indexed by line number; they must cover every line
/// in `visible_text`. `anim_offset` in [0, 1) drives the pen's vertical
/// bobbing. Returns the rendered BGR frame or a PyRuntimeError if any
/// OpenCV drawing call fails.
fn render_frame(
    visible_text: &str,
    pen_x: i32,
    pen_y: i32,
    anim_offset: f64,
    width: i32,
    height: i32,
    line_styles: &[bool],
    y_positions: &[i32],
    margin_x: i32,
    font: i32,
    default_font_scale: f64,
    header_font_scale: f64,
    default_thickness: i32,
    header_thickness: i32,
    default_text_color: Scalar,
    header_text_color: Scalar,
    bg_color: Scalar,
    pen_color: Scalar,
    pen_tip_radius: i32,
    pen_length: i32,
    pen_thickness: i32,
    pen_base_angle: i32,
    pen_movement_amplitude: i32,
) -> PyResult<Mat> {
    // Canvas filled with the background color.
    let mut img = Mat::new_rows_cols_with_default(height, width, CV_8UC3, bg_color)
        .map_err(|e| pyo3::exceptions::PyRuntimeError::new_err(format!("Failed to create Mat: {}", e)))?;
    for (idx, line) in visible_text.split('\n').enumerate() {
        let is_header = line_styles[idx];
        let font_scale = if is_header { header_font_scale } else { default_font_scale };
        let thickness = if is_header { header_thickness } else { default_thickness };
        let color = if is_header { header_text_color } else { default_text_color };
        let mut base_line = 0;
        let size = get_text_size(line, font, font_scale, thickness, &mut base_line).unwrap();
        // put_text anchors at the baseline, so offset top-y by the glyph height.
        let y_draw = y_positions[idx] + size.height;
        if !line.is_empty() {
            put_text(&mut img, line, Point::new(margin_x, y_draw), font, font_scale, color, thickness, LineTypes::LINE_AA as i32, false)
                .map_err(|e| pyo3::exceptions::PyRuntimeError::new_err(format!("Failed to put text: {}", e)))?;
        }
    }
    if pen_x > 0 {
        // Vertical bob: half a sine period across one glyph's animation steps.
        let offset_y = (pen_movement_amplitude as f64 * (anim_offset * PI).sin()) as i32;
        let pen_tip_y = pen_y + offset_y;
        let angle_rad = (pen_base_angle as f64).to_radians();
        let pen_end_x = pen_x + (pen_length as f64 * angle_rad.cos()) as i32;
        let pen_end_y = pen_tip_y - (pen_length as f64 * angle_rad.sin()) as i32;
        // Pen body, angled up-right from the tip.
        line(&mut img, Point::new(pen_x, pen_tip_y), Point::new(pen_end_x, pen_end_y), pen_color, pen_thickness, LineTypes::LINE_8 as i32, 0)
            .map_err(|e| pyo3::exceptions::PyRuntimeError::new_err(format!("Failed to draw line: {}", e)))?;
        // Filled circle for the pen tip.
        circle(&mut img, Point::new(pen_x, pen_tip_y), pen_tip_radius, pen_color, -1, LineTypes::LINE_8 as i32, 0)
            .map_err(|e| pyo3::exceptions::PyRuntimeError::new_err(format!("Failed to draw circle: {}", e)))?;
    }
    Ok(img)
}
#[pymodule]
fn rust_highlight(_py: Python<'_>, m: &Bound<'_, PyModule>) -> PyResult<()> {
m.add_function(wrap_pyfunction!(generate_video_clip, m)?)?;
Ok(())
} |