Update video2.py
Browse files
video2.py
CHANGED
|
@@ -266,210 +266,25 @@ import cv2
|
|
| 266 |
from moviepy.editor import VideoFileClip, AudioFileClip
|
| 267 |
from moviepy.video.fx.speedx import speedx
|
| 268 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 269 |
def video_func(id, lines, lang):
    """Render an animated "handwriting" video for lines[id] and mux it with narration audio.

    Pipeline visible in this function:
      1. audio_func() produces the narration track and its duration.
      2. The text is word-wrapped to pixel width with cv2.getTextSize.
      3. Frames are drawn glyph-by-glyph with OpenCV (with an animated red
         "pen" at the write position) and streamed as raw BGR bytes into an
         ffmpeg subprocess, producing a silent mp4.
      4. moviepy retimes the silent clip to the narration duration (speedx)
         and attaches the audio, writing the final clip.

    Parameters:
        id: index into `lines` of the snippet to animate; also used in the
            output file names (silent_video{id}.mp4, clip{id}.mp4).
        lines: sequence of text snippets; lines[id] is rendered. Paragraphs
            starting with "###" are drawn in header style (bigger, blue).
        lang: language code forwarded to audio_func for speech synthesis.

    Returns:
        Path of the final clip, or None when audio, text, or video
        generation fails.

    NOTE(review): relies on module-level names not visible in this block:
    audio_func, CLIPS_DIR, SKIP_SPACES, ANIMATION_FRAMES_PER_CHAR, FPS, and
    the cv2/np/os/math/time/shlex/subprocess imports — confirm they exist
    at module scope.
    """
    # Narration first: its duration drives the speed adjustment at the end.
    duration, audio_path = audio_func(id, lines, lang)
    if not duration or not audio_path:
        print("Failed to generate audio.")
        return None
    TEXT = lines[id]
    print("-----------------------------------------------------------------------------")
    print(TEXT)

    # ---- Layout / style constants (colors are BGR, OpenCV convention) ----
    WIDTH, HEIGHT = 1280, 720
    MARGIN_X, MARGIN_Y = 40, 60
    LINE_SPACING = 8
    FONT = cv2.FONT_HERSHEY_SIMPLEX
    DEFAULT_FONT_SCALE = 1.5
    HEADER_FONT_SCALE = 2.0  # Increased size for headers
    DEFAULT_THICKNESS = 2
    HEADER_THICKNESS = 3  # Bolder for headers
    DEFAULT_TEXT_COLOR = (0, 0, 0)  # BGR Black
    HEADER_TEXT_COLOR = (255, 0, 0)  # BGR Blue
    BG_COLOR = (255, 255, 255)  # BGR White
    silent_video_name = f"silent_video{id}.mp4"
    silent_video_path = os.path.join(CLIPS_DIR, silent_video_name)
    FFMPEG_PRESET = "ultrafast"
    CRF = 28  # Increased CRF for faster encoding (lower quality, but quicker)
    # Pen settings
    PEN_COLOR = (0, 0, 255)  # Red pen (BGR)
    PEN_TIP_RADIUS = 5
    PEN_LENGTH = 20
    PEN_THICKNESS = 2
    PEN_BASE_ANGLE = 45
    PEN_MOVEMENT_AMPLITUDE = 10
    # ===================================

    # Helper: wrap text by pixel width using cv2.getTextSize, now with per-line styles
    def wrap_text_cv(text, font, default_font_scale, default_thickness, max_width):
        """Wrap `text` so each line fits in `max_width` pixels.

        Returns (wrapped_lines, styles) where styles[i] is True when wrapped
        line i belongs to a "###" header paragraph (drawn bigger/bolder).
        """
        wrapped_lines = []
        styles = []  # List of (is_header) for each wrapped line
        for para in text.splitlines():
            is_header = para.strip().startswith("###")
            if is_header:
                para = para.strip()[3:].strip()  # Remove "### " or "###"
                font_scale = HEADER_FONT_SCALE
                thickness = HEADER_THICKNESS
            else:
                font_scale = default_font_scale
                thickness = default_thickness
            if para == "":
                # Preserve blank paragraphs as empty rendered lines.
                wrapped_lines.append("")
                styles.append(False)  # Not header
                continue
            words = para.split(" ")
            cur = ""
            for w in words:
                candidate = w if cur == "" else cur + " " + w
                (w_w, w_h), _ = cv2.getTextSize(candidate, font, font_scale, thickness)
                if w_w <= max_width:
                    cur = candidate
                else:
                    # Line is full: flush it before placing this word.
                    if cur != "":
                        wrapped_lines.append(cur)
                        styles.append(is_header)
                    (single_w, _), _ = cv2.getTextSize(w, font, font_scale, thickness)
                    if single_w > max_width:
                        # Word alone is wider than a line: hard-break it per character.
                        chunk = ""
                        for ch in w:
                            cand2 = chunk + ch
                            (c_w, _), _ = cv2.getTextSize(cand2, font, font_scale, thickness)
                            if c_w <= max_width:
                                chunk = cand2
                            else:
                                wrapped_lines.append(chunk)
                                styles.append(is_header)
                                chunk = ch
                        if chunk:
                            cur = chunk
                        else:
                            cur = ""
                    else:
                        cur = w
            if cur != "":
                wrapped_lines.append(cur)
                styles.append(is_header)
        return wrapped_lines, styles

    # Pre-wrap text with styles
    text_area_width = WIDTH - 2 * MARGIN_X
    wrapped_lines, line_styles = wrap_text_cv(TEXT, FONT, DEFAULT_FONT_SCALE, DEFAULT_THICKNESS, text_area_width)
    full_text = "\n".join(wrapped_lines)
    if not full_text:
        full_text = ""
    # Visible indices: character positions that cause a new frame to be drawn.
    if SKIP_SPACES:
        visible_indices = [i for i, ch in enumerate(full_text) if (ch != ' ' and ch != '\n' and ch != '\t')]
    else:
        visible_indices = list(range(len(full_text)))

    total_glyphs = len(visible_indices)
    print(f"Wrapped lines: {len(wrapped_lines)} lines, total glyphs (counted): {total_glyphs}")
    if total_glyphs == 0:
        print("No text to animate.")
        return None
    # Minimal frames
    min_frames = total_glyphs * ANIMATION_FRAMES_PER_CHAR
    print(f"Rendering {min_frames} minimal frames for full text animation.")
    # Pre-calc line heights and y_positions with per-line styles
    line_heights = []
    y_positions = []
    y = MARGIN_Y
    for i, line in enumerate(wrapped_lines):
        is_header = line_styles[i]
        font_scale = HEADER_FONT_SCALE if is_header else DEFAULT_FONT_SCALE
        thickness = HEADER_THICKNESS if is_header else DEFAULT_THICKNESS
        if line == "":
            # Measure a representative glyph pair so blank lines still take height.
            (w, h), baseline = cv2.getTextSize("Ay", FONT, font_scale, thickness)
        else:
            (w, h), baseline = cv2.getTextSize(line, FONT, font_scale, thickness)
        lh = h + baseline + LINE_SPACING
        line_heights.append(lh)
        y_positions.append(y)
        y += lh
    # Prepare ffmpeg: raw BGR frames on stdin -> silent H.264 mp4.
    ffmpeg_cmd = (
        f'ffmpeg -y '
        f'-f rawvideo -pix_fmt bgr24 -s {WIDTH}x{HEIGHT} -r {FPS} -i - '
        f'-an '
        f'-c:v libx264 -preset {FFMPEG_PRESET} -crf {CRF} -pix_fmt yuv420p '
        f'{silent_video_path}'
    )
    print("FFMPEG CMD:", ffmpeg_cmd)

    proc = subprocess.Popen(shlex.split(ffmpeg_cmd), stdin=subprocess.PIPE, bufsize=10**8)

    # Render function, now with per-line colors and styles
    def render_frame(visible_text, pen_x, pen_y, anim_offset):
        """Draw one frame: the visible text prefix plus the animated pen.

        anim_offset in [0, 1) drives a sinusoidal vertical bob of the pen tip.
        """
        img = np.full((HEIGHT, WIDTH, 3), BG_COLOR, dtype=np.uint8)
        # NOTE: `lines` here shadows the outer function parameter of the same name.
        lines = visible_text.split("\n")
        for idx, line in enumerate(lines):
            is_header = line_styles[idx]
            font_scale = HEADER_FONT_SCALE if is_header else DEFAULT_FONT_SCALE
            thickness = HEADER_THICKNESS if is_header else DEFAULT_THICKNESS
            color = HEADER_TEXT_COLOR if is_header else DEFAULT_TEXT_COLOR
            x = MARGIN_X
            y = y_positions[idx]
            (w, h), baseline = cv2.getTextSize(line, FONT, font_scale, thickness)
            y_draw = y + h
            if line != "":
                cv2.putText(img, line, (x, y_draw), FONT, font_scale, color, thickness, lineType=cv2.LINE_AA)
        # pen_x <= 0 means "no pen" (nothing written yet on this frame).
        if pen_x > 0:
            offset_y = int(PEN_MOVEMENT_AMPLITUDE * math.sin(anim_offset * math.pi))
            pen_tip_y = pen_y + offset_y
            angle_rad = math.radians(PEN_BASE_ANGLE)
            pen_end_x = pen_x + int(PEN_LENGTH * math.cos(angle_rad))
            pen_end_y = pen_tip_y - int(PEN_LENGTH * math.sin(angle_rad))
            cv2.line(img, (pen_x, pen_tip_y), (pen_end_x, pen_end_y), PEN_COLOR, PEN_THICKNESS)
            cv2.circle(img, (pen_x, pen_tip_y), PEN_TIP_RADIUS, PEN_COLOR, -1)
        return img

    t0 = time.time()
    frames_sent = 0
    prev_visible_sub = ""
    # NOTE(review): last_pen_x / last_pen_y are assigned but never read below.
    last_pen_x = 0
    last_pen_y = 0
    for rank, idx_in_full in enumerate(visible_indices):
        visible_sub = full_text[:idx_in_full + 1]
        if visible_sub != prev_visible_sub:
            # Recompute the pen position at the end of the last visible line.
            lines = visible_sub.split("\n")
            last_line = lines[-1]
            line_idx = len(lines) - 1
            is_header = line_styles[line_idx]
            font_scale = HEADER_FONT_SCALE if is_header else DEFAULT_FONT_SCALE
            thickness = HEADER_THICKNESS if is_header else DEFAULT_THICKNESS
            (w, h), baseline = cv2.getTextSize(last_line, FONT, font_scale, thickness)
            pen_x = MARGIN_X + w + 5
            pen_y = y_positions[line_idx] + h // 2
            last_pen_x = pen_x
            last_pen_y = pen_y
        # NOTE(review): placement reconstructed — one ANIMATION_FRAMES_PER_CHAR
        # burst per glyph, matching the min_frames estimate above; confirm
        # against the original file's indentation.
        for anim_step in range(ANIMATION_FRAMES_PER_CHAR):
            frame_img = render_frame(visible_sub, pen_x, pen_y, anim_step / ANIMATION_FRAMES_PER_CHAR)
            proc.stdin.write(frame_img.tobytes())
            frames_sent += 1
        prev_visible_sub = visible_sub
    proc.stdin.close()
    proc.wait()
    elapsed = time.time() - t0
    print(f"Frames sent: {frames_sent}, elapsed time: {elapsed:.3f} seconds")
    if not os.path.exists(silent_video_path):
        print("Silent video generation failed.")
        return None

    final_video_name = f"clip{id}.mp4"
    final_video_path = os.path.join(CLIPS_DIR, final_video_name)
    video_clip = VideoFileClip(silent_video_path)
    rendered_duration = video_clip.duration
    print(f"Rendered video duration: {rendered_duration:.3f}s, Audio duration: {duration:.3f}s")
    if rendered_duration > 0 and duration > 0:
        # Retime the silent render so it spans exactly the narration length.
        speed_factor = rendered_duration / duration
        print(f"Adjusting video speed by factor: {speed_factor:.3f}")
        video_clip = video_clip.fx(speedx, speed_factor)
    final_clip = video_clip.set_audio(AudioFileClip(audio_path))
    # Write final video with faster settings
    final_clip.write_videofile(final_video_path, codec='libx264', audio_codec='aac', preset='ultrafast', verbose=False, logger=None, threads=4)  # Added threads for multi-threading
    print(f"Final video saved at: {final_video_path}")
    # Clean up
    os.remove(silent_video_path)
    return final_video_path
|
|
|
|
# Standard library
import os

# Third-party
from moviepy.editor import VideoFileClip, AudioFileClip
from moviepy.video.fx.speedx import speedx

# Native extension that performs frame rendering and muxing
import rust_highlight

# Root directory for generated clip files. BASE_DIR defaults to /app/data.
CLIPS_DIR = os.path.join(os.environ.get('BASE_DIR', '/app/data'), 'clips')  # Adjust as needed for Hugging Face Spaces
def video_func(id, lines, lang):
    """Produce the narrated clip for lines[id].

    Synthesizes the narration via audio_func, then delegates all frame
    rendering and audio muxing to the native rust_highlight extension.

    Returns the path of the finished clip, or None on any failure.
    """
    # Audio comes first: without a narration track there is nothing to time against.
    duration, audio_path = audio_func(id, lines, lang)  # Assuming audio_func is defined elsewhere
    if not (duration and audio_path):
        print("Failed to generate audio.")
        return None

    snippet = lines[id]
    print("-----------------------------------------------------------------------------")
    print(snippet)

    # Heavy lifting (glyph animation + muxing) happens in native code.
    final_video_path = rust_highlight.generate_video_clip(id, snippet, audio_path, duration, CLIPS_DIR)
    if final_video_path is None:
        print("Failed to generate video.")
        return None

    print(f"Final video saved at: {final_video_path}")
    return final_video_path
|