File size: 8,331 Bytes
eeaca5a |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 |
import torch
import numpy as np
from PIL import Image, ImageDraw
import cv2
# ====================================================================================================
# --- Grid Animator Node ---
# By empoweringtheuser @ civitai
#
# Version 11: Anti-clipping.
# An oversized working canvas is used so the perspective transform cannot crop
# parts of the image. This definitively fixes the "disappearing lines" problem.
# ====================================================================================================
class GridAnimator:
    """ComfyUI node that renders an animation of a 3D-rotating grid of squares.

    For every frame it draws a ``rows x columns`` grid of outlined squares on an
    oversized transparent working canvas, applies a yaw/pitch perspective warp,
    auto-crops to the visible content, applies zoom, and composites the result
    centered on a white frame of the requested output size.  The oversized
    canvas (2x the largest output dimension) keeps the warp from clipping the
    grid at extreme rotations (the "disappearing lines" problem).
    """

    CATEGORY = "animation/generators"
    RETURN_TYPES = ("IMAGE",)
    RETURN_NAMES = ("images",)
    FUNCTION = "generate_animation"
    OUTPUT_NODE = False

    @staticmethod
    def _project_3d_to_2d(points_3d, rotation_yaw, rotation_pitch, focal_length, canvas_size):
        """Project 3D points onto the 2D canvas after a yaw/pitch rotation.

        Args:
            points_3d: array-like of shape (N, 3); points centered on the origin.
            rotation_yaw: rotation about the Y axis, in degrees.
            rotation_pitch: rotation about the X axis, in degrees.
            focal_length: pinhole-style focal length controlling perspective strength.
            canvas_size: (width, height) of the target canvas; the 3D origin is
                translated to the canvas center.

        Returns:
            np.float32 array of shape (N, 2) with canvas-space coordinates.
        """
        w, h = canvas_size
        yaw, pitch = np.deg2rad(rotation_yaw), np.deg2rad(rotation_pitch)
        R_yaw = np.array([
            [np.cos(yaw), 0, np.sin(yaw)],
            [0, 1, 0],
            [-np.sin(yaw), 0, np.cos(yaw)],
        ])
        R_pitch = np.array([
            [1, 0, 0],
            [0, np.cos(pitch), -np.sin(pitch)],
            [0, np.sin(pitch), np.cos(pitch)],
        ])
        # Combined rotation R = R_pitch @ R_yaw, applied as p' = p @ R.T.
        rotated = np.asarray(points_3d, dtype=np.float64) @ np.dot(R_pitch, R_yaw).T
        # Perspective divide, vectorized: scale = f / (f + z).  The degenerate
        # (f + z) == 0 case keeps the original fallback of using the focal
        # length itself as the scale factor.
        denom = focal_length + rotated[:, 2]
        safe_denom = np.where(denom != 0, denom, 1.0)
        scale = np.where(denom != 0, focal_length / safe_denom, float(focal_length))
        projected = np.float32(rotated[:, :2] * scale[:, None])
        # Translate so the 3D origin lands at the canvas center.
        projected[:, 0] += w / 2
        projected[:, 1] += h / 2
        return projected

    @classmethod
    def INPUT_TYPES(cls):
        """Declare the node's UI inputs (unchanged from previous versions)."""
        return {
            "required": {
                "width": ("INT", {"default": 512, "min": 64, "max": 4096, "step": 8}),
                "height": ("INT", {"default": 512, "min": 64, "max": 4096, "step": 8}),
                "num_frames": ("INT", {"default": 73, "min": 1, "max": 1000, "label": "Duración (frames)"}),
                "rows": ("INT", {"default": 1, "min": 1, "max": 50, "label": "Filas"}),
                "columns": ("INT", {"default": 1, "min": 1, "max": 50, "label": "Columnas"}),
                "square_size": ("INT", {"default": 200, "min": 10, "max": 1024, "label": "Tamaño del lado"}),
                "spacing": ("INT", {"default": 24, "min": 0, "max": 1024, "step": 1, "label": "Espaciado"}),
                "line_thickness": ("INT", {"default": 4, "min": 1, "max": 50, "label": "Grosor de línea"}),
                "color": ("STRING", {"default": "#FF0033", "label": "Color (Hex)"}),
                "focal_length": ("INT", {"default": 500, "min": 50, "max": 5000, "step": 10, "label": "Focal Length (Perspective)"}),
                "start_yaw": ("FLOAT", {"default": 0.0, "min": -180.0, "max": 180.0, "step": 1.0, "label": "Inicio Yaw (grados)"}),
                "end_yaw": ("FLOAT", {"default": 45.0, "min": -180.0, "max": 180.0, "step": 1.0, "label": "Fin Yaw (grados)"}),
                "start_pitch": ("FLOAT", {"default": 0.0, "min": -180.0, "max": 180.0, "step": 1.0, "label": "Inicio Pitch (grados)"}),
                "end_pitch": ("FLOAT", {"default": 0.0, "min": -180.0, "max": 180.0, "step": 1.0, "label": "Fin Pitch (grados)"}),
                "start_zoom": ("FLOAT", {"default": 1.0, "min": 0.1, "max": 10.0, "step": 0.05}),
                "end_zoom": ("FLOAT", {"default": 1.0, "min": 0.1, "max": 10.0, "step": 0.05}),
            }
        }

    @staticmethod
    def _draw_grid_bgra(canvas_size, rows, columns, square_size, spacing, line_thickness, color):
        """Draw the grid centered on a transparent square working canvas.

        Returns:
            (grid_bgra, grid_w, grid_h, start_x, start_y): the canvas as an
            OpenCV BGRA array, the grid's pixel dimensions, and the top-left
            corner where the grid was drawn.
        """
        grid_w = columns * square_size + max(0, columns - 1) * spacing
        grid_h = rows * square_size + max(0, rows - 1) * spacing
        canvas = Image.new('RGBA', (canvas_size, canvas_size), (0, 0, 0, 0))
        draw = ImageDraw.Draw(canvas)
        start_x = (canvas_size - grid_w) // 2
        start_y = (canvas_size - grid_h) // 2
        step = square_size + spacing
        for r in range(rows):
            for c in range(columns):
                x0 = start_x + c * step
                y0 = start_y + r * step
                draw.rectangle([x0, y0, x0 + square_size, y0 + square_size],
                               outline=color, width=line_thickness)
        return cv2.cvtColor(np.array(canvas), cv2.COLOR_RGBA2BGRA), grid_w, grid_h, start_x, start_y

    @staticmethod
    def _crop_and_zoom(transformed_bgra, zoom):
        """Auto-crop a warped BGRA image to its non-transparent bounding box,
        then resize it by ``zoom``.

        Returns:
            An RGBA PIL image, or None when the frame is fully transparent or
            the zoomed size collapses to zero pixels.
        """
        alpha = transformed_bgra[:, :, 3]
        contours, _ = cv2.findContours(alpha, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        if not contours:
            return None
        # Merge all contours into one overall bounding box.
        x, y, w, h = cv2.boundingRect(np.concatenate(contours))
        cropped = transformed_bgra[y:y + h, x:x + w]
        zoomed_w, zoomed_h = int(w * zoom), int(h * zoom)
        if zoomed_w <= 0 or zoomed_h <= 0:
            return None
        zoomed = cv2.resize(cropped, (zoomed_w, zoomed_h), interpolation=cv2.INTER_LANCZOS4)
        return Image.fromarray(cv2.cvtColor(zoomed, cv2.COLOR_BGRA2RGBA))

    def generate_animation(self, width, height, num_frames, rows, columns, square_size, spacing, line_thickness, color, focal_length, start_yaw, end_yaw, start_pitch, end_pitch, start_zoom, end_zoom):
        """Render the animation and return it as a ComfyUI IMAGE batch.

        Yaw, pitch, and zoom are linearly interpolated from their start to end
        values across ``num_frames``.

        Returns:
            One-tuple containing a float32 tensor of shape
            (num_frames, height, width, 3) with values in [0, 1].
        """
        # --- 1. Oversized working canvas so the warp never clips the grid ---
        # A factor of 2 on the largest output dimension; sufficient for
        # extreme rotations per the node's design notes.
        canvas_size = max(width, height) * 2
        grid_bgra, grid_w, grid_h, start_x, start_y = self._draw_grid_bgra(
            canvas_size, rows, columns, square_size, spacing, line_thickness, color)
        # --- 2. Corner correspondences for the perspective transform ---
        # 3D corners of the grid plane, centered on the origin (grid-sized,
        # not canvas-sized) ...
        points_3d = np.float32([[-grid_w / 2, -grid_h / 2, 0], [grid_w / 2, -grid_h / 2, 0],
                                [grid_w / 2, grid_h / 2, 0], [-grid_w / 2, grid_h / 2, 0]])
        # ... and their 2D locations as actually drawn on the big canvas.
        src_pts = np.float32([[start_x, start_y], [start_x + grid_w, start_y],
                              [start_x + grid_w, start_y + grid_h], [start_x, start_y + grid_h]])
        output_frames = []
        for i in range(num_frames):
            # Linear interpolation of the camera parameters (single frame -> start pose).
            progress = i / (num_frames - 1) if num_frames > 1 else 0.0
            current_yaw = start_yaw + (end_yaw - start_yaw) * progress
            current_pitch = start_pitch + (end_pitch - start_pitch) * progress
            current_zoom = start_zoom + (end_zoom - start_zoom) * progress
            # Project the 3D corners onto the big canvas and warp inside it.
            dst_pts = self._project_3d_to_2d(points_3d, current_yaw, current_pitch,
                                             focal_length, (canvas_size, canvas_size))
            matrix = cv2.getPerspectiveTransform(src_pts, dst_pts)
            transformed = cv2.warpPerspective(grid_bgra, matrix, (canvas_size, canvas_size),
                                              flags=cv2.INTER_LANCZOS4)
            # --- 3. Auto-crop, zoom, and composite onto the output frame ---
            final_pil = self._crop_and_zoom(transformed, current_zoom)
            final_frame = Image.new('RGB', (width, height), 'white')
            if final_pil:
                # Center the (possibly larger-than-frame) content; PIL handles
                # negative paste offsets by clipping.
                paste_x = (width - final_pil.width) // 2
                paste_y = (height - final_pil.height) // 2
                final_frame.paste(final_pil, (paste_x, paste_y), final_pil)
            output_frames.append(np.array(final_frame).astype(np.float32) / 255.0)
        frames_tensor = torch.from_numpy(np.stack(output_frames, axis=0))
        return (frames_tensor,)
# ComfyUI registration tables: node implementation classes and their UI display names.
NODE_CLASS_MAPPINGS = {
    "GridAnimator": GridAnimator,
}
NODE_DISPLAY_NAME_MAPPINGS = {
    "GridAnimator": "Grid Animator 3D 🔳",
}