# NOTE: Hugging Face Spaces page chrome ("Spaces: Running") removed from the
# scraped source; the script proper begins with the imports below.
# Standard library
import os
import shutil
import tempfile
import time

# Third party
import gradio as gr
import torch
from diffusers import DiffusionPipeline
from moviepy import VideoFileClip, concatenate_videoclips
from PIL import Image
# Load a lightweight Hugging Face text-to-video model (Zeroscope).
# fp16 weights are requested only when a CUDA device is available;
# otherwise the pipeline falls back to fp32 on CPU.
_has_cuda = torch.cuda.is_available()
pipe = DiffusionPipeline.from_pretrained(
    "cerspense/zeroscope_v2_576w",
    torch_dtype=torch.float16 if _has_cuda else torch.float32,
    variant="fp16" if _has_cuda else None,
).to("cuda" if _has_cuda else "cpu")

# Running prompt history (lets users extend the "story" across calls)
# and the folder where every generated video is archived.
global_prompt_history = []
output_dir = "./videos_guardados"
os.makedirs(output_dir, exist_ok=True)
def generar_video(prompt, imagen, duracion, reiniciar_historial):
    """Generate an MP4 by concatenating short clips from the prompt history.

    Args:
        prompt: New prompt text; appended to the accumulated history.
        imagen: Reference PIL image from the UI. NOTE(review): currently
            unused — the Zeroscope pipeline is text-only; the parameter is
            kept so the Gradio interface signature stays unchanged.
        duracion: Requested duration in seconds (clip count capped at 5).
        reiniciar_historial: When True, clear the prompt history first.

    Returns:
        Path to the archived MP4 inside ``output_dir``.
    """
    # Local import: diffusers is already a file-level dependency.
    from diffusers.utils import export_to_video

    global global_prompt_history

    if reiniciar_historial:
        global_prompt_history = []
    if prompt:
        global_prompt_history.append(prompt)

    texto_completo = ", ".join(global_prompt_history)
    print(f"Prompt combinado: {texto_completo}")

    segundos_por_clip = 2
    # int() guards against float slider values (range() rejects floats);
    # max(1, ...) guarantees at least one clip when duracion < 2, since an
    # empty clip list would crash concatenate_videoclips().
    max_clips = max(1, min(int(duracion) // segundos_por_clip, 5))

    video_paths = []
    for _ in range(max_clips):
        result = pipe(prompt=texto_completo)
        # Diffusers text-to-video pipelines return decoded frames (not
        # encoded bytes) in `.frames`; export_to_video() encodes them to MP4.
        # The original wrote the raw object with f.write(), which fails.
        temp_path = tempfile.NamedTemporaryFile(delete=False, suffix=".mp4").name
        export_to_video(result.frames[0], temp_path)
        video_paths.append(temp_path)

    clips = [VideoFileClip(path) for path in video_paths]
    final_clip = concatenate_videoclips(clips)
    final_path = tempfile.NamedTemporaryFile(delete=False, suffix=".mp4").name
    # moviepy 2.x (the `from moviepy import ...` API used here) removed the
    # `verbose` keyword; logger=None silences the progress output instead.
    final_clip.write_videofile(final_path, codec="libx264", audio=False, logger=None)
    final_clip.close()

    # Archive a permanent copy and return it, so the intermediate temp file
    # can be cleaned up instead of leaking on every call.
    nombre_archivo = f"video_{int(time.time())}.mp4"
    ruta_guardado = os.path.join(output_dir, nombre_archivo)
    shutil.copy(final_path, ruta_guardado)

    for clip in clips:
        clip.close()
    for path in video_paths:
        os.remove(path)
    os.remove(final_path)

    return ruta_guardado
# Gradio front end: wires generar_video to a prompt box, an optional
# reference image, a duration slider, and a history-reset checkbox.
demo = gr.Interface(
    fn=generar_video,
    inputs=[
        gr.Textbox(
            label="Prompt (Texto de la animaci贸n)",
            placeholder="Ej: Gato bailando en la luna",
        ),
        gr.Image(type="pil", label="Imagen de referencia (opcional)"),
        gr.Slider(
            minimum=1,
            maximum=180,
            value=5,
            label="Duraci贸n del video (segundos)",
        ),
        gr.Checkbox(label="Reiniciar historial de prompts", value=False),
    ],
    outputs=gr.Video(label="Video generado"),
    title="VideoAnimador AI",
    description="Genera un video animado usando texto, imagen o ambos. Puedes seguir agregando prompts para extender la historia o reiniciar. Los videos se guardan localmente en /videos_guardados.",
)

if __name__ == "__main__":
    demo.launch()