import os
import tempfile

import cv2
import gradio as gr
import numpy as np
import requests
from PIL import Image


def process_video(video_path, lora_url, lora_strength, enable_fflf, blend_strength,
                  seed, steps, cfg_scale, width, height, frames):
    """Download an optional LoRA and run the FFLF (first-frame/last-frame) pre-pass.

    Args:
        video_path: Path to the uploaded input video, or None.
        lora_url: Optional URL of a ``.safetensors`` LoRA to download.
        lora_strength: LoRA weight in [0, 1], echoed in the status message.
        enable_fflf: If True and a video is supplied, extract/blend frames.
        blend_strength: Weight in [0, 1] for blending first into last frame.
        seed, steps, cfg_scale, width, height, frames: Generation parameters
            (currently unused placeholders for the WAN 2.2 backend).

    Returns:
        A status string (the wired gr.Textbox output expects text on every path).
    """
    # Download LoRA from URL.
    if lora_url:
        try:
            # Timeout + raise_for_status: without these, a 404/hang would
            # silently write an HTML error page as a .safetensors file.
            response = requests.get(lora_url, timeout=60)
            response.raise_for_status()
            lora_path = os.path.join(tempfile.gettempdir(), "downloaded_lora.safetensors")
            with open(lora_path, 'wb') as f:
                f.write(response.content)
        except Exception as e:
            return f"Error downloading LoRA: {str(e)}"

    # Process video with FFLF if enabled.
    if enable_fflf and video_path:
        try:
            # Single capture: read the first frame, then seek to the last.
            cap = cv2.VideoCapture(video_path)
            try:
                total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
                ret_first, first_frame = cap.read()
                # max(..., 0) guards against seeking to -1 on an empty video.
                cap.set(cv2.CAP_PROP_POS_FRAMES, max(total_frames - 1, 0))
                ret_last, last_frame = cap.read()
            finally:
                # Always release the handle, even if a read raises.
                cap.release()

            if ret_last:
                # Only blend when the first frame was actually read; the
                # original code passed None to addWeighted on a failed read.
                if ret_first and blend_strength > 0:
                    last_frame = cv2.addWeighted(
                        first_frame, 1 - blend_strength,
                        last_frame, blend_strength, 0
                    )
                # Here you would integrate with WAN 2.2 generation.
                # Return a string, not the raw ndarray: the output component
                # is a gr.Textbox, which cannot display an image array.
                h, w = last_frame.shape[:2]
                return f"FFLF frame prepared ({w}x{h}), blend strength {blend_strength}"
        except Exception as e:
            return f"Error processing video: {str(e)}"

    # Placeholder for actual generation.
    return "Generation complete with LoRA strength: " + str(lora_strength)


with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("# WANderFrame - LoRA Linker")

    with gr.Row():
        with gr.Column():
            # LoRA Configuration
            with gr.Group():
                gr.Markdown("## LoRA Configuration")
                lora_url = gr.Textbox(label="LoRA URL",
                                      placeholder="https://example.com/lora.safetensors")
                lora_strength = gr.Slider(0, 1, value=0.7, label="LoRA Strength")

            # FFLF Configuration
            with gr.Group():
                gr.Markdown("## FFLF Configuration")
                enable_fflf = gr.Checkbox(label="Enable First Frame Last Frame", value=True)
                blend_strength = gr.Slider(0, 1, value=0.5, label="Frame Blend Strength")
                input_video = gr.Video(label="Input Video (for FFLF)")

        with gr.Column():
            # Generation Parameters
            with gr.Group():
                gr.Markdown("## Generation Parameters")
                seed = gr.Number(label="Seed", value=-1)
                steps = gr.Number(label="Steps", value=30)
                cfg_scale = gr.Number(label="CFG Scale", value=7.5)
                width = gr.Number(label="Width", value=512)
                height = gr.Number(label="Height", value=512)
                frames = gr.Number(label="Frames", value=24)

            # Output
            output = gr.Textbox(label="Output", interactive=False)
            generate_btn = gr.Button("Generate", variant="primary")

    generate_btn.click(
        fn=process_video,
        inputs=[input_video, lora_url, lora_strength, enable_fflf, blend_strength,
                seed, steps, cfg_scale, width, height, frames],
        outputs=output
    )

# Guard the launch so importing this module (e.g. for testing) does not
# start a web server as a side effect.
if __name__ == "__main__":
    demo.launch()