"""Gradio app: generate short cooking videos (exported as GIFs) from text prompts.

Loads a text-to-video DiffusionPipeline from the Hugging Face Hub once at
startup, then serves a simple text -> GIF interface.
"""

import torch
from diffusers import DiffusionPipeline
import gradio as gr
from PIL import Image as PILImage
import numpy as np
from diffusers.utils import export_to_gif

# Replace this with your new Hugging Face repository ID
huggingface_repo_id = "Nehal721/my-smart-kitchen-video-model"

# Load the model once, when the app starts.
try:
    # fp16 halves GPU memory but is poorly supported for CPU inference in
    # diffusers, so fall back to fp32 when no CUDA device is available.
    _dtype = torch.float16 if torch.cuda.is_available() else torch.float32
    pipe = DiffusionPipeline.from_pretrained(huggingface_repo_id, torch_dtype=_dtype)
    if torch.cuda.is_available():
        pipe.to("cuda")
        print("Model loaded on GPU.")
    else:
        pipe.to("cpu")
        print("Model loaded on CPU.")
    # xFormers memory optimization (optional; GPU-only).
    try:
        if torch.cuda.is_available():
            pipe.enable_xformers_memory_efficient_attention()
            print("xFormers memory efficient attention enabled.")
    except Exception as e:
        # Best-effort: xFormers may simply not be installed.
        print(f"xFormers not enabled: {e}")
except Exception as e:
    print(f"Error loading model: {e}")
    pipe = None


def generate_video_from_prompt(prompt):
    """Generate a short video for ``prompt`` and return the path to a GIF.

    Args:
        prompt: Text description of the video to generate.

    Returns:
        Path to the exported ``output.gif`` file.

    Raises:
        gr.Error: If the model failed to load or generation fails.
            (Previously an error *string* was returned, which the
            ``"image"`` output component cannot render as a file.)
    """
    if pipe is None:
        raise gr.Error("Error: Model could not be loaded.")
    try:
        # Generate the video frames.
        video_frames = pipe(
            prompt,
            num_inference_steps=40,
            height=256,
            width=448,
            num_frames=24,
        ).frames

        # Convert the frames to a GIF.
        gif_path = "output.gif"
        frames_for_gif = []
        # Some pipeline versions return a (batch, frames, H, W, C) array;
        # drop the leading batch dimension.
        if isinstance(video_frames, np.ndarray) and video_frames.ndim == 5:
            video_frames = video_frames.squeeze(0)
        if isinstance(video_frames, np.ndarray):
            for frame_np in video_frames:
                if frame_np.dtype != np.uint8:
                    # Assumes float frames in [0, 1]; scale to 8-bit RGB.
                    frame_np = (frame_np * 255).astype(np.uint8)
                frames_for_gif.append(PILImage.fromarray(frame_np, 'RGB'))
        else:
            # Newer diffusers pipelines return a (possibly nested, batched)
            # list of PIL images instead of an ndarray.
            frames = video_frames
            if frames and isinstance(frames[0], (list, tuple)):
                frames = frames[0]
            frames_for_gif = [f for f in frames if isinstance(f, PILImage.Image)]
        if not frames_for_gif:
            raise ValueError("No valid frames for GIF conversion.")
        export_to_gif(frames_for_gif, gif_path)
        return gif_path  # Path is what the Gradio output component needs.
    except Exception as e:
        raise gr.Error(f"An error occurred during video generation: {e}")


# Build the Gradio interface.
iface = gr.Interface(
    fn=generate_video_from_prompt,
    inputs="text",
    outputs="image",
    title="Smart Kitchen Video Generator",
    description="Text prompts se cooking videos generate karein."
)

# Run the app only when executed as a script, not when imported.
if __name__ == "__main__":
    iface.launch(share=False)