# Hugging Face Space: FastAPI text-to-video generation service.
# (Header reconstructed from page-scrape residue: "Spaces: / Paused / Paused".)
import io
import os
from multiprocessing import Process, Queue

import torch
import uvicorn
from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
from diffusers.utils import export_to_video
from fastapi import FastAPI, HTTPException
from fastapi.responses import StreamingResponse
from pydantic import BaseModel
app = FastAPI()

# Load the text-to-video pipeline once at import time so every request
# reuses the same weights. fp16 variant halves memory; DPMSolver++ lets
# the model produce usable output in ~25 steps instead of 50.
video_pipe = DiffusionPipeline.from_pretrained(
    "damo-vilab/text-to-video-ms-1.7b",
    torch_dtype=torch.float16,
    variant="fp16",
)
video_pipe.scheduler = DPMSolverMultistepScheduler.from_config(video_pipe.scheduler.config)
# Offload sub-modules to CPU between uses so the pipeline fits on small GPUs.
video_pipe.enable_model_cpu_offload()
class VideoRequest(BaseModel):
    """Request body for the video-generation endpoint.

    Attributes:
        prompt: Free-text description of the video to generate.
    """

    prompt: str
def generate_video_response(request, queue):
    """Worker-process entry point: run the pipeline and report via *queue*.

    Puts exactly one item on *queue*: on success, the filesystem path of the
    exported .mp4 (as returned by ``export_to_video``); on failure, a string
    beginning with ``"Error: "``. The parent process distinguishes the two
    cases by that prefix, so the format must not change.

    Args:
        request: Object with a ``prompt`` attribute (a ``VideoRequest``).
        queue: ``multiprocessing.Queue`` used to send the single result back.
    """
    try:
        frames = video_pipe(request.prompt, num_inference_steps=25).frames
        queue.put(export_to_video(frames))
    except Exception as e:
        # Broad catch is deliberate: this is the top of a child process, and
        # any failure must be serialized back to the parent rather than lost.
        queue.put(f"Error: {str(e)}")
# NOTE(review): the route decorator was missing in the scraped source — the
# endpoint was otherwise unreachable. Path chosen from the function name;
# confirm against the original Space's client.
@app.post("/generate_video/")
async def generate_video(request: VideoRequest):
    """Generate a short video from a text prompt and stream it as MP4.

    Runs the diffusion pipeline in a child process and waits for it to
    finish. NOTE: ``join`` blocks the event loop for the whole generation;
    acceptable for a single-user Space, not for concurrent traffic.

    Raises:
        HTTPException: 500 with the worker's error message on failure.
    """
    queue = Queue()
    worker = Process(target=generate_video_response, args=(request, queue))
    worker.start()
    worker.join()
    result = queue.get()
    if result.startswith("Error"):
        raise HTTPException(status_code=500, detail=result)
    # The worker returns a file *path*, not bytes: read the exported MP4 and
    # delete the temp file so repeated requests don't leak disk space.
    with open(result, "rb") as fh:
        data = fh.read()
    os.remove(result)
    return StreamingResponse(io.BytesIO(data), media_type="video/mp4")
| if __name__ == "__main__": | |
| uvicorn.run(app, host="0.0.0.0", port=7860) | |