halilcelik commited on
Commit
c266148
·
verified ·
1 Parent(s): 1264f9e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +31 -16
app.py CHANGED
@@ -3,29 +3,44 @@ import torch
3
  from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
4
  from diffusers.utils import export_to_video
5
  import uuid
 
6
 
7
- # CPU Dostu Model
8
- model_id = "vdo/zeroscope_v2_576w"
9
 
10
- def load_pipeline():
11
- pipe = DiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float32)
12
- pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
13
- return pipe
14
 
15
- # Pipeline'ı globalde tanımlıyoruz
16
- pipe = load_pipeline()
 
 
 
17
 
18
  def generate_video(prompt):
19
- # CPU için en hafif ayarlar
20
- video_frames = pipe(prompt, num_inference_steps=10, height=320, width=576, num_frames=16).frames
21
- video_path = f"video_{uuid.uuid4()}.mp4"
22
- export_to_video(video_frames[0], video_path)
23
- return video_path
 
 
 
 
 
 
 
 
 
 
 
 
 
24
 
25
- # n8n ile %100 uyumlu API ismi
26
  demo = gr.Interface(
27
- fn=generate_video,
28
- inputs=gr.Textbox(label="Prompt"),
29
  outputs=gr.Video(label="Result"),
30
  api_name="predict"
31
  )
 
3
from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
from diffusers.utils import export_to_video
import uuid
import os  # NOTE(review): os appears unused in the visible code — confirm before removing

# Lightest text-to-video model that is practical on CPU-only hardware.
model_id = "cerspense/zeroscope_v2_576w"

# Force CPU execution — this Space has no GPU.
device = "cpu"

# Load the pipeline once at module import so every request reuses the
# same in-memory model instead of reloading it per call.
print("Model yukleniyor patron, beklemede kal...")
pipe = DiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float32)
# Swap in the DPM multistep solver scheduler — presumably chosen because it
# tolerates very low step counts (see generate_video); confirm against the
# diffusers scheduler docs.
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe.to(device)
print("Model yuklendi, motor hazır!")
19
 
20
def generate_video(prompt):
    """Generate a very short, low-resolution video clip from a text prompt.

    Settings are deliberately minimal so inference finishes on CPU:
    3 denoising steps, 6 frames, 256x256 resolution.

    Parameters
    ----------
    prompt : str
        Text description of the video to generate.

    Returns
    -------
    str
        Path to the exported ``.mp4`` file (unique name per call).

    Raises
    ------
    gr.Error
        If generation or export fails. The original code returned
        ``str(e)`` here, which the ``gr.Video`` output then tried to load
        as a file path — raising ``gr.Error`` instead surfaces the real
        error message to the UI/API client.
    """
    try:
        video_frames = pipe(
            prompt,
            num_inference_steps=3,  # floor-level step count for CPU speed
            height=256,             # reduced resolution
            width=256,
            num_frames=6,           # very short, GIF-like clip
        ).frames

        # Unique filename so concurrent/successive requests never collide.
        video_path = f"video_{uuid.uuid4()}.mp4"
        export_to_video(video_frames[0], video_path)
        return video_path
    except Exception as exc:
        # Propagate the failure as a Gradio error instead of returning the
        # error text where a video file path is expected.
        raise gr.Error(str(exc)) from exc
39
 
40
# Expose generate_video over the "predict" API route so n8n can call it
# directly via the Gradio HTTP API.
prompt_input = gr.Textbox(label="Prompt")
video_output = gr.Video(label="Result")

demo = gr.Interface(
    fn=generate_video,
    inputs=prompt_input,
    outputs=video_output,
    api_name="predict",
)