Update model.py
Browse files
model.py
CHANGED
|
@@ -1,24 +1,21 @@
|
|
|
|
|
| 1 |
import torch
|
| 2 |
from diffusers import ShapEPipeline
|
| 3 |
from diffusers.utils import export_to_gif
|
| 4 |
|
| 5 |
-
#
|
| 6 |
-
|
| 7 |
-
|
|
|
|
|
|
|
| 8 |
|
| 9 |
-
#
|
| 10 |
-
guidance_scale
|
| 11 |
-
|
| 12 |
-
prompt
|
| 13 |
-
|
| 14 |
-
|
| 15 |
-
|
| 16 |
-
|
| 17 |
-
|
| 18 |
-
|
| 19 |
-
size=256, # Image size for the model
|
| 20 |
-
).images
|
| 21 |
-
|
| 22 |
-
# Export images to GIF format
|
| 23 |
-
gif_path = export_to_gif(images, "shark_3d.gif")
|
| 24 |
-
print(f"GIF saved at {gif_path}")
|
|
|
|
| 1 |
+
# model.py
|
| 2 |
import torch
|
| 3 |
from diffusers import ShapEPipeline
|
| 4 |
from diffusers.utils import export_to_gif
|
| 5 |
|
| 6 |
# Load pipeline once to avoid reloading with each request
def load_pipeline(ckpt_id="openai/shap-e", device="cpu", torch_dtype=torch.float32):
    """Load the Shap-E text-to-3D pipeline and move it to the target device.

    Parameters
    ----------
    ckpt_id : str
        Hugging Face checkpoint id (default ``"openai/shap-e"``, as before).
    device : str
        Device the pipeline is moved to (default ``"cpu"``, as before).
    torch_dtype : torch.dtype
        Weight precision (default ``torch.float32`` — the safe choice on CPU,
        where half precision is poorly supported).

    Returns
    -------
    ShapEPipeline
        The loaded pipeline, ready for inference on ``device``.
    """
    # NOTE(review): trust_remote_code=True executes Python shipped inside the
    # checkpoint repo. Keep only if the repo is trusted; remove otherwise.
    pipe = ShapEPipeline.from_pretrained(
        ckpt_id,
        torch_dtype=torch_dtype,
        trust_remote_code=True,
    ).to(device)
    return pipe
|
| 12 |
# Generate images and export to GIF
def generate_3d_gif(pipe, prompt, guidance_scale=10.0, num_inference_steps=32,
                    size=256, output_path="generated_3d.gif"):
    """Render a 3D asset for ``prompt`` and save the rotating frames as a GIF.

    Parameters
    ----------
    pipe : ShapEPipeline
        A pipeline instance, e.g. from ``load_pipeline()``.
    prompt : str
        Text description of the object to generate.
    guidance_scale : float
        Classifier-free guidance strength (default 10.0).
    num_inference_steps : int
        Number of denoising steps (default 32).
    size : int
        Rendered frame resolution in pixels (default 256).
    output_path : str
        Where to write the GIF (default ``"generated_3d.gif"``, preserving the
        previous hard-coded behavior).

    Returns
    -------
    str
        Path of the written GIF, as returned by ``export_to_gif``.
    """
    images = pipe(
        prompt=prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        size=size,
    ).images
    # export_to_gif writes the frames to disk and returns the file path.
    gif_path = export_to_gif(images, output_path)
    return gif_path
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|