Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -1,50 +1,64 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
import gradio as gr
|
| 2 |
import torch
|
| 3 |
-
import
|
| 4 |
-
|
| 5 |
-
|
| 6 |
-
|
| 7 |
-
|
| 8 |
-
from get3d.models import get_model
|
| 9 |
-
from get3d.configs import MODEL_CONFIGS
|
| 10 |
-
|
| 11 |
-
# Configurazione: scegli un modello pretrained (es: 'chair', 'car', 'table')
|
| 12 |
-
MODEL_NAME = 'chair'
|
| 13 |
-
config = MODEL_CONFIGS[MODEL_NAME]
|
| 14 |
|
|
|
|
| 15 |
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
|
| 16 |
-
|
| 17 |
-
model =
|
| 18 |
-
|
| 19 |
-
|
| 20 |
-
|
| 21 |
-
def generate_mesh(prompt: str) -> str:
|
| 22 |
-
"""
|
| 23 |
-
Genera una mesh 3D casuale basata sul prompt (demo non testuale) e la salva come .obj
|
| 24 |
-
"""
|
| 25 |
-
# GET3D non supporta direttamente text-to-3D; qui usiamo un vettore latente casuale
|
| 26 |
-
z = torch.randn(1, config.z_dim, device=device)
|
| 27 |
-
with torch.no_grad():
|
| 28 |
-
verts, faces = model.generate_mesh(z)
|
| 29 |
-
|
| 30 |
-
# Costruisci l'oggetto Trimesh e salvalo
|
| 31 |
-
mesh = trimesh.Trimesh(vertices=verts[0].cpu().numpy(), faces=faces[0].cpu().numpy())
|
| 32 |
-
output_path = f"/mnt/data/mesh_{torch.randint(0,1e6,(1,)).item()}.obj"
|
| 33 |
-
mesh.export(output_path)
|
| 34 |
-
return output_path
|
| 35 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 36 |
|
| 37 |
-
|
| 38 |
-
|
| 39 |
-
|
| 40 |
-
|
| 41 |
-
|
| 42 |
-
return history
|
| 43 |
|
| 44 |
-
|
| 45 |
-
|
| 46 |
-
|
| 47 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 48 |
|
| 49 |
if __name__ == "__main__":
|
| 50 |
-
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Gradio demo per Shap-E (text-to-3D) – Hugging Face Spaces
|
| 3 |
+
Autore: tu
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import os
|
| 7 |
import gradio as gr
|
| 8 |
import torch
|
| 9 |
+
from shap_e.diffusion.sample import sample_latents
|
| 10 |
+
from shap_e.diffusion.gaussian_diffusion import diffusion_from_config
|
| 11 |
+
from shap_e.models.download import load_model, load_config
|
| 12 |
+
from shap_e.util.notebooks import decode_latent_mesh
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 13 |
|
| 14 |
+
# ---------- model loading (runs once at startup) ----------
# Prefer the GPU when one is visible; otherwise everything runs on CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# 'transmitter' decodes latents into renderable / meshable form,
# 'text300M' is the text-conditional latent diffusion model.
xm = load_model('transmitter', device=device)
model = load_model('text300M', device=device)
diffusion = diffusion_from_config(load_config('diffusion'))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 19 |
|
| 20 |
+
# ---------- generation logic ----------
def generate(prompt: str,
             guidance: float = 15.0,
             steps: int = 64) -> str:
    """Generate a 3D mesh from a text prompt and return the .ply file path.

    Args:
        prompt: Text description of the object to generate.
        guidance: Classifier-free guidance scale (higher = follows the
            prompt more closely, at the cost of diversity).
        steps: Number of Karras sampling steps.

    Returns:
        Filesystem path of the exported .ply mesh, suitable for gr.File.
    """
    import tempfile  # local import: only used here

    latents = sample_latents(
        batch_size=1,
        model=model,
        diffusion=diffusion,
        guidance_scale=guidance,
        model_kwargs=dict(texts=[prompt]),
        progress=True,
        clip_denoised=True,
        # fp16 is only valid on CUDA; forcing it on CPU breaks sampling.
        use_fp16=(device.type == 'cuda'),
        use_karras=True,
        karras_steps=int(steps),
        sigma_min=1e-3,
        sigma_max=160,
        s_churn=0,
    )

    # Decode the single latent into a triangle mesh.
    t = decode_latent_mesh(xm, latents[0]).tri_mesh()

    # Unique temp file per request so concurrent Gradio calls don't
    # overwrite each other's output (a fixed "output.ply" would).
    fd, out_path = tempfile.mkstemp(suffix='.ply')
    with os.fdopen(fd, 'wb') as f:
        t.write_ply(f)
    return out_path
|
|
|
| 46 |
|
| 47 |
+
# ---------- Gradio interface ----------
# Inputs are built separately so the Interface() call stays readable.
_inputs = [
    gr.Textbox(label="Prompt"),
    gr.Slider(1, 30, value=15, label="Guidance scale"),
    gr.Slider(32, 128, value=64, step=16, label="Karras steps"),
]

iface = gr.Interface(
    fn=generate,
    inputs=_inputs,
    outputs=gr.File(label="Scarica mesh .ply"),
    title="Shap-E Text-to-3D",
    description="Genera una mesh 3D da una descrizione testuale con Shap-E.",
    examples=[["a high–quality red sports car"],
              ["a cute low-poly cat"]],
    cache_examples=False,  # always generate fresh output, never serve a cached example
)
|
| 62 |
|
| 63 |
# Launch the app only when executed as a script (HF Spaces runs app.py directly).
if __name__ == "__main__":
    iface.launch()
|