# app.py — LLaMA-Mesh Gradio Space: llama.cpp (GGUF) chat + OBJ→GLB mesh viewer
import copy
import os
import tempfile

import gradio as gr
import numpy as np
import trimesh
from huggingface_hub import hf_hub_download
from llama_cpp import Llama
from trimesh.exchange.gltf import export_glb
# Initialize the Llama model from the Hugging Face Hub.
# Repo and file are overridable via the REPO_ID / MODEL_FILE env vars.
# Fix: llama.cpp defaults to a 512-token context window, but the UI advertises
# "up to 4096 tokens" and the max-tokens slider goes to 4096 — request the
# larger context explicitly so long mesh generations are not truncated.
model = Llama(
    model_path=hf_hub_download(
        repo_id=os.environ.get("REPO_ID", "Lyte/LLaMA-Mesh-Q4_K_M-GGUF"),
        filename=os.environ.get("MODEL_FILE", "llama-mesh-q4_k_m.gguf"),
    ),
    n_ctx=4096,
)
# HTML header rendered above the chat interface.
DESCRIPTION = '''
<div>
<h1 style="text-align: center;">LLaMA-Mesh</h1>
<p>LLaMA-Mesh: Unifying 3D Mesh Generation with Language Models.</p>
<p>Supports up to 4096 tokens. Run locally for 8k token context.</p>
<p>To generate another mesh, click "clear" and start a new dialog.</p>
</div>
'''

# Attribution footer rendered at the bottom of the page.
LICENSE = """
<p/>--- Built with Meta Llama 3.1 8B ---
"""

# Placeholder markup for an empty chat panel.
# NOTE(review): defined but never referenced in the visible code — presumably
# intended for gr.Chatbot(placeholder=...); confirm before removing.
PLACEHOLDER = """
<div style="padding: 30px; text-align: center;">
<h1 style="font-size: 28px; opacity: 0.55;">LLaMA-Mesh</h1>
<p style="font-size: 18px; opacity: 0.65;">Create 3D meshes by chatting.</p>
</div>
"""

# Page-level CSS passed to gr.Blocks(css=...).
css = """
h1 {
text-align: center;
}

#duplicate-button {
margin: auto;
color: white;
background: #1565c0;
border-radius: 100vh;
}
"""
def generate_text(message, history, max_tokens=2048, temperature=0.9, top_p=0.95):
    """Stream a chat completion from the GGUF model for gr.ChatInterface.

    Args:
        message: The latest user message.
        history: Prior turns supplied by gr.ChatInterface; assumed to be
            (user, assistant) tuple pairs — TODO confirm for the installed
            Gradio version.
        max_tokens: Cap on generated tokens (wired to the UI slider).
        temperature: Sampling temperature (wired to the UI slider).
        top_p: Nucleus-sampling threshold (wired to the UI slider).

    Yields:
        The assistant reply accumulated so far (Gradio streaming contract).
    """
    # Fix: the original ignored `history`, so every request silently lost all
    # previous conversation context. Rebuild the full message list here.
    messages = []
    for prior_user, prior_assistant in history or []:
        messages.append({"role": "user", "content": prior_user})
        if prior_assistant:
            messages.append({"role": "assistant", "content": prior_assistant})
    messages.append({"role": "user", "content": message})

    response = model.create_chat_completion(
        messages=messages,
        temperature=temperature,
        max_tokens=max_tokens,
        top_p=top_p,
        stream=True,
    )

    # Accumulate deltas and re-yield the growing reply. The per-chunk debug
    # print(delta) from the original has been removed (stdout noise per token).
    accumulated = ""
    for chunk in response:
        delta = chunk["choices"][0].get("delta", {})
        accumulated += delta.get("content", "")
        yield accumulated
def apply_gradient_color(mesh_text):
    """Color an OBJ mesh with a vertical red→blue gradient and export as GLB.

    Args:
        mesh_text: Mesh geometry in Wavefront OBJ text format.

    Returns:
        Path to a .glb file suitable for gr.Model3D.
    """
    # trimesh.load_mesh wants a real file, so round-trip through a temp .obj.
    temp_file = tempfile.NamedTemporaryFile(suffix=".obj", delete=False).name
    with open(temp_file, "w") as f:
        f.write(mesh_text)
    mesh = trimesh.load_mesh(temp_file, file_type='obj')

    # Map each vertex's height (y) onto [0, 1]: red at the top, blue at the
    # bottom. Fix: guard the flat-mesh case (max == min), which previously
    # divided by zero and produced NaN colors.
    vertices = mesh.vertices
    y_values = vertices[:, 1]
    y_min = y_values.min()
    y_span = y_values.max() - y_min
    if y_span > 0:
        y_normalized = (y_values - y_min) / y_span
    else:
        y_normalized = np.zeros_like(y_values)
    colors = np.zeros((len(vertices), 4))
    colors[:, 0] = y_normalized        # red channel grows with height
    colors[:, 2] = 1 - y_normalized    # blue channel shrinks with height
    colors[:, 3] = 1.0                 # fully opaque
    mesh.visual.vertex_colors = colors

    # Export alongside the temp OBJ so the GLB path is unique per call.
    glb_path = temp_file.replace(".obj", ".glb")
    with open(glb_path, "wb") as f:
        f.write(export_glb(mesh))
    # Fix: remove the intermediate OBJ (delete=False meant it leaked forever).
    os.remove(temp_file)
    return glb_path
# Assemble the Gradio UI: a streaming chat panel plus an OBJ→GLB mesh viewer.
with gr.Blocks(css=css) as demo:
    gr.Markdown(DESCRIPTION)
    # Chat panel backed by the streaming generator defined above.
    chatbot = gr.ChatInterface(
        generate_text,
        title="LLaMA-Mesh | GGUF Integration",
        description="Supports generating 3D meshes with LLaMA-GGUF.",
        examples=[
            ['Create a 3D model of a wooden hammer'],
            ['Create a 3D model of a pyramid in OBJ format'],
            ['Create a 3D model of a table.'],
        ],
        cache_examples=False,
        # These sliders map positionally onto generate_text's
        # (max_tokens, temperature, top_p) parameters.
        additional_inputs=[
            gr.Slider(minimum=128, maximum=4096, value=1024, step=1, label="Max new tokens"),
            gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
            gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
        ],
    )

    gr.Markdown("### 3D Mesh Visualization")
    # Paste-in OBJ text → gradient-colored GLB shown in a Model3D widget.
    mesh_input = gr.Textbox(
        label="3D Mesh Input",
        placeholder="Paste your 3D mesh in OBJ format here...",
        lines=5,
    )
    visualize_button = gr.Button("Visualize 3D Mesh")
    output_model = gr.Model3D(label="3D Mesh Visualization")
    visualize_button.click(
        fn=apply_gradient_color,
        inputs=[mesh_input],
        outputs=[output_model]
    )
    gr.Markdown(LICENSE)
# Launch the demo only when executed as a script (not on import).
if __name__ == "__main__":
    demo.launch()