Apple committed on
Commit
b498278
·
1 Parent(s): c02daf6

Initial CADFusion Space with Gradio

Browse files
Files changed (3) hide show
  1. README.md +5 -13
  2. app.py +22 -49
  3. requirements.txt +3 -7
README.md CHANGED
@@ -1,14 +1,6 @@
1
- ---
2
- title: CADFusionInterfaceProvided
3
- emoji: 📚
4
- colorFrom: green
5
- colorTo: gray
6
- sdk: gradio
7
- sdk_version: 5.44.0
8
- app_file: app.py
9
- pinned: false
10
- license: other
11
- short_description: Inference for Cad Fusion Model
12
- ---
13
 
14
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
1
+ # CADFusion Hugging Face Space
 
 
 
 
 
 
 
 
 
 
 
2
 
3
+ This Space runs [Microsoft’s CADFusion](https://github.com/microsoft/CADFusion) on Hugging Face.
4
+ It loads the pretrained checkpoint `microsoft/CADFusion` and wraps it with a Gradio UI.
5
+
6
+ ⚠️ **Note:** The full CADFusion model is large and a GPU is required. An A100 is recommended.
app.py CHANGED
@@ -1,61 +1,34 @@
1
- # app.py
2
  import gradio as gr
3
  import torch
4
- from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
5
 
6
- MODEL_ID = "microsoft/CADFusion"
 
 
7
 
8
- def load_model():
9
- print("Loading tokenizer...")
10
- tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, use_fast=True)
11
- print("Trying to load model in 4-bit (bitsandbytes)...")
12
- try:
13
- bnb_config = BitsAndBytesConfig(
14
- load_in_4bit=True,
15
- bnb_4bit_quant_type="nf4",
16
- bnb_4bit_use_double_quant=True,
17
- bnb_4bit_compute_dtype=torch.float16,
18
- )
19
- model = AutoModelForCausalLM.from_pretrained(
20
- MODEL_ID,
21
- quantization_config=bnb_config,
22
- device_map="auto",
23
- trust_remote_code=True,
24
- )
25
- print("Loaded in 4-bit")
26
- except Exception as e:
27
- print("4-bit load failed:", e)
28
- print("Falling back to fp16 (may require larger GPU RAM)...")
29
- model = AutoModelForCausalLM.from_pretrained(
30
- MODEL_ID,
31
- device_map="auto",
32
- torch_dtype=torch.float16,
33
- trust_remote_code=True,
34
- )
35
-
36
- model.eval()
37
- return tokenizer, model
38
 
39
- tokenizer, model = load_model()
40
-
41
- def generate(prompt, max_new_tokens=256):
42
- if prompt is None or prompt.strip() == "":
43
- return "Please provide a text description of the CAD model."
44
- inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
45
  with torch.no_grad():
46
- out = model.generate(**inputs, max_new_tokens=int(max_new_tokens), do_sample=False)
47
- text = tokenizer.decode(out[0], skip_special_tokens=True)
48
- return text
 
 
 
 
49
 
 
50
  with gr.Blocks() as demo:
51
- gr.Markdown("# CADFusion demo (microsoft/CADFusion)\nEnter a design description and hit Generate.")
52
- with gr.Row():
53
- prompt = gr.Textbox(lines=5, placeholder="e.g. 'a coffee mug with cylindrical body and curved handle'")
54
- tokens = gr.Slider(64, 1024, value=256, label="max_new_tokens")
55
- out = gr.Textbox(lines=20)
56
  btn = gr.Button("Generate")
57
- btn.click(fn=generate, inputs=[prompt, tokens], outputs=out)
58
 
59
  if __name__ == "__main__":
60
  demo.launch()
61
-
 
 
import gradio as gr
import torch
from cadfusion.models import CADFusionModel

# Load the pretrained CADFusion checkpoint once at import time so every
# Gradio request reuses the same in-memory model (HF weights; model code
# comes from the installed GitHub package).
print("Loading CADFusion model...")
if torch.cuda.is_available():
    device = "cuda"
else:
    device = "cpu"

model = CADFusionModel.from_pretrained("microsoft/CADFusion").to(device)
model.eval()
12
 
def generate(prompt):
    """Run CADFusion inference on a user prompt.

    Args:
        prompt: Free-text CAD design description from the UI textbox
            (may be ``None`` or blank when the user clicks too early).

    Returns:
        Whatever ``CADFusionModel.generate`` produces for the prompt, or a
        short instruction string when the prompt is empty.
    """
    # Guard against empty/whitespace-only prompts instead of invoking the model.
    if prompt is None or not prompt.strip():
        return "Please provide a text description of the CAD model."
    # Inference only — no autograd bookkeeping needed.
    with torch.no_grad():
        output = model.generate(
            prompt,
            max_new_tokens=256,
            # NOTE(review): if this follows the HF generate() convention,
            # temperature/top_p have no effect without do_sample=True — confirm
            # against CADFusionModel's API.
            temperature=0.7,
            top_p=0.9,
        )
    return output
23
 
# Gradio front-end: one prompt box, one output box, a single action button.
with gr.Blocks() as demo:
    gr.Markdown("## 🏗️ CADFusion Demo\nEnter a CAD prompt below:")
    prompt_box = gr.Textbox(label="Your CAD prompt")
    result_box = gr.Textbox(label="Model Output")

    generate_btn = gr.Button("Generate")
    generate_btn.click(fn=generate, inputs=prompt_box, outputs=result_box)

# Launch only when executed as a script (HF Spaces runs app.py directly).
if __name__ == "__main__":
    demo.launch()
 
requirements.txt CHANGED
@@ -1,9 +1,5 @@
1
- gradio>=3.30
2
- transformers>=4.32
3
- bitsandbytes
4
  torch
 
5
  accelerate
6
- safetensors
7
- sentencepiece
8
- huggingface-hub
9
-
 
 
 
 
1
  torch
2
+ transformers
3
  accelerate
4
+ gradio
5
+ git+https://github.com/microsoft/CADFusion.git