Csplk commited on
Commit
1ba6b3f
·
1 Parent(s): e8bb082

Testing generative daggr script output

Browse files
Files changed (4) hide show
  1. app.py +59 -65
  2. daggr_gen/daggr_gen.py +615 -0
  3. daggr_gen/sam_3d_obj.py +69 -0
  4. misc/s3do.py +223 -0
app.py CHANGED
@@ -1,75 +1,69 @@
1
- from daggr import FnNode, GradioNode, InferenceNode, Graph
2
- from daggr.state import get_daggr_files_dir
 
 
 
 
3
 
 
4
  import gradio as gr
5
- import numpy as np
6
- from PIL import Image
7
- from typing import Any
8
- import uuid
9
 
10
- def downscale_image_to_file(image: Any, scale: float = 0.25) -> str | None:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
11
 
12
- pil_img = Image.open(image)
13
- scale_f = max(0.05, min(1.0, float(scale)))
14
- w, h = pil_img.size
15
- new_w = max(1, int(w * scale_f))
16
- new_h = max(1, int(h * scale_f))
17
- resized = pil_img.resize((new_w, new_h), resample=Image.LANCZOS)
18
- out_path = get_daggr_files_dir() / f"{uuid.uuid4()}.png"
19
- resized.save(out_path)
20
- return str(out_path)
21
 
22
- background_remover = GradioNode(
23
- "merve/background-removal",
24
- api_name="/image",
25
- run_locally=True,
26
- inputs={
27
- "image": gr.Image(),
28
- },
29
- outputs={
30
- "original_image": None,
31
- "final_image": gr.Image(
32
- label="Final Image"
33
- ),
34
- },
35
- )
36
-
37
- downscaler = FnNode(
38
- downscale_image_to_file,
39
- name="Downscale image for Inference",
40
- inputs={
41
- "image": background_remover.final_image,
42
- "scale": gr.Slider(
43
- label="Downscale factor",
44
- minimum=0.25,
45
- maximum=0.75,
46
- step=0.05,
47
- value=0.25,
48
- ),
49
- },
50
- outputs={
51
- "image": gr.Image(label="Downscaled Image", type="filepath"),
52
- },
53
- )
54
 
55
- trellis_3d = GradioNode(
56
- "microsoft/TRELLIS.2",
57
- api_name="/image_to_3d",
58
- inputs={
59
- "image": downscaler.image,
60
- "ss_guidance_strength": 7.5,
61
- "ss_sampling_steps": 12,
62
- },
63
- outputs={
64
- "glb": gr.HTML(label="3D Asset (GLB preview)"),
65
- },
66
- )
67
 
68
- graph = Graph(
69
- name="Image to 3D Asset Pipeline",
70
- nodes=[background_remover, downscaler, trellis_3d],
71
  )
72
 
73
-
74
  if __name__ == "__main__":
75
- graph.launch()
 
 
 
 
 
 
 
1
'''
Auto-generated Daggr Node
Space: HorizonRobotics/EmbodiedGen-Image-to-3D
API: /image_to_3d
Endpoints available: /start_session, /lambda, /lambda_1, /preprocess_image_fn, /lambda_2...
'''

from daggr import GradioNode
import gradio as gr
from daggr import Graph

# === WIRING GUIDE for embodied_gen_image_to_3_d ===
# Inputs (what this node expects):
#   - image: filepath
#     Wire: embodied_gen_image_to_3_d.inputs['image'] = upstream_node.image
#   - seed: float
#     Wire: embodied_gen_image_to_3_d.inputs['seed'] = upstream_node.seed
#   - ss_sampling_steps: float
#     Wire: embodied_gen_image_to_3_d.inputs['ss_sampling_steps'] = upstream_node.ss_sampling_steps
#   - slat_sampling_steps: float
#     Wire: embodied_gen_image_to_3_d.inputs['slat_sampling_steps'] = upstream_node.slat_sampling_steps
#   - raw_image_cache: filepath
#     Wire: embodied_gen_image_to_3_d.inputs['raw_image_cache'] = upstream_node.raw_image_cache
#   - ss_guidance_strength: float
#     Wire: embodied_gen_image_to_3_d.inputs['ss_guidance_strength'] = upstream_node.ss_guidance_strength
#   - slat_guidance_strength: float
#     Wire: embodied_gen_image_to_3_d.inputs['slat_guidance_strength'] = upstream_node.slat_guidance_strength
#   - sam_image: filepath
#     Wire: embodied_gen_image_to_3_d.inputs['sam_image'] = upstream_node.sam_image
#
# Outputs (what this node produces):
#   - generated_3d_asset: filepath
#     Access: embodied_gen_image_to_3_d.generated_3d_asset
#     Usage: next_node.inputs['generated_3d_asset'] = embodied_gen_image_to_3_d.generated_3d_asset
# ===========================================

embodied_gen_image_to_3_d = GradioNode(
    space_or_url="HorizonRobotics/EmbodiedGen-Image-to-3D",  # Space ID
    api_name="/image_to_3d",  # API endpoint

    # FIX(review): the generator emitted each dict comma AFTER the trailing
    # "#" comment, so the comma was commented out and this dict was a
    # SyntaxError. Commas now precede the comments.
    inputs={
        "image": gr.File(label="Input Image"),  # UI input - connect to upstream node or provide value
        "seed": gr.Number(label="Seed"),  # UI input - connect to upstream node or provide value
        "ss_sampling_steps": gr.Number(label="Sampling Steps"),  # UI input - connect to upstream node or provide value
        "slat_sampling_steps": gr.Number(label="Sampling Steps"),  # UI input - connect to upstream node or provide value
        "raw_image_cache": gr.File(label="parameter_7"),  # UI input - connect to upstream node or provide value
        "ss_guidance_strength": gr.Number(label="Guidance Strength"),  # UI input - connect to upstream node or provide value
        "slat_guidance_strength": gr.Number(label="Guidance Strength"),  # UI input - connect to upstream node or provide value
        "sam_image": gr.File(label="SAM Seg Image"),  # UI input - connect to upstream node or provide value
    },

    outputs={
        "generated_3d_asset": gr.File(label="Generated 3D Asset"),  # Display in node card
        # Use None to hide outputs: "hidden_output": None
    },

    # Optional: Transform outputs before downstream flow
    # postprocess=lambda outputs, final: final,
)

# Example usage
if __name__ == "__main__":
    graph = Graph(
        name="EmbodiedGen-Image-to-3D Workflow",
        nodes=[embodied_gen_image_to_3_d]
    )
    graph.launch()

    # Or run with: daggr this_file.py
daggr_gen/daggr_gen.py ADDED
@@ -0,0 +1,615 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ HF Space to Daggr Node Generator
3
+ ================================
4
+
5
+ Automatically generate daggr nodes from Hugging Face Gradio Spaces.
6
+ Extensible architecture supporting future InferenceNode and FnNode generators.
7
+
8
+ Usage:
9
+ python daggr_generator.py "username/space-name" [--api-name /predict] [--output node.py]
10
+ """
11
+
12
+ import argparse
13
+ import json
14
+ import re
15
+ import sys
16
+ from abc import ABC, abstractmethod
17
+ from dataclasses import dataclass, field
18
+ from pathlib import Path
19
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
20
+ from urllib.parse import urlparse
21
+
22
+ try:
23
+ from gradio_client import Client, handle_file
24
+ import gradio as gr
25
+ except ImportError:
26
+ print("Error: Required packages not installed.")
27
+ print("Run: pip install gradio gradio-client daggr")
28
+ sys.exit(1)
29
+
30
+
31
@dataclass
class PortSchema:
    """Represents an input or output port schema for a Space endpoint."""
    name: str                              # parameter / return name
    python_type: str                       # normalized type key (see type_mapping below)
    component_type: Optional[str] = None   # raw gradio component type, if known
    label: Optional[str] = None            # UI label from the API schema
    default: Any = None                    # default value from the API schema
    description: Optional[str] = None      # free-text description
    choices: Optional[List] = None         # enumerated choices (dropdown/radio)

    def to_gradio_component(self) -> str:
        """Convert to gradio component code string, e.g. 'gr.Number(label="Seed")'.

        Always returns a call expression (with parentheses), never a bare
        class reference, so the generated code instantiates the component.
        """
        # Map normalized python_type keys to gradio component constructors.
        type_mapping = {
            "str": "gr.Textbox",
            "int": "gr.Number",
            "float": "gr.Number",
            "bool": "gr.Checkbox",
            "filepath": "gr.File",
            "file": "gr.File",
            "image": "gr.Image",
            "audio": "gr.Audio",
            "video": "gr.Video",
            "dict": "gr.JSON",
            "list": "gr.JSON",
            "dataframe": "gr.Dataframe",
            "model3d": "gr.Model3D",
            "downloadbutton": "gr.File",  # Maps to File for download
            "annotatedimage": "gr.AnnotatedImage",
        }

        comp_base = type_mapping.get(self.python_type, "gr.Textbox")
        params = []

        if self.label:
            params.append(f'label="{self.label}"')
        # Empty string is treated the same as "no default".
        if self.default is not None and self.default != "":
            if isinstance(self.default, str):
                params.append(f'value="{self.default}"')
            else:
                params.append(f'value={self.default}')
        if self.choices:
            params.append(f'choices={self.choices}')

        # Long text defaults get a multi-line textbox.
        if comp_base == "gr.Textbox" and self.python_type == "str":
            if len(str(self.default or "")) > 50:
                params.append("lines=3")

        # FIX: previously returned the bare class name (e.g. "gr.Textbox")
        # when there were no params, so the generated code referenced the
        # component class instead of instantiating it. Always emit "(...)".
        return f"{comp_base}({', '.join(params)})"
80
+
81
+
82
@dataclass
class APIEndpoint:
    """Represents a Gradio Space API endpoint (one named route)."""
    name: str                                            # function name reported by the API ("fn"), falls back to route
    route: str                                           # API route, e.g. "/image_to_3d"
    inputs: List[PortSchema] = field(default_factory=list)   # ordered input ports
    outputs: List[PortSchema] = field(default_factory=list)  # ordered output ports
    description: Optional[str] = None                    # endpoint description from the API, may be empty
90
+
91
+
92
@dataclass
class NodeTemplate:
    """Generated node configuration: everything needed to assemble a script."""
    space_id: str                      # "user/space-name" HF Space ID
    endpoint: APIEndpoint              # the selected endpoint this node wraps
    all_endpoints: List[str]           # routes of every endpoint found on the Space
    imports: List[str]                 # import lines the generated code requires
    node_code: str                     # rendered GradioNode(...) source snippet
    wiring_placeholders: List[str]     # comment lines documenting input/output wiring
    metadata: Dict = field(default_factory=dict)  # generator bookkeeping (e.g. generator name)
102
+
103
+
104
class NodeGenerator(ABC):
    """Abstract base class for daggr node generators.

    Concrete subclasses (GradioNodeGenerator, and the InferenceNode/FnNode
    placeholders) are registered in DaggrGenerator.generators.
    """

    @abstractmethod
    def can_handle(self, space_info: Dict) -> bool:
        """Check if this generator can handle the given space."""
        pass

    @abstractmethod
    def generate(self, *args, **kwargs) -> NodeTemplate:
        """Generate a NodeTemplate; signature varies per concrete generator."""
        pass
116
+
117
+
118
class GradioNodeGenerator(NodeGenerator):
    """
    Generator for daggr.GradioNode from HF Space URLs.

    Introspects the Space's API via gradio_client and maps each parameter /
    return value to a PortSchema, then renders a ready-to-edit GradioNode
    definition.
    """

    # Mapping of gradio component types to normalized Python type keys
    # (the keys PortSchema.to_gradio_component understands).
    COMPONENT_TYPE_MAP = {
        "textbox": "str",
        "number": "float",
        "slider": "float",
        "checkbox": "bool",
        "checkboxgroup": "list",
        "radio": "str",
        "dropdown": "str",
        "image": "filepath",
        "file": "filepath",
        "audio": "filepath",
        "video": "filepath",
        "dataframe": "dataframe",
        "json": "dict",
        "gallery": "list",
        "chatbot": "list",
        "code": "str",
        "colorpicker": "str",
        "timer": "float",
        "model3d": "model3d",
        "downloadbutton": "filepath",
        "annotatedimage": "annotatedimage",
    }

    def can_handle(self, space_info: Dict) -> bool:
        """Check if the space uses the Gradio SDK (and thus exposes an API)."""
        return space_info.get("sdk") == "gradio"

    def _extract_space_id(self, url_or_id: str) -> str:
        """Extract 'user/space' ID from a URL, or return the input if already an ID."""
        if url_or_id.startswith("http"):
            parsed = urlparse(url_or_id)
            # Handle https://huggingface.co/spaces/username/space-name
            if "huggingface.co" in parsed.netloc:
                path_parts = parsed.path.strip("/").split("/")
                if len(path_parts) >= 3 and path_parts[0] == "spaces":
                    return "/".join(path_parts[1:3])
            # Handle direct space URL (e.g. user-space.hf.space/...)
            return parsed.path.strip("/").split("/")[0]
        return url_or_id

    def _normalize_type(self, type_val) -> str:
        """Normalize a python_type value from the API (string or dict form)."""
        if type_val is None:
            return "str"
        if isinstance(type_val, str):
            return type_val.lower()
        if isinstance(type_val, dict):
            # Handle structured types like {"type": "filepath"}; anything
            # we don't recognize degrades to "str".
            if "type" in type_val:
                if type_val["type"] == "filepath":
                    return "filepath"
                elif type_val["type"] == "integer":
                    return "int"
                elif type_val["type"] == "float":
                    return "float"
                elif type_val["type"] == "boolean":
                    return "bool"
            return "str"
        return "str"

    # FIX: was annotated "-> Tuple[str, str]" but has always returned a
    # single normalized type string.
    def _parse_type_info(self, param: Dict) -> str:
        """Extract the normalized python type, unwrapping Union/Optional."""
        raw_type = param.get("python_type")
        python_type = self._normalize_type(raw_type)

        # For Union types, pick the first non-None member.
        if isinstance(raw_type, dict) and raw_type.get("type") == "union":
            choices = raw_type.get("choices", [])
            non_none = [c for c in choices if self._normalize_type(c) != "none"]
            if non_none:
                python_type = self._normalize_type(non_none[0])

        return python_type

    def _inspect_endpoints(self, client: Client) -> List[APIEndpoint]:
        """Extract all named API endpoints from a connected Gradio Client."""
        endpoints = []

        api_info = client.view_api(return_format="dict")

        if not api_info or "named_endpoints" not in api_info:
            return endpoints

        for route, info in api_info["named_endpoints"].items():
            endpoint = APIEndpoint(
                name=info.get("fn", route),
                route=route,
                description=info.get("description", "")
            )

            # Parse inputs
            for param in info.get("parameters", []):
                comp_type = self._detect_component_type(param)
                python_type = self._parse_type_info(param)

                port = PortSchema(
                    name=param.get("parameter_name", "input"),
                    python_type=self.COMPONENT_TYPE_MAP.get(comp_type, python_type),
                    component_type=comp_type,
                    label=param.get("label"),
                    default=param.get("default"),
                    description=param.get("description"),
                    choices=param.get("choices")
                )
                endpoint.inputs.append(port)

            # Parse outputs
            returns = info.get("returns", [])
            for i, ret in enumerate(returns):
                comp_type = self._detect_component_type(ret)
                python_type = self._parse_type_info(ret)

                # Derive a meaningful, valid Python attribute name.
                ret_name = ret.get("label", "")
                if not ret_name:
                    if len(returns) == 1:
                        ret_name = "result"
                    else:
                        ret_name = f"output_{i}"
                ret_name = re.sub(r'[^a-zA-Z0-9_]', '_', ret_name).lower()
                if ret_name[0].isdigit():
                    ret_name = "out_" + ret_name

                port = PortSchema(
                    name=ret_name,
                    python_type=self.COMPONENT_TYPE_MAP.get(comp_type, python_type),
                    component_type=comp_type,
                    label=ret.get("label", f"Output {i+1}"),
                    description=ret.get("description")
                )
                endpoint.outputs.append(port)

            endpoints.append(endpoint)

        return endpoints

    def _detect_component_type(self, param: Dict) -> str:
        """Detect the Gradio component type from parameter info (best effort)."""
        label = (param.get("label", "") or "").lower()

        # Check explicit component field first — most reliable.
        component = param.get("component", "")
        if component and isinstance(component, str):
            return component.lower()

        # Heuristic: classify file-like parameters by words in the label.
        if "path" in label or "file" in label:
            if "image" in label:
                return "image"
            elif "audio" in label:
                return "audio"
            elif "video" in label:
                return "video"
            elif "3d" in label or "model" in label or "mesh" in label:
                return "model3d"
            return "file"

        # Fall back to hints in the python type.
        python_type = self._parse_type_info(param)
        if "image" in python_type or "pil" in python_type:
            return "image"
        elif "dataframe" in python_type:
            return "dataframe"
        elif "filepath" in python_type:
            if "image" in label:
                return "image"
            return "file"

        return "textbox"

    def generate(
        self,
        space_url: str,
        api_name: Optional[str] = None,
        node_name: Optional[str] = None
    ) -> NodeTemplate:
        """
        Generate GradioNode template from space URL.

        Args:
            space_url: HF Space URL or ID (e.g., 'black-forest-labs/FLUX.1-schnell')
            api_name: Specific API endpoint to use (auto-selected if None)
            node_name: Custom variable name for the node (auto-generated if None)

        Raises:
            ValueError: if the space exposes no endpoints, or api_name is unknown.
        """
        space_id = self._extract_space_id(space_url)
        var_name = node_name or self._to_snake_case(space_id.split("/")[-1])

        # Connect and inspect
        print(f"🔍 Inspecting space: {space_id}")
        client = Client(space_id)

        endpoints = self._inspect_endpoints(client)
        if not endpoints:
            raise ValueError(f"No API endpoints found for space: {space_id}")

        # Select endpoint
        if api_name:
            selected = next((e for e in endpoints if e.route == api_name), None)
            if not selected:
                available = ", ".join([e.route for e in endpoints])
                raise ValueError(f"API endpoint '{api_name}' not found. Available: {available}")
        else:
            # Find best endpoint (one with inputs and outputs, not lambda)
            candidates = [e for e in endpoints if e.inputs or e.outputs]
            candidates = [e for e in candidates if not e.route.startswith("/lambda")]
            selected = candidates[0] if candidates else endpoints[0]

        print(f"✓ Found {len(endpoints)} endpoint(s), using: {selected.route}")
        if selected.inputs:
            print(f"  Inputs: {len(selected.inputs)} ({', '.join([i.name for i in selected.inputs[:3]])}{'...' if len(selected.inputs) > 3 else ''})")
        if selected.outputs:
            print(f"  Outputs: {len(selected.outputs)} ({', '.join([o.name for o in selected.outputs[:3]])}{'...' if len(selected.outputs) > 3 else ''})")

        # Build wiring placeholders
        wiring = self._generate_wiring_docs(selected, var_name)

        # Generate code
        code = self._render_node_code(space_id, var_name, selected)

        return NodeTemplate(
            space_id=space_id,
            endpoint=selected,
            all_endpoints=[e.route for e in endpoints],
            imports=["from daggr import GradioNode", "import gradio as gr"],
            node_code=code,
            wiring_placeholders=wiring,
            metadata={"generator": "GradioNodeGenerator", "client_kwargs": {}}
        )

    def _to_snake_case(self, name: str) -> str:
        """Convert a space name to a valid snake_case Python identifier."""
        clean = re.sub(r'[^a-zA-Z0-9]', '_', name)
        clean = re.sub(r'([A-Z])', r'_\1', clean).lower()
        clean = re.sub(r'_+', '_', clean).strip('_')
        return clean or "node"

    def _generate_wiring_docs(self, endpoint: APIEndpoint, var_name: str) -> List[str]:
        """Generate comment lines documenting how to wire inputs/outputs."""
        docs = []
        docs.append(f"# === WIRING GUIDE for {var_name} ===")

        if endpoint.inputs:
            docs.append("# Inputs (what this node expects):")
            for inp in endpoint.inputs:
                example = f"upstream_node.{inp.name}" if inp.python_type != "str" else f'"{inp.default or "value"}"'
                default_info = f" [default: {inp.default}]" if inp.default is not None else ""
                docs.append(f"#   - {inp.name}: {inp.python_type}{default_info}")
                docs.append(f"#     Wire: {var_name}.inputs['{inp.name}'] = {example}")
        else:
            docs.append("# Inputs: None (no parameters required)")

        if endpoint.outputs:
            docs.append("#")
            docs.append("# Outputs (what this node produces):")
            for out in endpoint.outputs:
                docs.append(f"#   - {out.name}: {out.python_type}")
                docs.append(f"#     Access: {var_name}.{out.name}")
                docs.append(f"#     Usage: next_node.inputs['{out.name}'] = {var_name}.{out.name}")
        else:
            docs.append("# Outputs: None")

        docs.append("# ===========================================")
        return docs

    def _render_node_code(self, space_id: str, var_name: str, endpoint: APIEndpoint) -> str:
        """Render the Python source for the GradioNode definition.

        The rendered code must itself be valid Python: every dict entry's
        comma is emitted BEFORE any trailing "#" comment.
        """
        lines = []

        lines.append(f'{var_name} = GradioNode(')
        lines.append(f'    space_or_url="{space_id}",  # Space ID')
        lines.append(f'    api_name="{endpoint.route}",  # API endpoint')

        if endpoint.description:
            lines.append(f'    # Description: {endpoint.description}')
        lines.append('')

        # Inputs section
        if endpoint.inputs:
            lines.append('    inputs={')
            for inp in endpoint.inputs:
                # FIX: the value and its trailing comment used to be one
                # string with the comma appended AFTER the comment, which
                # commented the comma out and produced a SyntaxError in the
                # generated file. Keep the value and comments separate so
                # the comma lands before any "#".
                if inp.default is not None:
                    if isinstance(inp.default, str):
                        value = f'"{inp.default}"'
                    else:
                        value = f"{inp.default}"
                    trailing = "  # Fixed value"
                else:
                    # No default: suggest a gradio component for UI input.
                    value = inp.to_gradio_component()
                    trailing = "  # UI input - connect to upstream node or provide value"

                # Clean up multiline descriptions into a short suffix.
                comment = ""
                if inp.description:
                    desc = inp.description.replace(chr(10), " ")[:50]
                    comment = f" # {desc}"

                lines.append(f'        "{inp.name}": {value},{trailing}{comment}')

            lines.append('    },')
        else:
            lines.append('    inputs={},  # No inputs required')

        lines.append('')

        # Outputs section
        if endpoint.outputs:
            lines.append('    outputs={')
            for out in endpoint.outputs:
                comp = out.to_gradio_component()
                lines.append(f'        "{out.name}": {comp},  # Display in node card')
            lines.append('        # Use None to hide outputs: "hidden_output": None')
            lines.append('    },')
        else:
            lines.append('    outputs={},  # No outputs')

        # Optional flags
        lines.append('')
        lines.append('    # Optional: Transform outputs before downstream flow')
        lines.append('    # postprocess=lambda outputs, final: final,')

        lines.append(')')

        return "\n".join(lines)
458
+
459
+
460
class InferenceNodeGenerator(NodeGenerator):
    """
    Future generator for daggr.InferenceNode (HF Inference Providers).
    Placeholder for extension — registered but inert.
    """

    def can_handle(self, space_info: Dict) -> bool:
        # Never selected until implemented.
        return False

    def generate(self, model_id: str, **kwargs) -> NodeTemplate:
        raise NotImplementedError("InferenceNode generator coming in next revision")
471
+
472
+
473
class FnNodeGenerator(NodeGenerator):
    """
    Future generator for daggr.FnNode (custom Python functions).
    Placeholder for extension — registered but inert.
    """

    def can_handle(self, space_info: Dict) -> bool:
        # Never selected until implemented.
        return False

    def generate(self, func: Callable, **kwargs) -> NodeTemplate:
        raise NotImplementedError("FnNode generator coming in next revision")
484
+
485
+
486
class DaggrGenerator:
    """
    Main orchestrator for generating daggr workflows.
    Supports multiple node types and provides extensible registry.
    """

    def __init__(self):
        # Registry of node generators; only "gradio" is functional today —
        # the other two raise NotImplementedError.
        self.generators: Dict[str, NodeGenerator] = {
            "gradio": GradioNodeGenerator(),
            "inference": InferenceNodeGenerator(),
            "function": FnNodeGenerator(),
        }

    def generate_from_space(
        self,
        space_url: str,
        output_file: Optional[str] = None,
        api_name: Optional[str] = None,
        node_name: Optional[str] = None,
        include_boilerplate: bool = True
    ) -> str:
        """
        Generate daggr node from HF Space.

        Args:
            space_url: HF Space URL or ID
            output_file: Optional file to write (prints to stdout if None)
            api_name: Specific API endpoint to use
            node_name: Custom variable name for node
            include_boilerplate: Include imports and example usage

        Returns:
            Generated Python code as string

        Raises:
            Re-raises any exception from generation after printing it.
        """
        # NOTE(review): always uses the "gradio" generator; can_handle() is
        # not consulted here yet.
        generator = self.generators["gradio"]

        try:
            template = generator.generate(space_url, api_name, node_name)
            code = self._assemble_code(template, include_boilerplate)

            if output_file:
                Path(output_file).write_text(code)
                print(f"\nGenerated node written to: {output_file}")

            return code

        except Exception as e:
            # Surface the error on stdout, then propagate to the caller.
            print(f"\nError generating node: {e}")
            raise

    def _assemble_code(self, template: NodeTemplate, include_boilerplate: bool) -> str:
        """Assemble the final Python script from a NodeTemplate.

        Layout: module docstring + imports (boilerplate), wiring-guide
        comments, the node definition, then an example __main__ Graph block.
        """
        lines = []

        if include_boilerplate:
            lines.append("'''")
            lines.append(f'Auto-generated Daggr Node')
            lines.append(f'Space: {template.space_id}')
            lines.append(f'API: {template.endpoint.route}')
            # Only the first five endpoints are listed, with "..." if more exist.
            lines.append(f'Endpoints available: {", ".join(template.all_endpoints[:5])}{"..." if len(template.all_endpoints) > 5 else ""}')
            lines.append("'''")
            lines.append('')
            lines.extend(template.imports)
            lines.append('from daggr import Graph')
            lines.append('')

        # Add wiring documentation
        lines.extend(template.wiring_placeholders)
        lines.append('')

        # Add the node code
        lines.append(template.node_code)
        lines.append('')

        if include_boilerplate:
            # Extract variable name from first line of node code
            # (assumes the node code starts with "<var_name> = ...").
            var_line = template.node_code.split('\n')[0]
            var_name = var_line.split('=')[0].strip()

            # Add example graph setup
            space_short = template.space_id.split("/")[-1]
            lines.append(f'# Example usage')
            lines.append(f'if __name__ == "__main__":')
            lines.append(f'    graph = Graph(')
            lines.append(f'        name="{space_short} Workflow",')
            lines.append(f'        nodes=[{var_name}]')
            lines.append(f'    )')
            lines.append(f'    graph.launch()')
            lines.append('')
            lines.append(f'    # Or run with: daggr this_file.py')

        return "\n".join(lines)

    def register_generator(self, name: str, generator: NodeGenerator):
        """Register a new generator for extensibility."""
        self.generators[name] = generator
        print(f"Registered new generator: {name}")
583
+
584
+
585
def main():
    """CLI entry point: parse arguments and emit the generated node script."""
    arg_parser = argparse.ArgumentParser(
        description="Generate daggr nodes from Hugging Face Gradio Spaces"
    )
    arg_parser.add_argument("space", help="HF Space URL or ID (e.g., 'user/space-name')")
    arg_parser.add_argument("--api-name", "-a", help="Specific API endpoint (default: first substantial endpoint)")
    arg_parser.add_argument("--output", "-o", help="Output Python file (default: stdout)")
    arg_parser.add_argument("--node-name", "-n", help="Variable name for node (default: auto)")
    arg_parser.add_argument("--no-boilerplate", action="store_true",
                            help="Generate only node definition")
    opts = arg_parser.parse_args()

    generated = DaggrGenerator().generate_from_space(
        opts.space,
        output_file=opts.output,
        api_name=opts.api_name,
        node_name=opts.node_name,
        include_boilerplate=not opts.no_boilerplate,
    )

    # When no output file was requested, dump the code to stdout instead.
    if not opts.output:
        banner = "=" * 60
        print("\n" + banner)
        print("GENERATED DAGGR NODE")
        print(banner)
        print(generated)


if __name__ == "__main__":
    main()
daggr_gen/sam_3d_obj.py ADDED
@@ -0,0 +1,69 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
'''
Auto-generated Daggr Node
Space: HorizonRobotics/EmbodiedGen-Image-to-3D
API: /image_to_3d
Endpoints available: /start_session, /lambda, /lambda_1, /preprocess_image_fn, /lambda_2...
'''

from daggr import GradioNode
import gradio as gr
from daggr import Graph

# === WIRING GUIDE for embodied_gen_image_to_3_d ===
# Inputs (what this node expects):
#   - image: filepath
#     Wire: embodied_gen_image_to_3_d.inputs['image'] = upstream_node.image
#   - seed: float
#     Wire: embodied_gen_image_to_3_d.inputs['seed'] = upstream_node.seed
#   - ss_sampling_steps: float
#     Wire: embodied_gen_image_to_3_d.inputs['ss_sampling_steps'] = upstream_node.ss_sampling_steps
#   - slat_sampling_steps: float
#     Wire: embodied_gen_image_to_3_d.inputs['slat_sampling_steps'] = upstream_node.slat_sampling_steps
#   - raw_image_cache: filepath
#     Wire: embodied_gen_image_to_3_d.inputs['raw_image_cache'] = upstream_node.raw_image_cache
#   - ss_guidance_strength: float
#     Wire: embodied_gen_image_to_3_d.inputs['ss_guidance_strength'] = upstream_node.ss_guidance_strength
#   - slat_guidance_strength: float
#     Wire: embodied_gen_image_to_3_d.inputs['slat_guidance_strength'] = upstream_node.slat_guidance_strength
#   - sam_image: filepath
#     Wire: embodied_gen_image_to_3_d.inputs['sam_image'] = upstream_node.sam_image
#
# Outputs (what this node produces):
#   - generated_3d_asset: filepath
#     Access: embodied_gen_image_to_3_d.generated_3d_asset
#     Usage: next_node.inputs['generated_3d_asset'] = embodied_gen_image_to_3_d.generated_3d_asset
# ===========================================

embodied_gen_image_to_3_d = GradioNode(
    space_or_url="HorizonRobotics/EmbodiedGen-Image-to-3D",  # Space ID
    api_name="/image_to_3d",  # API endpoint

    # FIX(review): the generator emitted each dict comma AFTER the trailing
    # "#" comment, so the comma was commented out and this dict was a
    # SyntaxError. Commas now precede the comments.
    inputs={
        "image": gr.File(label="Input Image"),  # UI input - connect to upstream node or provide value
        "seed": gr.Number(label="Seed"),  # UI input - connect to upstream node or provide value
        "ss_sampling_steps": gr.Number(label="Sampling Steps"),  # UI input - connect to upstream node or provide value
        "slat_sampling_steps": gr.Number(label="Sampling Steps"),  # UI input - connect to upstream node or provide value
        "raw_image_cache": gr.File(label="parameter_7"),  # UI input - connect to upstream node or provide value
        "ss_guidance_strength": gr.Number(label="Guidance Strength"),  # UI input - connect to upstream node or provide value
        "slat_guidance_strength": gr.Number(label="Guidance Strength"),  # UI input - connect to upstream node or provide value
        "sam_image": gr.File(label="SAM Seg Image"),  # UI input - connect to upstream node or provide value
    },

    outputs={
        "generated_3d_asset": gr.File(label="Generated 3D Asset"),  # Display in node card
        # Use None to hide outputs: "hidden_output": None
    },

    # Optional: Transform outputs before downstream flow
    # postprocess=lambda outputs, final: final,
)

# Example usage
if __name__ == "__main__":
    graph = Graph(
        name="EmbodiedGen-Image-to-3D Workflow",
        nodes=[embodied_gen_image_to_3_d]
    )
    graph.launch()

    # Or run with: daggr this_file.py
misc/s3do.py ADDED
@@ -0,0 +1,223 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from daggr import FnNode, GradioNode, InferenceNode, Graph
2
+ from daggr.state import get_daggr_files_dir
3
+
4
+ import gradio as gr
5
+ import numpy as np
6
+ from PIL import Image
7
+ from typing import Any
8
+ import uuid
9
+
10
+ def downscale_image_to_file(image: Any, scale: float = 0.25) -> str | None:
11
+
12
+ pil_img = Image.open(image)
13
+ scale_f = max(0.05, min(1.0, float(scale)))
14
+ w, h = pil_img.size
15
+ new_w = max(1, int(w * scale_f))
16
+ new_h = max(1, int(h * scale_f))
17
+ resized = pil_img.resize((new_w, new_h), resample=Image.LANCZOS)
18
+ out_path = get_daggr_files_dir() / f"{uuid.uuid4()}.png"
19
+ resized.save(out_path)
20
+ return str(out_path)
21
+
22
# Node 1: remove the image background via the merve/background-removal Space,
# executed locally in-process (run_locally=True).
background_remover = GradioNode(
    "merve/background-removal",
    api_name="/image",
    run_locally=True,
    inputs={
        # User-supplied source image.
        "image": gr.Image(),
    },
    outputs={
        # None hides this output from the node card.
        "original_image": None,
        # Background-removed result; consumed by the downscaler node below.
        "final_image": gr.Image(
            label="Final Image"
        ),
    },
)
36
+
37
# Node 2: shrink the background-removed image before it is sent to the
# 3D-generation inference node (smaller upload, faster sampling).
downscaler = FnNode(
    downscale_image_to_file,
    name="Downscale image for Inference",
    inputs={
        # Wired to the background-removal node's visible output.
        "image": background_remover.final_image,
        # User-tunable factor; the function itself clamps to [0.05, 1.0].
        "scale": gr.Slider(
            label="Downscale factor",
            minimum=0.25,
            maximum=0.75,
            step=0.05,
            value=0.25,
        ),
    },
    outputs={
        # Filepath output so downstream Gradio clients upload the file.
        "image": gr.Image(label="Downscaled Image", type="filepath"),
    },
)
54
+
55
# Node 3: convert the downscaled image into a 3D asset with TRELLIS.2.
trellis_3d = GradioNode(
    "microsoft/TRELLIS.2",
    api_name="/image_to_3d",
    inputs={
        "image": downscaler.image,
        # Fixed sampling parameters — literals, so not exposed in the UI.
        "ss_guidance_strength": 7.5,
        "ss_sampling_steps": 12,
    },
    outputs={
        "glb": gr.HTML(label="3D Asset (GLB preview)"),
    },
)
67
+
68
# Stub node for EmbodiedGen's /extract_3d_representations_v3 endpoint.
# It is not yet wired and is deliberately absent from the graph below.
sam3d_obj = GradioNode(
    "HorizonRobotics/EmbodiedGen-Image-to-3D",
    api_name="/extract_3d_representations_v3",
    # FIX: every other node in this file passes `inputs` as a dict of
    # name -> source; the empty *list* here broke that convention.
    # TODO: wire the endpoint's parameters (see the recorded transcript
    # below: enable_delight, texture_size) before adding it to the graph.
    inputs={},
)
75
+
76
# Assemble the three wired nodes into the pipeline.
# NOTE(review): `sam3d_obj` is defined above but left out of this node list —
# presumably because its inputs are still unwired; confirm before shipping.
graph = Graph(
    name="Image to 3D Asset Pipeline",
    nodes=[background_remover, downscaler, trellis_3d],
)
80
+
81
# ---------------------------------------------------------------------------
# The triple-quoted blocks below are recorded gradio_client API transcripts,
# kept as reference material for wiring future nodes. They are plain string
# literals and are never executed.
# NOTE(review): the transcripts are not valid Python as-is (`true` instead of
# `True`, `handle_file` used without import) — they would need fixing before
# being uncommented.
# ---------------------------------------------------------------------------

# Transcript 1: EmbodiedGen — 3D-representation extraction + URDF export.
'''
from gradio_client import Client, file

client = Client("HorizonRobotics/EmbodiedGen-Image-to-3D")

client.predict(
    enable_delight=None,
    texture_size=true,
    api_name="/extract_3d_representations_v3"
)

client.predict(
    api_name="/lambda_4"
)

client.predict(
    gs_path="/home/user/app/sessions/imageto3d/29hqqc189st/sample_gs_aligned.ply",
    mesh_obj_path="/home/user/app/sessions/imageto3d/29hqqc189st/sample.obj",
    asset_cat_text="",
    height_range_text="",
    mass_range_text="",
    asset_version_text="",
    api_name="/extract_urdf"
)

client.predict(
    api_name="/lambda_5"
)
'''

# Transcript 2: EmbodiedGen — full image-to-3D session, end to end.
'''
from gradio_client import Client, file

client = Client("HorizonRobotics/EmbodiedGen-Image-to-3D")

client.predict(
    api_name="/lambda_2"
)

client.predict(
    content=handle_file('https://horizonrobotics-embodiedgen-image-to-3d.hf.space/gradio_api/file=/tmp/gradio/1219da499ed7b9468eca3ab819eb09a47479748a66a61f8608006b92a4a635a7/chairelect.png'),
    api_name="/active_btn_by_content"
)

client.predict(
    image=handle_file('https://horizonrobotics-embodiedgen-image-to-3d.hf.space/gradio_api/file=/tmp/gradio/1219da499ed7b9468eca3ab819eb09a47479748a66a61f8608006b92a4a635a7/chairelect.png'),
    rmbg_tag="rembg",
    api_name="/preprocess_image_fn"
)

client.predict(
    api_name="/lambda_2"
)

client.predict(
    content=handle_file('https://horizonrobotics-embodiedgen-image-to-3d.hf.space/gradio_api/file=/tmp/gradio/f0b1343c3d64f50b7a08ce3027056ba9259d96960e58625a1df07922e4a3a3f4/image.png'),
    api_name="/active_btn_by_content"
)

client.predict(
    randomize_seed=False,
    seed=0,
    api_name="/get_seed"
)

client.predict(
    image=handle_file('https://horizonrobotics-embodiedgen-image-to-3d.hf.space/gradio_api/file=/tmp/gradio/f0b1343c3d64f50b7a08ce3027056ba9259d96960e58625a1df07922e4a3a3f4/image.png'),
    seed=0,
    ss_sampling_steps=25,
    slat_sampling_steps=25,
    raw_image_cache=handle_file('https://horizonrobotics-embodiedgen-image-to-3d.hf.space/gradio_api/file=/tmp/gradio/a7f55099fbfd47c44667d5e3eeee8818bf41ab1a5a70fc9bed2d5ce3c68f7015/image.png'),
    ss_guidance_strength=7.5,
    slat_guidance_strength=3,
    sam_image=None,
    api_name="/image_to_3d"
)

client.predict(
    enable_delight=None,
    texture_size=true,
    api_name="/extract_3d_representations_v3"
)

client.predict(
    api_name="/lambda_4"
)

client.predict(
    gs_path="/home/user/app/sessions/imageto3d/1kxl1n8ek38/sample_gs_aligned.ply",
    mesh_obj_path="/home/user/app/sessions/imageto3d/1kxl1n8ek38/sample.obj",
    asset_cat_text="chair",
    height_range_text="0.5",
    mass_range_text="6",
    asset_version_text="0.0.1",
    api_name="/extract_urdf"
)

client.predict(
    gs_path="/home/user/app/sessions/imageto3d/1kxl1n8ek38/sample_gs_aligned.ply",
    mesh_obj_path="/home/user/app/sessions/imageto3d/1kxl1n8ek38/sample.obj",
    asset_cat_text="chair",
    height_range_text="0.5-0.7",
    mass_range_text="2.1-3.5",
    asset_version_text="v0.0.1",
    api_name="/extract_urdf"
)

client.predict(
    api_name="/lambda_5"
)

'''

# Transcript 3: Z-Image-Turbo LoRA text-to-image session.
'''
from gradio_client import Client, file

client = Client("prithivMLmods/Z-Image-Turbo-LoRA-DLC")

client.predict(
    width=1024,
    height=1024,
    api_name="/update_selection"
)

client.predict(
    prompt="Pull a purple plumb out ya butt",
    image_input=None,
    image_strength=0.75,
    cfg_scale=0,
    steps=9,
    randomize_seed=None,
    seed=true,
    width=256386538,
    height=1024,
    lora_scale=1024,
    api_name="/run_lora"
)
'''
220
+
221
# Launch the daggr pipeline UI when the file is run as a script.
if __name__ == "__main__":
    graph.launch()