File size: 22,857 Bytes
0533bc0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
b800513
 
 
 
 
 
 
 
 
 
 
 
 
 
 
0533bc0
 
 
 
 
 
 
 
 
 
 
b800513
 
 
 
 
 
 
 
 
0533bc0
 
 
 
 
 
 
 
 
 
 
 
 
b800513
 
 
 
 
 
 
 
 
 
 
0533bc0
 
 
 
 
 
 
 
 
 
 
b800513
 
 
 
 
 
 
 
0533bc0
 
 
b800513
 
 
 
 
 
0533bc0
 
 
 
 
 
 
b800513
 
 
 
 
 
 
 
 
 
0533bc0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
b800513
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
0533bc0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
# Project EmbodiedGen
#
# Copyright (c) 2025 Horizon Robotics. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#       http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.


import argparse
import json
import logging
import os
import re

import json_repair
from embodied_gen.utils.enum import (
    LayoutInfo,
    RobotItemEnum,
    Scene3DItemEnum,
    SpatialRelationEnum,
)
from embodied_gen.utils.gpt_clients import GPT_CLIENT, GPTclient
from embodied_gen.utils.process_media import SceneTreeVisualizer

# Module-level logger; basicConfig is a no-op if the app already configured logging.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


# Public API of this module: the designer class plus three preconfigured
# singleton instances (one per stage of the layout pipeline).
__all__ = [
    "LayoutDesigner",
    "LAYOUT_DISASSEMBLER",
    "LAYOUT_GRAPHER",
    "LAYOUT_DESCRIBER",
]


DISTRACTOR_NUM = 2  # Maximum number of distractor objects allowed

# System prompt for the first pipeline stage: decomposes a natural-language
# robot task into a structured scene description (task type, robot, background,
# context object, manipulated objects, distractors). The {...} placeholders are
# filled once at import time from the enum values via this f-string; the
# few-shot examples below constrain the model to compact Markdown-fenced JSON.
LAYOUT_DISASSEMBLE_PROMPT = f"""
    You are an intelligent 3D scene planner. Given a natural language
    description of a robotic task, output a structured description of
    an interactive 3D scene.

    The output must include the following fields:
    - task: A high-level task type (e.g., "single-arm pick",
        "dual-arm grasping", "pick and place", "object sorting").
    - {Scene3DItemEnum.ROBOT}: The name or type of robot involved. If not mentioned,
        use {RobotItemEnum.FRANKA} as default.
    - {Scene3DItemEnum.BACKGROUND}: The room or indoor environment where the task happens
        (e.g., Kitchen, Bedroom, Living Room, Workshop, Office).
    - {Scene3DItemEnum.CONTEXT}: A indoor object involved in the manipulation
        (e.g., Table, Shelf, Desk, Bed, Cabinet).
    - {Scene3DItemEnum.MANIPULATED_OBJS}: The main object(s) that the robot directly interacts with.
    - {Scene3DItemEnum.DISTRACTOR_OBJS}: Other objects that naturally belong to the scene but are not part of the main task.

    Constraints:
    - The {Scene3DItemEnum.BACKGROUND} must logically match the described task.
    - The {Scene3DItemEnum.CONTEXT} must fit within the {Scene3DItemEnum.BACKGROUND}. (e.g., a bedroom may include a table or bed, but not a workbench.)
    - The {Scene3DItemEnum.CONTEXT} must be a concrete indoor object, such as a "table",
        "shelf", "desk", or "bed". It must not be an abstract concept (e.g., "area", "space", "zone")
        or structural surface (e.g., "floor", "ground"). If the input describes an interaction near
        the floor or vague space, you must infer a plausible object like a "table", "cabinet", or "storage box" instead.
    - {Scene3DItemEnum.MANIPULATED_OBJS} and {Scene3DItemEnum.DISTRACTOR_OBJS} objects must be plausible,
        and semantically compatible with the {Scene3DItemEnum.CONTEXT} and {Scene3DItemEnum.BACKGROUND}.
    - {Scene3DItemEnum.DISTRACTOR_OBJS} must not confuse or overlap with the manipulated objects.
    - {Scene3DItemEnum.DISTRACTOR_OBJS} number limit: {DISTRACTOR_NUM} distractors maximum.
    - All {Scene3DItemEnum.BACKGROUND} are limited to indoor environments.
    - {Scene3DItemEnum.MANIPULATED_OBJS} and {Scene3DItemEnum.DISTRACTOR_OBJS} are rigid bodies and not include flexible objects.
    - {Scene3DItemEnum.MANIPULATED_OBJS} and {Scene3DItemEnum.DISTRACTOR_OBJS} must be common
        household or office items or furniture, not abstract concepts, not too small like needle.
    - If the input includes a plural or grouped object (e.g., "pens", "bottles", "plates", "fruit"),
        you must decompose it into multiple individual instances (e.g., ["pen1", "pen2"], ["apple", "pear"]).
    - Containers that hold objects (e.g., "bowl of apples", "box of tools") must
        be separated into individual items (e.g., ["bowl", "apple1", "apple2"]).
    - Do not include transparent objects such as "glass", "plastic", etc.
    - All {Scene3DItemEnum.MANIPULATED_OBJS} and {Scene3DItemEnum.DISTRACTOR_OBJS} must be child node of {Scene3DItemEnum.CONTEXT}.
    - The output must be in compact JSON format and use Markdown syntax, just like the output in the example below.

    Examples:

    Input:
    "Pick up the marker from the table and put it in the bowl robot {RobotItemEnum.UR5}."
    Output:
    ```json
    {{
        "task_desc": "Pick up the marker from the table and put it in the bowl.",
        "task": "pick and place",
        "{Scene3DItemEnum.ROBOT}": "{RobotItemEnum.UR5}",
        "{Scene3DItemEnum.BACKGROUND}": "kitchen",
        "{Scene3DItemEnum.CONTEXT}": "table",
        "{Scene3DItemEnum.MANIPULATED_OBJS}": ["marker"],
        "{Scene3DItemEnum.DISTRACTOR_OBJS}": ["mug", "notebook", "bowl"]
    }}
    ```

    Input:
    "Put the rubik's cube on the top of the shelf."
    Output:
    ```json
    {{
        "task_desc": "Put the rubik's cube on the top of the shelf.",
        "task": "pick and place",
        "{Scene3DItemEnum.ROBOT}": "{RobotItemEnum.FRANKA}",
        "{Scene3DItemEnum.BACKGROUND}": "bedroom",
        "{Scene3DItemEnum.CONTEXT}": "shelf",
        "{Scene3DItemEnum.MANIPULATED_OBJS}": ["rubik's cube"],
        "{Scene3DItemEnum.DISTRACTOR_OBJS}": ["pen", "cup", "toy car"]
    }}
    ```

    Input:
    "Remove all the objects from the white basket and put them on the table."
    Output:
    ```json
    {{
        "task_desc": "Remove all the objects from the white basket and put them on the table, robot {RobotItemEnum.PIPER}.",
        "task": "pick and place",
        "{Scene3DItemEnum.ROBOT}": "{RobotItemEnum.PIPER}",
        "{Scene3DItemEnum.BACKGROUND}": "office",
        "{Scene3DItemEnum.CONTEXT}": "table",
        "{Scene3DItemEnum.MANIPULATED_OBJS}": ["banana", "mobile phone"],
        "{Scene3DItemEnum.DISTRACTOR_OBJS}": ["plate", "white basket"]
    }}
    ```

    Input:
    "Pick up the rope on the chair and put it in the box."
    Output:
    ```json
    {{
        "task_desc": "Pick up the rope on the chair and put it in the box, robot {RobotItemEnum.FRANKA}.",
        "task": "pick and place",
        "{Scene3DItemEnum.ROBOT}": "{RobotItemEnum.FRANKA}",
        "{Scene3DItemEnum.BACKGROUND}": "living room",
        "{Scene3DItemEnum.CONTEXT}": "chair",
        "{Scene3DItemEnum.MANIPULATED_OBJS}": ["rope", "box"],
        "{Scene3DItemEnum.DISTRACTOR_OBJS}": ["magazine"]
    }}
    ```

    Input:
    "Pick up the seal tape and plastic from the counter and put them in the open drawer and close it."
    Output:
    ```json
    {{
        "task_desc": "Pick up the seal tape and plastic from the counter and put them in the open drawer and close it.",
        "task": "pick and place",
        "robot": "franka",
        "background": "kitchen",
        "context": "counter",
        "manipulated_objs": ["seal tape", "plastic", "opened drawer"],
        "distractor_objs": ["scissors"]
    }}
    ```

    Input:
    "Put the pens in the grey bowl."
    Output:
    ```json
    {{
        "task_desc": "Put the pens in the grey bowl.",
        "task": "pick and place",
        "robot": "franka",
        "background": "office",
        "context": "table",
        "manipulated_objs": ["pen1", "pen2", "grey bowl"],
        "distractor_objs": ["notepad", "cup"]
    }}
    ```

"""


# System prompt for the second pipeline stage: turns the disassembled scene
# dictionary into a parent->children spatial-relation tree (max depth 2), with
# a fixed vocabulary of relations (ON / INSIDE / IN / FLOOR) from
# SpatialRelationEnum. Doubled braces {{ }} are literal JSON braces in the
# examples; single braces are filled at import time by this f-string.
LAYOUT_HIERARCHY_PROMPT = f"""
    You are a 3D scene layout reasoning expert.
    Your task is to generate a spatial relationship dictionary in multiway tree
    that describes how objects are arranged in a 3D environment
    based on a given task description and object list.

    Input in JSON format containing the task description, task type,
    {Scene3DItemEnum.ROBOT}, {Scene3DItemEnum.BACKGROUND}, {Scene3DItemEnum.CONTEXT},
    and a list of objects, including {Scene3DItemEnum.MANIPULATED_OBJS} and {Scene3DItemEnum.DISTRACTOR_OBJS}.

    ### Supported Spatial Relations:
    - "{SpatialRelationEnum.ON}": The child object bottom is directly on top of the parent object top.
    - "{SpatialRelationEnum.INSIDE}": The child object is inside the context object.
    - "{SpatialRelationEnum.IN}": The {Scene3DItemEnum.ROBOT} in the {Scene3DItemEnum.BACKGROUND}.
    - "{SpatialRelationEnum.FLOOR}": The child object bottom is on the floor of the {Scene3DItemEnum.BACKGROUND}.

    ### Rules:
    - The {Scene3DItemEnum.CONTEXT} object must be "{SpatialRelationEnum.FLOOR}" the {Scene3DItemEnum.BACKGROUND}.
    - {Scene3DItemEnum.MANIPULATED_OBJS} and {Scene3DItemEnum.DISTRACTOR_OBJS} must be either
        "{SpatialRelationEnum.ON}" or "{SpatialRelationEnum.INSIDE}" the {Scene3DItemEnum.CONTEXT}
    - Or "{SpatialRelationEnum.FLOOR}" {Scene3DItemEnum.BACKGROUND}.
    - Use "{SpatialRelationEnum.INSIDE}" only if the parent is a container-like object (e.g., shelf, rack, cabinet).
    - Do not define relationship edges between objects, only for the child and parent nodes.
    - {Scene3DItemEnum.ROBOT} must "{SpatialRelationEnum.IN}" the {Scene3DItemEnum.BACKGROUND}.
    - Ensure that each object appears only once in the layout tree, and its spatial relationship is defined with only one parent.
    - Ensure a valid multiway tree structure with a maximum depth of 2 levels suitable for a 3D scene layout representation.
    - Only output the final output in JSON format, using Markdown syntax as in examples.

    ### Example
    Input:
    {{
        "task_desc": "Pick up the marker from the table and put it in the bowl.",
        "task": "pick and place",
        "{Scene3DItemEnum.ROBOT}": "{RobotItemEnum.FRANKA}",
        "{Scene3DItemEnum.BACKGROUND}": "kitchen",
        "{Scene3DItemEnum.CONTEXT}": "table",
        "{Scene3DItemEnum.MANIPULATED_OBJS}": ["marker", "bowl"],
        "{Scene3DItemEnum.DISTRACTOR_OBJS}": ["mug", "chair"]
    }}
    Intermediate Think:
        table {SpatialRelationEnum.FLOOR} kitchen
        chair {SpatialRelationEnum.FLOOR} kitchen
        {RobotItemEnum.FRANKA} {SpatialRelationEnum.IN} kitchen
        marker {SpatialRelationEnum.ON} table
        bowl {SpatialRelationEnum.ON} table
        mug {SpatialRelationEnum.ON} table
    Final Output:
    ```json
    {{
        "kitchen": [
            ["table", "{SpatialRelationEnum.FLOOR}"],
            ["chair", "{SpatialRelationEnum.FLOOR}"],
            ["{RobotItemEnum.FRANKA}", "{SpatialRelationEnum.IN}"]
        ],
        "table": [
            ["marker", "{SpatialRelationEnum.ON}"],
            ["bowl", "{SpatialRelationEnum.ON}"],
            ["mug", "{SpatialRelationEnum.ON}"]
        ]
    }}
    ```

    Input:
    {{
        "task_desc": "Put the marker on top of the book.",
        "task": "pick and place",
        "{Scene3DItemEnum.ROBOT}": "{RobotItemEnum.UR5}",
        "{Scene3DItemEnum.BACKGROUND}": "office",
        "{Scene3DItemEnum.CONTEXT}": "desk",
        "{Scene3DItemEnum.MANIPULATED_OBJS}": ["marker", "book"],
        "{Scene3DItemEnum.DISTRACTOR_OBJS}": ["pen holder", "notepad"]
    }}
    Intermediate Think:
        desk {SpatialRelationEnum.FLOOR} office
        {RobotItemEnum.UR5} {SpatialRelationEnum.IN} office
        marker {SpatialRelationEnum.ON} desk
        book {SpatialRelationEnum.ON} desk
        pen holder {SpatialRelationEnum.ON} desk
        notepad {SpatialRelationEnum.ON} desk
    Final Output:
    ```json
    {{
        "office": [
            ["desk", "{SpatialRelationEnum.FLOOR}"],
            ["{RobotItemEnum.UR5}", "{SpatialRelationEnum.IN}"]
        ],
        "desk": [
            ["marker", "{SpatialRelationEnum.ON}"],
            ["book", "{SpatialRelationEnum.ON}"],
            ["pen holder", "{SpatialRelationEnum.ON}"],
            ["notepad", "{SpatialRelationEnum.ON}"]
        ]
    }}
    ```

    Input:
    {{
        "task_desc": "Put the rubik's cube on the top of the shelf.",
        "task": "pick and place",
        "{Scene3DItemEnum.ROBOT}": "{RobotItemEnum.UR5}",
        "{Scene3DItemEnum.BACKGROUND}": "bedroom",
        "{Scene3DItemEnum.CONTEXT}": "shelf",
        "{Scene3DItemEnum.MANIPULATED_OBJS}": ["rubik's cube"],
        "{Scene3DItemEnum.DISTRACTOR_OBJS}": ["toy car", "pen"]
    }}
    Intermediate Think:
        shelf {SpatialRelationEnum.FLOOR} bedroom
        {RobotItemEnum.UR5} {SpatialRelationEnum.IN} bedroom
        rubik's cube {SpatialRelationEnum.INSIDE} shelf
        toy car {SpatialRelationEnum.INSIDE} shelf
        pen {SpatialRelationEnum.INSIDE} shelf
    Final Output:
    ```json
    {{
        "bedroom": [
            ["shelf", "{SpatialRelationEnum.FLOOR}"],
            ["{RobotItemEnum.UR5}", "{SpatialRelationEnum.IN}"]
        ],
        "shelf": [
            ["rubik's cube", "{SpatialRelationEnum.INSIDE}"],
            ["toy car", "{SpatialRelationEnum.INSIDE}"],
            ["pen", "{SpatialRelationEnum.INSIDE}"]
        ]
    }}
    ```

    Input:
    {{
        "task_desc": "Put the marker in the cup on the counter.",
        "task": "pick and place",
        "robot": "franka",
        "background": "kitchen",
        "context": "counter",
        "manipulated_objs": ["marker", "cup"],
        "distractor_objs": ["plate", "spoon"]
    }}
    Intermediate Think:
        counter {SpatialRelationEnum.FLOOR} kitchen
        {RobotItemEnum.FRANKA} {SpatialRelationEnum.IN} kitchen
        marker {SpatialRelationEnum.ON} counter
        cup {SpatialRelationEnum.ON} counter
        plate {SpatialRelationEnum.ON} counter
        spoon {SpatialRelationEnum.ON} counter
    Final Output:
    ```json
    {{
        "kitchen": [
            ["counter", "{SpatialRelationEnum.FLOOR}"],
            ["{RobotItemEnum.FRANKA}", "{SpatialRelationEnum.IN}"]
        ],
        "counter": [
            ["marker", "{SpatialRelationEnum.ON}"],
            ["cup", "{SpatialRelationEnum.ON}"],
            ["plate", "{SpatialRelationEnum.ON}"],
            ["spoon", "{SpatialRelationEnum.ON}"]
        ]
    }}
    ```
"""


# System prompt for the third pipeline stage: maps each scene object to a short
# styled visual description for 3D asset generation. Plain string (not an
# f-string), so the braces in the example are literal.
LAYOUT_DESCRIBER_PROMPT = """
    You are a 3D asset style descriptor.

    Given a task description and a dictionary where the key is the object content and
    the value is the object type, output a JSON dictionary with each object paired
    with a concise, styled visual description suitable for 3D asset generation.

    Generation Guidelines:
    - For each object, brainstorm multiple style candidates before selecting the final
        description. Vary phrasing, material, texture, color, and spatial details.
    - Each description must be a maximum of 15 words, including color, style, materials.
    - Descriptions should be visually grounded, specific, and reflect surface texture and structure.
    - For objects marked as "context", explicitly mention the object is standalone, has an empty top.
    - Use rich style descriptors: e.g., "scratched brown wooden desk" etc.
    - Ensure all object styles align with the task's overall context and environment.

    Format your output in JSON like the example below.

    Example Input:
    "Pick up the rope on the chair and put it in the box. {'living room': 'background', 'chair': 'context',
        'rope': 'manipulated_objs', 'box': 'manipulated_objs', 'magazine': 'distractor_objs'}"

    Example Output:
    ```json
    {
        "living room": "modern cozy living room with soft sunlight and light grey carpet",
        "chair": "standalone dark oak chair with no surroundings and clean empty seat",
        "rope": "twisted hemp rope with rough fibers and dusty beige texture",
        "box": "slightly crumpled cardboard box with open flaps and brown textured surface",
        "magazine": "celebrity magazine with glossy red cover and large bold title"
    }
    ```
"""


class LayoutDesigner(object):
    """A class for querying GPT-based scene layout reasoning and formatting responses.

    Attributes:
        prompt (str): The system prompt for GPT.
        verbose (bool): Whether to log responses.
        gpt_client (GPTclient): The GPT client instance.

    Methods:
        query(prompt, params): Query GPT with a prompt and parameters.
        format_response(response): Parse and clean JSON response.
        format_response_repair(response): Repair and parse JSON response.
        save_output(output, save_path): Save output to file.
        __call__(prompt, save_path, params): Query and process output.
    """

    def __init__(
        self,
        gpt_client: GPTclient,
        system_prompt: str,
        verbose: bool = False,
    ) -> None:
        self.prompt = system_prompt.strip()
        self.verbose = verbose
        self.gpt_client = gpt_client

    def query(self, prompt: str, params: dict = None) -> str:
        """Query GPT with the system prompt and user prompt.

        Args:
            prompt (str): User prompt.
            params (dict, optional): GPT parameters.

        Returns:
            str: GPT response.
        """
        full_prompt = self.prompt + f"\n\nInput:\n\"{prompt}\""

        response = self.gpt_client.query(
            text_prompt=full_prompt,
            params=params,
        )

        if self.verbose:
            logger.info(f"Response: {response}")

        return response

    def format_response(self, response: str) -> dict:
        """Format and parse GPT response as JSON.

        Args:
            response (str): Raw GPT response.

        Returns:
            dict: Parsed JSON output.

        Raises:
            json.JSONDecodeError: If parsing fails.
        """
        cleaned = re.sub(r"^```json\s*|\s*```$", "", response.strip())
        try:
            output = json.loads(cleaned)
        except json.JSONDecodeError as e:
            raise json.JSONDecodeError(
                f"Error: {e}, failed to parse JSON response: {response}"
            )

        return output

    def format_response_repair(self, response: str) -> dict:
        """Repair and parse possibly broken JSON response.

        Args:
            response (str): Raw GPT response.

        Returns:
            dict: Parsed JSON output.
        """
        return json_repair.loads(response)

    def save_output(self, output: dict, save_path: str) -> None:
        """Save output dictionary to a file.

        Args:
            output (dict): Output data.
            save_path (str): Path to save the file.
        """
        os.makedirs(os.path.dirname(save_path), exist_ok=True)
        with open(save_path, 'w') as f:
            json.dump(output, f, indent=4)

    def __call__(
        self, prompt: str, save_path: str = None, params: dict = None
    ) -> dict | str:
        """Query GPT and process the output.

        Args:
            prompt (str): User prompt.
            save_path (str, optional): Path to save output.
            params (dict, optional): GPT parameters.

        Returns:
            dict | str: Output data.
        """
        response = self.query(prompt, params=params)
        output = self.format_response_repair(response)
        self.save_output(output, save_path) if save_path else None

        return output


# Module-level singleton designers: each stage of the layout pipeline shares
# the same GPT client but carries its own system prompt.
LAYOUT_DISASSEMBLER = LayoutDesigner(
    gpt_client=GPT_CLIENT, system_prompt=LAYOUT_DISASSEMBLE_PROMPT
)
LAYOUT_GRAPHER = LayoutDesigner(
    gpt_client=GPT_CLIENT, system_prompt=LAYOUT_HIERARCHY_PROMPT
)
LAYOUT_DESCRIBER = LayoutDesigner(
    gpt_client=GPT_CLIENT, system_prompt=LAYOUT_DESCRIBER_PROMPT
)


def build_scene_layout(
    task_desc: str, output_path: str = None, gpt_params: dict = None
) -> LayoutInfo:
    """Build a 3D scene layout from a natural language task description.

    Runs the three GPT stages in order — disassemble the task into scene
    items, arrange them into a spatial-relation tree, then attach style
    descriptions — and bundles the results into a LayoutInfo.

    Args:
        task_desc (str): Natural language description of the robotic task.
        output_path (str, optional): Path to save the visualized scene tree.
        gpt_params (dict, optional): Parameters for GPT queries.

    Returns:
        LayoutInfo: Structured layout information for the scene.

    Example:
        ```py
        from embodied_gen.models.layout import build_scene_layout
        layout_info = build_scene_layout(
            task_desc="Put the apples on the table on the plate",
            output_path="outputs/scene_tree.jpg",
        )
        print(layout_info)
        ```
    """
    # Stage 1: decompose the task into scene items.
    relation = LAYOUT_DISASSEMBLER(task_desc, params=gpt_params)
    # Stage 2: arrange the items into a spatial-relation tree.
    tree = LAYOUT_GRAPHER(relation, params=gpt_params)
    # Stage 3: style descriptions keyed by object name.
    mapping = Scene3DItemEnum.object_mapping(relation)
    descriptions = LAYOUT_DESCRIBER(
        f'{relation["task_desc"]} {mapping}', params=gpt_params
    )

    layout_info = LayoutInfo(tree, relation, descriptions, mapping)

    if output_path is not None:
        SceneTreeVisualizer(layout_info).render(save_path=output_path)
        logger.info(f"Scene hierarchy tree saved to {output_path}")

    return layout_info


def parse_args(argv=None):
    """Parse command-line arguments for the 3D scene layout designer.

    Args:
        argv (list[str], optional): Argument list to parse. Defaults to None,
            in which case argparse falls back to ``sys.argv[1:]`` — existing
            callers are unaffected, and tests can pass an explicit list.

    Returns:
        argparse.Namespace: Parsed ``task_desc`` and ``save_root`` values.
    """
    parser = argparse.ArgumentParser(description="3D Scene Layout Designer")
    parser.add_argument(
        "--task_desc",
        type=str,
        default="Put the apples on the table on the plate",
        help="Natural language description of the robotic task",
    )
    parser.add_argument(
        "--save_root",
        type=str,
        default="outputs/layout_tree",
        help="Path to save the layout output",
    )
    return parser.parse_args(argv)


if __name__ == "__main__":
    # Local re-imports (already imported at module top) keep the script
    # section self-describing when read in isolation.
    from embodied_gen.utils.enum import LayoutInfo
    from embodied_gen.utils.process_media import SceneTreeVisualizer

    args = parse_args()
    # Sampling parameters passed through to each GPT query.
    params = {
        "temperature": 1.0,
        "top_p": 0.95,
        "frequency_penalty": 0.3,
        "presence_penalty": 0.5,
    }
    # Same three-stage pipeline as build_scene_layout: disassemble -> graph
    # -> describe.
    layout_relation = LAYOUT_DISASSEMBLER(args.task_desc, params=params)
    layout_tree = LAYOUT_GRAPHER(layout_relation, params=params)

    object_mapping = Scene3DItemEnum.object_mapping(layout_relation)
    obj_prompt = f'{layout_relation["task_desc"]} {object_mapping}'

    objs_desc = LAYOUT_DESCRIBER(obj_prompt, params=params)

    # NOTE(review): LayoutInfo receives 3 positional args here, but 4 (with
    # object_mapping) in build_scene_layout — confirm whether the mapping
    # should also be passed here.
    layout_info = LayoutInfo(layout_tree, layout_relation, objs_desc)

    # Render the scene tree image and persist the layout as JSON.
    visualizer = SceneTreeVisualizer(layout_info)
    os.makedirs(args.save_root, exist_ok=True)
    scene_graph_path = f"{args.save_root}/scene_tree.jpg"
    visualizer.render(save_path=scene_graph_path)
    with open(f"{args.save_root}/layout.json", "w") as f:
        json.dump(layout_info.to_dict(), f, indent=4)

    print(f"Scene hierarchy tree saved to {scene_graph_path}")
    print(f"Disassembled Layout: {layout_relation}")
    print(f"Layout Graph: {layout_tree}")
    print(f"Layout Descriptions: {objs_desc}")