"""Generate the per-category task documentation pages automatically.

Scans ``mani_skill.envs.tasks`` for environment classes that are registered in
``REGISTERED_ENVS``, groups them by category folder, and writes one
``index.md`` per category under ``source/tasks/`` containing a summary table
(and, in the original version, detailed task cards with video demos).

NOTE(review): the source of this file arrived corrupted — statements fused
onto single physical lines, string literals split mid-token, and everything
from the table-row loop onward truncated. Sections marked ``TODO(review)``
below are best-effort reconstructions from the surviving fragments and MUST
be checked against the upstream repository before being trusted.
"""

import importlib
import inspect
import os
import tempfile  # noqa: F401  # used by the (lost) thumbnail-extraction code
import urllib.request  # noqa: F401  # used by the (lost) demo-video download code
from pathlib import Path

import cv2  # noqa: F401  # used by the (lost) first/last-frame extraction code

import mani_skill.envs
from mani_skill.utils.download_demo import DATASET_SOURCES
from mani_skill.utils.registration import REGISTERED_ENVS

# Task categories that get a generated documentation page.
TASK_CATEGORIES_TO_INCLUDE = [
    "tabletop",
    "humanoid",
    "mobile_manipulation",
    "quadruped",
    "control",
    "drawing",
]
# Maps a category key to the folder name used under the generated docs tree.
TASK_CATEGORIES_NAME_MAP = {"tabletop": "table_top_gripper"}
GENERATED_TASKS_DOCS_FOLDER = "tasks"

# Markdown link-reference definitions for the badges used on every page.
# Each definition must sit on its own line to be valid markdown.
GLOBAL_TASK_HEADER = """
[asset-badge]: https://img.shields.io/badge/download%20asset-yes-blue.svg
[dense-reward-badge]: https://img.shields.io/badge/dense%20reward-yes-green.svg
[sparse-reward-badge]: https://img.shields.io/badge/sparse%20reward-yes-green.svg
[no-dense-reward-badge]: https://img.shields.io/badge/dense%20reward-no-red.svg
[no-sparse-reward-badge]: https://img.shields.io/badge/sparse%20reward-no-red.svg
[demos-badge]: https://img.shields.io/badge/demos-yes-green.svg
"""

GLOBAL_TASK_POST_HEADER = """
The document here has both a high-level overview/list of all tasks in a table
as well as detailed task cards with video demonstrations after.
"""

# Per-category page headers. Typos in the original user-facing text
# ("cateogry", "on table") are fixed here.
TASK_CATEGORIES_HEADERS = {
    "tabletop": """# Table-Top 2 Finger Gripper Tasks

These are tasks situated on a table and involve a two-finger gripper arm robot
manipulating objects on the surface.""",
    "humanoid": """# Humanoid Tasks

Both real-world humanoids and the Mujoco humanoid are supported in ManiSkill,
and we are still in the process of adding more tasks. Humanoid category of
tasks generally considers control of robots with two legs and two arms.""",
    "mobile_manipulation": """# Mobile Manipulation Tasks

These are tasks where a mobile manipulator is used to manipulate objects. This
category primarily uses robots with mobile bases like Fetch or Stretch robots.
For additional tasks, including scene-level mobile manipulation, please check
out the [external benchmarks/tasks page](../external/index.md).
""",
    "quadruped": """# Quadruped Tasks

These are tasks where a quadruped robot is used for locomotion and/or
manipulation. This category primarily uses robots with four legs like the
ANYmal or Unitree go robots.""",
    "control": """# Control Tasks

These are classic control tasks where the objective is to control a robot to
reach a particular state, similar to the
[DM Control suite](https://github.com/deepmind/dm_control) but with GPU
parallelized simulation and rendering.""",
    "drawing": """# Drawing Tasks

These are tasks where the robot is controlled to draw a specific shape or
pattern.""",
}


def main():
    """Regenerate the task index pages under ``docs/source/tasks``."""
    base_dir = Path(__file__).parent / "source"
    # Path to the installed mani_skill/envs/tasks package.
    tasks_dir = Path(mani_skill.envs.__file__).parent / "tasks"

    # --- Collect: module import path -> classes defined in that module -------
    task_info = {}
    for root, _dirs, files in os.walk(tasks_dir):
        for file in files:
            if file.endswith(".py") and not file.startswith("__"):
                rel_path = os.path.relpath(os.path.join(root, file), tasks_dir.parent)
                module_path = rel_path.replace(os.sep, ".")[:-3]  # strip ".py"
                try:
                    module = importlib.import_module(f"mani_skill.envs.{module_path}")
                    classes = inspect.getmembers(module, inspect.isclass)
                    # Keep only classes *defined* in this module, not imported into it.
                    local_classes = [
                        cls
                        for name, cls in classes
                        if cls.__module__ == f"mani_skill.envs.{module_path}"
                    ]
                    if local_classes:
                        task_info[module_path] = local_classes
                except Exception as e:
                    # Best-effort: a module that fails to import is skipped, not fatal.
                    print(f"Error importing {module_path}: {e}")

    # --- Filter: keep only classes registered as environments ----------------
    filtered_task_info = {}
    for module_path, classes in task_info.items():
        registered_classes = []
        for cls in classes:
            # Check if this class is registered as an environment.
            for env_id, env_spec in REGISTERED_ENVS.items():
                if env_spec.cls == cls:
                    registered_classes.append(dict(env_id=env_id, cls=cls))
                    break
        if registered_classes:
            filtered_task_info[module_path] = registered_classes
    task_info = filtered_task_info

    # --- Categorize: bucket modules by their category folder ------------------
    # Module paths look like "tasks.<category>.<module>".
    categorized_tasks = {k: [] for k in TASK_CATEGORIES_TO_INCLUDE}
    for module_path in task_info.keys():
        parts = module_path.split(".")
        if parts[0] == "tasks":
            category = parts[1]
            if category in categorized_tasks:
                categorized_tasks[category].append(module_path)

    # --- Generate one index.md per category -----------------------------------
    print("\nTask Documentation:")
    for category, modules in categorized_tasks.items():
        print(f"\n{category}:")
        category_name = TASK_CATEGORIES_NAME_MAP.get(category, category)
        category_dir = f"{base_dir}/{GENERATED_TASKS_DOCS_FOLDER}/{category_name}"
        os.makedirs(category_dir, exist_ok=True)
        index_path = f"{category_dir}/index.md"
        # Regenerate the page from scratch on every run.
        if os.path.exists(index_path):
            os.remove(index_path)
        if category in TASK_CATEGORIES_HEADERS:
            with open(index_path, "w") as f:
                f.write(GLOBAL_TASK_HEADER)
                f.write(TASK_CATEGORIES_HEADERS[category])
                f.write(GLOBAL_TASK_POST_HEADER)

        # Short TLDR table of tasks.
        env_id_to_thumbnail_path = {}
        with open(index_path, "a") as f:
            f.write("\n## Task Table\n")
            f.write(
                "Table of all tasks/environments in this category. Task column is the environment ID, Preview is a thumbnail pair of the first and last frames of an example success demonstration. Max steps is the task's default max episode steps, generally tuned for RL workflows."
            )
            # NOTE(review): the corrupted source wrote each header cell in a
            # separate f.write prefixed with "\n", which would not render as a
            # markdown table; reconstructed as one line per table row.
            f.write(
                "\nTask | Preview | Dense Reward | Success/Fail Conditions | Demos | Max Episode Steps |"
            )
            f.write("\n|---|---|---|---|---|---|")

            # TODO(review): everything below reconstructs the lost row loop
            # from surviving fragments; verify against upstream.
            for module_path in modules:
                for env_info in task_info[module_path]:
                    env_id = env_info["env_id"]
                    cls = env_info["cls"]
                    env_spec = REGISTERED_ENVS[env_id]
                    # TODO(review): confirm the spec attribute name upstream.
                    max_eps_steps = getattr(env_spec, "max_episode_steps", "N/A")
                    # TODO(review): reward-mode detection is a guess; the
                    # original determination logic was lost.
                    supported = getattr(cls, "SUPPORTED_REWARD_MODES", ())
                    dense = (
                        "![dense-reward][dense-reward-badge]"
                        if "dense" in supported
                        else "![dense-reward][no-dense-reward-badge]"
                    )
                    sparse = (
                        "![sparse-reward][sparse-reward-badge]"
                        if "sparse" in supported
                        else "![sparse-reward][no-sparse-reward-badge]"
                    )
                    # Presumably DATASET_SOURCES is keyed by env id — verify.
                    demos = "![demos][demos-badge]" if env_id in DATASET_SOURCES else ""
                    # TODO(review): the original downloaded a demo video and
                    # extracted first/last frames (hence the cv2, urllib and
                    # tempfile imports); that logic was lost in the corruption.
                    thumbnail = ""
                    thumbnail_last = ""
                    env_id_to_thumbnail_path[env_id] = None
                    f.write(
                        f"\n{env_id} | {thumbnail} {thumbnail_last} | "
                        f"{dense} | {sparse} | {demos} | {max_eps_steps} |"
                    )
            f.write("\n")

        # TODO(review): the detailed per-task cards with video demonstrations
        # (promised by GLOBAL_TASK_POST_HEADER) were generated after this
        # point in the original file; that code was lost in the corruption.


if __name__ == "__main__":
    # NOTE(review): guard reconstructed — the original entry point was lost
    # with the truncated tail of the file.
    main()