YAML Metadata
Warning:
empty or missing yaml metadata in repo card
(https://huggingface.co/docs/hub/model-cards#model-card-metadata)
OPEN_PI Qing
This project is based on OPEN_PI.
Conda Environment
[Server 端] 部署(显存要求>=24G)
# 1. Create and activate the conda environment
conda create -n openpi_env python=3.11 -y
conda activate openpi_env
# ffmpeg >= 7 is required, otherwise `uv sync` fails
conda install -c conda-forge -y ffmpeg
# 2. Clone the project code
# NOTE(review): the actual `git clone` command appears to be missing here —
# presumably `git clone https://github.com/Physical-Intelligence/openpi.git`; confirm.
cd openpi/
# 3. Install project dependencies (requires proxy access to external networks)
pip install uv
export http_proxy="http://127.0.0.1:7890" https_proxy="http://127.0.0.1:7890"
GIT_LFS_SKIP_SMUDGE=1 uv sync --index-url https://pypi.org/simple --verbose
# 4. Download and unpack the model weights (~10 GB)
# BUG FIX: the original fused two `export` statements onto one line (a lost
# line break), which also marks a spurious variable literally named `export`.
export http_proxy=http://192.168.32.28:18000
export https_proxy=http://192.168.32.28:18000
wget https://www.modelscope.cn/models/masheng/pi0-fine-Tuned-Models/resolve/master/pi0_fast_libero.zip
unzip pi0_fast_libero.zip -d "$HOME/.cache/openpi/openpi-assets/checkpoints/"
# 5. Start the inference server (connects to HuggingFace automatically)
export HF_ENDPOINT=https://hf-mirror.com
uv run scripts/serve_policy.py --env LIBERO
[Client 端] 部署(显存要求>=24G)
# 1. Create and activate the conda environment
# FIX: added -y so creation is non-interactive (consistent with the server setup)
conda create -n pi0_demo python=3.10 -y
conda activate pi0_demo
cd /mnt/pfs/world_foundational_model/qingpo.wuwu/Project_10_SimBench_VLA/1_Ours/Openpi
git clone https://github.com/yueduduo/pi0_fast_deploy.git
cd pi0_fast_deploy
pip install -r requirements.txt --index-url https://pypi.org/simple
训练脚本
1. 准备数据
OpenPI 使用 LeRobot v2.0 数据集格式。你需要将自己的数据转换成这个格式。OpenPI 提供了示例脚本,你可以修改它来适配你的数据:
# Libero dataset conversion example (for reference)
/mnt/pfs/.../openpi/examples/libero/convert_libero_data_to_lerobot.py
# ALOHA dataset conversion example (for reference)
/mnt/pfs/.../openpi/examples/aloha_real/convert_aloha_data_to_lerobot.py
# Create a directory to hold the Franka conversion script
mkdir -p /mnt/pfs/world_foundational_model/qingpo.wuwu/Project_10_SimBench_VLA/1_Ours/Openpi/openpi/examples/franka_real
# Then create the file convert_franka_data_to_lerobot.py inside that directory
可以 转换所有 task 的数据 or 只转换部分 task 的数据(例如 pick_and_place) => 成 LeRobot 格式:统一的 Parquet + 视频文件
conda activate openpi_env
# Install libstdcxx-ng in the conda environment (pair this with exporting
# LD_LIBRARY_PATH=$CONDA_PREFIX/lib so the conda libstdc++ is picked up first)
# FIX: added -y so the install is non-interactive
conda install -c conda-forge -y libstdcxx-ng
在转换之前,需要把 img 文件夹下的图片都转换成 224x224 大小
(1) 转换所有的 tasks
# Convert all tasks
cd /mnt/pfs/world_foundational_model/qingpo.wuwu/Project_10_SimBench_VLA/1_Ours/Openpi/openpi
conda activate openpi_env
export LD_LIBRARY_PATH=$CONDA_PREFIX/lib:$LD_LIBRARY_PATH # prefer conda's libraries over the system ones
# BUG FIX: the original fused two `export` statements onto one line (a lost
# line break); split them so both proxy variables are set cleanly.
export http_proxy=http://192.168.32.28:18000
export https_proxy=http://192.168.32.28:18000
uv run examples/franka_real/convert_franka_data_to_lerobot.py \
--data_dir /mnt/pfs/world_foundational_model/qingpo.wuwu/Project_10_SimBench_VLA/0_Datasets/raw_224/franka_real \
--output_dir /mnt/pfs/world_foundational_model/qingpo.wuwu/Project_10_SimBench_VLA/0_Datasets/lerobot \
--repo_id qingpowuwu/franka_real-three_tasks \
--push_to_hub
(2) 只转换单个 task
# (1) Convert only the pick_up_milk task
cd /mnt/pfs/world_foundational_model/qingpo.wuwu/Project_10_SimBench_VLA/1_Ours/Openpi/openpi
conda activate openpi_env
export LD_LIBRARY_PATH=$CONDA_PREFIX/lib:$LD_LIBRARY_PATH # prefer conda's libraries over the system ones
# BUG FIX: split the fused double-export (lost line break) into two statements.
export http_proxy=http://192.168.32.28:18000
export https_proxy=http://192.168.32.28:18000
uv run examples/franka_real/convert_franka_data_to_lerobot.py \
--data_dir /mnt/pfs/world_foundational_model/qingpo.wuwu/Project_10_SimBench_VLA/0_Datasets/raw_224/franka_real \
--output_dir /mnt/pfs/world_foundational_model/qingpo.wuwu/Project_10_SimBench_VLA/0_Datasets/lerobot \
--repo_id qingpowuwu/franka_real-pick_milk \
--task_dirs pick_up_milk \
--push_to_hub
# (2) Convert only the stack_cup task
cd /mnt/pfs/world_foundational_model/qingpo.wuwu/Project_10_SimBench_VLA/1_Ours/Openpi/openpi
conda activate openpi_env
export LD_LIBRARY_PATH=$CONDA_PREFIX/lib:$LD_LIBRARY_PATH # prefer conda's libraries over the system ones
# BUG FIX: split the fused double-export (lost line break) into two statements.
export http_proxy=http://192.168.32.28:18000
export https_proxy=http://192.168.32.28:18000
uv run examples/franka_real/convert_franka_data_to_lerobot.py \
--data_dir /mnt/pfs/world_foundational_model/qingpo.wuwu/Project_10_SimBench_VLA/0_Datasets/raw_224/franka_real \
--output_dir /mnt/pfs/world_foundational_model/qingpo.wuwu/Project_10_SimBench_VLA/0_Datasets/lerobot \
--repo_id qingpowuwu/franka_real-stack_cup \
--task_dirs stack_cup \
--push_to_hub
# (3) Convert only the tennis_bucket_upright task
cd /mnt/pfs/world_foundational_model/qingpo.wuwu/Project_10_SimBench_VLA/1_Ours/Openpi/openpi
conda activate openpi_env
export LD_LIBRARY_PATH=$CONDA_PREFIX/lib:$LD_LIBRARY_PATH # prefer conda's libraries over the system ones
# BUG FIX: split the fused double-export (lost line break) into two statements.
export http_proxy=http://192.168.32.28:18000
export https_proxy=http://192.168.32.28:18000
uv run examples/franka_real/convert_franka_data_to_lerobot.py \
--data_dir /mnt/pfs/world_foundational_model/qingpo.wuwu/Project_10_SimBench_VLA/0_Datasets/raw_224/franka_real \
--output_dir /mnt/pfs/world_foundational_model/qingpo.wuwu/Project_10_SimBench_VLA/0_Datasets/lerobot \
--repo_id qingpowuwu/franka_real-tennis_bucket_upright \
--task_dirs tennis_bucket_upright \
--push_to_hub
之后可以检查 lerobot 的本地目录
# Set the local LeRobot data directory
export HF_LEROBOT_HOME=/mnt/pfs/world_foundational_model/qingpo.wuwu/Project_10_SimBench_VLA/0_Datasets/lerobot
# NOTE(review): overriding HOME redirects every tool's config/cache lookups —
# confirm nothing else in the session depends on the real HOME.
export HOME=/mnt/pfs/world_foundational_model/qingpo.wuwu
# Verify the dataset can be found
uv run python - << 'PY'
import os
from pathlib import Path

# BUG FIX: pathlib never expands '~', so the fallback default was unusable
# whenever HF_LEROBOT_HOME was unset; expanduser() fixes that.
lerobot_home = Path(os.environ.get('HF_LEROBOT_HOME', '~/.cache/huggingface/lerobot')).expanduser()
dataset_path = lerobot_home / "qingpowuwu/franka_real-three_tasks"
print(f"HF_LEROBOT_HOME: {lerobot_home}")
print(f"Dataset path: {dataset_path}")
print(f"Exists: {dataset_path.exists()}")
# FIX: restored the indentation lost in the copy-paste — the flattened
# version was a Python syntax error.
if dataset_path.exists():
    info_json = dataset_path / "meta/info.json"
    print(f"info.json exists: {info_json.exists()}")
    if info_json.exists():
        import json
        with open(info_json) as f:
            info = json.load(f)
        print(f"✓ Dataset: {info['robot_type']}, {info['total_episodes']} episodes, {info['total_frames']} frames")
PY
之后可以检查 huggingface 上的数据集(如果本地没有缓存, 会自动从 Hub 下载)
# Set the local LeRobot data directory
export HF_LEROBOT_HOME=/mnt/pfs/world_foundational_model/qingpo.wuwu/Project_10_SimBench_VLA/0_Datasets/lerobot
export HOME=/mnt/pfs/world_foundational_model/qingpo.wuwu
export LD_LIBRARY_PATH=$CONDA_PREFIX/lib:$LD_LIBRARY_PATH # prefer conda's libraries over the system ones
cd /mnt/pfs/world_foundational_model/qingpo.wuwu/Project_10_SimBench_VLA/1_Ours/Openpi/openpi
uv run python - << 'PY'
from lerobot.common.datasets.lerobot_dataset import LeRobotDataset

# Inspect one of the datasets
dataset = LeRobotDataset(
    repo_id="qingpowuwu/franka_real-three_tasks",
    root=None,  # download from the Hub
)
print("Dataset info:")
print(f" Episodes: {dataset.num_episodes}")
print(f" Frames: {dataset.num_frames}")

# Print the key names of the first sample
sample = dataset[0]
print("\nSample keys:")
# FIX: restored the indentation of the loop body lost in the copy-paste —
# the flattened version was a Python syntax error.
for key in sorted(sample.keys()):
    print(f" {key}: {sample[key].shape if hasattr(sample[key], 'shape') else type(sample[key])}")
PY
2. 更新 src/openpi/policies 文件夹
- 在 src/openpi/policies 文件夹下面创建 franka_policy.py
- 更新 src/openpi/training/config.py 代码
3. 计算归一化
这里会用到 openpi/src/openpi/training/config.py 里面定义好的 TrainConfig, 例如我们这里使用 pi0_franka 这个配置。所以对于不同的 dataset 需要更新 config.py 里面的 repo_id。
# Set environment variables
export HF_LEROBOT_HOME=/mnt/pfs/world_foundational_model/qingpo.wuwu/Project_10_SimBench_VLA/0_Datasets/lerobot
export TMPDIR=/mnt/pfs/world_foundational_model/qingpo.wuwu/tmp
export TEMP=$TMPDIR
export TMP=$TMPDIR
export XDG_CACHE_HOME=/mnt/pfs/world_foundational_model/qingpo.wuwu/.cache
export HOME=/mnt/pfs/world_foundational_model/qingpo.wuwu
export LD_LIBRARY_PATH=$CONDA_PREFIX/lib:$LD_LIBRARY_PATH # prefer conda's libraries over the system ones
cd /mnt/pfs/world_foundational_model/qingpo.wuwu/Project_10_SimBench_VLA/1_Ours/Openpi/openpi
conda activate openpi_env
# Compute the normalization statistics. This uses the `pi0_franka` TrainConfig
# defined in `Project_10_SimBench_VLA/1_Ours/Openpi/openpi/src/openpi/training/config.py`
# — remember to update its repo_id to the dataset you want first.
uv run scripts/compute_norm_stats.py --config-name pi0_franka
# Results are written into one of these files (depending on the configured repo_id):
# Project_10_SimBench_VLA/1_Ours/Openpi/openpi/assets/pi0_franka/qingpowuwu/franka_real-pick_milk/norm_stats.json
# Project_10_SimBench_VLA/1_Ours/Openpi/openpi/assets/pi0_franka/qingpowuwu/franka_real-stack_cup/norm_stats.json
# Project_10_SimBench_VLA/1_Ours/Openpi/openpi/assets/pi0_franka/qingpowuwu/franka_real-tennis_bucket_upright/norm_stats.json
# Project_10_SimBench_VLA/1_Ours/Openpi/openpi/assets/pi0_franka/qingpowuwu/franka_real-three_tasks/norm_stats.json
4. 训练
提前下载 pre-trained 模型权重到本地缓存目录:
# BUG FIX: split the fused double-export (lost line break) into two statements.
export http_proxy=http://192.168.32.28:18000
export https_proxy=http://192.168.32.28:18000
# Download the Pi0 and Pi0-FAST pretrained weights into the local cache directory
gsutil -m cp -r gs://openpi-assets/checkpoints/pi0_base /mnt/pfs/world_foundational_model/qingpo.wuwu/.cache/openpi/openpi-assets/checkpoints/
gsutil -m cp -r gs://openpi-assets/checkpoints/pi0_fast_base /mnt/pfs/world_foundational_model/qingpo.wuwu/.cache/openpi/openpi-assets/checkpoints/
# Download the droid Pi0.5 pretrained weights into the local cache directory
gsutil -m cp -r gs://openpi-assets/checkpoints/pi05_droid /mnt/pfs/world_foundational_model/qingpo.wuwu/.cache/openpi/openpi-assets/checkpoints/
# (1) Pi0 full fine-tuning
export HF_LEROBOT_HOME=/mnt/pfs/world_foundational_model/qingpo.wuwu/Project_10_SimBench_VLA/0_Datasets/lerobot
export TMPDIR=/mnt/pfs/world_foundational_model/qingpo.wuwu/tmp
export TEMP=$TMPDIR
export TMP=$TMPDIR
export XDG_CACHE_HOME=/mnt/pfs/world_foundational_model/qingpo.wuwu/.cache
export HOME=/mnt/pfs/world_foundational_model/qingpo.wuwu
export LD_LIBRARY_PATH=$CONDA_PREFIX/lib:$LD_LIBRARY_PATH # prefer conda's libraries over the system ones
cd /mnt/pfs/world_foundational_model/qingpo.wuwu/Project_10_SimBench_VLA/1_Ours/Openpi/openpi
conda activate openpi_env
# Remember to change repo_id in training/config.py to the dataset you want to train on.
# Each command below is a separate training run — launch them one at a time,
# updating repo_id between runs.
uv run scripts/train.py pi0_franka --exp-name exp1-franka_real-pick_milk --overwrite --fsdp-devices 4 # use 4 GPUs
uv run scripts/train.py pi0_franka --exp-name exp2-franka_real-stack_cup --overwrite --fsdp-devices 4 # use 4 GPUs
uv run scripts/train.py pi0_franka --exp-name exp3-franka_real-tennis_bucket_upright --overwrite --fsdp-devices 4 # use 4 GPUs
uv run scripts/train.py pi0_franka --exp-name exp4-franka_real-three_tasks --overwrite --fsdp-devices 4 # use 4 GPUs
5. 推理
# Start the inference server (default port 8000); this process blocks until stopped.
uv run scripts/serve_policy.py policy:checkpoint \
--policy.config=pi0_franka \
--policy.dir=checkpoints/pi0_franka/exp1-franka_real-pick_milk/39999
# (1) Inference commands for the Pi0 fully fine-tuned checkpoints
export HF_LEROBOT_HOME=/mnt/pfs/world_foundational_model/qingpo.wuwu/Project_10_SimBench_VLA/0_Datasets/lerobot
export TMPDIR=/mnt/pfs/world_foundational_model/qingpo.wuwu/tmp
export TEMP=$TMPDIR
export TMP=$TMPDIR
export XDG_CACHE_HOME=/mnt/pfs/world_foundational_model/qingpo.wuwu/.cache
export HOME=/mnt/pfs/world_foundational_model/qingpo.wuwu
export LD_LIBRARY_PATH=$CONDA_PREFIX/lib:$LD_LIBRARY_PATH # prefer conda's libraries over the system ones
cd /mnt/pfs/world_foundational_model/qingpo.wuwu/Project_10_SimBench_VLA/1_Ours/Openpi/openpi
conda activate openpi_env
# Remember to change repo_id in training/config.py to the dataset you want to run inference on.
# NOTE: each serve_policy.py invocation starts a blocking server (default port
# 8000, see above) — the four commands below are alternatives; run ONE at a time.
uv run scripts/serve_policy.py policy:checkpoint \
--policy.config=pi0_franka \
--policy.dir=checkpoints/pi0_franka/exp1-franka_real-pick_milk/39999
uv run scripts/serve_policy.py policy:checkpoint \
--policy.config=pi0_franka \
--policy.dir=checkpoints/pi0_franka/exp2-franka_real-stack_cup/39999
uv run scripts/serve_policy.py policy:checkpoint \
--policy.config=pi0_franka \
--policy.dir=checkpoints/pi0_franka/exp3-franka_real-tennis_bucket_upright/39999
uv run scripts/serve_policy.py policy:checkpoint \
--policy.config=pi0_franka \
--policy.dir=checkpoints/pi0_franka/exp4-franka_real-three_tasks/39999
# Visualize predictions by replaying dataset episodes through a checkpoint
uv run scripts/visualize_predictions.py pi0_franka \
--checkpoint-dir checkpoints/pi0_franka/exp1-franka_real-pick_milk/39999 \
--output-dir replay_visualizations \
--num-episodes 5
export http_proxy="http://127.0.0.1:7890" https_proxy="http://127.0.0.1:7890"
# SECURITY FIX: the original hardcoded a live W&B API key here. Never commit
# real credentials — the leaked key must be rotated. Load secrets from a
# private, untracked env file instead, e.g.: source ~/.secrets/openpi.env
export WANDB_API_KEY="WANDB_API_KEY_REDACTED"
export HF_TOKEN="HF_TOKEN_REDACTED"
export HF_HOME="/mnt/pfs/world_foundational_model/qingpo.wuwu/hf_cache"
export TORCH_HOME="/mnt/pfs/world_foundational_model/qingpo.wuwu/cache_torch_hub"
# NOTE(review): CONDA_PREFIX is normally set by `conda activate`; overriding it
# by hand is unusual — confirm this path matches the active environment.
export CONDA_PREFIX="/mnt/pfs/world_foundational_model/qingpo.wuwu/conda"
export HF_LEROBOT_HOME=/mnt/pfs/world_foundational_model/qingpo.wuwu/Project_10_SimBench_VLA/0_Datasets/lerobot
export TMPDIR=/mnt/pfs/world_foundational_model/qingpo.wuwu/tmp
export TEMP=$TMPDIR
export TMP=$TMPDIR
export XDG_CACHE_HOME=/mnt/pfs/world_foundational_model/qingpo.wuwu/.cache
export HOME=/mnt/pfs/world_foundational_model/qingpo.wuwu
export LD_LIBRARY_PATH=$CONDA_PREFIX/lib:$LD_LIBRARY_PATH # prefer conda's libraries over the system ones
# First, check the actual camera names in the dataset
uv run python << 'EOF'
from lerobot.common.datasets.lerobot_dataset import LeRobotDataset

dataset = LeRobotDataset(
    repo_id="qingpowuwu/franka_real-pick_milk",
    root="/mnt/pfs/world_foundational_model/qingpo.wuwu/Project_10_SimBench_VLA/0_Datasets/lerobot/franka_real"
)

# Print the dataset features
# (FIX: restored the loop-body indentation lost in the copy-paste)
print("Dataset features:")
for key, value in dataset.features.items():
    print(f" {key}: {value}")

# Load one sample and inspect it
sample = dataset[0]
print("\nSample keys:", sample.keys())
# BUG FIX: `sample` is dict-like (sample.keys() above relies on that), so
# hasattr(sample, 'observation') was always False and this branch was dead;
# use a key lookup instead.
if 'observation' in sample:
    obs = sample['observation']
    print("Observation keys:", obs.keys() if hasattr(obs, 'keys') else dir(obs))
EOF
Inference Providers
NEW
This model isn't deployed by any Inference Provider.
🙋
Ask for provider support