Upload folder using huggingface_hub
Browse files
- README.md +18 -0
- online_bspline_tokenizer.py +12 -9
- processor_config.json +1 -0
README.md
CHANGED
|
@@ -79,6 +79,7 @@ print(f"Reconstructed shape: {reconstructed.shape}") # [10, 50, 7]
|
|
| 79 |
| vocab_size | Discrete vocabulary size (256 = 8-bit tokens) | 256 |
|
| 80 |
| degree | B-spline polynomial degree (3=cubic, provides smooth trajectories) | 3 |
|
| 81 |
| gripper_dof | Number of gripper DOFs, assumed to be at the end. Used for forced knot placement | 1 |
|
|
|
|
| 82 |
| do_pad | Whether to pad control points to a fixed length | True |
|
| 83 |
| device | Torch device ("cuda" or "cpu") | "cuda" |
|
| 84 |
|
|
@@ -90,6 +91,23 @@ The extra dimension is for time knots. For example, with default settings (50 ou
|
|
| 90 |
|
| 91 |
**Key Difference from BEAST**: BEST uses adaptive compression where `out_seq_len` can vary based on trajectory complexity, while BEAST uses fixed `num_basis` control points.
|
| 92 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 93 |
## API Reference
|
| 94 |
|
| 95 |
### Encoding Methods
|
|
|
|
| 79 |
| vocab_size | Discrete vocabulary size (256 = 8-bit tokens) | 256 |
|
| 80 |
| degree | B-spline polynomial degree (3=cubic, provides smooth trajectories) | 3 |
|
| 81 |
| gripper_dof | Number of gripper DOFs, assumed to be at the end. Used for forced knot placement | 1 |
|
| 82 |
+
| absolute_tol | Absolute tolerance for B-spline fitting error (e.g., 0.01 radians). Controls fitting accuracy and compression ratio. If set, overrides relative tolerance. | 0.01 |
|
| 83 |
| do_pad | Whether to pad control points to a fixed length | True |
|
| 84 |
| device | Torch device ("cuda" or "cpu") | "cuda" |
|
| 85 |
|
|
|
|
| 91 |
|
| 92 |
**Key Difference from BEAST**: BEST uses adaptive compression where `out_seq_len` can vary based on trajectory complexity, while BEAST uses fixed `num_basis` control points.
|
| 93 |
|
| 94 |
+
### Absolute Tolerance (absolute_tol)
|
| 95 |
+
|
| 96 |
+
The `absolute_tol` parameter controls the fitting accuracy of the B-spline approximation:
|
| 97 |
+
|
| 98 |
+
- **Definition**: Maximum allowed L∞ error between the B-spline reconstruction and the original trajectory
|
| 99 |
+
- **Default**: 0.01 radians (appropriate for LIBERO tasks)
|
| 100 |
+
- **Effect on Compression**:
|
| 101 |
+
- Lower values (e.g., 0.001): Tighter fitting, more control points needed, larger token count
|
| 102 |
+
- Higher values (e.g., 0.1): Looser fitting, fewer control points, smaller token count
|
| 103 |
+
- **Recommendation**:
|
| 104 |
+
- LIBERO manipulation: 0.01 (default)
|
| 105 |
+
- High-precision tasks: 0.001-0.005
|
| 106 |
+
- General arm motion: 0.01-0.05
|
| 107 |
+
- Speed-optimized: 0.05-0.1
|
| 108 |
+
|
| 109 |
+
**Priority**: When both `absolute_tol` and `tol_ratio` are applicable, `absolute_tol` takes precedence as the fitting-error threshold.
|
| 110 |
+
|
| 111 |
## API Reference
|
| 112 |
|
| 113 |
### Encoding Methods
|
online_bspline_tokenizer.py
CHANGED
|
@@ -216,13 +216,15 @@ class BestBSpline:
|
|
| 216 |
joint_dof: Number of joint DOFs
|
| 217 |
gripper_dof: Number of gripper DOFs (从后往前数)
|
| 218 |
check_step: Downsampling step for constraint checking acceleration
|
|
|
|
| 219 |
is_multi_process: Whether to use multiprocessing for batch compression (default: False)
|
| 220 |
"""
|
| 221 |
-
def __init__(self, degree: int = 3, joint_dof: int = 6, gripper_dof: int = 1, check_step=1, use_gurobi: bool = False, is_multi_process: bool = False):
|
| 222 |
self.degree = degree
|
| 223 |
self.joint_dof = joint_dof
|
| 224 |
self.gripper_dof = gripper_dof
|
| 225 |
self.check_step = check_step # 降采样步长,用于加速
|
|
|
|
| 226 |
self.use_gurobi = use_gurobi
|
| 227 |
self.is_multi_process = is_multi_process
|
| 228 |
|
|
@@ -240,17 +242,15 @@ class BestBSpline:
|
|
| 240 |
return forced_knot_times
|
| 241 |
|
| 242 |
def fit(self, trajectory: torch.Tensor, cpu_cores: int = min(mp.cpu_count(), 16),
|
| 243 |
-
tol_ratio: float = 0.03,
|
| 244 |
-
time_limit: int = 10, max_length: Optional[int] = None) -> Tuple[List[int], List[List[float]]]:
|
| 245 |
"""
|
| 246 |
-
|
| 247 |
-
|
| 248 |
|
| 249 |
Args:
|
| 250 |
trajectory: Input trajectory [seq_len, num_dof]
|
| 251 |
cpu_cores: Number of CPU cores for parallel MILP solving
|
| 252 |
tol_ratio: Tolerance ratio for relative fitting error (eps = d_range × tol_ratio)
|
| 253 |
-
absolute_tol: Absolute tolerance for fitting error. If set, overrides tol_ratio
|
| 254 |
time_limit: Time limit for MILP solver in seconds
|
| 255 |
max_length: Optional maximum length for the B-spline representation
|
| 256 |
Returns:
|
|
@@ -335,8 +335,8 @@ class BestBSpline:
|
|
| 335 |
# 拟合误差约束
|
| 336 |
for d in range(self.joint_dof):
|
| 337 |
# 使用绝对误差或相对误差
|
| 338 |
-
if absolute_tol is not None:
|
| 339 |
-
eps = float(absolute_tol)
|
| 340 |
else:
|
| 341 |
d_range = float(np.max(joint_traj[:, d]) - np.min(joint_traj[:, d]))
|
| 342 |
if d_range < 1e-6: d_range = 1.0
|
|
@@ -519,11 +519,13 @@ class BestTokenizer(torch.nn.Module, ProcessorMixin):
|
|
| 519 |
|
| 520 |
def __init__(self, num_dof: int = 7, in_seq_len: int = 10, out_seq_len: int = 5,
|
| 521 |
vocab_size: int = 256, degree: int = 3, gripper_dof: int = 1,
|
| 522 |
-
|
|
|
|
| 523 |
super().__init__()
|
| 524 |
self.in_seq_len = in_seq_len
|
| 525 |
self.out_seq_len = out_seq_len
|
| 526 |
self.vocab_size = vocab_size
|
|
|
|
| 527 |
# DOF distribution
|
| 528 |
self.gripper_dof = gripper_dof # 0 means 没有 gripper
|
| 529 |
self.joint_dof = num_dof - self.gripper_dof
|
|
@@ -535,6 +537,7 @@ class BestTokenizer(torch.nn.Module, ProcessorMixin):
|
|
| 535 |
degree=degree,
|
| 536 |
joint_dof=self.joint_dof,
|
| 537 |
gripper_dof=self.gripper_dof,
|
|
|
|
| 538 |
use_gurobi=use_gurobi,
|
| 539 |
is_multi_process=is_multi_process,
|
| 540 |
)
|
|
|
|
| 216 |
joint_dof: Number of joint DOFs
|
| 217 |
gripper_dof: Number of gripper DOFs (从后往前数)
|
| 218 |
check_step: Downsampling step for constraint checking acceleration
|
| 219 |
+
absolute_tol: Absolute tolerance for fitting error (default: 0.01)
|
| 220 |
is_multi_process: Whether to use multiprocessing for batch compression (default: False)
|
| 221 |
"""
|
| 222 |
+
def __init__(self, degree: int = 3, joint_dof: int = 6, gripper_dof: int = 1, check_step=1, absolute_tol: float = 0.01, use_gurobi: bool = False, is_multi_process: bool = False):
|
| 223 |
self.degree = degree
|
| 224 |
self.joint_dof = joint_dof
|
| 225 |
self.gripper_dof = gripper_dof
|
| 226 |
self.check_step = check_step # 降采样步长,用于加速
|
| 227 |
+
self.absolute_tol = absolute_tol # 绝对容差
|
| 228 |
self.use_gurobi = use_gurobi
|
| 229 |
self.is_multi_process = is_multi_process
|
| 230 |
|
|
|
|
| 242 |
return forced_knot_times
|
| 243 |
|
| 244 |
def fit(self, trajectory: torch.Tensor, cpu_cores: int = min(mp.cpu_count(), 16),
|
| 245 |
+
tol_ratio: float = 0.03, time_limit: int = 10, max_length: Optional[int] = None) -> Tuple[List[int], List[List[float]]]:
|
|
|
|
| 246 |
"""
|
| 247 |
+
Fit B-spline to trajectory using least squares with MILP optimization.
|
| 248 |
+
Uses self.absolute_tol for fitting error tolerance.
|
| 249 |
|
| 250 |
Args:
|
| 251 |
trajectory: Input trajectory [seq_len, num_dof]
|
| 252 |
cpu_cores: Number of CPU cores for parallel MILP solving
|
| 253 |
tol_ratio: Tolerance ratio for relative fitting error (eps = d_range × tol_ratio)
|
|
|
|
| 254 |
time_limit: Time limit for MILP solver in seconds
|
| 255 |
max_length: Optional maximum length for the B-spline representation
|
| 256 |
Returns:
|
|
|
|
| 335 |
# 拟合误差约束
|
| 336 |
for d in range(self.joint_dof):
|
| 337 |
# 使用绝对误差或相对误差
|
| 338 |
+
if self.absolute_tol is not None:
|
| 339 |
+
eps = float(self.absolute_tol)
|
| 340 |
else:
|
| 341 |
d_range = float(np.max(joint_traj[:, d]) - np.min(joint_traj[:, d]))
|
| 342 |
if d_range < 1e-6: d_range = 1.0
|
|
|
|
| 519 |
|
| 520 |
def __init__(self, num_dof: int = 7, in_seq_len: int = 10, out_seq_len: int = 5,
|
| 521 |
vocab_size: int = 256, degree: int = 3, gripper_dof: int = 1,
|
| 522 |
+
absolute_tol: float = 0.01, do_pad: bool = True, use_gurobi: bool = False,
|
| 523 |
+
is_multi_process: bool = False, device: str = "cuda"):
|
| 524 |
super().__init__()
|
| 525 |
self.in_seq_len = in_seq_len
|
| 526 |
self.out_seq_len = out_seq_len
|
| 527 |
self.vocab_size = vocab_size
|
| 528 |
+
self.absolute_tol = absolute_tol
|
| 529 |
# DOF distribution
|
| 530 |
self.gripper_dof = gripper_dof # 0 means 没有 gripper
|
| 531 |
self.joint_dof = num_dof - self.gripper_dof
|
|
|
|
| 537 |
degree=degree,
|
| 538 |
joint_dof=self.joint_dof,
|
| 539 |
gripper_dof=self.gripper_dof,
|
| 540 |
+
absolute_tol=absolute_tol,
|
| 541 |
use_gurobi=use_gurobi,
|
| 542 |
is_multi_process=is_multi_process,
|
| 543 |
)
|
processor_config.json
CHANGED
|
@@ -9,6 +9,7 @@
|
|
| 9 |
"vocab_size": 256,
|
| 10 |
"degree": 3,
|
| 11 |
"gripper_dof": 1,
|
|
|
|
| 12 |
"do_pad": true,
|
| 13 |
"use_gurobi": false,
|
| 14 |
"is_multi_process": false,
|
|
|
|
| 9 |
"vocab_size": 256,
|
| 10 |
"degree": 3,
|
| 11 |
"gripper_dof": 1,
|
| 12 |
+
"absolute_tol": 0.01,
|
| 13 |
"do_pad": true,
|
| 14 |
"use_gurobi": false,
|
| 15 |
"is_multi_process": false,
|