Upload folder using huggingface_hub
- .gitattributes +1 -0
- action_tokenizer.py +431 -0
- config.json +320 -0
- configuration_spatialvla.py +119 -0
- generation_config.json +8 -0
- global_step1500/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt +3 -0
- global_step1500/mp_rank_00_model_states.pt +3 -0
- latest +1 -0
- model-00001-of-00002.safetensors +3 -0
- model-00002-of-00002.safetensors +3 -0
- model.safetensors.index.json +0 -0
- modeling_gemma2.py +1283 -0
- modeling_spatialvla.py +599 -0
- preprocessor_config.json +28 -0
- processing_spatialvla.py +259 -0
- processor_config.json +327 -0
- rng_state.pth +3 -0
- special_tokens_map.json +39 -0
- tokenizer.json +3 -0
- tokenizer_config.json +0 -0
- trainer_state.json +0 -0
- training_args.bin +3 -0
- zero_to_fp32.py +674 -0
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
action_tokenizer.py
ADDED
@@ -0,0 +1,431 @@
"""
action_tokenizer.py

Extension class; wraps a base LLM/VLM tokenizer with logic to discretize and tokenize continuous robot actions.
"""
from typing import List, Union, Dict, Optional
import numpy as np
from transformers import PreTrainedTokenizerBase
from scipy.stats import norm
import torch

ACTION_TOKEN = '<ACTION{:05d}>'

class ActionTokenizer:
    def __init__(
        self,
        tokenizer: PreTrainedTokenizerBase,
        num_bins: int = 256,
        min_action: int = -1,
        max_action: int = 1,
    ):
        self._vocab_size = num_bins
        self.tokenizer = tokenizer
        self.min_action, self.max_action = min_action, max_action
        self.bin_centers = np.linspace(min_action, max_action, num_bins)

        # add special action tokens to language tokenizer
        token_list = [ACTION_TOKEN.format(i) for i in range(self._vocab_size)]
        self.token_array = np.array(token_list)

        num_new_tokens = self.tokenizer.add_tokens(token_list, special_tokens=True)
        print(f"Add {num_new_tokens} ACTION TOKENS, tokenizer vocab size {self.tokenizer.vocab_size} / {len(tokenizer)}")

        self.action_token_begin_idx = self.token_start_idx = self.tokenizer.convert_tokens_to_ids(self.token_array[0])
        self.token_end_idx = self.tokenizer.convert_tokens_to_ids(self.token_array[-1])

    def __call__(self, action: np.ndarray) -> List[str]:
        """Discretize continuous actions to tokens.
        action: np.ndarray, (n, 7), continuous actions in Cartesian or spherical coordinates.
        return: np.ndarray, (n, 7), tokens.
        """
        action = np.clip(action, a_min=float(self.min_action), a_max=float(self.max_action))
        ids = np.digitize(action, self.bin_centers, right=True)  # [0, 255]
        return self.token_array[ids]

    def decode_token_ids_to_actions(self, action_token_id: np.ndarray) -> np.ndarray:
        """Decode token ids to continuous actions.
        action_token_id: np.ndarray, (n, 7), token ids.
        return: np.ndarray, (n, 7), continuous actions
        """
        ids = action_token_id - self.action_token_begin_idx
        ids = np.clip(ids, a_min=0, a_max=self._vocab_size - 1)
        return self.bin_centers[ids]

    @property
    def vocab_size(self) -> int:
        return self._vocab_size

class TranslationTokenizer:
    def __init__(
        self,
        tokenizer: PreTrainedTokenizerBase,
        num_bins: Dict,
        bin_policy: Optional[Dict] = None,
        use_spherical: bool = True,
    ):
        self.tokenizer = tokenizer
        self.num_theta_bins = num_bins["theta_bins"]
        self.num_phi_bins = num_bins["phi_bins"]
        self.num_r_bins = num_bins["r_bins"]
        self.use_spherical = use_spherical

        # for indexing
        self.NP = self.num_phi_bins * self.num_r_bins

        # add special action tokens to language tokenizer
        self._vocab_size = self.num_theta_bins * self.num_phi_bins * self.num_r_bins
        token_list = [ACTION_TOKEN.format(i) for i in range(self._vocab_size)]
        self.token_array = np.array(token_list)

        num_new_tokens = self.tokenizer.add_tokens(token_list, special_tokens=True)
        print(f"Add {num_new_tokens} TRANSLATION TOKENS, tokenizer vocab size {self.tokenizer.vocab_size} / {len(tokenizer)}")

        self.token_start_idx = self.tokenizer.convert_tokens_to_ids(self.token_array[0])
        self.token_end_idx = self.tokenizer.convert_tokens_to_ids(self.token_array[-1])
        self.set_bins(bin_policy)

    def set_bins(self, bin_policy):
        self.theta_bins = np.array(bin_policy["theta_bins"])
        self.phi_bins = np.array(bin_policy["phi_bins"])
        self.r_bins = np.array(bin_policy["r_bins"])

    def cartesian_to_spherical(self, x, y, z):
        theta = np.arctan2(np.sqrt(x**2 + y**2), z)  # polar angle
        phi = np.arctan2(y, x)  # azimuthal angle
        r = np.sqrt(x**2 + y**2 + z**2)
        return theta, phi, r

    def spherical_to_cartesian(self, theta, phi, r):
        x = r * np.sin(theta) * np.cos(phi)
        y = r * np.sin(theta) * np.sin(phi)
        z = r * np.cos(theta)
        return x, y, z

    def __call__(self, action: np.ndarray) -> List[str]:
        """Discretize continuous actions to tokens.
        action: np.ndarray, (n, 3), continuous actions in Cartesian or spherical coordinates.
        return: np.ndarray, (n,), tokens.
        """
        if self.use_spherical:
            theta, phi, r = self.cartesian_to_spherical(action[:, 0], action[:, 1], action[:, 2])
        else:
            theta, phi, r = action[:, 0], action[:, 1], action[:, 2]

        disc_theta = np.digitize(theta, self.theta_bins[1:-1])  # (n,)
        disc_phi = np.digitize(phi, self.phi_bins[1:-1])
        disc_r = np.digitize(r, self.r_bins[1:-1])
        ids = disc_theta * self.NP + disc_phi * self.num_r_bins + disc_r
        return self.token_array[ids]

    def decode_token_ids_to_actions(self, action_token_id: np.ndarray) -> np.ndarray:
        """Decode token ids to continuous actions.
        action_token_id: np.ndarray, (n,), token ids.
        return: np.ndarray, (n, 3), continuous actions
        """
        action_token_id = np.clip(action_token_id, self.token_start_idx, self.token_end_idx)
        ids = action_token_id - self.token_start_idx
        disc_theta, disc_phi, disc_r = ids // self.NP, (ids % self.NP) // self.num_r_bins, ids % self.num_r_bins

        theta = 0.5 * (self.theta_bins[disc_theta] + self.theta_bins[disc_theta + 1])
        phi = 0.5 * (self.phi_bins[disc_phi] + self.phi_bins[disc_phi + 1])
        r = 0.5 * (self.r_bins[disc_r] + self.r_bins[disc_r + 1])

        # clip action to [-1, 1], since the spherical action space is the circumscribed sphere of the Cartesian action space.
        x, y, z = self.spherical_to_cartesian(theta, phi, r) if self.use_spherical else (theta, phi, r)
        x, y, z = np.clip([x, y, z], -1, 1)
        return np.stack((x, y, z), axis=1)

    @property
    def vocab_size(self) -> int:
        return self._vocab_size

class RotationTokenizer:
    def __init__(
        self,
        tokenizer: PreTrainedTokenizerBase,
        num_bins: Dict,
        bin_policy: Optional[Dict] = None,
        array_begin_idx=None,
    ):
        self.tokenizer = tokenizer
        self.num_roll_bins = num_bins["roll_bins"]  # M
        self.num_pitch_bins = num_bins["pitch_bins"]  # N
        self.num_yaw_bins = num_bins["yaw_bins"]  # P
        self.array_begin_idx = array_begin_idx

        # for indexing
        self.NP = self.num_pitch_bins * self.num_yaw_bins

        # add special action tokens to language tokenizer
        self._vocab_size = self.num_roll_bins * self.num_pitch_bins * self.num_yaw_bins
        token_list = [ACTION_TOKEN.format(i + self.array_begin_idx) for i in range(self._vocab_size)]
        self.token_array = np.array(token_list)

        num_new_tokens = self.tokenizer.add_tokens(token_list, special_tokens=True)
        print(f"Add {num_new_tokens} ROTATION TOKENS to tokenizer, tokenizer vocab size {self.tokenizer.vocab_size} / {len(tokenizer)}")

        self.token_start_idx = self.tokenizer.convert_tokens_to_ids(self.token_array[0])
        self.token_end_idx = self.tokenizer.convert_tokens_to_ids(self.token_array[-1])
        self.set_bins(bin_policy)

    def set_bins(self, bin_policy):
        self.roll_bins = np.array(bin_policy["roll_bins"])
        self.pitch_bins = np.array(bin_policy["pitch_bins"])
        self.yaw_bins = np.array(bin_policy["yaw_bins"])

    def __call__(self, action: np.ndarray) -> List[str]:
        """Discretize continuous actions to tokens.
        action: np.ndarray, (n, 3), continuous roll/pitch/yaw actions.
        return: np.ndarray, (n,), tokens.
        """
        roll, pitch, yaw = action[:, 0], action[:, 1], action[:, 2]
        disc_roll = np.clip(np.digitize(roll, self.roll_bins) - 1, 0, self.num_roll_bins - 1)
        disc_pitch = np.clip(np.digitize(pitch, self.pitch_bins) - 1, 0, self.num_pitch_bins - 1)
        disc_yaw = np.clip(np.digitize(yaw, self.yaw_bins) - 1, 0, self.num_yaw_bins - 1)

        ids = disc_roll * self.NP + disc_pitch * self.num_yaw_bins + disc_yaw
        return self.token_array[ids]

    def decode_token_ids_to_actions(self, action_token_id: Union[np.int64, np.ndarray]) -> np.ndarray:
        """Decode token ids to continuous actions.
        action_token_id: np.ndarray, (n,), token ids.
        return: np.ndarray, (n, 3), continuous actions
        """
        action_token_id = np.clip(action_token_id, a_min=self.token_start_idx, a_max=self.token_end_idx)
        ids = action_token_id - self.token_start_idx
        disc_roll, disc_pitch, disc_yaw = ids // self.NP, (ids % self.NP) // self.num_yaw_bins, ids % self.num_yaw_bins

        roll = 0.5 * (self.roll_bins[disc_roll] + self.roll_bins[disc_roll + 1])
        pitch = 0.5 * (self.pitch_bins[disc_pitch] + self.pitch_bins[disc_pitch + 1])
        yaw = 0.5 * (self.yaw_bins[disc_yaw] + self.yaw_bins[disc_yaw + 1])
        return np.stack((roll, pitch, yaw), axis=1)

    @property
    def vocab_size(self) -> int:
        return self._vocab_size

class GripperTokenzier:
    def __init__(
        self,
        tokenizer: PreTrainedTokenizerBase,
        num_bins: int = 2,
        array_begin_idx=None,
    ) -> None:
        self.tokenizer = tokenizer
        self.num_bins = num_bins
        self.array_begin_idx = array_begin_idx
        token_list = [ACTION_TOKEN.format(i + self.array_begin_idx) for i in range(self.num_bins)]
        self.token_array = np.array(token_list)

        num_new_tokens = self.tokenizer.add_tokens(token_list, special_tokens=True)
        print(f"Add {num_new_tokens} GRIPPER TOKENS to tokenizer, tokenizer vocab size {self.tokenizer.vocab_size} / {len(tokenizer)}")

        self.token_start_idx = self.tokenizer.convert_tokens_to_ids(self.token_array[0])
        self.token_end_idx = self.tokenizer.convert_tokens_to_ids(self.token_array[-1])

    def __call__(self, action: np.ndarray) -> List[str]:
        """Discretize continuous actions to tokens.
        action: np.ndarray, (n,), continuous gripper actions in [0, 1].
        return: np.ndarray, (n,), tokens.
        """
        ids = np.where(action >= 0.5, 1, 0)
        return self.token_array[ids]

    def decode_token_ids_to_actions(self, action_token_id: np.ndarray) -> np.ndarray:
        """Decode token ids to continuous actions.
        action_token_id: np.ndarray, (n,), token ids.
        return: np.ndarray, (n, 1), continuous actions
        """
        action_token_id = np.clip(action_token_id, self.token_start_idx, self.token_end_idx)
        ids = action_token_id - self.token_start_idx
        actions = np.where(ids == 0, 0., 1.)
        return actions[:, None]

    @property
    def vocab_size(self) -> int:
        return self.num_bins

class SpatialActionTokenizer:
    range_bins = {
        "translation": {
            "theta_bins": (0.0, np.pi),
            "phi_bins": (-np.pi, np.pi),
            "r_bins": (0.0, np.sqrt(3)),
        },
        "rotation": {
            "roll_bins": (-1.0, 1.0),
            "pitch_bins": (-1.0, 1.0),
            "yaw_bins": (-1.0, 1.0),
        },
    }

    def __init__(
        self,
        tokenizer: PreTrainedTokenizerBase,
        num_bins: Dict,
        gs_params: Dict = None,
        bin_policy: Dict = None,
        use_spherical: bool = True,
        min_sigma: float = 0.0,
        min_action: float = -1.0,
        max_action: float = 1.0,
    ):
        """Use bin_policy if given; otherwise calculate it from gs_params or fall back to uniform bin grids.
        gs_params: Optional[Dict],
        bin_policy: Optional[Dict],
        """
        self.tokenizer = tokenizer
        self.min_action, self.max_action = min_action, max_action
        self.num_bins = num_bins
        self.min_sigma = min_sigma

        # set bin policy
        self.bin_policy = bin_policy if bin_policy else self.get_bin_policy(gs_params, self.min_sigma)
        self.translation_tokenizer = TranslationTokenizer(
            self.tokenizer,
            self.num_bins["translation"],
            self.bin_policy["translation"],
            use_spherical=use_spherical
        )

        self.rotation_tokenizer = RotationTokenizer(
            self.tokenizer,
            self.num_bins["rotation"],
            self.bin_policy["rotation"],
            array_begin_idx=self.translation_tokenizer.vocab_size,
        )

        self.gripper_tokenizer = GripperTokenzier(
            self.tokenizer,
            self.num_bins["gripper"],
            array_begin_idx=self.translation_tokenizer.vocab_size + self.rotation_tokenizer.vocab_size
        )
        self._vocab_size = self.translation_tokenizer.vocab_size + self.rotation_tokenizer.vocab_size + self.gripper_tokenizer.vocab_size

    def __call__(self, action: np.ndarray) -> List[str]:
        """Discretize continuous actions to tokens.
        action: np.ndarray, (n, 7), continuous actions in Cartesian coordinates.
        return: np.ndarray, (n, 3), tokens.
        """
        if len(action.shape) == 1:
            assert action.shape[0] == 7, f"action dim mismatch, got action shape: {action.shape}"
            action = action.reshape(1, 7)
        assert action.shape[1] == 7, f"action dim mismatch, got action shape: {action.shape}"

        action = np.clip(action, a_min=self.min_action, a_max=self.max_action)
        trans_tokens = self.translation_tokenizer(action[:, :3])  # (n,)
        rot_tokens = self.rotation_tokenizer(action[:, 3:6])  # (n,)
        grip_tokens = self.gripper_tokenizer(action[:, 6])  # (n,)
        return np.stack((trans_tokens, rot_tokens, grip_tokens), axis=1)  # (n, 3)

    def decode_token_ids_to_actions(self, action_token_ids: np.ndarray) -> np.ndarray:
        """Decode token ids to continuous actions.
        action_token_ids: np.ndarray, (n, 3), token ids.
        """
        if len(action_token_ids.shape) == 1:
            assert action_token_ids.shape[0] == 3, f"action token id numbers mismatch, need 3 got {action_token_ids.shape[0]}"
            action_token_ids = action_token_ids.reshape(1, 3)
        assert action_token_ids.shape[1] == 3, f"token id numbers mismatch, need 3 got {action_token_ids.shape[1]}"

        trans_action = self.translation_tokenizer.decode_token_ids_to_actions(action_token_ids[:, 0])  # (n, 3)
        rot_action = self.rotation_tokenizer.decode_token_ids_to_actions(action_token_ids[:, 1])  # (n, 3)
        grip_action = self.gripper_tokenizer.decode_token_ids_to_actions(action_token_ids[:, 2])  # (n, 1)
        return np.concatenate((trans_action, rot_action, grip_action), axis=1)  # (n, 7)

    @property
    def vocab_size(self) -> int:
        return self._vocab_size

    @property
    def action_token_begin_idx(self) -> int:
        return self.translation_tokenizer.token_start_idx

    def get_bin_policy(self, gs_params=None, min_sigma=0.0):
        bin_policy = {
            "translation": {"theta_bins": None, "phi_bins": None, "r_bins": None},
            "rotation": {"roll_bins": None, "pitch_bins": None, "yaw_bins": None}
        }
        if gs_params is None:
            for bin_type in self.range_bins.keys():
                for bin_key in self.range_bins[bin_type].keys():
                    bin_policy[bin_type][bin_key] = np.linspace(*self.range_bins[bin_type][bin_key], self.num_bins[bin_type][bin_key] + 1)
            print(f"use uniform bin grids ... \n{bin_policy}")
        else:
            for bin_type in self.range_bins.keys():
                for bin_key in self.range_bins[bin_type].keys():
                    mu = gs_params[bin_key.split("_")[0].lower()]["mu"]
                    sigma = max(gs_params[bin_key.split("_")[0].lower()]["sigma"], min_sigma)
                    bin_bound_prob = np.linspace(
                        norm.cdf(self.range_bins[bin_type][bin_key][0], loc=mu, scale=sigma),
                        norm.cdf(self.range_bins[bin_type][bin_key][1], loc=mu, scale=sigma),
                        self.num_bins[bin_type][bin_key] + 1,
                    )
                    bin_boundary = norm.ppf(bin_bound_prob, loc=mu, scale=sigma)
                    bin_policy[bin_type][bin_key] = np.clip(
                        bin_boundary,
                        self.range_bins[bin_type][bin_key][0],
                        self.range_bins[bin_type][bin_key][1],
                    ).tolist()  # for serialization
            print(f"calculate bin grids from gaussians \n{bin_policy}")
        return bin_policy

    def get_norm_meshgrid(self, bin_policy):
        grids = []
        policy = {k1: {k2: np.array(v2) for k2, v2 in v1.items()} for k1, v1 in bin_policy.items()}
        # NOTE: use the unified key order of range_bins (theta/phi/r, roll/pitch/yaw)
        for bin_type in self.range_bins.keys():
            bounds = []
            for bin_key in self.range_bins[bin_type].keys():
                minb, maxb = self.range_bins[bin_type][bin_key][0], self.range_bins[bin_type][bin_key][1]
                bin_boundary = policy[bin_type][bin_key]
                bin_center = (bin_boundary[:-1] + bin_boundary[1:]) / 2
                bin_center = np.concatenate([np.array([minb]), bin_center, np.array([maxb])])  # padding
                bin_center = (bin_center - minb) / (maxb - minb)  # normalize (m, n, k)
                bounds.append(bin_center)
            # generate grids
            grid_x, grid_y, grid_z = np.meshgrid(*bounds)
            grids += [np.stack([grid_x, grid_y, grid_z], -1).reshape(-1, 3)]
        return grids[0], grids[1]  # (N, 3)

    def spatial_embedding_adaption(self, gs_params, embeddings: torch.nn.Embedding, min_sigma=0.0, adpt_feature=False):
        """
        gs_params0, gs_params1: Dict
        embeddings: tensor (S,E)
        """
        from scipy.interpolate import griddata
        new_policy = self.get_bin_policy(gs_params, min_sigma=min_sigma)
        trans_grids0, rot_grids0 = self.get_norm_meshgrid(self.bin_policy)
        trans_grids1, rot_grids1 = self.get_norm_meshgrid(new_policy)

        print("overwrite bin policy and tokenizer bins ...")
        self.bin_policy = new_policy
        self.min_sigma = min_sigma
        self.translation_tokenizer.set_bins(new_policy["translation"])
        self.rotation_tokenizer.set_bins(new_policy["rotation"])

        if adpt_feature:
            emb_data = embeddings.weight.data  # (S, e)
            _, E = emb_data.shape

            # translation
            m, n, k = (self.num_bins["translation"][k] for k in ["theta_bins", "phi_bins", "r_bins"])
            N = m * n * k
            trans_emb_data = emb_data[:N,].reshape(m, n, k, -1).permute(3, 0, 1, 2)  # (e, m, n, k)
            pad_emb = torch.nn.functional.pad(trans_emb_data, (1, 1, 1, 1, 1, 1), "replicate").permute(1, 2, 3, 0).reshape(-1, E)
            adpt_trans_emb = griddata(trans_grids0, pad_emb.float(), trans_grids1, method='linear')
            adpt_trans_emb = adpt_trans_emb.reshape(m + 2, n + 2, k + 2, E)[1:-1, 1:-1, 1:-1,]

            # rotation
            m1, n1, k1 = (self.num_bins["rotation"][k] for k in ["roll_bins", "pitch_bins", "yaw_bins"])
            M = m1 * n1 * k1
            rot_emb_data = emb_data[N : N + M,].reshape(m1, n1, k1, -1).permute(3, 0, 1, 2)  # (e, m, n, k)
            pad_emb = torch.nn.functional.pad(rot_emb_data, (1, 1, 1, 1, 1, 1), "replicate").permute(1, 2, 3, 0).reshape(-1, E)
            adpt_rot_emb = griddata(rot_grids0, pad_emb.float(), rot_grids1, method='linear')
            adpt_rot_emb = adpt_rot_emb.reshape(m1 + 2, n1 + 2, k1 + 2, E)[1:-1, 1:-1, 1:-1,]

            # set data
            device, dtype = embeddings.weight.data.device, embeddings.weight.data.dtype
            embeddings.weight.data[:N] = torch.Tensor(adpt_trans_emb.reshape(-1, E), device=device).to(dtype)
            embeddings.weight.data[N:N + M] = torch.Tensor(adpt_rot_emb.reshape(-1, E), device=device).to(dtype)
            print("DONE! adapting the spatial embedding to the new gaussian distribution finished.")
            print(embeddings.weight.data)
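For reference, the tokenizer above can be exercised on its own. The sketch below is a hypothetical usage example, not part of the release: it assumes action_tokenizer.py is importable, uses bert-base-uncased purely as a stand-in for the model's actual Gemma2 tokenizer, and picks illustrative bin counts (16x16x16 + 16x16x16 + 2 happens to sum to the config's spatial_token_num of 8194, but the real per-axis split ships in processor_config.json).

# Hypothetical round-trip demo for SpatialActionTokenizer; bin counts and
# tokenizer choice are illustrative assumptions, not the released settings.
import numpy as np
from transformers import AutoTokenizer
from action_tokenizer import SpatialActionTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")  # stand-in tokenizer
num_bins = {
    "translation": {"theta_bins": 16, "phi_bins": 16, "r_bins": 16},
    "rotation": {"roll_bins": 16, "pitch_bins": 16, "yaw_bins": 16},
    "gripper": 2,
}
act_tok = SpatialActionTokenizer(tokenizer, num_bins=num_bins)  # no gs_params -> uniform grids

# one 7-DoF action: xyz translation, roll/pitch/yaw, gripper open/close
action = np.array([[0.10, -0.05, 0.20, 0.00, 0.15, -0.10, 1.0]])
tokens = act_tok(action)                                   # (1, 3) array of '<ACTIONxxxxx>' strings
ids = np.vectorize(tokenizer.convert_tokens_to_ids)(tokens)
recon = act_tok.decode_token_ids_to_actions(ids)           # (1, 7) quantized reconstruction
print(tokens, recon)

Because decoding returns bin centers, recon only approximates the input action to the chosen bin resolution.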
config.json
ADDED
@@ -0,0 +1,320 @@
{
  "_name_or_path": "/mnt/data1/datasets/vla/vla-finetuning/spatialvla-4b-224-sft-bridge",
  "_vocab_size": 265347,
  "action_token_begin_idx": 257153,
  "architectures": ["SpatialVLAForConditionalGeneration"],
  "auto_map": {
    "AutoConfig": "configuration_spatialvla.SpatialVLAConfig",
    "AutoModel": "modeling_spatialvla.SpatialVLAForConditionalGeneration"
  },
  "bos_token_id": 2,
  "ego3d_patch_reso": 2,
  "eos_token_id": 1,
  "hidden_size": 2048,
  "image_token_index": 257152,
  "model_type": "spatialvla",
  "n_freqs": 8,
  "num_hidden_layers": 26,
  "pad_token_id": 0,
  "projection_dim": 2304,
  "spatial_token_num": 8194,
  "text_config": {
    "_attn_implementation_autoset": true,
    "architectures": ["Gemma2ForCausalLM"],
    "eos_token_id": [1, 107],
    "hidden_act": "gelu_pytorch_tanh",
    "hidden_size": 2304,
    "intermediate_size": 9216,
    "model_type": "gemma2",
    "num_hidden_layers": 26,
    "num_image_tokens": 256,
    "num_key_value_heads": 4,
    "tie_word_embeddings": false,
    "torch_dtype": "bfloat16",
    "vocab_size": 265347
  },
  "torch_dtype": "bfloat16",
  "transformers_version": "4.47.0",
  "use_spatial_token": true,
  "use_vision_zoe": true,
  "vision_config": {
    "hidden_size": 1152,
    "intermediate_size": 4304,
    "model_type": "siglip_vision_model",
    "num_attention_heads": 16,
    "num_hidden_layers": 27,
    "num_image_tokens": 256,
    "num_positions": 256,
    "patch_size": 14,
    "projection_dim": 2304,
    "torch_dtype": "bfloat16",
    "vision_use_head": false
  },
  "vision_zoe_config": {
    "_attn_implementation_autoset": true,
    "_name_or_path": "Intel/zoedepth-nyu-kitti",
    "add_cross_attention": false,
    "add_projection": false,
    "architectures": ["ZoeDepthForDepthEstimation"],
    "attractor_alpha": 1000,
    "attractor_gamma": 2,
    "attractor_kind": "mean",
    "backbone": null,
    "backbone_config": {
      "_attn_implementation_autoset": false,
      "_name_or_path": "",
      "add_cross_attention": false,
      "add_fpn": false,
      "architectures": null,
      "attention_probs_dropout_prob": 0.0,
      "auxiliary_channels": 256,
      "auxiliary_concat_input": false,
      "auxiliary_loss_weight": 0.4,
      "auxiliary_num_convs": 1,
      "bad_words_ids": null,
      "begin_suppress_tokens": null,
      "bos_token_id": null,
      "chunk_size_feed_forward": 0,
      "cross_attention_hidden_size": null,
      "decoder_start_token_id": null,
      "diversity_penalty": 0.0,
      "do_sample": false,
      "drop_path_rate": 0.1,
      "early_stopping": false,
      "encoder_no_repeat_ngram_size": 0,
      "eos_token_id": null,
      "exponential_decay_length_penalty": null,
      "finetuning_task": null,
      "forced_bos_token_id": null,
      "forced_eos_token_id": null,
      "hidden_act": "gelu",
      "hidden_dropout_prob": 0.0,
      "hidden_size": 1024,
      "id2label": {"0": "LABEL_0", "1": "LABEL_1"},
      "image_size": 384,
      "initializer_range": 0.02,
      "intermediate_size": 4096,
      "is_decoder": false,
      "is_encoder_decoder": false,
      "label2id": {"LABEL_0": 0, "LABEL_1": 1},
      "layer_norm_eps": 1e-12,
      "layer_scale_init_value": 0.1,
      "length_penalty": 1.0,
      "max_length": 20,
      "min_length": 0,
      "model_type": "beit",
      "no_repeat_ngram_size": 0,
      "num_attention_heads": 16,
      "num_beam_groups": 1,
      "num_beams": 1,
      "num_channels": 3,
      "num_hidden_layers": 24,
      "num_return_sequences": 1,
      "out_features": ["stage6", "stage12", "stage18", "stage24"],
      "out_indices": [6, 12, 18, 24],
      "output_attentions": false,
      "output_hidden_states": false,
      "output_scores": false,
      "pad_token_id": null,
      "patch_size": 16,
      "pool_scales": [1, 2, 3, 6],
      "prefix": null,
      "problem_type": null,
      "pruned_heads": {},
      "remove_invalid_values": false,
      "repetition_penalty": 1.0,
      "reshape_hidden_states": false,
      "return_dict": true,
      "return_dict_in_generate": false,
      "semantic_loss_ignore_index": 255,
      "sep_token_id": null,
      "stage_names": ["stem", "stage1", "stage2", "stage3", "stage4", "stage5", "stage6", "stage7", "stage8", "stage9", "stage10", "stage11", "stage12", "stage13", "stage14", "stage15", "stage16", "stage17", "stage18", "stage19", "stage20", "stage21", "stage22", "stage23", "stage24"],
      "suppress_tokens": null,
      "task_specific_params": null,
      "temperature": 1.0,
      "tf_legacy_loss": false,
      "tie_encoder_decoder": false,
      "tie_word_embeddings": true,
      "tokenizer_class": null,
      "top_k": 50,
      "top_p": 1.0,
      "torch_dtype": null,
      "torchscript": false,
      "typical_p": 1.0,
      "use_absolute_position_embeddings": false,
      "use_auxiliary_head": true,
      "use_bfloat16": false,
      "use_mask_token": false,
      "use_mean_pooling": true,
      "use_relative_position_bias": true,
      "use_shared_relative_position_bias": false,
      "vocab_size": 8192
    },
    "backbone_hidden_size": 1024,
    "bad_words_ids": null,
    "batch_norm_eps": 1e-05,
    "begin_suppress_tokens": null,
    "bin_centers_type": "softplus",
    "bin_configurations": [
      {"max_depth": 10.0, "min_depth": 0.001, "n_bins": 64, "name": "nyu"},
      {"max_depth": 80.0, "min_depth": 0.001, "n_bins": 64, "name": "kitti"}
    ],
    "bin_embedding_dim": 128,
    "bos_token_id": null,
    "bottleneck_features": 256,
    "chunk_size_feed_forward": 0,
    "cross_attention_hidden_size": null,
    "decoder_start_token_id": null,
    "diversity_penalty": 0.0,
    "do_sample": false,
    "early_stopping": false,
    "encoder_no_repeat_ngram_size": 0,
    "eos_token_id": null,
    "exponential_decay_length_penalty": null,
    "finetuning_task": null,
    "forced_bos_token_id": null,
    "forced_eos_token_id": null,
    "fusion_hidden_size": 256,
    "head_in_index": -1,
    "hidden_act": "gelu",
    "id2label": {"0": "LABEL_0", "1": "LABEL_1"},
    "initializer_range": 0.02,
    "is_decoder": false,
    "is_encoder_decoder": false,
    "label2id": {"LABEL_0": 0, "LABEL_1": 1},
    "length_penalty": 1.0,
    "max_length": 20,
    "max_temp": 50.0,
    "min_length": 0,
    "min_temp": 0.0212,
    "model_type": "zoedepth",
    "neck_hidden_sizes": [256, 512, 1024, 1024],
    "no_repeat_ngram_size": 0,
    "num_attractors": [16, 8, 4, 1],
    "num_beam_groups": 1,
    "num_beams": 1,
    "num_patch_transformer_layers": 4,
    "num_relative_features": 32,
    "num_return_sequences": 1,
    "output_attentions": false,
    "output_hidden_states": false,
    "output_scores": false,
    "pad_token_id": null,
    "patch_transformer_hidden_size": 128,
    "patch_transformer_intermediate_size": 1024,
    "patch_transformer_num_attention_heads": 4,
    "prefix": null,
    "problem_type": null,
    "pruned_heads": {},
    "readout_type": "project",
    "reassemble_factors": [4, 2, 1, 0.5],
    "remove_invalid_values": false,
    "repetition_penalty": 1.0,
    "return_dict": true,
    "return_dict_in_generate": false,
    "sep_token_id": null,
    "suppress_tokens": null,
    "task_specific_params": null,
    "temperature": 1.0,
    "tf_legacy_loss": false,
    "tie_encoder_decoder": false,
    "tie_word_embeddings": true,
    "tokenizer_class": null,
    "top_k": 50,
    "top_p": 1.0,
    "torch_dtype": "bfloat16",
    "torchscript": false,
    "typical_p": 1.0,
    "use_batch_norm_in_fusion_residual": false,
    "use_bfloat16": false,
    "use_bias_in_fusion_residual": null,
    "use_pretrained_backbone": false
  }
}
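The auto_map entries above route AutoConfig/AutoModel to the custom classes shipped in this repository, so the checkpoint is intended to be loaded with trust_remote_code=True. A minimal sketch (the path is a placeholder for this repo's local or Hub location):

# Sketch only: load the SpatialVLA checkpoint through the auto_map above.
import torch
from transformers import AutoConfig, AutoModel

repo = "path/to/spatialvla-4b-224-sft-bridge"  # placeholder path
config = AutoConfig.from_pretrained(repo, trust_remote_code=True)
model = AutoModel.from_pretrained(
    repo,
    config=config,
    torch_dtype=torch.bfloat16,   # matches "torch_dtype": "bfloat16" above
    trust_remote_code=True,
)
print(type(model).__name__)       # SpatialVLAForConditionalGeneration per the auto_map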
configuration_spatialvla.py
ADDED
@@ -0,0 +1,119 @@
# coding=utf-8
# Copyright 2024 Microsoft Research & University of Wisconsin-Madison and the HuggingFace Inc. team. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings

from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging
from transformers import CONFIG_MAPPING, AutoConfig

logger = logging.get_logger(__name__)

class SpatialVLAConfig(PretrainedConfig):
    model_type = "spatialvla"
    sub_configs = {"text_config": AutoConfig, "vision_config": AutoConfig, "vision_zoe_config": AutoConfig}

    def __init__(
        self,
        vision_config=None,
        text_config=None,
        ignore_index=-100,
        image_token_index=256000,
        vocab_size=257152,
        projection_dim=2048,
        hidden_size=2048,
        vision_zoe_config=None,
        action_token_begin_idx=None,
        spatial_token_num=259,
        use_spatial_token=False,
        ego3d_patch_reso=4,
        n_freqs=8,
        use_vision_zoe=True,
        **kwargs,
    ):
        self._ignore_index = ignore_index
        self.image_token_index = image_token_index
        self._vocab_size = vocab_size
        self.projection_dim = projection_dim
        self.hidden_size = hidden_size
        self.vision_config = vision_config
        self.is_encoder_decoder = False

        if isinstance(self.vision_config, dict):
            vision_config["model_type"] = (
                vision_config["model_type"] if "model_type" in vision_config else "siglip_vision_model"
            )
            self.vision_config = CONFIG_MAPPING[vision_config["model_type"]](**vision_config)
        elif vision_config is None:
            self.vision_config = CONFIG_MAPPING["siglip_vision_model"](
                intermediate_size=4096,
                hidden_size=1152,
                patch_size=14,
                image_size=224,
                num_hidden_layers=27,
                num_attention_heads=16,
                vocab_size=257152,
                vision_use_head=False,
            )

        self.text_config = text_config
        if isinstance(self.text_config, dict):
            text_config["model_type"] = text_config["model_type"] if "model_type" in text_config else "gemma2"
            self.text_config = CONFIG_MAPPING[text_config["model_type"]](**text_config)
        elif text_config is None:
            self.text_config = CONFIG_MAPPING["gemma2"](
                hidden_size=2048,
                num_hidden_layers=18,
                intermediate_size=16384,
                num_attention_heads=8,
                num_key_value_heads=1,
                is_encoder_decoder=False,
                vocab_size=vocab_size,
            )
        self.text_config.num_image_tokens = (self.vision_config.image_size // self.vision_config.patch_size) ** 2
        self.vision_config.projection_dim = projection_dim

        # vision zoe config
        self.vision_zoe_config = vision_zoe_config
        if isinstance(self.vision_zoe_config, dict):
            vision_zoe_config["model_type"] = vision_zoe_config["model_type"] if "model_type" in vision_zoe_config else "zoedepth"
            self.vision_zoe_config = CONFIG_MAPPING[vision_zoe_config["model_type"]](**vision_zoe_config)
        else:
            pass

        # additional attributes
        self.action_token_begin_idx = action_token_begin_idx
        self.spatial_token_num = spatial_token_num
        self.use_spatial_token = use_spatial_token
        self.ego3d_patch_reso = ego3d_patch_reso
        self.n_freqs = n_freqs
        self.use_vision_zoe = use_vision_zoe

        super().__init__(**kwargs)

    @property
    def ignore_index(self):
        warnings.warn(
            "The `ignore_index` attribute is deprecated and will be removed in v4.47.",
            FutureWarning,
        )
        return self._ignore_index

    @ignore_index.setter
    def ignore_index(self, value):
        self._ignore_index = value

    def to_dict(self):
        output = super().to_dict()
        output.pop("_ignore_index", None)
        return output
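As a quick illustration of the defaulting logic above (a sketch, assuming the file is importable on its own):

# Instantiating SpatialVLAConfig with no arguments builds the default SigLIP
# vision sub-config (224 px, patch 14) and Gemma2 text sub-config, then derives
# num_image_tokens = (image_size // patch_size) ** 2.
from configuration_spatialvla import SpatialVLAConfig

cfg = SpatialVLAConfig()
print(cfg.vision_config.model_type)       # "siglip_vision_model"
print(cfg.text_config.model_type)         # "gemma2"
print(cfg.text_config.num_image_tokens)   # (224 // 14) ** 2 == 256
print(cfg.vision_zoe_config)              # None unless a zoedepth config dict is passed in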
generation_config.json
ADDED
@@ -0,0 +1,8 @@
{
  "_from_model_config": true,
  "bos_token_id": 2,
  "cache_implementation": "hybrid",
  "eos_token_id": 1,
  "pad_token_id": 0,
  "transformers_version": "4.47.0"
}
global_step1500/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7cc77a7b4e090619ccaf9e644968c00f7cf21aa7518a686a04e4c591e36b9828
size 13497318524
global_step1500/mp_rank_00_model_states.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4bbae45bafabe6c2d34ef51f4d02acda1136fbf706da94fc03b9c50069d7c7ed
size 8056300410
latest
ADDED
@@ -0,0 +1 @@
global_step1500
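`latest` is the standard DeepSpeed checkpoint tag file: it names the directory (global_step1500) that holds the ZeRO optimizer and model partitions added above, and the bundled zero_to_fp32.py listed in the file summary is DeepSpeed's usual consolidation script. A sketch of the equivalent Python API, assuming a recent DeepSpeed install (exact signatures can vary across DeepSpeed versions):

# Sketch: consolidate the ZeRO shards under global_step1500 into a single fp32 state dict.
from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint

state_dict = get_fp32_state_dict_from_zero_checkpoint(".", tag="global_step1500")
print(len(state_dict), "tensors recovered")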
model-00001-of-00002.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6d2323d8f682402232e99e6007c9b6dd0c23d81086cf2fae65a485b8a8368606
size 4969426016
model-00002-of-00002.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4ad3f49e667e57a004fa212faa5f21477166b5c5ce2bfa3b5ac0e20986aa09c0
size 3086476734
model.safetensors.index.json
ADDED
The diff for this file is too large to render.
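Although the shard index is too large to render here, it follows the standard Hugging Face sharded-checkpoint layout: a metadata block plus a weight_map from parameter name to shard file. A small sketch for inspecting it locally:

# Sketch: see which of the two safetensors shards holds each tensor.
import json

with open("model.safetensors.index.json") as f:
    index = json.load(f)

print(index["metadata"]["total_size"])            # total parameter bytes
for name, shard in list(index["weight_map"].items())[:5]:
    print(name, "->", shard)                      # e.g. model-00001-of-00002.safetensors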
modeling_gemma2.py
ADDED
@@ -0,0 +1,1283 @@
# custom gemma2 to support flash_attention_2,
# source from https://github.com/huggingface/transformers/blob/v4.47.0/src/transformers/models/gemma2/modeling_gemma2.py
# coding=utf-8
# Copyright 2024 Google Inc. HuggingFace Inc. team. All rights reserved.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional, Tuple, Union

import torch
import torch.nn as nn

from transformers.activations import ACT2FN
from transformers.cache_utils import Cache, HybridCache
from transformers.generation import GenerationMixin
from transformers.modeling_outputs import (
    BaseModelOutputWithPast,
    CausalLMOutputWithPast,
    SequenceClassifierOutputWithPast,
    TokenClassifierOutput,
)
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import (
    add_code_sample_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    is_flash_attn_2_available,
    is_flash_attn_greater_or_equal,
    is_torch_greater_or_equal,
    logging,
    replace_return_docstrings,
    is_flash_attn_greater_or_equal_2_10,
)
from transformers import Gemma2Config


if is_flash_attn_2_available():
    from transformers.modeling_flash_attention_utils import _flash_attention_forward

if is_torch_greater_or_equal("2.5"):
    from torch.nn.attention.flex_attention import flex_attention

logger = logging.get_logger(__name__)


_CHECKPOINT_FOR_DOC = "google/gemma2-7b"
_CONFIG_FOR_DOC = "Gemma2Config"


class Gemma2RMSNorm(nn.Module):
    def __init__(self, dim: int, eps: float = 1e-6):
        super().__init__()
        self.eps = eps
        self.weight = nn.Parameter(torch.zeros(dim))

    def _norm(self, x):
        return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)

    def forward(self, x):
        output = self._norm(x.float())
        # Llama does x.to(float16) * w whilst Gemma2 is (x * w).to(float16)
        # See https://github.com/huggingface/transformers/pull/29402
        output = output * (1.0 + self.weight.float())
        return output.type_as(x)

    def extra_repr(self):
        return f"{tuple(self.weight.shape)}, eps={self.eps}"


class Gemma2MLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.intermediate_size = config.intermediate_size
        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
        self.act_fn = ACT2FN[config.hidden_activation]

    def forward(self, x):
        return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))


class Gemma2RotaryEmbedding(nn.Module):
    def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
        super().__init__()

        self.dim = dim
        self.max_position_embeddings = max_position_embeddings
        self.base = base
        inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float() / self.dim))
        self.register_buffer("inv_freq", tensor=inv_freq, persistent=False)

    @torch.no_grad()
    def forward(self, x, position_ids, seq_len=None):
        # x: [bs, num_attention_heads, seq_len, head_size]
        self.inv_freq.to(x.device)
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
        position_ids_expanded = position_ids[:, None, :].float()
        # Force float32 since bfloat16 loses precision on long contexts
        # See https://github.com/huggingface/transformers/pull/29285
        device_type = x.device.type
        device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu"
        with torch.autocast(device_type=device_type, enabled=False):
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.cat((freqs, freqs), dim=-1)
            cos = emb.cos()
            sin = emb.sin()
        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)


def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)


def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`, *optional*):
            Deprecated and unused.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
Returns:
|
| 148 |
+
`tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
|
| 149 |
+
"""
|
| 150 |
+
cos = cos.unsqueeze(unsqueeze_dim)
|
| 151 |
+
sin = sin.unsqueeze(unsqueeze_dim)
|
| 152 |
+
q_embed = (q * cos) + (rotate_half(q) * sin)
|
| 153 |
+
k_embed = (k * cos) + (rotate_half(k) * sin)
|
| 154 |
+
return q_embed, k_embed
|
| 155 |
+
|
| 156 |
+
|
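# RoPE usage sketch (illustrative, toy shapes): cos/sin come from Gemma2RotaryEmbedding and are
# broadcast over the head dimension via unsqueeze_dim=1 inside apply_rotary_pos_emb:
#   >>> rope = Gemma2RotaryEmbedding(dim=64)
#   >>> q = k = torch.randn(1, 8, 16, 64)        # [batch, heads, seq_len, head_dim]
#   >>> position_ids = torch.arange(16)[None, :]
#   >>> cos, sin = rope(q, position_ids)
#   >>> q_rot, k_rot = apply_rotary_pos_emb(q, k, cos, sin)  # same shapes as q, k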
| 157 |
+
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
|
| 158 |
+
"""
|
| 159 |
+
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
|
| 160 |
+
num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
|
| 161 |
+
"""
|
| 162 |
+
batch, num_key_value_heads, slen, head_dim = hidden_states.shape
|
| 163 |
+
if n_rep == 1:
|
| 164 |
+
return hidden_states
|
| 165 |
+
hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
|
| 166 |
+
return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
|
| 167 |
+
|
| 168 |
+
|
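# Equivalence sketch (illustrative, toy shapes): repeat_kv expands grouped KV heads so that, e.g.,
# 2 key/value heads shared by 8 query heads become 8 repeated heads, matching repeat_interleave:
#   >>> kv = torch.randn(1, 2, 16, 64)           # [batch, kv_heads, seq_len, head_dim]
#   >>> torch.equal(repeat_kv(kv, 4), torch.repeat_interleave(kv, repeats=4, dim=1))
#   True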
| 169 |
+
def eager_attention_forward(
|
| 170 |
+
config: Gemma2Config,
|
| 171 |
+
query: torch.Tensor,
|
| 172 |
+
key: torch.Tensor,
|
| 173 |
+
value: torch.Tensor,
|
| 174 |
+
mask: Optional[torch.Tensor],
|
| 175 |
+
**_kwargs,
|
| 176 |
+
) -> Tuple[torch.Tensor, torch.Tensor]:
|
| 177 |
+
key_states = repeat_kv(key, config.num_key_value_groups)
|
| 178 |
+
value_states = repeat_kv(value, config.num_key_value_groups)
|
| 179 |
+
|
| 180 |
+
attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * config.scaling
|
| 181 |
+
|
| 182 |
+
if config.attn_logit_softcapping is not None:
|
| 183 |
+
attn_weights = attn_weights / config.attn_logit_softcapping
|
| 184 |
+
attn_weights = torch.tanh(attn_weights)
|
| 185 |
+
attn_weights = attn_weights * config.attn_logit_softcapping
|
| 186 |
+
if mask is not None: # no matter the length, we just slice it
|
| 187 |
+
causal_mask = mask[:, :, :, : key_states.shape[-2]]
|
| 188 |
+
attn_weights = attn_weights + causal_mask
|
| 189 |
+
|
| 190 |
+
# upcast attention to fp32
|
| 191 |
+
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
|
| 192 |
+
attn_weights = nn.functional.dropout(attn_weights, p=config.attention_dropout, training=config.training)
|
| 193 |
+
attn_output = torch.matmul(attn_weights, value_states)
|
| 194 |
+
attn_output = attn_output.transpose(1, 2).contiguous()
|
| 195 |
+
return attn_output, attn_weights
|
| 196 |
+
|
| 197 |
+
|
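# Soft-capping sketch (illustrative numbers): attn_logit_softcapping squeezes logits into
# (-cap, +cap) via cap * tanh(logits / cap), so a raw logit of 100.0 with cap=50.0 becomes
# about 48.2 rather than saturating the softmax:
#   >>> round(float(50.0 * torch.tanh(torch.tensor(100.0) / 50.0)), 1)
#   48.2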
| 198 |
+
def flash_attention_forward(
|
| 199 |
+
config: Gemma2Config,
|
| 200 |
+
query: torch.Tensor,
|
| 201 |
+
key: torch.Tensor,
|
| 202 |
+
value: torch.Tensor,
|
| 203 |
+
mask: Optional[torch.Tensor],
|
| 204 |
+
target_dtype: torch.dtype = torch.float16,
|
| 205 |
+
**_kwargs,
|
| 206 |
+
) -> Tuple[torch.Tensor, None]:
|
| 207 |
+
# NOTE: a None mask causes undefined behavior, see https://github.com/huggingface/transformers/blob/c8c8dffbe45ebef0a8dba4a51024e5e5e498596b/src/transformers/models/gemma2/modeling_gemma2.py#L211
|
| 208 |
+
seq_len = query.shape[2]
|
| 209 |
+
if mask is not None:
|
| 210 |
+
query = query[:, :, :seq_len]
|
| 211 |
+
value = value[:, :, :seq_len]
|
| 212 |
+
|
| 213 |
+
# TODO: These transpose are quite inefficient but Flash Attention requires the layout
|
| 214 |
+
# [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor rotary embedding
|
| 215 |
+
query_states = query.transpose(1, 2)
|
| 216 |
+
key_states = key.transpose(1, 2)
|
| 217 |
+
value_states = value.transpose(1, 2)
|
| 218 |
+
|
| 219 |
+
dropout_rate = config.attention_dropout if config.training else 0.0
|
| 220 |
+
|
| 221 |
+
input_dtype = query_states.dtype
|
| 222 |
+
if input_dtype == torch.float32:
|
| 223 |
+
query_states = query_states.to(target_dtype)
|
| 224 |
+
key_states = key_states.to(target_dtype)
|
| 225 |
+
value_states = value_states.to(target_dtype)
|
| 226 |
+
|
| 227 |
+
attn_output = _flash_attention_forward(
|
| 228 |
+
query_states,
|
| 229 |
+
key_states,
|
| 230 |
+
value_states,
|
| 231 |
+
mask,
|
| 232 |
+
seq_len,
|
| 233 |
+
dropout=dropout_rate,
|
| 234 |
+
softmax_scale=config.scaling,
|
| 235 |
+
is_causal=config.is_causal,
|
| 236 |
+
sliding_window=config.sliding_window,
|
| 237 |
+
use_top_left_mask=config._flash_attn_uses_top_left_mask,
|
| 238 |
+
softcap=config.attn_logit_softcapping if is_flash_attn_greater_or_equal("2.6.0") else None,
|
| 239 |
+
)
|
| 240 |
+
|
| 241 |
+
return attn_output, None
|
| 242 |
+
|
| 243 |
+
|
| 244 |
+
def flex_attention_forward(
|
| 245 |
+
config: Gemma2Config,
|
| 246 |
+
query: torch.Tensor,
|
| 247 |
+
key: torch.Tensor,
|
| 248 |
+
value: torch.Tensor,
|
| 249 |
+
mask: Optional[torch.Tensor],
|
| 250 |
+
output_attentions: bool = False,
|
| 251 |
+
**_kwargs,
|
| 252 |
+
) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
|
| 253 |
+
def tanh_softcap(score, b, h, q_idx, kv_idx):
|
| 254 |
+
soft_cap = config.attn_logit_softcapping
|
| 255 |
+
score = soft_cap * torch.tanh(score / soft_cap)
|
| 256 |
+
if mask is not None:
|
| 257 |
+
return score + mask[b][0][q_idx][kv_idx]
|
| 258 |
+
return score
|
| 259 |
+
|
| 260 |
+
attn_output = flex_attention(
|
| 261 |
+
query,
|
| 262 |
+
key,
|
| 263 |
+
value,
|
| 264 |
+
score_mod=tanh_softcap,
|
| 265 |
+
enable_gqa=True,
|
| 266 |
+
scale=config.scaling,
|
| 267 |
+
return_lse=output_attentions,
|
| 268 |
+
)
|
| 269 |
+
if not output_attentions:
|
| 270 |
+
attn_weights = None
|
| 271 |
+
else:
|
| 272 |
+
attn_output, attn_weights = attn_output
|
| 273 |
+
|
| 274 |
+
attn_output = attn_output.transpose(1, 2).contiguous()
|
| 275 |
+
return attn_output, attn_weights
|
| 276 |
+
|
| 277 |
+
|
| 278 |
+
def sdpa_attention_forward(
|
| 279 |
+
config: Gemma2Config,
|
| 280 |
+
query: torch.Tensor,
|
| 281 |
+
key: torch.Tensor,
|
| 282 |
+
value: torch.Tensor,
|
| 283 |
+
mask: Optional[torch.Tensor],
|
| 284 |
+
**_kwargs,
|
| 285 |
+
) -> Tuple[torch.Tensor, None]:
|
| 286 |
+
key = repeat_kv(key, config.num_key_value_groups)
|
| 287 |
+
value = repeat_kv(value, config.num_key_value_groups)
|
| 288 |
+
|
| 289 |
+
causal_mask = mask
|
| 290 |
+
if mask is not None:
|
| 291 |
+
causal_mask = causal_mask[:, :, :, : key.shape[-2]]
|
| 292 |
+
|
| 293 |
+
# SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask,
|
| 294 |
+
# Reference: https://github.com/pytorch/pytorch/issues/112577.
|
| 295 |
+
if query.device.type == "cuda" and causal_mask is not None:
|
| 296 |
+
query = query.contiguous()
|
| 297 |
+
key = key.contiguous()
|
| 298 |
+
value = value.contiguous()
|
| 299 |
+
|
| 300 |
+
# We dispatch to SDPA's Flash Attention or Efficient kernels via this `is_causal` if statement instead of an inline conditional assignment
|
| 301 |
+
# in SDPA to support both torch.compile's dynamic shapes and full graph options. An inline conditional prevents dynamic shapes from compiling.
|
| 302 |
+
is_causal = True if causal_mask is None and query.shape[1] > 1 else False
|
| 303 |
+
|
| 304 |
+
attn_output = torch.nn.functional.scaled_dot_product_attention(
|
| 305 |
+
query,
|
| 306 |
+
key,
|
| 307 |
+
value,
|
| 308 |
+
attn_mask=causal_mask,
|
| 309 |
+
dropout_p=config.attention_dropout if config.training else 0.0,
|
| 310 |
+
is_causal=is_causal,
|
| 311 |
+
scale=config.scaling,
|
| 312 |
+
)
|
| 313 |
+
attn_output = attn_output.transpose(1, 2).contiguous()
|
| 314 |
+
return attn_output, None
|
| 315 |
+
|
| 316 |
+
|
| 317 |
+
GEMMA2_ATTENTION_FUNCTION = {
|
| 318 |
+
"flash_attention_2": flash_attention_forward,
|
| 319 |
+
"flex_attention": flex_attention_forward,
|
| 320 |
+
"eager": eager_attention_forward,
|
| 321 |
+
"sdpa": sdpa_attention_forward,
|
| 322 |
+
}
|
| 323 |
+
|
| 324 |
+
|
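# Dispatch sketch (illustrative): Gemma2Attention.forward below looks up the kernel by the
# config's `_attn_implementation` string, so e.g. loading with attn_implementation="eager"
# routes every layer through eager_attention_forward:
#   >>> GEMMA2_ATTENTION_FUNCTION["eager"] is eager_attention_forward
#   True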
| 325 |
+
class Gemma2Attention(nn.Module):
|
| 326 |
+
"""Multi-headed attention from 'Attention Is All You Need' paper"""
|
| 327 |
+
|
| 328 |
+
def __init__(self, config: Gemma2Config, layer_idx: Optional[int] = None):
|
| 329 |
+
super().__init__()
|
| 330 |
+
self.config = config
|
| 331 |
+
self.layer_idx = layer_idx
|
| 332 |
+
|
| 333 |
+
self.attention_dropout = config.attention_dropout
|
| 334 |
+
self.hidden_size = config.hidden_size
|
| 335 |
+
self.num_heads = config.num_attention_heads
|
| 336 |
+
self.head_dim = config.head_dim
|
| 337 |
+
self.num_key_value_heads = config.num_key_value_heads
|
| 338 |
+
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
|
| 339 |
+
self.max_position_embeddings = config.max_position_embeddings
|
| 340 |
+
self.rope_theta = config.rope_theta
|
| 341 |
+
self.is_causal = True
|
| 342 |
+
self.scaling = config.query_pre_attn_scalar**-0.5
|
| 343 |
+
self.sliding_window = config.sliding_window if not bool(layer_idx % 2) else None
|
| 344 |
+
self.attn_logit_softcapping = config.attn_logit_softcapping
|
| 345 |
+
if self.hidden_size % self.num_heads != 0:
|
| 346 |
+
raise ValueError(
|
| 347 |
+
f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
|
| 348 |
+
f" and `num_heads`: {self.num_heads})."
|
| 349 |
+
)
|
| 350 |
+
|
| 351 |
+
self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.attention_bias)
|
| 352 |
+
self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
|
| 353 |
+
self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
|
| 354 |
+
self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=config.attention_bias)
|
| 355 |
+
self.rotary_emb = Gemma2RotaryEmbedding(
|
| 356 |
+
self.head_dim,
|
| 357 |
+
max_position_embeddings=self.max_position_embeddings,
|
| 358 |
+
base=self.rope_theta,
|
| 359 |
+
)
|
| 360 |
+
|
| 361 |
+
# NOTE: gemma2 does not include _flash_attn_uses_top_left_mask for flash attention
|
| 362 |
+
self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
|
| 363 |
+
|
| 364 |
+
def forward(
|
| 365 |
+
self,
|
| 366 |
+
hidden_states: torch.Tensor,
|
| 367 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 368 |
+
position_ids: Optional[torch.LongTensor] = None,
|
| 369 |
+
past_key_value: Optional[Cache] = None,
|
| 370 |
+
output_attentions: bool = False,
|
| 371 |
+
use_cache: bool = False,
|
| 372 |
+
cache_position: Optional[torch.LongTensor] = None,
|
| 373 |
+
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
|
| 374 |
+
bsz, q_len, _ = hidden_states.size()
|
| 375 |
+
|
| 376 |
+
query_states = self.q_proj(hidden_states)
|
| 377 |
+
key_states = self.k_proj(hidden_states)
|
| 378 |
+
value_states = self.v_proj(hidden_states)
|
| 379 |
+
|
| 380 |
+
query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
|
| 381 |
+
key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
|
| 382 |
+
value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
|
| 383 |
+
|
| 384 |
+
cos, sin = self.rotary_emb(value_states, position_ids)
|
| 385 |
+
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
|
| 386 |
+
|
| 387 |
+
if past_key_value is not None:
|
| 388 |
+
# sin and cos are specific to RoPE models; cache_position needed for the static cache
|
| 389 |
+
cache_kwargs = {
|
| 390 |
+
"sin": sin,
|
| 391 |
+
"cos": cos,
|
| 392 |
+
"sliding_window": self.sliding_window,
|
| 393 |
+
"cache_position": cache_position,
|
| 394 |
+
}
|
| 395 |
+
key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
|
| 396 |
+
|
| 397 |
+
if output_attentions and self.config._attn_implementation in ["sdpa", "flash_attention_2"]:
|
| 398 |
+
logger.warning_once("Setting `attention_type` to `flex_attention` because `output_attentions=True`")
|
| 399 |
+
attention_type = "flex_attention"
|
| 400 |
+
else:
|
| 401 |
+
attention_type = self.config._attn_implementation
|
| 402 |
+
|
| 403 |
+
attn_output, attn_weights = GEMMA2_ATTENTION_FUNCTION[attention_type](
|
| 404 |
+
self, query_states, key_states, value_states, attention_mask, output_attentions=output_attentions
|
| 405 |
+
)
|
| 406 |
+
|
| 407 |
+
attn_output = attn_output.reshape(bsz, q_len, -1).contiguous()
|
| 408 |
+
attn_output = self.o_proj(attn_output)
|
| 409 |
+
|
| 410 |
+
if not output_attentions:
|
| 411 |
+
attn_weights = None
|
| 412 |
+
|
| 413 |
+
return attn_output, attn_weights, past_key_value
|
| 414 |
+
|
| 415 |
+
|
| 416 |
+
class Gemma2FlashAttention2(Gemma2Attention):
|
| 417 |
+
def __init__(self, config: Gemma2Config, layer_idx: Optional[int] = None):
|
| 418 |
+
super().__init__(config, layer_idx)
|
| 419 |
+
self.config._attn_implementation = "flash_attention_2"
|
| 420 |
+
logger.warning_once(
|
| 421 |
+
"The `Gemma2FlashAttention2` class is deprecated in favor of simply modifying the `config._attn_implementation`"
|
| 422 |
+
"attribute of the `GemmaAttention` class! It will be removed in v4.48"
|
| 423 |
+
)
|
| 424 |
+
|
| 425 |
+
|
| 426 |
+
class Gemma2SdpaAttention(Gemma2Attention):
|
| 427 |
+
def __init__(self, config: Gemma2Config, layer_idx: Optional[int] = None):
|
| 428 |
+
super().__init__(config, layer_idx)
|
| 429 |
+
self.config._attn_implementation = "sdpa"
|
| 430 |
+
logger.warning_once(
|
| 431 |
+
"The `Gemma2FlashAttention2` class is deprecated in favor of simply modifying the `config._attn_implementation`"
|
| 432 |
+
"attribute of the `GemmaAttention` class! It will be removed in v4.48"
|
| 433 |
+
)
|
| 434 |
+
|
| 435 |
+
|
| 436 |
+
class Gemma2DecoderLayer(nn.Module):
|
| 437 |
+
def __init__(self, config: Gemma2Config, layer_idx: int):
|
| 438 |
+
super().__init__()
|
| 439 |
+
self.hidden_size = config.hidden_size
|
| 440 |
+
self.config = config
|
| 441 |
+
self.is_sliding = not bool(layer_idx % 2)
|
| 442 |
+
self.self_attn = Gemma2Attention(config=config, layer_idx=layer_idx)
|
| 443 |
+
self.mlp = Gemma2MLP(config)
|
| 444 |
+
self.input_layernorm = Gemma2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
|
| 445 |
+
self.post_attention_layernorm = Gemma2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
|
| 446 |
+
|
| 447 |
+
self.pre_feedforward_layernorm = Gemma2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
|
| 448 |
+
self.post_feedforward_layernorm = Gemma2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
|
| 449 |
+
self.sliding_window = config.sliding_window
|
| 450 |
+
|
| 451 |
+
def forward(
|
| 452 |
+
self,
|
| 453 |
+
hidden_states: torch.Tensor,
|
| 454 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 455 |
+
position_ids: Optional[torch.LongTensor] = None,
|
| 456 |
+
past_key_value: Optional[Cache] = None,
|
| 457 |
+
output_attentions: Optional[bool] = False,
|
| 458 |
+
use_cache: Optional[bool] = False,
|
| 459 |
+
cache_position: Optional[torch.LongTensor] = None,
|
| 460 |
+
) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
|
| 461 |
+
if self.is_sliding and attention_mask is not None: # efficient SDPA and no padding
|
| 462 |
+
# With flash_attention_2, the attention mask stays a 2D [batch, seq_len] tensor
|
| 463 |
+
if self.config._attn_implementation == "flash_attention_2":
|
| 464 |
+
if past_key_value is not None: # when decoding
|
| 465 |
+
attention_mask = attention_mask[:, -self.sliding_window :]
|
| 466 |
+
else:
|
| 467 |
+
min_dtype = torch.finfo(hidden_states.dtype).min
|
| 468 |
+
sliding_window_mask = torch.tril(
|
| 469 |
+
torch.ones_like(attention_mask, dtype=torch.bool), diagonal=-self.sliding_window
|
| 470 |
+
)
|
| 471 |
+
attention_mask = torch.where(sliding_window_mask, min_dtype, attention_mask)
|
| 472 |
+
if attention_mask.shape[-1] <= 1: # when decoding
|
| 473 |
+
attention_mask = attention_mask[:, :, :, -self.sliding_window :]
|
| 474 |
+
|
| 475 |
+
residual = hidden_states
|
| 476 |
+
|
| 477 |
+
hidden_states = self.input_layernorm(hidden_states)
|
| 478 |
+
|
| 479 |
+
# Self Attention
|
| 480 |
+
hidden_states, self_attn_weights, present_key_value = self.self_attn(
|
| 481 |
+
hidden_states=hidden_states,
|
| 482 |
+
attention_mask=attention_mask,
|
| 483 |
+
position_ids=position_ids,
|
| 484 |
+
past_key_value=past_key_value,
|
| 485 |
+
output_attentions=output_attentions,
|
| 486 |
+
use_cache=use_cache,
|
| 487 |
+
cache_position=cache_position,
|
| 488 |
+
)
|
| 489 |
+
hidden_states = self.post_attention_layernorm(hidden_states)
|
| 490 |
+
hidden_states = residual + hidden_states
|
| 491 |
+
|
| 492 |
+
residual = hidden_states
|
| 493 |
+
hidden_states = self.pre_feedforward_layernorm(hidden_states)
|
| 494 |
+
hidden_states = self.mlp(hidden_states)
|
| 495 |
+
hidden_states = self.post_feedforward_layernorm(hidden_states)
|
| 496 |
+
hidden_states = residual + hidden_states
|
| 497 |
+
|
| 498 |
+
outputs = (hidden_states,)
|
| 499 |
+
|
| 500 |
+
if output_attentions:
|
| 501 |
+
outputs += (self_attn_weights,)
|
| 502 |
+
|
| 503 |
+
if use_cache:
|
| 504 |
+
outputs += (present_key_value,)
|
| 505 |
+
|
| 506 |
+
return outputs
|
| 507 |
+
|
| 508 |
+
|
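# Layer-pattern sketch (illustrative): `is_sliding = not bool(layer_idx % 2)` makes even-indexed
# decoder layers use local sliding-window attention while odd-indexed layers attend globally:
#   >>> [not bool(i % 2) for i in range(4)]
#   [True, False, True, False]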
| 509 |
+
GEMMA2_START_DOCSTRING = r"""
|
| 510 |
+
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
|
| 511 |
+
library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
|
| 512 |
+
etc.)
|
| 513 |
+
|
| 514 |
+
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
|
| 515 |
+
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
|
| 516 |
+
and behavior.
|
| 517 |
+
|
| 518 |
+
Parameters:
|
| 519 |
+
config ([`Gemma2Config`]):
|
| 520 |
+
Model configuration class with all the parameters of the model. Initializing with a config file does not
|
| 521 |
+
load the weights associated with the model, only the configuration. Check out the
|
| 522 |
+
[`~PreTrainedModel.from_pretrained`] method to load the model weights.
|
| 523 |
+
"""
|
| 524 |
+
|
| 525 |
+
|
| 526 |
+
@add_start_docstrings(
|
| 527 |
+
"The bare Gemma2 Model outputting raw hidden-states without any specific head on top.",
|
| 528 |
+
GEMMA2_START_DOCSTRING,
|
| 529 |
+
)
|
| 530 |
+
class Gemma2PreTrainedModel(PreTrainedModel):
|
| 531 |
+
config_class = Gemma2Config
|
| 532 |
+
base_model_prefix = "model"
|
| 533 |
+
supports_gradient_checkpointing = True
|
| 534 |
+
_no_split_modules = ["Gemma2DecoderLayer"]
|
| 535 |
+
_skip_keys_device_placement = ["past_key_values"]
|
| 536 |
+
_supports_flash_attn_2 = True
|
| 537 |
+
_supports_sdpa = True
|
| 538 |
+
_supports_cache_class = True
|
| 539 |
+
_supports_quantized_cache = False
|
| 540 |
+
_supports_static_cache = True
|
| 541 |
+
|
| 542 |
+
def _init_weights(self, module):
|
| 543 |
+
std = self.config.initializer_range
|
| 544 |
+
if isinstance(module, nn.Linear):
|
| 545 |
+
module.weight.data.normal_(mean=0.0, std=std)
|
| 546 |
+
if module.bias is not None:
|
| 547 |
+
module.bias.data.zero_()
|
| 548 |
+
elif isinstance(module, nn.Embedding):
|
| 549 |
+
module.weight.data.normal_(mean=0.0, std=std)
|
| 550 |
+
if module.padding_idx is not None:
|
| 551 |
+
module.weight.data[module.padding_idx].zero_()
|
| 552 |
+
|
| 553 |
+
@classmethod
|
| 554 |
+
def _check_and_enable_sdpa(cls, config, hard_check_only: bool = False):
|
| 555 |
+
"""
|
| 556 |
+
Overloads `PreTrainedModel._check_and_enable_sdpa` so as to DISABLE torch SDPA by default on Gemma2 models.
|
| 557 |
+
SDPA reduces the model performance on Gemma2 because of the logits softcapping.
|
| 558 |
+
"""
|
| 559 |
+
config = super()._check_and_enable_sdpa(config, hard_check_only=hard_check_only)
|
| 560 |
+
|
| 561 |
+
# if using the default path -> swap sdpa by eager
|
| 562 |
+
if not hard_check_only and config._attn_implementation == "sdpa":
|
| 563 |
+
config._attn_implementation = "eager"
|
| 564 |
+
|
| 565 |
+
return config
|
| 566 |
+
|
| 567 |
+
|
| 568 |
+
GEMMA2_INPUTS_DOCSTRING = r"""
|
| 569 |
+
Args:
|
| 570 |
+
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
|
| 571 |
+
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
|
| 572 |
+
it.
|
| 573 |
+
|
| 574 |
+
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
|
| 575 |
+
[`PreTrainedTokenizer.__call__`] for details.
|
| 576 |
+
|
| 577 |
+
[What are input IDs?](../glossary#input-ids)
|
| 578 |
+
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
|
| 579 |
+
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
|
| 580 |
+
|
| 581 |
+
- 1 for tokens that are **not masked**,
|
| 582 |
+
- 0 for tokens that are **masked**.
|
| 583 |
+
|
| 584 |
+
[What are attention masks?](../glossary#attention-mask)
|
| 585 |
+
|
| 586 |
+
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
|
| 587 |
+
[`PreTrainedTokenizer.__call__`] for details.
|
| 588 |
+
|
| 589 |
+
If `past_key_values` is used, optionally only the last `input_ids` have to be input (see
|
| 590 |
+
`past_key_values`).
|
| 591 |
+
|
| 592 |
+
If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
|
| 593 |
+
and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
|
| 594 |
+
information on the default strategy.
|
| 595 |
+
|
| 596 |
+
- 1 indicates the head is **not masked**,
|
| 597 |
+
- 0 indicates the head is **masked**.
|
| 598 |
+
position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
|
| 599 |
+
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
|
| 600 |
+
config.n_positions - 1]`.
|
| 601 |
+
|
| 602 |
+
[What are position IDs?](../glossary#position-ids)
|
| 603 |
+
past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):
|
| 604 |
+
Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
|
| 605 |
+
blocks) that can be used to speed up sequential decoding. This typically consists in the `past_key_values`
|
| 606 |
+
returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.
|
| 607 |
+
|
| 608 |
+
Two formats are allowed:
|
| 609 |
+
- a [`~cache_utils.Cache`] instance, see our
|
| 610 |
+
[kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache);
|
| 611 |
+
- Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
|
| 612 |
+
shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy
|
| 613 |
+
cache format.
|
| 614 |
+
|
| 615 |
+
The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the
|
| 616 |
+
legacy cache format will be returned.
|
| 617 |
+
|
| 618 |
+
If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
|
| 619 |
+
have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
|
| 620 |
+
of shape `(batch_size, sequence_length)`.
|
| 621 |
+
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
|
| 622 |
+
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
|
| 623 |
+
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
|
| 624 |
+
model's internal embedding lookup matrix.
|
| 625 |
+
use_cache (`bool`, *optional*):
|
| 626 |
+
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
|
| 627 |
+
`past_key_values`).
|
| 628 |
+
output_attentions (`bool`, *optional*):
|
| 629 |
+
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
|
| 630 |
+
tensors for more detail.
|
| 631 |
+
output_hidden_states (`bool`, *optional*):
|
| 632 |
+
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
|
| 633 |
+
more detail.
|
| 634 |
+
return_dict (`bool`, *optional*):
|
| 635 |
+
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
|
| 636 |
+
cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
|
| 637 |
+
Indices depicting the position of the input sequence tokens in the sequence. Contrarily to `position_ids`,
|
| 638 |
+
this tensor is not affected by padding. It is used to update the cache in the correct position and to infer
|
| 639 |
+
the complete sequence length.
|
| 640 |
+
"""
|
| 641 |
+
|
| 642 |
+
|
| 643 |
+
@add_start_docstrings(
|
| 644 |
+
"The bare Gemma2 Model outputting raw hidden-states without any specific head on top.",
|
| 645 |
+
GEMMA2_START_DOCSTRING,
|
| 646 |
+
)
|
| 647 |
+
class Gemma2Model(Gemma2PreTrainedModel):
|
| 648 |
+
"""
|
| 649 |
+
Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`Gemma2DecoderLayer`]
|
| 650 |
+
|
| 651 |
+
Args:
|
| 652 |
+
config: Gemma2Config
|
| 653 |
+
"""
|
| 654 |
+
|
| 655 |
+
def __init__(self, config: Gemma2Config):
|
| 656 |
+
super().__init__(config)
|
| 657 |
+
self.padding_idx = config.pad_token_id
|
| 658 |
+
self.vocab_size = config.vocab_size
|
| 659 |
+
|
| 660 |
+
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
|
| 661 |
+
self.layers = nn.ModuleList(
|
| 662 |
+
[Gemma2DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
|
| 663 |
+
)
|
| 664 |
+
self.norm = Gemma2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
|
| 665 |
+
|
| 666 |
+
self.gradient_checkpointing = False
|
| 667 |
+
if getattr(config, "pretraining_tp", 1) != 1:
|
| 668 |
+
logger.warning("`pretraining_tp` is deprecated, please use `model.tensor_parallel` instead.")
|
| 669 |
+
|
| 670 |
+
# Initialize weights and apply final processing
|
| 671 |
+
self.post_init()
|
| 672 |
+
|
| 673 |
+
def get_input_embeddings(self):
|
| 674 |
+
return self.embed_tokens
|
| 675 |
+
|
| 676 |
+
def set_input_embeddings(self, value):
|
| 677 |
+
self.embed_tokens = value
|
| 678 |
+
|
| 679 |
+
@add_start_docstrings_to_model_forward(GEMMA2_INPUTS_DOCSTRING)
|
| 680 |
+
def forward(
|
| 681 |
+
self,
|
| 682 |
+
input_ids: torch.LongTensor = None,
|
| 683 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 684 |
+
position_ids: Optional[torch.LongTensor] = None,
|
| 685 |
+
past_key_values: Optional[HybridCache] = None,
|
| 686 |
+
inputs_embeds: Optional[torch.FloatTensor] = None,
|
| 687 |
+
use_cache: Optional[bool] = None,
|
| 688 |
+
output_attentions: Optional[bool] = None,
|
| 689 |
+
output_hidden_states: Optional[bool] = None,
|
| 690 |
+
return_dict: Optional[bool] = None,
|
| 691 |
+
cache_position: Optional[torch.LongTensor] = None,
|
| 692 |
+
) -> Union[Tuple, BaseModelOutputWithPast]:
|
| 693 |
+
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
| 694 |
+
output_hidden_states = (
|
| 695 |
+
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
| 696 |
+
)
|
| 697 |
+
use_cache = use_cache if use_cache is not None else self.config.use_cache
|
| 698 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
| 699 |
+
|
| 700 |
+
if (input_ids is None) ^ (inputs_embeds is not None):
|
| 701 |
+
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
|
| 702 |
+
|
| 703 |
+
if self.gradient_checkpointing and self.training and use_cache:
|
| 704 |
+
logger.warning_once(
|
| 705 |
+
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
|
| 706 |
+
)
|
| 707 |
+
use_cache = False
|
| 708 |
+
|
| 709 |
+
if inputs_embeds is None:
|
| 710 |
+
inputs_embeds = self.embed_tokens(input_ids)
|
| 711 |
+
|
| 712 |
+
if use_cache and past_key_values is None and not self.training:
|
| 713 |
+
batch_size, seq_len, _ = inputs_embeds.shape
|
| 714 |
+
past_key_values = HybridCache(
|
| 715 |
+
self.config,
|
| 716 |
+
batch_size=batch_size,
|
| 717 |
+
max_cache_len=seq_len,
|
| 718 |
+
device=self.device,
|
| 719 |
+
dtype=inputs_embeds.dtype,
|
| 720 |
+
)
|
| 721 |
+
|
| 722 |
+
if cache_position is None:
|
| 723 |
+
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
|
| 724 |
+
cache_position = torch.arange(
|
| 725 |
+
past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
|
| 726 |
+
)
|
| 727 |
+
|
| 728 |
+
if position_ids is None:
|
| 729 |
+
position_ids = cache_position.unsqueeze(0)
|
| 730 |
+
|
| 731 |
+
causal_mask = self._update_causal_mask(
|
| 732 |
+
attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
|
| 733 |
+
)
|
| 734 |
+
|
| 735 |
+
# embed positions
|
| 736 |
+
hidden_states = inputs_embeds
|
| 737 |
+
|
| 738 |
+
# normalized
|
| 739 |
+
# Gemma2 downcasts the below to float16, causing sqrt(3072)=55.4256 to become 55.5
|
| 740 |
+
# See https://github.com/huggingface/transformers/pull/29402
|
| 741 |
+
normalizer = torch.tensor(self.config.hidden_size**0.5, dtype=hidden_states.dtype)
|
| 742 |
+
hidden_states = hidden_states * normalizer
|
| 743 |
+
|
| 744 |
+
# decoder layers
|
| 745 |
+
all_hidden_states = () if output_hidden_states else None
|
| 746 |
+
all_self_attns = () if output_attentions else None
|
| 747 |
+
|
| 748 |
+
for decoder_layer in self.layers[: self.config.num_hidden_layers]:
|
| 749 |
+
if output_hidden_states:
|
| 750 |
+
all_hidden_states += (hidden_states,)
|
| 751 |
+
|
| 752 |
+
if self.gradient_checkpointing and self.training:
|
| 753 |
+
layer_outputs = self._gradient_checkpointing_func(
|
| 754 |
+
decoder_layer.__call__,
|
| 755 |
+
hidden_states,
|
| 756 |
+
causal_mask,
|
| 757 |
+
position_ids,
|
| 758 |
+
past_key_values,
|
| 759 |
+
output_attentions,
|
| 760 |
+
use_cache,
|
| 761 |
+
cache_position,
|
| 762 |
+
)
|
| 763 |
+
else:
|
| 764 |
+
layer_outputs = decoder_layer(
|
| 765 |
+
hidden_states,
|
| 766 |
+
attention_mask=causal_mask,
|
| 767 |
+
position_ids=position_ids,
|
| 768 |
+
past_key_value=past_key_values,
|
| 769 |
+
output_attentions=output_attentions,
|
| 770 |
+
use_cache=use_cache,
|
| 771 |
+
cache_position=cache_position,
|
| 772 |
+
)
|
| 773 |
+
|
| 774 |
+
hidden_states = layer_outputs[0]
|
| 775 |
+
|
| 776 |
+
if output_attentions:
|
| 777 |
+
all_self_attns += (layer_outputs[1],)
|
| 778 |
+
|
| 779 |
+
hidden_states = self.norm(hidden_states)
|
| 780 |
+
|
| 781 |
+
if output_hidden_states:
|
| 782 |
+
all_hidden_states += (hidden_states,)
|
| 783 |
+
|
| 784 |
+
next_cache = past_key_values if use_cache else None
|
| 785 |
+
|
| 786 |
+
if not return_dict:
|
| 787 |
+
return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
|
| 788 |
+
return BaseModelOutputWithPast(
|
| 789 |
+
last_hidden_state=hidden_states,
|
| 790 |
+
past_key_values=next_cache,
|
| 791 |
+
hidden_states=all_hidden_states,
|
| 792 |
+
attentions=all_self_attns,
|
| 793 |
+
)
|
| 794 |
+
|
| 795 |
+
@torch.no_grad()
|
| 796 |
+
def _update_causal_mask(
|
| 797 |
+
self,
|
| 798 |
+
attention_mask: torch.Tensor,
|
| 799 |
+
input_tensor: torch.Tensor,
|
| 800 |
+
cache_position: torch.Tensor,
|
| 801 |
+
past_key_values: HybridCache,
|
| 802 |
+
output_attentions: bool,
|
| 803 |
+
):
|
| 804 |
+
# Flash Attention currently doesn't support a static cache, but Gemma2 works only with a static cache.
|
| 805 |
+
# So we will pass the attention mask as is in any case, not only when there's padding. Then we'll use its shape
|
| 806 |
+
# to cut out the trailing zero keys/values used by the static cache. This workaround should be compile compatible
|
| 807 |
+
# as it doesn't cause dynamic control issues.
|
| 808 |
+
if self.config._attn_implementation == "flash_attention_2":
|
| 809 |
+
return attention_mask
|
| 810 |
+
|
| 811 |
+
dtype, device = input_tensor.dtype, input_tensor.device
|
| 812 |
+
sequence_length = input_tensor.shape[1]
|
| 813 |
+
if isinstance(past_key_values, HybridCache):
|
| 814 |
+
target_length = past_key_values.get_max_cache_shape()
|
| 815 |
+
else:
|
| 816 |
+
target_length = attention_mask.shape[-1] if attention_mask is not None else input_tensor.shape[1]
|
| 817 |
+
|
| 818 |
+
# In case the provided `attention` mask is 2D, we generate a causal mask here (4D).
|
| 819 |
+
causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
|
| 820 |
+
attention_mask,
|
| 821 |
+
sequence_length=sequence_length,
|
| 822 |
+
target_length=target_length,
|
| 823 |
+
dtype=dtype,
|
| 824 |
+
device=device,
|
| 825 |
+
cache_position=cache_position,
|
| 826 |
+
batch_size=input_tensor.shape[0],
|
| 827 |
+
)
|
| 828 |
+
return causal_mask
|
| 829 |
+
|
| 830 |
+
@staticmethod
|
| 831 |
+
def _prepare_4d_causal_attention_mask_with_cache_position(
|
| 832 |
+
attention_mask: torch.Tensor,
|
| 833 |
+
sequence_length: int,
|
| 834 |
+
target_length: int,
|
| 835 |
+
dtype: torch.dtype,
|
| 836 |
+
device: torch.device,
|
| 837 |
+
cache_position: torch.Tensor,
|
| 838 |
+
batch_size: int,
|
| 839 |
+
**kwargs,
|
| 840 |
+
):
|
| 841 |
+
"""
|
| 842 |
+
Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
|
| 843 |
+
`(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.
|
| 844 |
+
|
| 845 |
+
Args:
|
| 846 |
+
attention_mask (`torch.Tensor`):
|
| 847 |
+
A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
|
| 848 |
+
`(batch_size, 1, query_length, key_value_length)`.
|
| 849 |
+
sequence_length (`int`):
|
| 850 |
+
The sequence length being processed.
|
| 851 |
+
target_length (`int`):
|
| 852 |
+
The target length: when generating with static cache, the mask should be as long as the static cache,
|
| 853 |
+
to account for the 0 padding, the part of the cache that is not filled yet.
|
| 854 |
+
dtype (`torch.dtype`):
|
| 855 |
+
The dtype to use for the 4D attention mask.
|
| 856 |
+
device (`torch.device`):
|
| 857 |
+
The device to place the 4D attention mask on.
|
| 858 |
+
cache_position (`torch.Tensor`):
|
| 859 |
+
Indices depicting the position of the input sequence tokens in the sequence.
|
| 860 |
+
batch_size (`int`):
|
| 861 |
+
Batch size.
|
| 862 |
+
"""
|
| 863 |
+
if attention_mask is not None and attention_mask.dim() == 4:
|
| 864 |
+
# In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
|
| 865 |
+
causal_mask = attention_mask
|
| 866 |
+
else:
|
| 867 |
+
min_dtype = torch.finfo(dtype).min
|
| 868 |
+
causal_mask = torch.full(
|
| 869 |
+
(sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device
|
| 870 |
+
)
|
| 871 |
+
if sequence_length != 1:
|
| 872 |
+
causal_mask = torch.triu(causal_mask, diagonal=1)
|
| 873 |
+
causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
|
| 874 |
+
causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
|
| 875 |
+
if attention_mask is not None:
|
| 876 |
+
causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit
|
| 877 |
+
mask_length = attention_mask.shape[-1]
|
| 878 |
+
padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :]
|
| 879 |
+
padding_mask = padding_mask == 0
|
| 880 |
+
causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
|
| 881 |
+
padding_mask, min_dtype
|
| 882 |
+
)
|
| 883 |
+
|
| 884 |
+
return causal_mask
|
| 885 |
+
|
| 886 |
+
|
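# Mask-shape sketch (illustrative, toy sizes): a 2D padding mask of shape [batch, kv_len] is
# expanded into an additive 4D mask of shape [batch, 1, query_len, kv_len] whose blocked
# positions hold the dtype minimum, e.g. for a single decoding step over a static cache of 8:
#   >>> m = Gemma2Model._prepare_4d_causal_attention_mask_with_cache_position(
#   ...     attention_mask=torch.ones(1, 8), sequence_length=1, target_length=8,
#   ...     dtype=torch.float32, device="cpu", cache_position=torch.tensor([3]), batch_size=1,
#   ... )
#   >>> m.shape
#   torch.Size([1, 1, 1, 8])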
| 887 |
+
class Gemma2ForCausalLM(Gemma2PreTrainedModel, GenerationMixin):
|
| 888 |
+
_tied_weights_keys = ["lm_head.weight"]
|
| 889 |
+
_tp_plan = {"lm_head": "colwise_rep"}
|
| 890 |
+
|
| 891 |
+
def __init__(self, config):
|
| 892 |
+
super().__init__(config)
|
| 893 |
+
self.model = Gemma2Model(config)
|
| 894 |
+
self.vocab_size = config.vocab_size
|
| 895 |
+
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
|
| 896 |
+
|
| 897 |
+
# Initialize weights and apply final processing
|
| 898 |
+
self.post_init()
|
| 899 |
+
|
| 900 |
+
def get_input_embeddings(self):
|
| 901 |
+
return self.model.embed_tokens
|
| 902 |
+
|
| 903 |
+
def set_input_embeddings(self, value):
|
| 904 |
+
self.model.embed_tokens = value
|
| 905 |
+
|
| 906 |
+
def get_output_embeddings(self):
|
| 907 |
+
return self.lm_head
|
| 908 |
+
|
| 909 |
+
def set_output_embeddings(self, new_embeddings):
|
| 910 |
+
self.lm_head = new_embeddings
|
| 911 |
+
|
| 912 |
+
def set_decoder(self, decoder):
|
| 913 |
+
self.model = decoder
|
| 914 |
+
|
| 915 |
+
def get_decoder(self):
|
| 916 |
+
return self.model
|
| 917 |
+
|
| 918 |
+
@add_start_docstrings_to_model_forward(GEMMA2_INPUTS_DOCSTRING)
|
| 919 |
+
@replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
|
| 920 |
+
def forward(
|
| 921 |
+
self,
|
| 922 |
+
input_ids: torch.LongTensor = None,
|
| 923 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 924 |
+
position_ids: Optional[torch.LongTensor] = None,
|
| 925 |
+
past_key_values: Optional[HybridCache] = None,
|
| 926 |
+
inputs_embeds: Optional[torch.FloatTensor] = None,
|
| 927 |
+
labels: Optional[torch.LongTensor] = None,
|
| 928 |
+
use_cache: Optional[bool] = None,
|
| 929 |
+
output_attentions: Optional[bool] = None,
|
| 930 |
+
output_hidden_states: Optional[bool] = None,
|
| 931 |
+
return_dict: Optional[bool] = None,
|
| 932 |
+
cache_position: Optional[torch.LongTensor] = None,
|
| 933 |
+
num_logits_to_keep: int = 0,
|
| 934 |
+
**loss_kwargs,
|
| 935 |
+
) -> Union[Tuple, CausalLMOutputWithPast]:
|
| 936 |
+
r"""
|
| 937 |
+
Args:
|
| 938 |
+
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
|
| 939 |
+
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
|
| 940 |
+
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
|
| 941 |
+
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
|
| 942 |
+
|
| 943 |
+
num_logits_to_keep (`int`, *optional*):
|
| 944 |
+
Calculate logits for the last `num_logits_to_keep` tokens. If `0`, calculate logits for all
|
| 945 |
+
`input_ids` (special case). Only last token logits are needed for generation, and calculating them only for that
|
| 946 |
+
token can save memory, which becomes pretty significant for long sequences or large vocabulary size.
|
| 947 |
+
|
| 948 |
+
Returns:
|
| 949 |
+
|
| 950 |
+
Example:
|
| 951 |
+
|
| 952 |
+
```python
|
| 953 |
+
>>> from transformers import AutoTokenizer, GemmaForCausalLM
|
| 954 |
+
|
| 955 |
+
>>> model = GemmaForCausalLM.from_pretrained("google/gemma-2-9b")
|
| 956 |
+
>>> tokenizer = AutoTokenizer.from_pretrained("google/gemma-2-9b")
|
| 957 |
+
|
| 958 |
+
>>> prompt = "What is your favorite condiment?"
|
| 959 |
+
>>> inputs = tokenizer(prompt, return_tensors="pt")
|
| 960 |
+
|
| 961 |
+
>>> # Generate
|
| 962 |
+
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
|
| 963 |
+
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
|
| 964 |
+
"What is your favorite condiment?"
|
| 965 |
+
```"""
|
| 966 |
+
|
| 967 |
+
if self.training and self.config._attn_implementation != "eager":
|
| 968 |
+
logger.warning_once(
|
| 969 |
+
"It is strongly recommended to train Gemma2 models with the `eager` attention implementation "
|
| 970 |
+
f"instead of `{self.config._attn_implementation}`. Use `eager` with `AutoModelForCausalLM.from_pretrained('<path-to-checkpoint>', attn_implementation='eager')`."
|
| 971 |
+
)
|
| 972 |
+
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
| 973 |
+
output_hidden_states = (
|
| 974 |
+
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
| 975 |
+
)
|
| 976 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
| 977 |
+
# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
|
| 978 |
+
outputs = self.model(
|
| 979 |
+
input_ids=input_ids,
|
| 980 |
+
attention_mask=attention_mask,
|
| 981 |
+
position_ids=position_ids,
|
| 982 |
+
past_key_values=past_key_values,
|
| 983 |
+
inputs_embeds=inputs_embeds,
|
| 984 |
+
use_cache=use_cache,
|
| 985 |
+
output_attentions=output_attentions,
|
| 986 |
+
output_hidden_states=output_hidden_states,
|
| 987 |
+
return_dict=return_dict,
|
| 988 |
+
cache_position=cache_position,
|
| 989 |
+
)
|
| 990 |
+
|
| 991 |
+
hidden_states = outputs[0]
|
| 992 |
+
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
|
| 993 |
+
logits = self.lm_head(hidden_states[:, -num_logits_to_keep:, :])
|
| 994 |
+
if self.config.final_logit_softcapping is not None:
|
| 995 |
+
logits = logits / self.config.final_logit_softcapping
|
| 996 |
+
logits = torch.tanh(logits)
|
| 997 |
+
logits = logits * self.config.final_logit_softcapping
|
| 998 |
+
|
| 999 |
+
loss = None
|
| 1000 |
+
if labels is not None:
|
| 1001 |
+
loss = self.loss_function(logits, labels, self.vocab_size, **loss_kwargs)
|
| 1002 |
+
|
| 1003 |
+
if not return_dict:
|
| 1004 |
+
output = (logits,) + outputs[1:]
|
| 1005 |
+
return (loss,) + output if loss is not None else output
|
| 1006 |
+
|
| 1007 |
+
return CausalLMOutputWithPast(
|
| 1008 |
+
loss=loss,
|
| 1009 |
+
logits=logits,
|
| 1010 |
+
past_key_values=outputs.past_key_values,
|
| 1011 |
+
hidden_states=outputs.hidden_states,
|
| 1012 |
+
attentions=outputs.attentions,
|
| 1013 |
+
)
|
| 1014 |
+
|
| 1015 |
+
def prepare_inputs_for_generation(
|
| 1016 |
+
self,
|
| 1017 |
+
input_ids,
|
| 1018 |
+
past_key_values=None,
|
| 1019 |
+
attention_mask=None,
|
| 1020 |
+
inputs_embeds=None,
|
| 1021 |
+
cache_position=None,
|
| 1022 |
+
position_ids=None,
|
| 1023 |
+
use_cache=True,
|
| 1024 |
+
num_logits_to_keep=None,
|
| 1025 |
+
**kwargs,
|
| 1026 |
+
):
|
| 1027 |
+
# Overwritten: has a special cache type, `HybridCache`
|
| 1028 |
+
|
| 1029 |
+
# If we have cache: let's slice `input_ids` through `cache_position`, to keep only the unprocessed tokens
|
| 1030 |
+
# Exception 1: when passing input_embeds, input_ids may be missing entries
|
| 1031 |
+
# Exception 2: some generation methods do special slicing of input_ids, so we don't need to do it here
|
| 1032 |
+
if past_key_values is not None:
|
| 1033 |
+
if inputs_embeds is not None: # Exception 1
|
| 1034 |
+
input_ids = input_ids[:, -cache_position.shape[0] :]
|
| 1035 |
+
elif input_ids.shape[1] != cache_position.shape[0]: # Default case (the "else", a no op, is Exception 2)
|
| 1036 |
+
input_ids = input_ids[:, cache_position]
|
| 1037 |
+
if attention_mask is not None and position_ids is None:
|
| 1038 |
+
# create position_ids on the fly for batch generation
|
| 1039 |
+
position_ids = attention_mask.long().cumsum(-1) - 1
|
| 1040 |
+
position_ids.masked_fill_(attention_mask == 0, 1)
|
| 1041 |
+
if past_key_values:
|
| 1042 |
+
position_ids = position_ids[:, -input_ids.shape[1] :]
|
| 1043 |
+
# This `clone` call is needed to avoid recapturing cuda graphs with `torch.compile`'s
|
| 1044 |
+
# `mode="reduce-overhead`, as otherwise the input `position_ids` would have various stride
|
| 1045 |
+
# during the decoding. Here, simply using `.contiguous()` is not sufficient as in the
|
| 1046 |
+
# batch size = 1 case, `position_ids` is already contiguous but with varying stride
|
| 1047 |
+
# which retriggers a capture.
|
| 1048 |
+
position_ids = position_ids.clone(memory_format=torch.contiguous_format)
|
| 1049 |
+
|
| 1050 |
+
# if `inputs_embeds` are passed, we only want to use them in the 1st generation step
|
| 1051 |
+
if inputs_embeds is not None and cache_position[0] == 0:
|
| 1052 |
+
model_inputs = {"inputs_embeds": inputs_embeds, "input_ids": None}
|
| 1053 |
+
else:
|
| 1054 |
+
# The clone here is for the same reason as for `position_ids`.
|
| 1055 |
+
model_inputs = {"input_ids": input_ids.clone(memory_format=torch.contiguous_format), "inputs_embeds": None}
|
| 1056 |
+
|
| 1057 |
+
if (
|
| 1058 |
+
isinstance(past_key_values, HybridCache)
|
| 1059 |
+
and attention_mask.ndim == 2
|
| 1060 |
+
and not self.config._attn_implementation == "flash_attention_2"
|
| 1061 |
+
):
|
| 1062 |
+
if model_inputs["inputs_embeds"] is not None:
|
| 1063 |
+
batch_size, sequence_length, _ = model_inputs["inputs_embeds"].shape
|
| 1064 |
+
device = model_inputs["inputs_embeds"].device
|
| 1065 |
+
else:
|
| 1066 |
+
batch_size, sequence_length = model_inputs["input_ids"].shape
|
| 1067 |
+
device = model_inputs["input_ids"].device
|
| 1068 |
+
|
| 1069 |
+
attention_mask = self.model._prepare_4d_causal_attention_mask_with_cache_position(
|
| 1070 |
+
attention_mask,
|
| 1071 |
+
sequence_length=sequence_length,
|
| 1072 |
+
target_length=past_key_values.get_max_cache_shape(),
|
| 1073 |
+
dtype=self.lm_head.weight.dtype,
|
| 1074 |
+
device=device,
|
| 1075 |
+
cache_position=cache_position,
|
| 1076 |
+
batch_size=batch_size,
|
| 1077 |
+
)
|
| 1078 |
+
|
| 1079 |
+
if num_logits_to_keep is not None:
|
| 1080 |
+
model_inputs["num_logits_to_keep"] = num_logits_to_keep
|
| 1081 |
+
|
| 1082 |
+
model_inputs.update(
|
| 1083 |
+
{
|
| 1084 |
+
"position_ids": position_ids,
|
| 1085 |
+
"cache_position": cache_position,
|
| 1086 |
+
"past_key_values": past_key_values,
|
| 1087 |
+
"use_cache": use_cache,
|
| 1088 |
+
"attention_mask": attention_mask,
|
| 1089 |
+
}
|
| 1090 |
+
)
|
| 1091 |
+
return model_inputs
|
| 1092 |
+
|
| 1093 |
+
|
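# Generation-memory sketch (illustrative, toy sizes): only the last position's logits are needed
# while decoding, so `hidden_states[:, -num_logits_to_keep:, :]` trims the lm_head matmul from
# [batch, seq_len, vocab_size] down to [batch, num_logits_to_keep, vocab_size]:
#   >>> hidden_states = torch.randn(2, 512, 16)
#   >>> hidden_states[:, -1:, :].shape   # num_logits_to_keep = 1
#   torch.Size([2, 1, 16])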
| 1094 |
+
@add_start_docstrings(
|
| 1095 |
+
"""
|
| 1096 |
+
The Gemma2 Model transformer with a sequence classification head on top (linear layer).
|
| 1097 |
+
|
| 1098 |
+
[`Gemma2ForSequenceClassification`] uses the last token in order to do the classification, as other causal models
|
| 1099 |
+
(e.g. GPT-2) do.
|
| 1100 |
+
|
| 1101 |
+
Since it does classification on the last token, it requires to know the position of the last token. If a
|
| 1102 |
+
`pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
|
| 1103 |
+
no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
|
| 1104 |
+
padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
|
| 1105 |
+
each row of the batch).
|
| 1106 |
+
""",
|
| 1107 |
+
GEMMA2_START_DOCSTRING,
|
| 1108 |
+
)
|
| 1109 |
+
class Gemma2ForSequenceClassification(Gemma2PreTrainedModel):
|
| 1110 |
+
def __init__(self, config):
|
| 1111 |
+
super().__init__(config)
|
| 1112 |
+
self.num_labels = config.num_labels
|
| 1113 |
+
self.model = Gemma2Model(config)
|
| 1114 |
+
self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
|
| 1115 |
+
|
| 1116 |
+
# Initialize weights and apply final processing
|
| 1117 |
+
self.post_init()
|
| 1118 |
+
|
| 1119 |
+
def get_input_embeddings(self):
|
| 1120 |
+
return self.model.embed_tokens
|
| 1121 |
+
|
| 1122 |
+
def set_input_embeddings(self, value):
|
| 1123 |
+
self.model.embed_tokens = value
|
| 1124 |
+
|
| 1125 |
+
@add_start_docstrings_to_model_forward(GEMMA2_INPUTS_DOCSTRING)
|
| 1126 |
+
def forward(
|
| 1127 |
+
self,
|
| 1128 |
+
input_ids: Optional[torch.LongTensor] = None,
|
| 1129 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 1130 |
+
position_ids: Optional[torch.LongTensor] = None,
|
| 1131 |
+
past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
|
| 1132 |
+
inputs_embeds: Optional[torch.FloatTensor] = None,
|
| 1133 |
+
labels: Optional[torch.LongTensor] = None,
|
| 1134 |
+
use_cache: Optional[bool] = None,
|
| 1135 |
+
output_attentions: Optional[bool] = None,
|
| 1136 |
+
output_hidden_states: Optional[bool] = None,
|
| 1137 |
+
return_dict: Optional[bool] = None,
|
| 1138 |
+
) -> Union[Tuple, SequenceClassifierOutputWithPast]:
|
| 1139 |
+
r"""
|
| 1140 |
+
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
|
| 1141 |
+
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
|
| 1142 |
+
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
|
| 1143 |
+
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
|
| 1144 |
+
"""
|
| 1145 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
| 1146 |
+
|
| 1147 |
+
transformer_outputs = self.model(
|
| 1148 |
+
input_ids,
|
| 1149 |
+
attention_mask=attention_mask,
|
| 1150 |
+
position_ids=position_ids,
|
| 1151 |
+
past_key_values=past_key_values,
|
| 1152 |
+
inputs_embeds=inputs_embeds,
|
| 1153 |
+
use_cache=use_cache,
|
| 1154 |
+
output_attentions=output_attentions,
|
| 1155 |
+
output_hidden_states=output_hidden_states,
|
| 1156 |
+
return_dict=return_dict,
|
| 1157 |
+
)
|
| 1158 |
+
hidden_states = transformer_outputs[0]
|
| 1159 |
+
logits = self.score(hidden_states)
|
| 1160 |
+
|
| 1161 |
+
if input_ids is not None:
|
| 1162 |
+
batch_size = input_ids.shape[0]
|
| 1163 |
+
else:
|
| 1164 |
+
batch_size = inputs_embeds.shape[0]
|
| 1165 |
+
|
| 1166 |
+
if self.config.pad_token_id is None and batch_size != 1:
|
| 1167 |
+
raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
|
| 1168 |
+
if self.config.pad_token_id is None:
|
| 1169 |
+
sequence_lengths = -1
|
| 1170 |
+
else:
|
| 1171 |
+
if input_ids is not None:
|
| 1172 |
+
# if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
|
| 1173 |
+
sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
|
| 1174 |
+
sequence_lengths = sequence_lengths % input_ids.shape[-1]
|
| 1175 |
+
sequence_lengths = sequence_lengths.to(logits.device)
|
| 1176 |
+
else:
|
| 1177 |
+
sequence_lengths = -1
|
| 1178 |
+
|
| 1179 |
+
pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
|
| 1180 |
+
|
| 1181 |
+
loss = None
|
| 1182 |
+
if labels is not None:
|
| 1183 |
+
loss = self.loss_function(logits=logits, labels=labels, pooled_logits=pooled_logits, config=self.config)
|
| 1184 |
+
|
| 1185 |
+
if not return_dict:
|
| 1186 |
+
output = (pooled_logits,) + transformer_outputs[1:]
|
| 1187 |
+
return ((loss,) + output) if loss is not None else output
|
| 1188 |
+
|
| 1189 |
+
return SequenceClassifierOutputWithPast(
|
| 1190 |
+
loss=loss,
|
| 1191 |
+
logits=pooled_logits,
|
| 1192 |
+
past_key_values=transformer_outputs.past_key_values,
|
| 1193 |
+
hidden_states=transformer_outputs.hidden_states,
|
| 1194 |
+
attentions=transformer_outputs.attentions,
|
| 1195 |
+
)
|
| 1196 |
+
|
| 1197 |
+
|
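# Pooling-index sketch (illustrative, pad_token_id assumed to be 0): the classifier pools logits
# at the last non-pad position, found as the first pad index minus one (the modulo covers rows
# with no padding by wrapping -1 to the last position):
#   >>> ids = torch.tensor([[5, 6, 7, 0, 0]])
#   >>> (torch.eq(ids, 0).int().argmax(-1) - 1) % ids.shape[-1]
#   tensor([2])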
| 1198 |
+
@add_start_docstrings(
|
| 1199 |
+
"""
|
| 1200 |
+
The Gemma2 Model transformer with a token classification head on top (a linear layer on top of the hidden-states
|
| 1201 |
+
output) e.g. for Named-Entity-Recognition (NER) tasks.
|
| 1202 |
+
""",
|
| 1203 |
+
GEMMA2_START_DOCSTRING,
|
| 1204 |
+
)
|
| 1205 |
+
class Gemma2ForTokenClassification(Gemma2PreTrainedModel):
|
| 1206 |
+
def __init__(self, config):
|
| 1207 |
+
super().__init__(config)
|
| 1208 |
+
self.num_labels = config.num_labels
|
| 1209 |
+
self.model = Gemma2Model(config)
|
| 1210 |
+
if getattr(config, "classifier_dropout", None) is not None:
|
| 1211 |
+
classifier_dropout = config.classifier_dropout
|
| 1212 |
+
elif getattr(config, "hidden_dropout", None) is not None:
|
| 1213 |
+
classifier_dropout = config.hidden_dropout
|
| 1214 |
+
else:
|
| 1215 |
+
classifier_dropout = 0.1
|
| 1216 |
+
self.dropout = nn.Dropout(classifier_dropout)
|
| 1217 |
+
self.score = nn.Linear(config.hidden_size, config.num_labels)
|
| 1218 |
+
|
| 1219 |
+
# Initialize weights and apply final processing
|
| 1220 |
+
self.post_init()
|
| 1221 |
+
|
| 1222 |
+
def get_input_embeddings(self):
|
| 1223 |
+
return self.model.embed_tokens
|
| 1224 |
+
|
| 1225 |
+
def set_input_embeddings(self, value):
|
| 1226 |
+
self.model.embed_tokens = value
|
| 1227 |
+
|
| 1228 |
+
@add_start_docstrings_to_model_forward(GEMMA2_INPUTS_DOCSTRING)
|
| 1229 |
+
@add_code_sample_docstrings(
|
| 1230 |
+
checkpoint=_CHECKPOINT_FOR_DOC,
|
| 1231 |
+
output_type=TokenClassifierOutput,
|
| 1232 |
+
config_class=_CONFIG_FOR_DOC,
|
| 1233 |
+
)
|
| 1234 |
+
def forward(
|
| 1235 |
+
self,
|
| 1236 |
+
input_ids: Optional[torch.LongTensor] = None,
|
| 1237 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 1238 |
+
position_ids: Optional[torch.LongTensor] = None,
|
| 1239 |
+
past_key_values: Optional[List[torch.FloatTensor]] = None,
|
| 1240 |
+
inputs_embeds: Optional[torch.FloatTensor] = None,
|
| 1241 |
+
labels: Optional[torch.LongTensor] = None,
|
| 1242 |
+
use_cache: Optional[bool] = None,
|
| 1243 |
+
output_attentions: Optional[bool] = None,
|
| 1244 |
+
output_hidden_states: Optional[bool] = None,
|
| 1245 |
+
return_dict: Optional[bool] = None,
|
| 1246 |
+
) -> Union[Tuple, TokenClassifierOutput]:
|
| 1247 |
+
r"""
|
| 1248 |
+
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
|
| 1249 |
+
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
|
| 1250 |
+
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
|
| 1251 |
+
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
|
| 1252 |
+
"""
|
| 1253 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
| 1254 |
+
|
| 1255 |
+
outputs = self.model(
|
| 1256 |
+
input_ids,
|
| 1257 |
+
attention_mask=attention_mask,
|
| 1258 |
+
position_ids=position_ids,
|
| 1259 |
+
past_key_values=past_key_values,
|
| 1260 |
+
inputs_embeds=inputs_embeds,
|
| 1261 |
+
use_cache=use_cache,
|
| 1262 |
+
output_attentions=output_attentions,
|
| 1263 |
+
output_hidden_states=output_hidden_states,
|
| 1264 |
+
return_dict=return_dict,
|
| 1265 |
+
)
|
| 1266 |
+
sequence_output = outputs[0]
|
| 1267 |
+
sequence_output = self.dropout(sequence_output)
|
| 1268 |
+
logits = self.score(sequence_output)
|
| 1269 |
+
|
| 1270 |
+
loss = None
|
| 1271 |
+
if labels is not None:
|
| 1272 |
+
loss = self.loss_function(logits, labels, self.config)
|
| 1273 |
+
|
| 1274 |
+
if not return_dict:
|
| 1275 |
+
output = (logits,) + outputs[2:]
|
| 1276 |
+
return ((loss,) + output) if loss is not None else output
|
| 1277 |
+
|
| 1278 |
+
return TokenClassifierOutput(
|
| 1279 |
+
loss=loss,
|
| 1280 |
+
logits=logits,
|
| 1281 |
+
hidden_states=outputs.hidden_states,
|
| 1282 |
+
attentions=outputs.attentions,
|
| 1283 |
+
)
|
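The sequence-classification head above pools logits at each sequence's last non-padding token. A minimal standalone sketch of that indexing trick (made-up pad id, batch, and label count, only to illustrate why the `argmax(-1) - 1` plus modulo works):

import torch

# Hypothetical batch: pad_token_id=0, one padded row and one full row.
input_ids = torch.tensor([[5, 7, 9, 0, 0],
                          [3, 4, 6, 8, 2]])
pad_token_id = 0
logits = torch.randn(2, 5, 4)  # (batch, seq_len, num_labels)

# argmax over the pad mask finds the first pad position; -1 steps back to the
# last real token. The modulo maps the -1 produced by fully un-padded rows to
# the final position, avoiding reverse indexing (ONNX-friendly, as noted above).
sequence_lengths = torch.eq(input_ids, pad_token_id).int().argmax(-1) - 1
sequence_lengths = sequence_lengths % input_ids.shape[-1]    # tensor([2, 4])
pooled_logits = logits[torch.arange(2), sequence_lengths]    # (batch, num_labels)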
modeling_spatialvla.py
ADDED
|
@@ -0,0 +1,599 @@
|
| 1 |
+
# coding=utf-8
|
| 2 |
+
# Copyright 2024 the HuggingFace Inc. team. All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
from dataclasses import dataclass
|
| 16 |
+
from typing import List, Optional, Tuple, Union, Dict
|
| 17 |
+
|
| 18 |
+
import os
|
| 19 |
+
import torch
|
| 20 |
+
import torch.utils.checkpoint
|
| 21 |
+
from torch import nn
|
| 22 |
+
from torch.linalg import inv
|
| 23 |
+
import torchvision.transforms.functional as TF
|
| 24 |
+
import torch.nn.functional as F
|
| 25 |
+
from transformers.cache_utils import Cache, HybridCache, StaticCache
|
| 26 |
+
from transformers.generation import GenerationMixin
|
| 27 |
+
from transformers.modeling_utils import PreTrainedModel, PretrainedConfig
|
| 28 |
+
from transformers.utils import (
|
| 29 |
+
ModelOutput,
|
| 30 |
+
logging,
|
| 31 |
+
)
|
| 32 |
+
from .configuration_spatialvla import SpatialVLAConfig
|
| 33 |
+
from .modeling_gemma2 import Gemma2ForCausalLM
|
| 34 |
+
from transformers import AutoModel, ZoeDepthForDepthEstimation
|
| 35 |
+
|
| 36 |
+
SIGLIP_MEAN, SIGLIP_STD = (0.5, 0.5, 0.5), (0.5, 0.5, 0.5)
|
| 37 |
+
ZOE_MEAN, ZOE_STD = (0.5, 0.5, 0.5), (0.5, 0.5, 0.5)
|
| 38 |
+
|
| 39 |
+
logger = logging.get_logger(__name__)
|
| 40 |
+
|
| 41 |
+
from transformers import StoppingCriteria, StoppingCriteriaList
|
| 42 |
+
|
| 43 |
+
class StopOnReasoningTag(StoppingCriteria):
|
| 44 |
+
def __init__(self, tokenizer, tag="<Reasoning>"):
|
| 45 |
+
self.tag_token_ids = tokenizer.tokenizer.encode(tag, add_special_tokens=False)[:-1]
|
| 46 |
+
# self.tag_token_ids = tmp.input_ids[:-1]
|
| 47 |
+
self.tag_length = len(self.tag_token_ids)
|
| 48 |
+
|
| 49 |
+
def __call__(self, input_ids, scores, **kwargs):
|
| 50 |
+
# Get the last tokens of the generated sequence
|
| 51 |
+
generated_tokens = input_ids[0].tolist()
|
| 52 |
+
# print("Hehe:", type(generated_tokens))
|
| 53 |
+
# print("lmao:", generated_tokens.shape)
|
| 54 |
+
# print("generated tokens",generated_tokens)
|
| 55 |
+
# print("tag length",self.tag_length)
|
| 56 |
+
# print("tag token",self.tag_token_ids)
|
| 57 |
+
|
| 58 |
+
# if len(generated_tokens) < tag_length:
|
| 59 |
+
# return False
|
| 60 |
+
return generated_tokens[-self.tag_length:] == self.tag_token_ids
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
class Ego3DPositionEmbeddingMLP(nn.Module):
|
| 65 |
+
"""Absolute pos embedding, learned.
|
| 66 |
+
https://github.com/kwea123/nerf_pl/blob/52aeb387da64a9ad9a0f914ea9b049ffc598b20c/models/nerf.py#L4
|
| 67 |
+
"""
|
| 68 |
+
|
| 69 |
+
def __init__(self, in_channels=3, num_pos_feats=768, n_freqs=8, logscale=True):
|
| 70 |
+
super(Ego3DPositionEmbeddingMLP, self).__init__()
|
| 71 |
+
self.n_freqs = n_freqs
|
| 72 |
+
self.freq_out_channels = in_channels * (2 * n_freqs + 1)
|
| 73 |
+
if logscale:
|
| 74 |
+
freq_bands = 2 ** torch.linspace(0, n_freqs - 1, n_freqs)
|
| 75 |
+
else:
|
| 76 |
+
freq_bands = torch.linspace(1, 2 ** (n_freqs - 1), n_freqs)
|
| 77 |
+
|
| 78 |
+
center = torch.tensor([0., 0., 2.]).repeat(in_channels // 3)
|
| 79 |
+
self.register_buffer("freq_bands", freq_bands, persistent=False)
|
| 80 |
+
self.register_buffer("center", center, persistent=False)
|
| 81 |
+
|
| 82 |
+
self.position_embedding_head = nn.Sequential(
|
| 83 |
+
nn.Linear(self.freq_out_channels, num_pos_feats),
|
| 84 |
+
nn.LayerNorm(num_pos_feats),
|
| 85 |
+
nn.ReLU(),
|
| 86 |
+
nn.Linear(num_pos_feats, num_pos_feats),
|
| 87 |
+
)
|
| 88 |
+
self._reset_parameters()
|
| 89 |
+
|
| 90 |
+
def _reset_parameters(self):
|
| 91 |
+
"""init with small weights to maintain stable training."""
|
| 92 |
+
for p in self.parameters():
|
| 93 |
+
if p.dim() > 1:
|
| 94 |
+
nn.init.xavier_uniform_(p, gain=0.01)
|
| 95 |
+
|
| 96 |
+
@torch.no_grad()
|
| 97 |
+
def frequency_encoding(self, xyz):
|
| 98 |
+
"""
|
| 99 |
+
Embeds x to (x, sin(2^k x), cos(2^k x), ...)
|
| 100 |
+
Different from the paper, "x" is also in the output
|
| 101 |
+
See https://github.com/bmild/nerf/issues/12
|
| 102 |
+
x \in [-2, 2]
|
| 103 |
+
y \in [-2, 2]
|
| 104 |
+
z \in [0., 4]
|
| 105 |
+
Inputs:
|
| 106 |
+
x: (b n m)
|
| 107 |
+
Outputs:
|
| 108 |
+
out: (b n o)
|
| 109 |
+
"""
|
| 110 |
+
xyz_n = ((xyz - self.center) / 2.0).to(self.freq_bands.dtype)
|
| 111 |
+
xyz_feq = xyz_n.unsqueeze(-1) * self.freq_bands # (b n m 1)
|
| 112 |
+
sin_xyz, cos_xyz = torch.sin(xyz_feq), torch.cos(xyz_feq) # (b n m nf)
|
| 113 |
+
encoding = torch.cat([xyz_n.unsqueeze(-1), sin_xyz, cos_xyz], -1).reshape(*xyz.shape[:2], -1)
|
| 114 |
+
return encoding
|
| 115 |
+
|
| 116 |
+
def forward(self, xyz):
|
| 117 |
+
"""Forward pass, xyz is (B, N, 3or6), output (B, N, F)."""
|
| 118 |
+
freq_encoding = self.frequency_encoding(xyz)
|
| 119 |
+
position_embedding = self.position_embedding_head(freq_encoding)
|
| 120 |
+
return position_embedding
|
| 121 |
+
|
| 122 |
+
def process_zoe(pixel_values, pad_mode="reflect", output_size=(384, 512)):
|
| 123 |
+
"""https://github.com/huggingface/transformers/blob/v4.45.2/src/transformers/models/zoedepth/image_processing_zoedepth.py"""
|
| 124 |
+
# h, w = images.shape[-2:]
|
| 125 |
+
# pad
|
| 126 |
+
ph, pw = 31, 31 # int((h / 2)**0.5 * 3), int((w / 2)**0.5 * 3) # 32, 31
|
| 127 |
+
images = F.pad(pixel_values, (pw, pw, ph, ph), mode=pad_mode)
|
| 128 |
+
# resize
|
| 129 |
+
size = (384, 384) # get_resize_output_image_size
|
| 130 |
+
images = F.interpolate(images, size=size, mode="bicubic", align_corners=True)
|
| 131 |
+
# zoe: padding -> resize -> normalize. we follow `normalize -> padding -> resize` from siglip
|
| 132 |
+
images = TF.normalize(images, mean=ZOE_MEAN, std=ZOE_STD)
|
| 133 |
+
return images, ph, pw
|
| 134 |
+
|
| 135 |
+
@dataclass
|
| 136 |
+
class SpatialVLACausalLMOutputWithPast(ModelOutput):
|
| 137 |
+
loss: Optional[torch.FloatTensor] = None
|
| 138 |
+
logits: torch.FloatTensor = None
|
| 139 |
+
past_key_values: Optional[Union[List[torch.FloatTensor], Cache]] = None
|
| 140 |
+
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
|
| 141 |
+
attentions: Optional[Tuple[torch.FloatTensor]] = None
|
| 142 |
+
image_hidden_states: Optional[torch.FloatTensor] = None
|
| 143 |
+
|
| 144 |
+
class SpatialVLAMultiModalProjector(nn.Module):
|
| 145 |
+
def __init__(self, config: SpatialVLAConfig):
|
| 146 |
+
super().__init__()
|
| 147 |
+
self.linear = nn.Linear(config.vision_config.hidden_size, config.vision_config.projection_dim, bias=True)
|
| 148 |
+
|
| 149 |
+
def forward(self, image_features):
|
| 150 |
+
hidden_states = self.linear(image_features)
|
| 151 |
+
return hidden_states
|
| 152 |
+
|
| 153 |
+
class SpatialVLAPreTrainedModel(PreTrainedModel):
|
| 154 |
+
config_class = SpatialVLAConfig
|
| 155 |
+
base_model_prefix = "model"
|
| 156 |
+
supports_gradient_checkpointing = True
|
| 157 |
+
_no_split_modules = ["SpatialVLAMultiModalProjector", "ZoeDepthForDepthEstimation", "Ego3DPositionEmbeddingMLP"]
|
| 158 |
+
_skip_keys_device_placement = "past_key_values"
|
| 159 |
+
_supports_cache_class = True
|
| 160 |
+
_supports_quantized_cache = True
|
| 161 |
+
_supports_static_cache = True
|
| 162 |
+
_supports_cache_class = True
|
| 163 |
+
_supports_flash_attn_2 = True
|
| 164 |
+
_supports_sdpa = True
|
| 165 |
+
|
| 166 |
+
def _init_weights(self, module):
|
| 167 |
+
std = (
|
| 168 |
+
self.config.initializer_range
|
| 169 |
+
if hasattr(self.config, "initializer_range")
|
| 170 |
+
else self.config.text_config.initializer_range
|
| 171 |
+
)
|
| 172 |
+
|
| 173 |
+
if hasattr(module, "class_embedding"):
|
| 174 |
+
module.class_embedding.data.normal_(mean=0.0, std=std)
|
| 175 |
+
|
| 176 |
+
if isinstance(module, (nn.Linear, nn.Conv2d)):
|
| 177 |
+
module.weight.data.normal_(mean=0.0, std=std)
|
| 178 |
+
if module.bias is not None:
|
| 179 |
+
module.bias.data.zero_()
|
| 180 |
+
elif isinstance(module, nn.Embedding):
|
| 181 |
+
module.weight.data.normal_(mean=0.0, std=std)
|
| 182 |
+
if module.padding_idx is not None:
|
| 183 |
+
module.weight.data[module.padding_idx].zero_()
|
| 184 |
+
|
| 185 |
+
class SpatialVLAForConditionalGeneration(SpatialVLAPreTrainedModel, GenerationMixin):
|
| 186 |
+
def __init__(self, config: SpatialVLAConfig, vision_model=None, vision_zoe_model=None, projector_model=None, language_model=None):
|
| 187 |
+
super().__init__(config)
|
| 188 |
+
|
| 189 |
+
self.vision_tower = vision_model or AutoModel.from_config(config=config.vision_config)
|
| 190 |
+
self.multi_modal_projector = projector_model or SpatialVLAMultiModalProjector(config)
|
| 191 |
+
self.vocab_size = config.text_config.vocab_size
|
| 192 |
+
if language_model is None:
|
| 193 |
+
language_model = Gemma2ForCausalLM(config=config.text_config)
|
| 194 |
+
if language_model._tied_weights_keys is not None:
|
| 195 |
+
self._tied_weights_keys = [f"language_model.{k}" for k in language_model._tied_weights_keys]
|
| 196 |
+
self.language_model = language_model
|
| 197 |
+
|
| 198 |
+
if config.use_vision_zoe:
|
| 199 |
+
self.vision_zoe_model = vision_zoe_model or ZoeDepthForDepthEstimation(config.vision_zoe_config)
|
| 200 |
+
self.position_embedding_3d = Ego3DPositionEmbeddingMLP(
|
| 201 |
+
config.ego3d_patch_reso**2 * 3, num_pos_feats=config.vision_config.hidden_size, n_freqs=config.n_freqs
|
| 202 |
+
)
|
| 203 |
+
# register buffer
|
| 204 |
+
patch_size, reso, image_size = config.vision_config.patch_size, config.ego3d_patch_reso, config.vision_config.image_size
|
| 205 |
+
y, x = torch.meshgrid(torch.arange(0, image_size, patch_size // reso), torch.arange(0, image_size, patch_size // reso), indexing="ij") # (h//sp w//sp)
|
| 206 |
+
y, x = y + patch_size / reso / 2, x + patch_size / reso / 2
|
| 207 |
+
uv_h = torch.stack([x, y, torch.ones_like(x)], dim=0).reshape(3, -1) # (3 hw)
|
| 208 |
+
self.register_buffer("uv_h", uv_h, persistent=False)
|
| 209 |
+
|
| 210 |
+
# shared spatial embeddings for <ACTION> <IMG>
|
| 211 |
+
if config.use_spatial_token:
|
| 212 |
+
self.spatial_embed_tokens = nn.Embedding(self.config.spatial_token_num, config.text_config.hidden_size)
|
| 213 |
+
else:
|
| 214 |
+
self.spatial_embed_tokens = None
|
| 215 |
+
self.pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else -1
|
| 216 |
+
self.processor = None
|
| 217 |
+
|
| 218 |
+
def criteria_value(self):
|
| 219 |
+
self.criteria = StoppingCriteriaList([StopOnReasoningTag(self.processor, "<Reasoning>")])
|
| 220 |
+
|
| 221 |
+
|
| 222 |
+
def backproject_patch(self, K: torch.Tensor, depth: torch.Tensor, patch_size=14, reso=2) -> torch.Tensor:
|
| 223 |
+
"""
|
| 224 |
+
Backproject depth map to 3D points in camera coordinate.
|
| 225 |
+
Args:
|
| 226 |
+
K: camera intrinsic matrix (b 3 3)
|
| 227 |
+
depth: depth map (b 1 h w)
|
| 228 |
+
patch_size: patch size for siglip
|
| 229 |
+
reso: reso^2 -> sample points in each patch
|
| 230 |
+
patch sz = 14 ......
|
| 231 |
+
┌────────┬────────┐
|
| 232 |
+
│ ─ ─ │ ─ ─ │
|
| 233 |
+
│ points │ ├─ ─ ─
|
| 234 |
+
│ ─ ─ │ ─ ─ │
|
| 235 |
+
├────────┼────────┤
|
| 236 |
+
│ ─ ─ │ ─ ─ │
|
| 237 |
+
│ │ │
|
| 238 |
+
│ ─ ─ │ ─ ─ │
|
| 239 |
+
└────────┴────────┘
|
| 240 |
+
reso=2───►points=4
|
| 241 |
+
│
|
| 242 |
+
│
|
| 243 |
+
"""
|
| 244 |
+
b, c, h, w = depth.shape
|
| 245 |
+
hp, wp = h // patch_size, w // patch_size
|
| 246 |
+
sub_hp = sub_wp = reso
|
| 247 |
+
patch_depth = F.interpolate(depth, size=(hp * reso, wp * reso), mode="area").reshape(b, c, -1)
|
| 248 |
+
p_cam = (inv(K.float()) @ self.uv_h.float()) * patch_depth # (b 3 3) @ (3 hw) -> (b 3 hw) * (b 1 hw) -> (b 3 hw)
|
| 249 |
+
patch_p_cam = p_cam.reshape(b, 3, hp, sub_hp, wp, sub_wp).permute(0, 2, 4, 3, 5, 1).reshape(b, hp * wp, -1)
|
| 250 |
+
return patch_p_cam
|
| 251 |
+
|
| 252 |
+
def get_input_embeddings(self):
|
| 253 |
+
return self.language_model.get_input_embeddings()
|
| 254 |
+
|
| 255 |
+
def set_input_embeddings(self, value):
|
| 256 |
+
self.language_model.set_input_embeddings(value)
|
| 257 |
+
|
| 258 |
+
def get_output_embeddings(self):
|
| 259 |
+
return self.language_model.get_output_embeddings()
|
| 260 |
+
|
| 261 |
+
def set_output_embeddings(self, new_embeddings):
|
| 262 |
+
self.language_model.set_output_embeddings(new_embeddings)
|
| 263 |
+
|
| 264 |
+
def set_decoder(self, decoder):
|
| 265 |
+
self.language_model.set_decoder(decoder)
|
| 266 |
+
|
| 267 |
+
def get_decoder(self):
|
| 268 |
+
return self.language_model.get_decoder()
|
| 269 |
+
|
| 270 |
+
def tie_weights(self):
|
| 271 |
+
return self.language_model.tie_weights()
|
| 272 |
+
|
| 273 |
+
def resize_token_embeddings(
|
| 274 |
+
self,
|
| 275 |
+
new_num_tokens: Optional[int] = None,
|
| 276 |
+
pad_to_multiple_of: Optional[int] = None,
|
| 277 |
+
mean_resizing: bool = True,
|
| 278 |
+
) -> nn.Embedding:
|
| 279 |
+
model_embeds = self.language_model.resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing)
|
| 280 |
+
vocab_size = model_embeds.weight.shape[0]
|
| 281 |
+
self.config.text_config.vocab_size = self.vocab_size = self.config._vocab_size = vocab_size
|
| 282 |
+
self.tie_weights()
|
| 283 |
+
return model_embeds
|
| 284 |
+
|
| 285 |
+
def _update_causal_mask(
|
| 286 |
+
self,
|
| 287 |
+
attention_mask,
|
| 288 |
+
token_type_ids,
|
| 289 |
+
past_key_values,
|
| 290 |
+
cache_position,
|
| 291 |
+
input_ids=None,
|
| 292 |
+
inputs_embeds=None,
|
| 293 |
+
is_training: bool = False,
|
| 294 |
+
):
|
| 295 |
+
if self.config.text_config._attn_implementation == "flash_attention_2":
|
| 296 |
+
if attention_mask is not None and 0.0 in attention_mask:
|
| 297 |
+
return attention_mask
|
| 298 |
+
return None
|
| 299 |
+
|
| 300 |
+
using_static_cache = isinstance(past_key_values, StaticCache)
|
| 301 |
+
min_dtype = torch.finfo(self.dtype).min
|
| 302 |
+
inputs_lead_dim = input_ids.shape[0] if input_ids is not None else inputs_embeds.shape[0]
|
| 303 |
+
sequence_length = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
|
| 304 |
+
if using_static_cache:
|
| 305 |
+
target_length = past_key_values.get_max_cache_shape()
|
| 306 |
+
elif isinstance(past_key_values, HybridCache):
|
| 307 |
+
target_length = past_key_values.get_max_cache_shape()
|
| 308 |
+
else:
|
| 309 |
+
target_length = (
|
| 310 |
+
attention_mask.shape[-1]
|
| 311 |
+
if isinstance(attention_mask, torch.Tensor)
|
| 312 |
+
else cache_position[0] + sequence_length + 1
|
| 313 |
+
)
|
| 314 |
+
|
| 315 |
+
if attention_mask is not None and attention_mask.dim() == 4:
|
| 316 |
+
return attention_mask
|
| 317 |
+
|
| 318 |
+
causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=self.dtype, device=cache_position.device)
|
| 319 |
+
if sequence_length != 1:
|
| 320 |
+
if is_training: causal_mask = torch.triu(causal_mask, diagonal=1)
|
| 321 |
+
else: causal_mask[:, :sequence_length] = 0.0
|
| 322 |
+
|
| 323 |
+
causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1)
|
| 324 |
+
causal_mask = causal_mask[None, None, :, :].expand(inputs_lead_dim, 1, -1, -1)
|
| 325 |
+
if attention_mask is not None:
|
| 326 |
+
causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit
|
| 327 |
+
mask_length = attention_mask.shape[-1]
|
| 328 |
+
padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(causal_mask.device)
|
| 329 |
+
padding_mask = padding_mask == 0
|
| 330 |
+
causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(padding_mask, min_dtype)
|
| 331 |
+
if is_training:
|
| 332 |
+
causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(token_type_ids[:, None, None, :].to(causal_mask.device) == 0, 0)
|
| 333 |
+
return causal_mask
|
| 334 |
+
|
| 335 |
+
def get_image_features(self, pixel_values: torch.FloatTensor, intrinsic: torch.FloatTensor):
|
| 336 |
+
siglip_pixel_values = TF.normalize(pixel_values, mean=SIGLIP_MEAN, std=SIGLIP_STD)
|
| 337 |
+
image_outputs = self.vision_tower(siglip_pixel_values)
|
| 338 |
+
|
| 339 |
+
# ego3d position encoding
|
| 340 |
+
if self.config.use_vision_zoe:
|
| 341 |
+
zoe_pixel_values, ph, pw = process_zoe(pixel_values, pad_mode="reflect")
|
| 342 |
+
with torch.no_grad():
|
| 343 |
+
pvh, pvw = pixel_values.shape[-2:]
|
| 344 |
+
depth = self.vision_zoe_model(pixel_values=zoe_pixel_values).predicted_depth
|
| 345 |
+
depth = F.interpolate(
|
| 346 |
+
depth.unsqueeze(1),
|
| 347 |
+
size=(pvh+2*ph, pvw+2*pw),
|
| 348 |
+
mode="bicubic",
|
| 349 |
+
align_corners=True,
|
| 350 |
+
)[..., ph:-ph, pw:-pw]
|
| 351 |
+
xyz = self.backproject_patch(
|
| 352 |
+
intrinsic, depth, patch_size=self.config.vision_config.patch_size, reso=self.config.ego3d_patch_reso
|
| 353 |
+
) # (b, n, 3*4)
|
| 354 |
+
pos_embed_3d = self.position_embedding_3d(xyz)
|
| 355 |
+
selected_image_feature = image_outputs.last_hidden_state + pos_embed_3d
|
| 356 |
+
else:
|
| 357 |
+
selected_image_feature = image_outputs.last_hidden_state
|
| 358 |
+
image_features = self.multi_modal_projector(selected_image_feature)
|
| 359 |
+
image_features = image_features / (self.config.text_config.hidden_size**0.5)
|
| 360 |
+
return image_features
|
| 361 |
+
|
| 362 |
+
def forward(
|
| 363 |
+
self,
|
| 364 |
+
input_ids: torch.LongTensor = None,
|
| 365 |
+
pixel_values: torch.FloatTensor = None,
|
| 366 |
+
actions: Optional[torch.FloatTensor] = None,
|
| 367 |
+
intrinsic: Optional[torch.Tensor] = None,
|
| 368 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 369 |
+
position_ids: Optional[torch.LongTensor] = None,
|
| 370 |
+
past_key_values: Optional[Union[List[torch.FloatTensor], Cache]] = None,
|
| 371 |
+
token_type_ids: Optional[torch.LongTensor] = None,
|
| 372 |
+
cache_position: Optional[torch.LongTensor] = None,
|
| 373 |
+
inputs_embeds: Optional[torch.FloatTensor] = None,
|
| 374 |
+
labels: Optional[torch.LongTensor] = None,
|
| 375 |
+
use_cache: Optional[bool] = None,
|
| 376 |
+
output_attentions: Optional[bool] = None,
|
| 377 |
+
output_hidden_states: Optional[bool] = None,
|
| 378 |
+
return_dict: Optional[bool] = None,
|
| 379 |
+
num_logits_to_keep: int = 0,
|
| 380 |
+
) -> Union[Tuple, SpatialVLACausalLMOutputWithPast]:
|
| 381 |
+
|
| 382 |
+
output_attentions = output_attentions or self.config.output_attentions
|
| 383 |
+
output_hidden_states = output_hidden_states or self.config.output_hidden_states
|
| 384 |
+
return_dict = return_dict or self.config.use_return_dict
|
| 385 |
+
|
| 386 |
+
is_training = token_type_ids is not None and labels is not None
|
| 387 |
+
|
| 388 |
+
if inputs_embeds is None: inputs_embeds = self.get_input_embeddings()(input_ids).clone() # clone to avoid gradient-checkpointing requires_grad issues
|
| 389 |
+
|
| 390 |
+
if self.config.use_spatial_token:
|
| 391 |
+
spatial_selected = (input_ids >= self.config.action_token_begin_idx) & (input_ids < self.config.action_token_begin_idx + self.config.spatial_token_num)
|
| 392 |
+
inputs_embeds[spatial_selected] = inputs_embeds[spatial_selected] * 0.0 + self.spatial_embed_tokens(input_ids[spatial_selected] - self.config.action_token_begin_idx)
|
| 393 |
+
|
| 394 |
+
if cache_position is None:
|
| 395 |
+
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
|
| 396 |
+
cache_position = torch.arange(past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device)
|
| 397 |
+
|
| 398 |
+
if position_ids is None:
|
| 399 |
+
position_ids = cache_position.unsqueeze(0) + 1 # Paligemma positions are 1-indexed
|
| 400 |
+
|
| 401 |
+
# merge
|
| 402 |
+
if pixel_values is not None:
|
| 403 |
+
image_features = self.get_image_features(pixel_values, intrinsic)
|
| 404 |
+
special_image_mask = (input_ids == self.config.image_token_index).unsqueeze(-1)
|
| 405 |
+
special_image_mask = special_image_mask.expand_as(inputs_embeds).to(inputs_embeds.device)
|
| 406 |
+
if inputs_embeds[special_image_mask].numel() != image_features.numel():
|
| 407 |
+
image_tokens_in_text = torch.sum(input_ids == self.config.image_token_index)
|
| 408 |
+
raise ValueError(
|
| 409 |
+
f"Number of images does not match number of special image tokens in the input text. "
|
| 410 |
+
f"Got {image_tokens_in_text} image tokens in the text but {image_features.shape[0] * image_features.shape[1]} "
|
| 411 |
+
"tokens from image embeddings."
|
| 412 |
+
)
|
| 413 |
+
image_features = image_features.to(inputs_embeds.device, inputs_embeds.dtype)
|
| 414 |
+
inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, image_features)
|
| 415 |
+
|
| 416 |
+
# mask out pad-token-ids in labels for BC
|
| 417 |
+
if labels is not None and self.pad_token_id in labels:
|
| 418 |
+
logger.warning_once(
|
| 419 |
+
"`labels` contains `pad_token_id` which will be masked with `config.ignore_index`. "
|
| 420 |
+
"You have to mask out `pad_token_id` when preparing `labels`, this behavior will be removed in v.4.46.",
|
| 421 |
+
)
|
| 422 |
+
labels = torch.where(input_ids == self.pad_token_id, self.config.ignore_index, labels)
|
| 423 |
+
|
| 424 |
+
causal_mask = self._update_causal_mask(
|
| 425 |
+
attention_mask, token_type_ids, past_key_values, cache_position, input_ids, inputs_embeds, is_training
|
| 426 |
+
)
|
| 427 |
+
outputs = self.language_model(
|
| 428 |
+
attention_mask=causal_mask,
|
| 429 |
+
position_ids=position_ids,
|
| 430 |
+
past_key_values=past_key_values,
|
| 431 |
+
inputs_embeds=inputs_embeds,
|
| 432 |
+
use_cache=use_cache,
|
| 433 |
+
output_attentions=output_attentions,
|
| 434 |
+
output_hidden_states=output_hidden_states,
|
| 435 |
+
return_dict=return_dict,
|
| 436 |
+
cache_position=cache_position,
|
| 437 |
+
num_logits_to_keep=num_logits_to_keep,
|
| 438 |
+
)
|
| 439 |
+
|
| 440 |
+
logits = outputs.logits
|
| 441 |
+
# print("logits", logits.shape)
|
| 442 |
+
loss = None
|
| 443 |
+
if labels is not None:
|
| 444 |
+
logits = logits.float()
|
| 445 |
+
shift_logits = logits[..., :-1, :]
|
| 446 |
+
shift_labels = labels[..., 1:]
|
| 447 |
+
|
| 448 |
+
mask = (shift_labels >= self.action_tokenizer.translation_tokenizer.token_start_idx) & (
|
| 449 |
+
shift_labels <= self.action_tokenizer.gripper_tokenizer.token_end_idx
|
| 450 |
+
)
|
| 451 |
+
if attention_mask is not None:
|
| 452 |
+
shift_attention_mask = attention_mask[:, -shift_logits.shape[1] :].to(logits.device)
|
| 453 |
+
shift_logits_action = shift_logits[(shift_attention_mask & mask).to(logits.device) != 0].contiguous()
|
| 454 |
+
shift_labels_action = shift_labels[(shift_attention_mask & mask).to(shift_labels.device) != 0].contiguous()
|
| 455 |
+
shift_logits_reason = shift_logits[(shift_attention_mask & ~mask).to(logits.device) != 0].contiguous()
|
| 456 |
+
shift_labels_reason = shift_labels[(shift_attention_mask & ~mask).to(shift_labels.device) != 0].contiguous()
|
| 457 |
+
else:
|
| 458 |
+
shift_logits_action = shift_logits[mask].contiguous()
|
| 459 |
+
shift_logits_reason = shift_logits[~mask].contiguous()
|
| 460 |
+
|
| 461 |
+
shift_labels_action = shift_labels[mask].contiguous()
|
| 462 |
+
shift_labels_reason = shift_labels[~mask].contiguous()
|
| 463 |
+
|
| 464 |
+
|
| 465 |
+
loss_fct = nn.CrossEntropyLoss()
|
| 466 |
+
|
| 467 |
+
flat_logits_action = shift_logits_action.view(-1, self.config.text_config.vocab_size)
|
| 468 |
+
flat_labels_action = shift_labels_action.view(-1).to(shift_logits.device)
|
| 469 |
+
loss_action = loss_fct(flat_logits_action, flat_labels_action)
|
| 470 |
+
|
| 471 |
+
flat_logits_reason = shift_logits_reason.view(-1, self.config.text_config.vocab_size)
|
| 472 |
+
flat_labels_reason = shift_labels_reason.view(-1).to(shift_logits.device)
|
| 473 |
+
l1_loss_reasoning = loss_fct(flat_logits_reason, flat_labels_reason)
|
| 474 |
+
|
| 475 |
+
loss = 0.7*loss_action + 0.3*l1_loss_reasoning
|
| 476 |
+
if not return_dict:
|
| 477 |
+
output = (logits,) + outputs[1:]
|
| 478 |
+
return (loss,) + output if loss is not None else output
|
| 479 |
+
|
| 480 |
+
return SpatialVLACausalLMOutputWithPast(
|
| 481 |
+
loss=loss,
|
| 482 |
+
logits=logits,
|
| 483 |
+
past_key_values=outputs.past_key_values,
|
| 484 |
+
hidden_states=outputs.hidden_states,
|
| 485 |
+
attentions=outputs.attentions,
|
| 486 |
+
image_hidden_states=image_features if pixel_values is not None else None,
|
| 487 |
+
)
|
| 488 |
+
|
| 489 |
+
# AR inference
|
| 490 |
+
def prepare_inputs_for_generation(
|
| 491 |
+
self,
|
| 492 |
+
input_ids,
|
| 493 |
+
past_key_values=None,
|
| 494 |
+
inputs_embeds=None,
|
| 495 |
+
cache_position=None,
|
| 496 |
+
position_ids=None,
|
| 497 |
+
pixel_values=None,
|
| 498 |
+
intrinsic=None,
|
| 499 |
+
attention_mask=None,
|
| 500 |
+
token_type_ids=None,
|
| 501 |
+
use_cache=True,
|
| 502 |
+
num_logits_to_keep=None,
|
| 503 |
+
labels=None,
|
| 504 |
+
**kwargs,
|
| 505 |
+
):
|
| 506 |
+
model_inputs = self.language_model.prepare_inputs_for_generation(
|
| 507 |
+
input_ids,
|
| 508 |
+
past_key_values=past_key_values,
|
| 509 |
+
inputs_embeds=inputs_embeds,
|
| 510 |
+
attention_mask=attention_mask,
|
| 511 |
+
position_ids=position_ids,
|
| 512 |
+
cache_position=cache_position,
|
| 513 |
+
use_cache=use_cache,
|
| 514 |
+
num_logits_to_keep=num_logits_to_keep,
|
| 515 |
+
token_type_ids=token_type_ids,
|
| 516 |
+
**kwargs,
|
| 517 |
+
)
|
| 518 |
+
if model_inputs.get("position_ids") is not None:
|
| 519 |
+
model_inputs["position_ids"] += 1
|
| 520 |
+
if cache_position[0] == 0:
|
| 521 |
+
model_inputs["pixel_values"] = pixel_values
|
| 522 |
+
is_training = token_type_ids is not None and labels is not None
|
| 523 |
+
if cache_position[0] == 0 and isinstance(past_key_values, HybridCache):
|
| 524 |
+
causal_mask = self._update_causal_mask(attention_mask, token_type_ids, past_key_values, cache_position, input_ids, inputs_embeds, is_training)
|
| 525 |
+
model_inputs["attention_mask"] = causal_mask
|
| 526 |
+
model_inputs["intrinsic"] = intrinsic
|
| 527 |
+
return model_inputs
|
| 528 |
+
|
| 529 |
+
# @torch.no_grad()
|
| 530 |
+
@torch.inference_mode()
|
| 531 |
+
def predict_action(
|
| 532 |
+
self,
|
| 533 |
+
model_inputs,
|
| 534 |
+
) -> torch.Tensor:
|
| 535 |
+
|
| 536 |
+
|
| 537 |
+
model_inputs = model_inputs.to(torch.bfloat16).to(self.device)
|
| 538 |
+
input_len = model_inputs["input_ids"].shape[-1]
|
| 539 |
+
generation_outputs = self.generate(**model_inputs, max_new_tokens=256, stopping_criteria=self.criteria, do_sample=False)
|
| 540 |
+
return generation_outputs[:,input_len:]
|
| 541 |
+
|
| 542 |
+
@torch.no_grad()
|
| 543 |
+
def predict_action_with_attentions(
|
| 544 |
+
self,
|
| 545 |
+
model_inputs: Dict[str, torch.Tensor],
|
| 546 |
+
return_attentions: bool = True,
|
| 547 |
+
) -> Tuple[torch.Tensor, Tuple]:
|
| 548 |
+
model_inputs = model_inputs.to(self.device, torch.bfloat16)
|
| 549 |
+
input_len = model_inputs["input_ids"].shape[-1]
|
| 550 |
+
|
| 551 |
+
model_outputs = self.generate(
|
| 552 |
+
**model_inputs,
|
| 553 |
+
max_new_tokens=256,
|
| 554 |
+
do_sample=False,
|
| 555 |
+
# return_attentions=return_attentions,
|
| 556 |
+
output_attentions=return_attentions,
|
| 557 |
+
return_dict_in_generate=True,
|
| 558 |
+
)
|
| 559 |
+
|
| 560 |
+
generated_ids = model_outputs["sequences"][:, input_len:]
|
| 561 |
+
attentions = model_outputs["attentions"]
|
| 562 |
+
|
| 563 |
+
return generated_ids, attentions
|
| 564 |
+
|
| 565 |
+
|
| 566 |
+
|
| 567 |
+
@classmethod
|
| 568 |
+
def from_pretrained(
|
| 569 |
+
cls,
|
| 570 |
+
pretrained_model_name_or_path: Optional[Union[str, os.PathLike]],
|
| 571 |
+
*model_args,
|
| 572 |
+
config: Optional[Union[PretrainedConfig, str, os.PathLike]] = None,
|
| 573 |
+
cache_dir: Optional[Union[str, os.PathLike]] = None,
|
| 574 |
+
ignore_mismatched_sizes: bool = False,
|
| 575 |
+
force_download: bool = False,
|
| 576 |
+
local_files_only: bool = False,
|
| 577 |
+
token: Optional[Union[str, bool]] = None,
|
| 578 |
+
revision: str = "main",
|
| 579 |
+
use_safetensors: Optional[bool] = None,
|
| 580 |
+
weights_only: bool = True,
|
| 581 |
+
**kwargs,
|
| 582 |
+
):
|
| 583 |
+
model = super().from_pretrained(
|
| 584 |
+
pretrained_model_name_or_path,
|
| 585 |
+
*model_args,
|
| 586 |
+
config=config,
|
| 587 |
+
cache_dir=cache_dir,
|
| 588 |
+
ignore_mismatched_sizes=ignore_mismatched_sizes,
|
| 589 |
+
force_download=force_download,
|
| 590 |
+
local_files_only=local_files_only,
|
| 591 |
+
token=token,
|
| 592 |
+
revision=revision,
|
| 593 |
+
use_safetensors=use_safetensors,
|
| 594 |
+
weights_only=weights_only,
|
| 595 |
+
**kwargs,
|
| 596 |
+
)
|
| 597 |
+
if model.config.use_spatial_token:
|
| 598 |
+
model.language_model.model.embed_tokens.weight.data[-model.config.spatial_token_num:] = model.spatial_embed_tokens.weight.data
|
| 599 |
+
return model
|
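`backproject_patch` above lifts per-patch depth into camera-frame 3D points with the inverse intrinsic matrix before the Ego3D positional MLP embeds them. A standalone sketch of the same pinhole back-projection, using toy intrinsics and a constant depth map (all values hypothetical):

import torch

# Toy pinhole intrinsics and a flat 2.0 m depth map for a 224x224 image.
K = torch.tensor([[600.0, 0.0, 112.0],
                  [0.0, 600.0, 112.0],
                  [0.0, 0.0, 1.0]])
depth = torch.full((224, 224), 2.0)

# Homogeneous pixel coordinates (u, v, 1) for every pixel.
v, u = torch.meshgrid(torch.arange(224.0), torch.arange(224.0), indexing="ij")
uv_h = torch.stack([u, v, torch.ones_like(u)], dim=0).reshape(3, -1)  # (3, HW)

# X_cam = K^-1 @ (u, v, 1)^T * depth: each pixel ray scaled by its depth,
# mirroring `inv(K) @ self.uv_h * patch_depth` in the model (minus batching
# and the per-patch area pooling).
xyz = (torch.linalg.inv(K) @ uv_h) * depth.reshape(1, -1)  # (3, HW)
print(xyz.T[0])  # camera-frame 3D point of the top-left pixel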
preprocessor_config.json
ADDED
|
@@ -0,0 +1,28 @@
|
| 1 |
+
{
|
| 2 |
+
"auto_map": {
|
| 3 |
+
"AutoProcessor": "processing_spatialvla.SpatialVLAProcessor"
|
| 4 |
+
},
|
| 5 |
+
"do_convert_rgb": null,
|
| 6 |
+
"do_normalize": false,
|
| 7 |
+
"do_rescale": true,
|
| 8 |
+
"do_resize": true,
|
| 9 |
+
"image_mean": [
|
| 10 |
+
0.5,
|
| 11 |
+
0.5,
|
| 12 |
+
0.5
|
| 13 |
+
],
|
| 14 |
+
"image_processor_type": "SiglipImageProcessor",
|
| 15 |
+
"image_seq_length": 256,
|
| 16 |
+
"image_std": [
|
| 17 |
+
0.5,
|
| 18 |
+
0.5,
|
| 19 |
+
0.5
|
| 20 |
+
],
|
| 21 |
+
"processor_class": "SpatialVLAProcessor",
|
| 22 |
+
"resample": 3,
|
| 23 |
+
"rescale_factor": 0.00392156862745098,
|
| 24 |
+
"size": {
|
| 25 |
+
"height": 224,
|
| 26 |
+
"width": 224
|
| 27 |
+
}
|
| 28 |
+
}
|
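The preprocessor config above wires `AutoProcessor` to the custom `SpatialVLAProcessor` via `auto_map` and sets up a 224x224 SigLIP image pipeline (rescale by 1/255, no normalization, 256 image tokens per frame). A minimal usage sketch, assuming the repository has been downloaded locally (the path and prompt are hypothetical):

from PIL import Image
from transformers import AutoProcessor

# trust_remote_code is required so the auto_map entry can resolve the
# custom processing_spatialvla.SpatialVLAProcessor class shipped in this repo.
processor = AutoProcessor.from_pretrained("./spatialvla-checkpoint", trust_remote_code=True)

image = Image.open("example_frame.png").convert("RGB")
inputs = processor(images=image, text="pick up the red block", unnorm_key="bridge_orig/1.0.0")
# `inputs` carries input_ids, attention_mask, 224x224 pixel_values, and the
# camera intrinsic selected for the given unnorm_key (falling back to "default").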
processing_spatialvla.py
ADDED
|
@@ -0,0 +1,259 @@
|
| 1 |
+
# coding=utf-8
|
| 2 |
+
# Copyright 2024 The HuggingFace Inc. team.
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
import logging
|
| 16 |
+
from typing import List, Optional, Union, Dict
|
| 17 |
+
import numpy as np
|
| 18 |
+
import torch
|
| 19 |
+
from transformers.feature_extraction_utils import BatchFeature
|
| 20 |
+
from transformers.image_utils import ImageInput, is_valid_image
|
| 21 |
+
from transformers.processing_utils import Unpack, _validate_images_text_input_order, ProcessorMixin
|
| 22 |
+
from transformers.tokenization_utils_base import AddedToken, PreTokenizedInput, TextInput
|
| 23 |
+
from transformers.utils import logging
|
| 24 |
+
from transformers.models.paligemma.processing_paligemma import (
|
| 25 |
+
make_batched_images,
|
| 26 |
+
build_string_from_input,
|
| 27 |
+
_is_str_or_image,
|
| 28 |
+
PaliGemmaProcessorKwargs,
|
| 29 |
+
IMAGE_TOKEN,
|
| 30 |
+
EXTRA_TOKENS
|
| 31 |
+
)
|
| 32 |
+
from .action_tokenizer import SpatialActionTokenizer
|
| 33 |
+
logger = logging.get_logger(__name__)
|
| 34 |
+
|
| 35 |
+
class SpatialVLAProcessor(ProcessorMixin):
|
| 36 |
+
attributes = ["image_processor", "tokenizer"]
|
| 37 |
+
valid_kwargs = ["chat_template"]
|
| 38 |
+
image_processor_class = "SiglipImageProcessor"
|
| 39 |
+
tokenizer_class = ("GemmaTokenizer", "GemmaTokenizerFast")
|
| 40 |
+
|
| 41 |
+
def __init__(
|
| 42 |
+
self,
|
| 43 |
+
image_processor=None,
|
| 44 |
+
tokenizer=None,
|
| 45 |
+
chat_template=None,
|
| 46 |
+
statistics: Optional[dict] = None,
|
| 47 |
+
bin_policy=None,
|
| 48 |
+
intrinsic_config=None,
|
| 49 |
+
action_config=None,
|
| 50 |
+
num_obs_steps=1,
|
| 51 |
+
obs_delta=1,
|
| 52 |
+
action_chunk_size=1,
|
| 53 |
+
min_sigma=0.0,
|
| 54 |
+
**kwargs,
|
| 55 |
+
):
|
| 56 |
+
if image_processor is None:
|
| 57 |
+
raise ValueError("You need to specify an `image_processor`.")
|
| 58 |
+
if tokenizer is None:
|
| 59 |
+
raise ValueError("You need to specify a `tokenizer`.")
|
| 60 |
+
if not hasattr(image_processor, "image_seq_length"):
|
| 61 |
+
raise ValueError("Image processor is missing an `image_seq_length` attribute.")
|
| 62 |
+
|
| 63 |
+
self.image_seq_length = image_processor.image_seq_length
|
| 64 |
+
|
| 65 |
+
if not hasattr(tokenizer, "image_token"):
|
| 66 |
+
image_token = AddedToken(IMAGE_TOKEN, normalized=False, special=True)
|
| 67 |
+
tokens_to_add = {"additional_special_tokens": [image_token]}
|
| 68 |
+
tokenizer.add_special_tokens(tokens_to_add)
|
| 69 |
+
self.image_token_id = tokenizer.convert_tokens_to_ids(IMAGE_TOKEN)
|
| 70 |
+
else:
|
| 71 |
+
self.image_token_id = tokenizer.image_token_id
|
| 72 |
+
|
| 73 |
+
tokenizer.add_tokens(EXTRA_TOKENS)
|
| 74 |
+
tokenizer.add_bos_token = False
|
| 75 |
+
tokenizer.add_eos_token = False
|
| 76 |
+
|
| 77 |
+
super().__init__(image_processor, tokenizer, chat_template=chat_template)
|
| 78 |
+
|
| 79 |
+
# action tokenizer
|
| 80 |
+
self.statistics = statistics if statistics else {}
|
| 81 |
+
self.bin_policy = bin_policy
|
| 82 |
+
self.min_sigma = min_sigma
|
| 83 |
+
self.intrinsic_config = intrinsic_config
|
| 84 |
+
self.action_config = action_config
|
| 85 |
+
self.num_obs_steps = num_obs_steps
|
| 86 |
+
self.obs_delta = obs_delta
|
| 87 |
+
self.action_chunk_size = action_chunk_size
|
| 88 |
+
self.dataset_intrinsics = {}
|
| 89 |
+
height, width = image_processor.size["height"], image_processor.size["width"]
|
| 90 |
+
|
| 91 |
+
# scale intrinsic matrix
|
| 92 |
+
for k, v in intrinsic_config.items():
|
| 93 |
+
K = torch.tensor(v["intrinsic"]).float()
|
| 94 |
+
K[:2] *= torch.tensor([width / v["width"], height / v["height"]])[:, None]
|
| 95 |
+
self.dataset_intrinsics[k] = K
|
| 96 |
+
|
| 97 |
+
self.action_tokenizer = SpatialActionTokenizer(
|
| 98 |
+
tokenizer=tokenizer, num_bins=action_config["num_bins"],
|
| 99 |
+
bin_policy=bin_policy, use_spherical=action_config["use_spherical"],
|
| 100 |
+
min_sigma=min_sigma,
|
| 101 |
+
)
|
| 102 |
+
|
| 103 |
+
def __call__(
|
| 104 |
+
self,
|
| 105 |
+
reasoning: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
|
| 106 |
+
images: ImageInput = None,
|
| 107 |
+
text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
|
| 108 |
+
unnorm_key: Optional[str] = None,
|
| 109 |
+
suffix_actions: Optional[np.array] = None, # (t e)
|
| 110 |
+
**kwargs: Unpack[PaliGemmaProcessorKwargs],
|
| 111 |
+
) -> BatchFeature:
|
| 112 |
+
images, text = _validate_images_text_input_order(images, text)
|
| 113 |
+
|
| 114 |
+
output_kwargs = self._merge_kwargs(
|
| 115 |
+
PaliGemmaProcessorKwargs,
|
| 116 |
+
tokenizer_init_kwargs=self.tokenizer.init_kwargs,
|
| 117 |
+
**kwargs,
|
| 118 |
+
)
|
| 119 |
+
if suffix_actions is not None:
|
| 120 |
+
action_tokens = self.action_tokenizer(suffix_actions) # (n,3)
|
| 121 |
+
suffix = "".join(action_tokens.flatten())
|
| 122 |
+
|
| 123 |
+
suffix = f"{suffix}<Reasoning>: {reasoning}"
|
| 124 |
+
|
| 125 |
+
else:
|
| 126 |
+
suffix = output_kwargs["text_kwargs"].pop("suffix", None)
|
| 127 |
+
|
| 128 |
+
return_token_type_ids = True if suffix is not None else False
|
| 129 |
+
|
| 130 |
+
if images is None:
|
| 131 |
+
raise ValueError("`images` are expected as arguments to a `PaliGemmaProcessor` instance.")
|
| 132 |
+
if text is None:
|
| 133 |
+
logger.warning_once( "You are using PaliGemma without a text prefix. It will perform as a picture-captioning model.")
|
| 134 |
+
text = ""
|
| 135 |
+
|
| 136 |
+
if _is_str_or_image(text):
|
| 137 |
+
text = [text]
|
| 138 |
+
elif isinstance(text, list) and _is_str_or_image(text[0]):
|
| 139 |
+
pass
|
| 140 |
+
|
| 141 |
+
if text is not None and images is not None:
|
| 142 |
+
if not any(IMAGE_TOKEN in sample for sample in text):
|
| 143 |
+
if isinstance(text, List) and isinstance(images, List):
|
| 144 |
+
if len(images) != len(text):
|
| 145 |
+
raise ValueError(
|
| 146 |
+
f"Received {len(images)} images for {len(text)} prompts. Each prompt should be associated with an image or list of images."
|
| 147 |
+
)
|
| 148 |
+
if is_valid_image(images):
|
| 149 |
+
images = [[images]]
|
| 150 |
+
elif isinstance(images, list) and is_valid_image(images[0]):
|
| 151 |
+
images = [[image] for image in images]
|
| 152 |
+
elif not (isinstance(images, list) and isinstance(images[0], list) and is_valid_image(images[0][0])):
|
| 153 |
+
raise ValueError("images must be an image, list of images or list of list of images")
|
| 154 |
+
if suffix is not None and _is_str_or_image(suffix): suffix = [suffix]
|
| 155 |
+
if suffix is not None: suffix = [sfx + self.tokenizer.eos_token for sfx in suffix]
|
| 156 |
+
print(f"suffix: {suffix}")
|
| 157 |
+
input_strings = [
|
| 158 |
+
build_string_from_input(
|
| 159 |
+
prompt=prompt,
|
| 160 |
+
bos_token=self.tokenizer.bos_token,
|
| 161 |
+
image_seq_len=self.image_seq_length,
|
| 162 |
+
image_token=IMAGE_TOKEN,
|
| 163 |
+
num_images=len(image_list) if isinstance(image_list, list) else 1,
|
| 164 |
+
)
|
| 165 |
+
for prompt, image_list in zip(text, images)
|
| 166 |
+
]
|
| 167 |
+
images = make_batched_images(images)
|
| 168 |
+
else:
|
| 169 |
+
expanded_samples = []
|
| 170 |
+
for sample in text:
|
| 171 |
+
expanded_sample = sample.replace(IMAGE_TOKEN, IMAGE_TOKEN * self.image_seq_length)
|
| 172 |
+
bos_rfind_index = expanded_sample.rfind(IMAGE_TOKEN)
|
| 173 |
+
bos_index = bos_rfind_index + len(IMAGE_TOKEN) if bos_rfind_index != -1 else 0
|
| 174 |
+
expanded_sample = (
|
| 175 |
+
expanded_sample[:bos_index] + self.tokenizer.bos_token + expanded_sample[bos_index:]
|
| 176 |
+
)
|
| 177 |
+
expanded_samples.append(expanded_sample)
|
| 178 |
+
input_strings = [f"{sample}\n" for sample in expanded_samples]
|
| 179 |
+
pixel_values = self.image_processor(images, **output_kwargs["images_kwargs"])["pixel_values"]
|
| 180 |
+
|
| 181 |
+
if output_kwargs["text_kwargs"].get("max_length", None) is not None:
|
| 182 |
+
output_kwargs["text_kwargs"]["max_length"] += self.image_seq_length
|
| 183 |
+
|
| 184 |
+
inputs = self.tokenizer(
|
| 185 |
+
input_strings,
|
| 186 |
+
text_pair=suffix,
|
| 187 |
+
return_token_type_ids=return_token_type_ids,
|
| 188 |
+
**output_kwargs["text_kwargs"],
|
| 189 |
+
)
|
| 190 |
+
# print(suffix)
|
| 191 |
+
intrinsic = self.dataset_intrinsics[unnorm_key] if unnorm_key in self.dataset_intrinsics else self.dataset_intrinsics["default"]
|
| 192 |
+
return_data = {**inputs, "pixel_values": pixel_values, "intrinsic": intrinsic}
|
| 193 |
+
|
| 194 |
+
if return_token_type_ids:
|
| 195 |
+
labels = inputs["input_ids"].masked_fill(inputs["token_type_ids"] == 0, -100)
|
| 196 |
+
return_data.update({"labels": labels})
|
| 197 |
+
return BatchFeature(data=return_data)
|
| 198 |
+
|
| 199 |
+
# Copied from transformers.models.clip.processing_clip.CLIPProcessor.batch_decode with CLIP->Gemma
|
| 200 |
+
def batch_decode(self, *args, **kwargs):
|
| 201 |
+
"""
|
| 202 |
+
This method forwards all its arguments to GemmaTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
|
| 203 |
+
refer to the docstring of this method for more information.
|
| 204 |
+
"""
|
| 205 |
+
return self.tokenizer.batch_decode(*args, **kwargs)
|
| 206 |
+
|
| 207 |
+
# Copied from transformers.models.clip.processing_clip.CLIPProcessor.decode with CLIP->Gemma
|
| 208 |
+
def decode(self, *args, **kwargs):
|
| 209 |
+
"""
|
| 210 |
+
This method forwards all its arguments to GemmaTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
|
| 211 |
+
the docstring of this method for more information.
|
| 212 |
+
"""
|
| 213 |
+
return self.tokenizer.decode(*args, **kwargs)
|
| 214 |
+
|
| 215 |
+
@property
|
| 216 |
+
def model_input_names(self):
|
| 217 |
+
tokenizer_input_names = self.tokenizer.model_input_names
|
| 218 |
+
image_processor_input_names = self.image_processor.model_input_names
|
| 219 |
+
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
|
| 220 |
+
|
| 221 |
+
def decode_actions(
|
| 222 |
+
self,
|
| 223 |
+
generation_outputs: torch.Tensor,
|
| 224 |
+
unnorm_key: Optional[str] = None,
|
| 225 |
+
) -> Dict[str, torch.Tensor]:
|
| 226 |
+
action_token_num = 3 # translation + rotation + gripper
|
| 227 |
+
predicted_action_token_ids = generation_outputs[0, : action_token_num * self.action_chunk_size].detach().cpu().long().numpy()
|
| 228 |
+
assert self.tokenizer.eos_token != predicted_action_token_ids[-1], "[error] actions contain EOS token, please check your truncation settings!"
|
| 229 |
+
|
| 230 |
+
if predicted_action_token_ids.shape[0] < action_token_num * self.action_chunk_size: # pad with zeros
|
| 231 |
+
logger.warning(f"Padding zero action!")
|
| 232 |
+
predicted_action_token_ids = np.concatenate(
|
| 233 |
+
[
|
| 234 |
+
predicted_action_token_ids,
|
| 235 |
+
np.zeros(action_token_num * self.action_chunk_size - predicted_action_token_ids.shape[0], dtype=np.longlong),
|
| 236 |
+
]
|
| 237 |
+
)
|
| 238 |
+
predicted_action_token_ids = predicted_action_token_ids.reshape(-1, action_token_num)
|
| 239 |
+
normalized_action_chunks = self.action_tokenizer.decode_token_ids_to_actions(predicted_action_token_ids)
|
| 240 |
+
|
| 241 |
+
if unnorm_key is None:
|
| 242 |
+
logger.warning(f"unnorm_key {unnorm_key} is not in statistics, use next one")
|
| 243 |
+
unnorm_key = next(self.statistics.keys())
|
| 244 |
+
action_norm_stats = self.statistics[unnorm_key]["action"]
|
| 245 |
+
|
| 246 |
+
action_dim = len(action_norm_stats["q01"])
|
| 247 |
+
mask = np.array(action_norm_stats.get("mask", np.ones(action_dim)), dtype=bool)
|
| 248 |
+
action_high, action_low = np.array(action_norm_stats["q99"]), np.array(action_norm_stats["q01"])
|
| 249 |
+
|
| 250 |
+
actions = []
|
| 251 |
+
for normalized_actions in normalized_action_chunks:
|
| 252 |
+
action = np.where(
|
| 253 |
+
mask,
|
| 254 |
+
0.5 * (normalized_actions + 1) * (action_high - action_low) + action_low,
|
| 255 |
+
normalized_actions,
|
| 256 |
+
)
|
| 257 |
+
actions.append(action)
|
| 258 |
+
actions = np.stack(actions)
|
| 259 |
+
return {"actions": actions, "action_ids": predicted_action_token_ids}
|
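`decode_actions` above maps the generated spatial tokens back to continuous actions, then un-normalizes them from [-1, 1] into each dataset's physical range using the q01/q99 statistics while leaving masked-out dimensions (e.g. the gripper) untouched. The un-normalization step in isolation, with toy bounds rather than the checkpoint's statistics:

import numpy as np

# Toy 1st/99th-percentile bounds for a 3-dim action; the last dim is excluded
# from rescaling via the mask, mirroring decode_actions above.
action_low = np.array([-0.03, -0.04, 0.0])
action_high = np.array([0.03, 0.04, 1.0])
mask = np.array([True, True, False])
normalized = np.array([0.5, -1.0, 1.0])  # decoded model output in [-1, 1]

action = np.where(
    mask,
    0.5 * (normalized + 1) * (action_high - action_low) + action_low,  # affine map to [q01, q99]
    normalized,                                                         # dims with mask=False pass through unchanged
)
print(action)  # -> [0.015, -0.04, 1.0]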
processor_config.json
ADDED
|
@@ -0,0 +1,327 @@
|
| 1 |
+
{
|
| 2 |
+
"action_chunk_size": 4,
|
| 3 |
+
"action_config": {
|
| 4 |
+
"distribution": "gaussian",
|
| 5 |
+
"num_bins": {
|
| 6 |
+
"gripper": 2,
|
| 7 |
+
"rotation": {
|
| 8 |
+
"pitch_bins": 16,
|
| 9 |
+
"roll_bins": 16,
|
| 10 |
+
"yaw_bins": 16
|
| 11 |
+
},
|
| 12 |
+
"total": 8194,
|
| 13 |
+
"translation": {
|
| 14 |
+
"phi_bins": 32,
|
| 15 |
+
"r_bins": 8,
|
| 16 |
+
"theta_bins": 16
|
| 17 |
+
}
|
| 18 |
+
},
|
| 19 |
+
"use_spherical": true
|
| 20 |
+
},
|
| 21 |
+
"auto_map": {
|
| 22 |
+
"AutoProcessor": "processing_spatialvla.SpatialVLAProcessor"
|
| 23 |
+
},
|
| 24 |
+
"bin_policy": {
|
| 25 |
+
"rotation": {
|
| 26 |
+
"pitch_bins": [
|
| 27 |
+
-1.0,
|
| 28 |
+
-0.4236293919771139,
|
| 29 |
+
-0.2973624970533583,
|
| 30 |
+
-0.21059576820767317,
|
| 31 |
+
-0.14044938844843713,
|
| 32 |
+
-0.0791789125851777,
|
| 33 |
+
-0.023048480293744636,
|
| 34 |
+
        0.030167161843358437, 0.08204200739679071, 0.13389374587953162, 0.18703587338481154,
        0.24302765601977616, 0.30406026229156, 0.37378821800324374, 0.45971873753598247,
        0.5836276162507279, 0.9999999999999991
      ],
      "roll_bins": [
        -0.9999999999999999, -0.48696292418679255, -0.3676073739484146, -0.28549591499691584,
        -0.21907612836502022, -0.16103745543314568, -0.10784881328909159, -0.05740408497876547,
        -0.00821079709993185, 0.040983744804115825, 0.0914324636886914, 0.144628635967148,
        0.20268023967111456, 0.269122809861373, 0.35127995163586373, 0.4707654855904555,
        0.9999999999999944
      ],
      "yaw_bins": [
        -1.0, -0.4473279373756505, -0.3332741619243962, -0.25494122059754437,
        -0.19161826850058544, -0.1363039890445066, -0.08562203792073503, -0.03756062019257189,
        0.009304860859811767, 0.05616950282205181, 0.1042282501882964, 0.15490516155832307,
        0.21021078414249433, 0.2735184749468475, 0.35182078330381356, 0.465787139096136,
        0.9999999999999982
      ]
    },
    "translation": {
      "phi_bins": [
        -3.141592653589793, -2.611427824867527, -2.250204012654159, -1.9664312602343461,
        -1.727567317192397, -1.5180333466123621, -1.3290717520482633, -1.1552219136523942,
        -0.9928174267972283, -0.8392525074770641, -0.6925871222960145, -0.5513178350935227,
        -0.41423640072445, -0.28033770999881874, -0.14875675757685075, -0.018723165750234833,
        0.11047361805186211, 0.2395128839618976, 0.3690681218889241, 0.49983192073784344,
        0.6325427359682341, 0.7680163128439619, 0.9071854848022353, 1.0511538919389105,
        1.2012725735857557, 1.359254858953288, 1.52735781547609, 1.708685638209645,
        1.9077325684228925, 2.1314415012063312, 2.3915198815314898, 2.710422326959981,
        3.141592653589793
      ],
      "r_bins": [
        0.0, 0.24715317617636928, 0.3738653185927623, 0.4741546344271254, 0.5660713758244397,
        0.6591763123588074, 0.7640208367398835, 0.905077308623254, 1.7320508075688772
      ],
      "theta_bins": [
        0.0, 0.9482227818534477, 1.232949635587941, 1.4288683204982662, 1.586471048273713,
        1.7230822806307542, 1.8470152323808435, 1.9631023836372554, 2.0745890527961355,
        2.1839605665055863, 2.2933911513280534, 2.405063409356251, 2.521491080766048,
        2.6459805006534918, 2.7834919014248793, 2.942634872432456, 3.141592653589793
      ]
    }
  },
  "intrinsic_config": {
    "bridge_orig/1.0.0": {
      "height": 480,
      "intrinsic": [
        [623.588, 0, 319.501],
        [0, 623.588, 239.545],
        [0, 0, 1]
      ],
      "width": 640
    },
    "default": {
      "height": 480,
      "intrinsic": [
        [623.588, 0, 319.501],
        [0, 623.588, 239.545],
        [0, 0, 1]
      ],
      "width": 640
    }
  },
  "min_sigma": 0.0,
  "num_obs_steps": 1,
  "obs_delta": 1,
  "processor_class": "SpatialVLAProcessor",
  "statistics": {
    "bridge_orig/1.0.0": {
      "action": {
        "mask": [true, true, true, true, true, true, false],
        "max": [0.055814191699028015, 0.09974314272403717, 0.07338187843561172, 0.41116073727607727, 0.3018309473991394, 6.236903190612793, 1.0],
        "mean": [0.00020478814258240163, 0.00012579727626871318, -0.00013988478167448193, -0.00017113501962739974, -0.0003538677701726556, 0.00019135206821374595, 0.5760049223899841],
        "min": [-0.0696982890367508, -0.0885118767619133, -0.06311047077178955, -0.3737139105796814, -0.3136279881000519, -6.244088649749756, 0.0],
        "q01": [-0.02925512194633484, -0.04143750108778477, -0.025954971089959145, -0.08004292100667953, -0.09390476904809475, -0.204636562615633, 0.0],
        "q99": [0.028467297554016113, 0.04052329249680042, 0.040265134535729885, 0.0807134248316288, 0.08023637719452381, 0.2036343589425087, 1.0],
        "std": [0.009861940518021584, 0.013633579015731812, 0.012660318985581398, 0.028013890609145164, 0.030946530401706696, 0.08104098588228226, 0.49418240785598755]
      },
      "num_trajectories": 4173,
      "num_transitions": 147976,
      "proprio": {
        "max": [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
        "mean": [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
        "min": [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
        "q01": [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
        "q99": [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
        "std": [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
      }
    }
  }
}
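For orientation only: the statistics block above is the kind of input an action-normalization step consumes, with q01/q99 giving per-dimension clipping bounds for the Bridge actions and the bin-edge arrays further up discretizing the normalized components. The snippet below is a minimal sketch of that idea under assumed conventions (clip to [q01, q99], rescale to [-1, 1], then np.digitize into the edges); it is not the processor's actual code path, and the helper names are made up for illustration.

import numpy as np

# Assumed convention: clip each action dimension to its 1st/99th percentile, rescale to [-1, 1].
q01 = np.array([-0.0293, -0.0414, -0.0260, -0.0800, -0.0939, -0.2046, 0.0])
q99 = np.array([0.0285, 0.0405, 0.0403, 0.0807, 0.0802, 0.2036, 1.0])

def normalize(action):
    clipped = np.clip(action, q01, q99)
    return 2.0 * (clipped - q01) / (q99 - q01 + 1e-8) - 1.0

# Assumed convention: an edge array such as roll_bins above maps a normalized scalar to a
# bin index via np.digitize (index of the interval the value falls into).
def discretize(value, bin_edges):
    return int(np.digitize(value, bin_edges)) - 1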
rng_state.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:540f3db439cbbdd36aca5c70433757fc4c1857e7fffc16f42148fcf92b711029
size 14244
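The three lines above are a Git LFS pointer stub, not the tensor data itself; the real file is fetched when the repository is cloned or downloaded with LFS enabled. A small check like the following, which assumes nothing beyond the pointer format shown above, tells whether a local copy is still the stub before attempting torch.load:

import pathlib

def is_lfs_pointer(path):
    # A pointer stub starts with the spec line shown above; real .pt/.pth files do not.
    return pathlib.Path(path).read_bytes().startswith(b"version https://git-lfs.github.com/spec/v1")

print(is_lfs_pointer("rng_state.pth"))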
special_tokens_map.json
ADDED
@@ -0,0 +1,39 @@
{
  "additional_special_tokens": [
    {
      "content": "<image>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false
    }
  ],
  "bos_token": {
    "content": "<bos>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "<eos>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "<pad>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "unk_token": {
    "content": "<unk>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
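Since the repository also ships custom processing code (processing_spatialvla.py, action_tokenizer.py), the special tokens above are normally consumed through the auto classes with remote code enabled. A minimal loading sketch follows; the repo id is a placeholder and the attribute layout of the returned processor is an assumption, not something guaranteed by this listing.

from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained("<local-path-or-repo-id>", trust_remote_code=True)
tokenizer = processor.tokenizer
print(tokenizer.special_tokens_map)                # <bos>, <eos>, <pad>, <unk>, plus <image>
print(tokenizer.convert_tokens_to_ids("<image>"))  # id of the extra image special token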
tokenizer.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5e537c6b0e9bf4083bd38cbee2a5d8aa973899c9e5eb91cf1a0e975838b005d5
size 36157779
tokenizer_config.json
ADDED
The diff for this file is too large to render. See raw diff.
trainer_state.json
ADDED
The diff for this file is too large to render. See raw diff.
training_args.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:dac8141dbe5733e9bd51a1f98aadd5a772028488a0ba49edfd727a9e437da291
size 7544
zero_to_fp32.py
ADDED
@@ -0,0 +1,674 @@
#!/usr/bin/env python

# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

# This script extracts fp32 consolidated weights from a zero 1, 2 and 3 DeepSpeed checkpoints. It gets
# copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
# the future. Once extracted, the weights don't require DeepSpeed and can be used in any
# application.
#
# example:
# python zero_to_fp32.py . output_dir/
# or
# python zero_to_fp32.py . output_dir/ --safe_serialization

import argparse
import torch
import glob
import math
import os
import re
import json
from tqdm import tqdm
from collections import OrderedDict
from dataclasses import dataclass

# while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with
# DeepSpeed data structures it has to be available in the current python environment.
from deepspeed.utils import logger
from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
                                            FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
                                            FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)


@dataclass
class zero_model_state:
    buffers: dict()
    param_shapes: dict()
    shared_params: list
    ds_version: int
    frozen_param_shapes: dict()
    frozen_param_fragments: dict()


debug = 0

# load to cpu
device = torch.device('cpu')


def atoi(text):
    return int(text) if text.isdigit() else text


def natural_keys(text):
    '''
    alist.sort(key=natural_keys) sorts in human order
    http://nedbatchelder.com/blog/200712/human_sorting.html
    (See Toothy's implementation in the comments)
    '''
    return [atoi(c) for c in re.split(r'(\d+)', text)]


def get_model_state_file(checkpoint_dir, zero_stage):
    if not os.path.isdir(checkpoint_dir):
        raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")

    # there should be only one file
    if zero_stage <= 2:
        file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
    elif zero_stage == 3:
        file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")

    if not os.path.exists(file):
        raise FileNotFoundError(f"can't find model states file at '{file}'")

    return file


def get_checkpoint_files(checkpoint_dir, glob_pattern):
    # XXX: need to test that this simple glob rule works for multi-node setup too
    ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys)

    if len(ckpt_files) == 0:
        raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")

    return ckpt_files


def get_optim_files(checkpoint_dir):
    return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")


def get_model_state_files(checkpoint_dir):
    return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")


def parse_model_states(files):
    zero_model_states = []
    for file in files:
        state_dict = torch.load(file, map_location=device)

        if BUFFER_NAMES not in state_dict:
            raise ValueError(f"{file} is not a model state checkpoint")
        buffer_names = state_dict[BUFFER_NAMES]
        if debug:
            print("Found buffers:", buffer_names)

        # recover just the buffers while restoring them to fp32 if they were saved in fp16
        buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
        param_shapes = state_dict[PARAM_SHAPES]

        # collect parameters that are included in param_shapes
        param_names = []
        for s in param_shapes:
            for name in s.keys():
                param_names.append(name)

        # update with frozen parameters
        frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
        if frozen_param_shapes is not None:
            if debug:
                print(f"Found frozen_param_shapes: {frozen_param_shapes}")
            param_names += list(frozen_param_shapes.keys())

        # handle shared params
        shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]

        ds_version = state_dict.get(DS_VERSION, None)

        frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)

        z_model_state = zero_model_state(buffers=buffers,
                                         param_shapes=param_shapes,
                                         shared_params=shared_params,
                                         ds_version=ds_version,
                                         frozen_param_shapes=frozen_param_shapes,
                                         frozen_param_fragments=frozen_param_fragments)
        zero_model_states.append(z_model_state)

    return zero_model_states


def parse_optim_states(files, ds_checkpoint_dir):
    total_files = len(files)
    state_dicts = []
    for f in files:
        state_dict = torch.load(f, map_location=device)
        # immediately discard the potentially huge 2 optimizer states as we only care for fp32 master weights
        # and also handle the case where it was already removed by another helper script
        state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None)
        state_dicts.append(state_dict)

    if not ZERO_STAGE in state_dicts[0][OPTIMIZER_STATE_DICT]:
        raise ValueError(f"{files[0]} is not a zero checkpoint")
    zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
    world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]

    # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
    # parameters can be different from data parallelism for non-expert parameters. So we can just
    # use the max of the partition_count to get the dp world_size.

    if type(world_size) is list:
        world_size = max(world_size)

    if world_size != total_files:
        raise ValueError(
            f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
            "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
        )

    # the groups are named differently in each stage
    if zero_stage <= 2:
        fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
    elif zero_stage == 3:
        fp32_groups_key = FP32_FLAT_GROUPS
    else:
        raise ValueError(f"unknown zero stage {zero_stage}")

    if zero_stage <= 2:
        fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
    elif zero_stage == 3:
        # if there is more than one param group, there will be multiple flattened tensors - one
        # flattened tensor per group - for simplicity merge them into a single tensor
        #
        # XXX: could make the script more memory efficient for when there are multiple groups - it
        # will require matching the sub-lists of param_shapes for each param group flattened tensor

        fp32_flat_groups = [
            torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key], 0) for i in range(len(state_dicts))
        ]

    return zero_stage, world_size, fp32_flat_groups


def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters):
    """
    Returns fp32 state_dict reconstructed from ds checkpoint

    Args:
        - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)

    """
    print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")

    optim_files = get_optim_files(ds_checkpoint_dir)
    zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
    print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")

    model_files = get_model_state_files(ds_checkpoint_dir)

    zero_model_states = parse_model_states(model_files)
    print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')

    if zero_stage <= 2:
        return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                                          exclude_frozen_parameters)
    elif zero_stage == 3:
        return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                                          exclude_frozen_parameters)


def _zero2_merge_frozen_params(state_dict, zero_model_states):
    if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
        return

    frozen_param_shapes = zero_model_states[0].frozen_param_shapes
    frozen_param_fragments = zero_model_states[0].frozen_param_fragments

    if debug:
        num_elem = sum(s.numel() for s in frozen_param_shapes.values())
        print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')

    wanted_params = len(frozen_param_shapes)
    wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
    avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
    print(f'Frozen params: Have {avail_numel} numels to process.')
    print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')

    total_params = 0
    total_numel = 0
    for name, shape in frozen_param_shapes.items():
        total_params += 1
        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel

        state_dict[name] = frozen_param_fragments[name]

        if debug:
            print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")

    print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")


def _has_callable(obj, fn):
    attr = getattr(obj, fn, None)
    return callable(attr)


def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
    param_shapes = zero_model_states[0].param_shapes

    # Reconstruction protocol:
    #
    # XXX: document this

    if debug:
        for i in range(world_size):
            for j in range(len(fp32_flat_groups[0])):
                print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")

    # XXX: memory usage doubles here (zero2)
    num_param_groups = len(fp32_flat_groups[0])
    merged_single_partition_of_fp32_groups = []
    for i in range(num_param_groups):
        merged_partitions = [sd[i] for sd in fp32_flat_groups]
        full_single_fp32_vector = torch.cat(merged_partitions, 0)
        merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
    avail_numel = sum(
        [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])

    if debug:
        wanted_params = sum([len(shapes) for shapes in param_shapes])
        wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
        # not asserting if there is a mismatch due to possible padding
        print(f"Have {avail_numel} numels to process.")
        print(f"Need {wanted_numel} numels in {wanted_params} params.")

    # params
    # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
    # out-of-core computing solution
    total_numel = 0
    total_params = 0
    for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
        offset = 0
        avail_numel = full_single_fp32_vector.numel()
        for name, shape in shapes.items():

            unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape)
            total_numel += unpartitioned_numel
            total_params += 1

            if debug:
                print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
            state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
            offset += unpartitioned_numel

        # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
        # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
        # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
        # live optimizer object, so we are checking that the numbers are within the right range
        align_to = 2 * world_size

        def zero2_align(x):
            return align_to * math.ceil(x / align_to)

        if debug:
            print(f"original offset={offset}, avail_numel={avail_numel}")

        offset = zero2_align(offset)
        avail_numel = zero2_align(avail_numel)

        if debug:
            print(f"aligned offset={offset}, avail_numel={avail_numel}")

        # Sanity check
        if offset != avail_numel:
            raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")

    print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")


def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                               exclude_frozen_parameters):
    state_dict = OrderedDict()

    # buffers
    buffers = zero_model_states[0].buffers
    state_dict.update(buffers)
    if debug:
        print(f"added {len(buffers)} buffers")

    if not exclude_frozen_parameters:
        _zero2_merge_frozen_params(state_dict, zero_model_states)

    _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)

    # recover shared parameters
    for pair in zero_model_states[0].shared_params:
        if pair[1] in state_dict:
            state_dict[pair[0]] = state_dict[pair[1]]

    return state_dict


def zero3_partitioned_param_info(unpartitioned_numel, world_size):
    remainder = unpartitioned_numel % world_size
    padding_numel = (world_size - remainder) if remainder else 0
    partitioned_numel = math.ceil(unpartitioned_numel / world_size)
    return partitioned_numel, padding_numel


def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
    if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
        return

    if debug:
        for i in range(world_size):
            num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
            print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')

    frozen_param_shapes = zero_model_states[0].frozen_param_shapes
    wanted_params = len(frozen_param_shapes)
    wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
    avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
    print(f'Frozen params: Have {avail_numel} numels to process.')
    print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')

    total_params = 0
    total_numel = 0
    for name, shape in zero_model_states[0].frozen_param_shapes.items():
        total_params += 1
        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel

        param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
        state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)

        partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)

        if debug:
            print(
                f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
            )

    print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")


def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
    param_shapes = zero_model_states[0].param_shapes
    avail_numel = fp32_flat_groups[0].numel() * world_size
    # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
    # param, re-consolidating each param, while dealing with padding if any

    # merge list of dicts, preserving order
    param_shapes = {k: v for d in param_shapes for k, v in d.items()}

    if debug:
        for i in range(world_size):
            print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")

    wanted_params = len(param_shapes)
    wanted_numel = sum(shape.numel() for shape in param_shapes.values())
    # not asserting if there is a mismatch due to possible padding
    avail_numel = fp32_flat_groups[0].numel() * world_size
    print(f"Trainable params: Have {avail_numel} numels to process.")
    print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")

    # params
    # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
    # out-of-core computing solution
    offset = 0
    total_numel = 0
    total_params = 0
    for name, shape in tqdm(param_shapes.items(), desc='Gathering Sharded Weights'):
        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel
        total_params += 1
        partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)

        if debug:
            print(
                f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
            )

        # XXX: memory usage doubles here
        state_dict[name] = torch.cat(
            tuple(fp32_flat_groups[i].narrow(0, offset, partitioned_numel) for i in range(world_size)),
            0).narrow(0, 0, unpartitioned_numel).view(shape)
        offset += partitioned_numel

    offset *= world_size

    # Sanity check
    if offset != avail_numel:
        raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")

    print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")


def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                               exclude_frozen_parameters):
    state_dict = OrderedDict()

    # buffers
    buffers = zero_model_states[0].buffers
    state_dict.update(buffers)
    if debug:
        print(f"added {len(buffers)} buffers")

    if not exclude_frozen_parameters:
        _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)

    _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)

    # recover shared parameters
    for pair in zero_model_states[0].shared_params:
        if pair[1] in state_dict:
            state_dict[pair[0]] = state_dict[pair[1]]

    return state_dict


def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None, exclude_frozen_parameters=False):
    """
    Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
    ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
    via a model hub.

    Args:
        - ``checkpoint_dir``: path to the desired checkpoint folder
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14``
        - ``exclude_frozen_parameters``: exclude frozen parameters

    Returns:
        - pytorch ``state_dict``

    Note: this approach may not work if your application doesn't have sufficient free CPU memory and
    you may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with
    the checkpoint.

    A typical usage might be ::

        from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
        # do the training and checkpoint saving
        state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
        model = model.cpu() # move to cpu
        model.load_state_dict(state_dict)
        # submit to model hub or save the model to share with others

    In this example the ``model`` will no longer be usable in the deepspeed context of the same
    application. i.e. you will need to re-initialize the deepspeed engine, since
    ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.

    If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.

    """
    if tag is None:
        latest_path = os.path.join(checkpoint_dir, 'latest')
        if os.path.isfile(latest_path):
            with open(latest_path, 'r') as fd:
                tag = fd.read().strip()
        else:
            raise ValueError(f"Unable to find 'latest' file at {latest_path}")

    ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)

    if not os.path.isdir(ds_checkpoint_dir):
        raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")

    return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters)


def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir,
                                               output_dir,
                                               max_shard_size="5GB",
                                               safe_serialization=False,
                                               tag=None,
                                               exclude_frozen_parameters=False):
    """
    Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
    loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.

    Args:
        - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
        - ``output_dir``: directory to the pytorch fp32 state_dict output files
        - ``max_shard_size``: the maximum size for a checkpoint before being sharded, default value is 5GB
        - ``safe_serialization``: whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
        - ``exclude_frozen_parameters``: exclude frozen parameters
    """
    # Dependency pre-check
    if safe_serialization:
        try:
            from safetensors.torch import save_file
        except ImportError:
            print('If you want to use `safe_serialization`, please `pip install safetensors`')
            raise
    if max_shard_size is not None:
        try:
            from huggingface_hub import split_torch_state_dict_into_shards
        except ImportError:
            print('If you want to use `max_shard_size`, please `pip install huggingface_hub`')
            raise

    # Convert zero checkpoint to state_dict
    state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag, exclude_frozen_parameters)

    # Shard the model if it is too big.
    weights_name = "model.safetensors" if safe_serialization else "pytorch_model.bin"
    if max_shard_size is not None:
        filename_pattern = weights_name.replace(".bin", "{suffix}.bin").replace(".safetensors", "{suffix}.safetensors")
        state_dict_split = split_torch_state_dict_into_shards(state_dict,
                                                              filename_pattern=filename_pattern,
                                                              max_shard_size=max_shard_size)
    else:
        from collections import namedtuple
        StateDictSplit = namedtuple("StateDictSplit", ["is_sharded", "filename_to_tensors"])
        state_dict_split = StateDictSplit(is_sharded=False,
                                          filename_to_tensors={weights_name: list(state_dict.keys())})

    # Save the model
    filename_to_tensors = state_dict_split.filename_to_tensors.items()
    for shard_file, tensors in tqdm(filename_to_tensors, desc="Saving checkpoint shards"):
        shard = {tensor: state_dict[tensor].contiguous() for tensor in tensors}
        output_path = os.path.join(output_dir, shard_file)
        if safe_serialization:
            save_file(shard, output_path, metadata={"format": "pt"})
        else:
            torch.save(shard, output_path)

    # Save index if sharded
    if state_dict_split.is_sharded:
        index = {
            "metadata": state_dict_split.metadata,
            "weight_map": state_dict_split.tensor_to_filename,
        }
        save_index_file = "model.safetensors.index.json" if safe_serialization else "pytorch_model.bin.index.json"
        save_index_file = os.path.join(output_dir, save_index_file)
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)


def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
    """
    1. Put the provided model to cpu
    2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
    3. Load it into the provided model

    Args:
        - ``model``: the model object to update
        - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``

    Returns:
        - ``model``: modified model

    Make sure you have plenty of CPU memory available before you call this function. If you don't
    have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
    conveniently placed for you in the checkpoint folder.

    A typical usage might be ::

        from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
        model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
        # submit to model hub or save the model to share with others

    Note, that once this was run, the ``model`` will no longer be usable in the deepspeed context
    of the same application. i.e. you will need to re-initialize the deepspeed engine, since
    ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.

    """
    logger.info(f"Extracting fp32 weights")
    state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)

    logger.info(f"Overwriting model with fp32 weights")
    model = model.cpu()
    model.load_state_dict(state_dict, strict=False)

    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("checkpoint_dir",
                        type=str,
                        help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
    parser.add_argument("output_dir",
                        type=str,
                        help="directory to the pytorch fp32 state_dict output files"
                        "(e.g. path/checkpoint-12-output/)")
    parser.add_argument(
        "--max_shard_size",
        type=str,
        default="5GB",
        help="The maximum size for a checkpoint before being sharded. Checkpoints shard will then be each of size"
        "lower than this size. If expressed as a string, needs to be digits followed by a unit (like `5MB`"
        "We default it to 5GB in order for models to be able to run easily on free-tier google colab instances"
        "without CPU OOM issues.")
    parser.add_argument(
        "--safe_serialization",
        default=False,
        action='store_true',
        help="Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).")
    parser.add_argument("-t",
                        "--tag",
                        type=str,
                        default=None,
                        help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1")
    parser.add_argument("--exclude_frozen_parameters", action='store_true', help="exclude frozen parameters")
    parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
    args = parser.parse_args()

    debug = args.debug

    convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir,
                                               args.output_dir,
                                               max_shard_size=args.max_shard_size,
                                               safe_serialization=args.safe_serialization,
                                               tag=args.tag,
                                               exclude_frozen_parameters=args.exclude_frozen_parameters)
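As the header comments note, the script is meant to be run from the checkpoint folder itself. For this repository's layout, where the latest file names the global_step1500 tag, a programmatic call would look roughly like the sketch below; the output path is a placeholder and the call mirrors the function signature above rather than any documented entry point of this repo.

from zero_to_fp32 import convert_zero_checkpoint_to_fp32_state_dict

# checkpoint_dir is the folder containing `latest` and global_step1500/;
# the tag is resolved from `latest` when not given explicitly.
convert_zero_checkpoint_to_fp32_state_dict(
    checkpoint_dir=".",
    output_dir="fp32_output/",
    safe_serialization=True,   # writes model.safetensors shards plus an index file
)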