Model tags: Text Generation · Transformers · Safetensors · English · bolmo · custom_code
benjamin committed (verified)
Commit f893117 · Parent(s): 8e42dcc

Upload BolmoForCausalLM
configuration_bolmo.py CHANGED
@@ -3,7 +3,7 @@ from typing import Any
 
 from transformers.configuration_utils import PretrainedConfig, layer_type_validation
 from transformers.modeling_rope_utils import rope_config_validation
-from olmo_core.nn.blt.hf.tokenization_bolmo import ByteTokenizerConfig
+from .tokenization_bolmo import ByteTokenizerConfig
 
 class BolmoConfig(PretrainedConfig):
     r"""
modeling_bolmo.py CHANGED
@@ -22,9 +22,9 @@ from transformers.utils import auto_docstring, can_return_tuple
 from transformers.utils.deprecation import deprecate_kwarg
 from transformers.utils.generic import check_model_inputs
 
-from olmo_core.nn.blt.hf.configuration_bolmo import BolmoConfig
-from olmo_core.nn.blt.hf.tokenization_bolmo import ByteTokenizerConfig
-from olmo_core.nn.blt.hf.utils_bolmo import compute_boundary_mask, pad_right, pad_left, MaskState
+from .configuration_bolmo import BolmoConfig
+from .tokenization_bolmo import ByteTokenizerConfig
+from .utils_bolmo import compute_boundary_mask, pad_right, pad_left, MaskState
 
 from xlstm.xlstm_large.model import mLSTMLayer, mLSTMLayerConfig, mLSTMLayerStateType, soft_cap, mLSTMBackendConfig
 
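Both hunks make the same fix: the absolute `olmo_core.nn.blt.hf.*` imports become package-relative, so the modeling code resolves against the copies of these files uploaded in this repo rather than requiring an `olmo_core` installation. That is what lets the `custom_code` loading path work. A minimal loading sketch (the repo id below is a placeholder, not taken from this commit):

```python
from transformers import AutoModelForCausalLM

# Placeholder repo id. With trust_remote_code=True, transformers downloads
# configuration_bolmo.py / modeling_bolmo.py from the repo, and their relative
# imports pull in tokenization_bolmo.py and utils_bolmo.py alongside them.
model = AutoModelForCausalLM.from_pretrained(
    "user/bolmo",  # hypothetical
    trust_remote_code=True,
)
```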
tokenization_bolmo.py ADDED
@@ -0,0 +1,301 @@
+from dataclasses import dataclass, field
+from functools import lru_cache
+from typing import Optional
+from transformers import AutoTokenizer
+
+# Source: https://github.com/openai/gpt-2/blob/master/src/encoder.py#L9
+# Also implemented in https://docs.rs/tokenizers/latest/src/tokenizers/pre_tokenizers/byte_level.rs.html#13-39
+_CHARS_TO_BYTES = {
+    "Ā": 0, "ā": 1, "Ă": 2, "ă": 3, "Ą": 4, "ą": 5, "Ć": 6, "ć": 7, "Ĉ": 8,
+    "ĉ": 9, "Ċ": 10, "ċ": 11, "Č": 12, "č": 13, "Ď": 14, "ď": 15, "Đ": 16,
+    "đ": 17, "Ē": 18, "ē": 19, "Ĕ": 20, "ĕ": 21, "Ė": 22, "ė": 23, "Ę": 24,
+    "ę": 25, "Ě": 26, "ě": 27, "Ĝ": 28, "ĝ": 29, "Ğ": 30, "ğ": 31, "Ġ": 32,
+    "!": 33, '"': 34, "#": 35, "$": 36, "%": 37, "&": 38, "'": 39, "(": 40,
+    ")": 41, "*": 42, "+": 43, ",": 44, "-": 45, ".": 46, "/": 47, "0": 48,
+    "1": 49, "2": 50, "3": 51, "4": 52, "5": 53, "6": 54, "7": 55, "8": 56,
+    "9": 57, ":": 58, ";": 59, "<": 60, "=": 61, ">": 62, "?": 63, "@": 64,
+    "A": 65, "B": 66, "C": 67, "D": 68, "E": 69, "F": 70, "G": 71, "H": 72,
+    "I": 73, "J": 74, "K": 75, "L": 76, "M": 77, "N": 78, "O": 79, "P": 80,
+    "Q": 81, "R": 82, "S": 83, "T": 84, "U": 85, "V": 86, "W": 87, "X": 88,
+    "Y": 89, "Z": 90, "[": 91, "\\": 92, "]": 93, "^": 94, "_": 95, "`": 96,
+    "a": 97, "b": 98, "c": 99, "d": 100, "e": 101, "f": 102, "g": 103,
+    "h": 104, "i": 105, "j": 106, "k": 107, "l": 108, "m": 109, "n": 110,
+    "o": 111, "p": 112, "q": 113, "r": 114, "s": 115, "t": 116, "u": 117,
+    "v": 118, "w": 119, "x": 120, "y": 121, "z": 122, "{": 123, "|": 124,
+    "}": 125, "~": 126, "ġ": 127, "Ģ": 128, "ģ": 129, "Ĥ": 130, "ĥ": 131,
+    "Ħ": 132, "ħ": 133, "Ĩ": 134, "ĩ": 135, "Ī": 136, "ī": 137, "Ĭ": 138,
+    "ĭ": 139, "Į": 140, "į": 141, "İ": 142, "ı": 143, "IJ": 144, "ij": 145,
+    "Ĵ": 146, "ĵ": 147, "Ķ": 148, "ķ": 149, "ĸ": 150, "Ĺ": 151, "ĺ": 152,
+    "Ļ": 153, "ļ": 154, "Ľ": 155, "ľ": 156, "Ŀ": 157, "ŀ": 158, "Ł": 159,
+    "ł": 160, "¡": 161, "¢": 162, "£": 163, "¤": 164, "¥": 165, "¦": 166,
+    "§": 167, "¨": 168, "©": 169, "ª": 170, "«": 171, "¬": 172, "Ń": 173,
+    "®": 174, "¯": 175, "°": 176, "±": 177, "²": 178, "³": 179, "´": 180,
+    "µ": 181, "¶": 182, "·": 183, "¸": 184, "¹": 185, "º": 186, "»": 187,
+    "¼": 188, "½": 189, "¾": 190, "¿": 191, "À": 192, "Á": 193, "Â": 194,
+    "Ã": 195, "Ä": 196, "Å": 197, "Æ": 198, "Ç": 199, "È": 200, "É": 201,
+    "Ê": 202, "Ë": 203, "Ì": 204, "Í": 205, "Î": 206, "Ï": 207, "Ð": 208,
+    "Ñ": 209, "Ò": 210, "Ó": 211, "Ô": 212, "Õ": 213, "Ö": 214, "×": 215,
+    "Ø": 216, "Ù": 217, "Ú": 218, "Û": 219, "Ü": 220, "Ý": 221, "Þ": 222,
+    "ß": 223, "à": 224, "á": 225, "â": 226, "ã": 227, "ä": 228, "å": 229,
+    "æ": 230, "ç": 231, "è": 232, "é": 233, "ê": 234, "ë": 235, "ì": 236,
+    "í": 237, "î": 238, "ï": 239, "ð": 240, "ñ": 241, "ò": 242, "ó": 243,
+    "ô": 244, "õ": 245, "ö": 246, "÷": 247, "ø": 248, "ù": 249, "ú": 250,
+    "û": 251, "ü": 252, "ý": 253, "þ": 254, "ÿ": 255,
+}
+_BYTES_TO_CHARS = {v: k for k, v in _CHARS_TO_BYTES.items()}
+
+def _bytes_to_chars(byte_sequence: bytes) -> str:
+    return "".join(_BYTES_TO_CHARS[byte] for byte in byte_sequence)
+
+def _chars_to_bytes(char_sequence: str) -> list:
+    return list(bytes(_CHARS_TO_BYTES[char] for char in char_sequence))
+
+@dataclass
+class ByteTokenizerConfig:
+    vocab_size: int
+    bos_token_id: int
+    pad_token_id: int
+    eos_token_id: int
+    bpe_token_end_id: int
+    special_tokens: list[str] = field(default_factory=lambda: [])
+    special_tokens_first: bool = True
+    original_identifier: Optional[str] = None
+
+
+    @classmethod
+    def bolmo(cls) -> "ByteTokenizerConfig":
+        special_tokens = [
+            "<pad>",
+            "<bos>",
+            "<eos>",
+            "<bpe_token_end>",
+        ]
+
+        return cls(
+            # *2 to accommodate fused boundary tokens
+            vocab_size=(len(special_tokens) + 256) * 2,
+            special_tokens=special_tokens,
+            bos_token_id=special_tokens.index("<bos>"),
+            pad_token_id=special_tokens.index("<pad>"),
+            eos_token_id=special_tokens.index("<eos>"),
+            bpe_token_end_id=special_tokens.index("<bpe_token_end>"),
+            original_identifier="allenai/dolma2-tokenizer",
+        )
+
+    def build(self):
+        return ByteTokenizer(self)
+
+
+class ByteTokenizer:
+    TOKEN_ID_KEY = -1
+
+    def __init__(self, tokenizer_config: ByteTokenizerConfig):
+        self.config = tokenizer_config
+        self.hf_tokenizer = AutoTokenizer.from_pretrained(tokenizer_config.original_identifier)
+        if self.config.special_tokens_first:
+            self.offset = len(tokenizer_config.special_tokens)
+            self.special_tokens_offset = 0
+        else:
+            self.offset = 0
+            self.special_tokens_offset = self.config.vocab_size - len(tokenizer_config.special_tokens)
+
+        self.byte_sequences = {}
+
+        for key, value in self.hf_tokenizer.get_vocab().items():
+            if key in self.config.special_tokens:
+                byte_sequence = [self.special_tokens_offset + self.config.special_tokens.index(key)]
+            elif value == self.hf_tokenizer.eos_token_id and self.eos_token_id is not None:
+                byte_sequence = [self.eos_token_id]
+            elif value == self.hf_tokenizer.bos_token_id and self.bos_token_id is not None:
+                byte_sequence = [self.bos_token_id]
+            elif value == self.hf_tokenizer.pad_token_id and self.pad_token_id is not None:
+                byte_sequence = [self.pad_token_id]
+            else:
+                byte_sequence = [self.offset + i for i in _chars_to_bytes(key)]
+
+            assert self.byte_sequences.get(value) is None
+            self.byte_sequences[value] = byte_sequence
+
+        self.byte_trie = {}
+
+        for token_id, byte_sequence in self.byte_sequences.items():
+            current_dict = self.byte_trie
+            for byte in byte_sequence[::-1]:  # retrieved from the back so store in reverse order
+                if byte not in current_dict:
+                    current_dict[byte] = {}
+                current_dict = current_dict[byte]
+            current_dict[ByteTokenizer.TOKEN_ID_KEY] = token_id
+
+    @property
+    def bos_token_id(self):
+        return self.config.bos_token_id
+
+    @property
+    def eos_token_id(self):
+        return self.config.eos_token_id
+
+    @property
+    def pad_token_id(self):
+        return self.config.pad_token_id
+
+    @property
+    def bpe_token_end_id(self):
+        return self.config.bpe_token_end_id
+
+    def expand_byte_ids(self, byte_ids: list[int], n_last: Optional[int] = None) -> list[int]:
+        # search in the byte trie for the longest matching token at every byte position
+        expanded_ids = []
+        for i in range(len(byte_ids)):
+            if n_last is not None and i < len(byte_ids) - n_last:
+                continue
+
+            current_dict = self.byte_trie
+            current_expansion = None
+
+            for j in range(i, -1, -1):  # walk backwards from position i
+                byte = byte_ids[j]
+
+                if byte == self.bpe_token_end_id:
+                    # skip bpe token end markers, needed for generation
+                    continue
+
+                if byte >= self.offset + 256:
+                    # ignore fused boundary
+                    byte -= self.offset + 256
+
+                try:
+                    current_dict = current_dict[byte]
+                    if ByteTokenizer.TOKEN_ID_KEY in current_dict:
+                        current_expansion = current_dict[ByteTokenizer.TOKEN_ID_KEY]
+                except KeyError:
+                    assert current_expansion is not None
+                    break
+
+            expanded_ids.append(current_expansion)
+
+        return expanded_ids
+
+    def patch_ids_to_byte_ids(self, input_ids: list[int]):
+        return [byte_token_id for token_id in input_ids for byte_token_id in self.byte_sequences[token_id]]
+
+    def encode(self, string: str, add_special_tokens=False):
+        input_ids = self.hf_tokenizer.encode(string, add_special_tokens=add_special_tokens)
+        return self.patch_ids_to_byte_ids(input_ids)
+
+    def decode(self, tokens: list[int]) -> str:
+        return self.decode_to_bytes(tokens).decode("utf-8", errors="replace")
+
+    def decode_to_bytes(self, tokens: list[int]) -> bytes:
+        tokens_without_boundary = []
+        for token in tokens:
+            if token >= (self.offset + 256):
+                token -= self.offset + 256
+
+            tokens_without_boundary.append(token)
+
+        utf8_bytes = [min(token - self.offset, 255) for token in tokens_without_boundary if token >= self.offset]
+        return bytes(utf8_bytes)
+
+    def get_tokens_and_patch_lengths(self, original_input_ids: list[int], add_bos=False, strip_pad=False, skip_last=False):
+        if add_bos and self.bos_token_id is not None:
+            byte_tokens = [self.bos_token_id]
+            patch_lengths = [1]
+        else:
+            byte_tokens = []
+            patch_lengths = []
+
+        for idx, token in enumerate(original_input_ids):
+            # optionally skip last token to keep the length the same if add_bos=True
+            if skip_last and idx == len(original_input_ids) - 1:
+                break
+
+            token_byte_tokens = self.patch_ids_to_byte_ids([int(token)])
+
+            if strip_pad and all(t == self.pad_token_id for t in token_byte_tokens):
+                # skip padding tokens
+                continue
+
+            patch_lengths.append(len(token_byte_tokens))
+            byte_tokens.extend(token_byte_tokens)
+
+        return byte_tokens, patch_lengths
+
+    @lru_cache(maxsize=1024)
+    def _is_spacelike(self, token_id: int) -> bool:
+        """
+        Check if a token ID is spacelike.
+        """
+        byte = token_id - self.offset
+        # see https://github.com/kjslag/spacebyte/blob/321111315c92bce0bc2f9f1630cb0bc82b897c57/spacebyte.py#L137-L145.
+        is_spacelike = (
+            (byte < ord('0')) |
+            ((ord('9') < byte) & (byte < ord('A'))) |
+            ((ord('Z') < byte) & (byte < ord('a'))) |
+            ((ord('z') < byte) & (byte < 0b1000_0000)) |
+            (0b1100_0000 <= byte)
+        )
+        return is_spacelike
+
+    @lru_cache(maxsize=1024)
+    def _is_strict_spacelike(self, token_id: int) -> bool:
+        """
+        Check if a token ID is strictly spacelike (only space, tab, newline, carriage return).
+        """
+        byte = token_id - self.offset
+        return byte in {ord(' '), ord('\t'), ord('\n'), ord('\r')}
+
+    def get_space_patch_lengths(self, input_ids: list[int], max_patch_length: int = 16, kind: str = "strict_end_before_space") -> list[int]:
+        patch_lengths = []
+        current_length = 0
+
+        special_tokens = {self.bos_token_id, self.eos_token_id, self.pad_token_id}
+
+        all_spacelike = [self._is_spacelike(token) for token in input_ids]
+
+        if kind == "spacebyte":
+            for token_idx, token in enumerate(input_ids):
+                current_length += 1
+
+                spacelike = all_spacelike[token_idx]
+                previous_spacelike = all_spacelike[token_idx - 1] if token_idx > 0 else False
+
+                if (not previous_spacelike and spacelike) or current_length >= max_patch_length or token in special_tokens:
+                    patch_lengths.append(current_length)
+                    current_length = 0
+        elif kind == "spacebyte_end_before_space":
+            for token_idx, token in enumerate(input_ids):
+                current_length += 1
+
+                spacelike = all_spacelike[token_idx]
+                next_spacelike = all_spacelike[token_idx + 1] if token_idx < len(input_ids) - 1 else True
+
+                if (not spacelike and next_spacelike) or current_length >= max_patch_length or token in special_tokens:
+                    patch_lengths.append(current_length)
+                    current_length = 0
+        elif kind == "strict_end_before_space":
+            all_strict_spacelike = [self._is_strict_spacelike(token) for token in input_ids]
+            in_strict_prefix = True
+
+            for token_idx, token in enumerate(input_ids):
+                current_length += 1
+
+                spacelike = all_spacelike[token_idx]
+                strict_spacelike = all_strict_spacelike[token_idx]
+                next_spacelike = all_spacelike[token_idx + 1] if token_idx < len(input_ids) - 1 else True
+                next_strict_spacelike = all_strict_spacelike[token_idx + 1] if token_idx < len(input_ids) - 1 else True
+
+                if not strict_spacelike:
+                    in_strict_prefix = False
+
+                if in_strict_prefix:
+                    continue
+
+                if (spacelike != next_spacelike) or (strict_spacelike != next_strict_spacelike) or current_length >= max_patch_length or token in special_tokens:
+                    patch_lengths.append(current_length)
+                    in_strict_prefix = True
+                    current_length = 0
+
+        if current_length > 0:
+            patch_lengths.append(current_length)
+
+        return patch_lengths
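For orientation, a minimal usage sketch of the tokenizer added above (my own example, not part of the commit; it assumes the file is importable locally and downloads `allenai/dolma2-tokenizer` on first use). `encode` runs the original BPE tokenizer and expands each BPE token into per-byte ids shifted past the special tokens; `get_tokens_and_patch_lengths` additionally reports how many bytes each BPE token ("patch") covers:

```python
from tokenization_bolmo import ByteTokenizerConfig

tokenizer = ByteTokenizerConfig.bolmo().build()

byte_ids = tokenizer.encode("Hello world")
print(tokenizer.decode(byte_ids))  # "Hello world" — byte-level round trip

# Per-patch byte counts, prefixed with a 1-byte <bos> patch.
bpe_ids = tokenizer.hf_tokenizer.encode("Hello world", add_special_tokens=False)
byte_tokens, patch_lengths = tokenizer.get_tokens_and_patch_lengths(bpe_ids, add_bos=True)
print(patch_lengths)  # e.g. [1, 5, 6] if the BPE tokens are "Hello" and " world"
```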
utils_bolmo.py ADDED
@@ -0,0 +1,127 @@
+import math
+
+import torch
+import torch.nn.functional as F
+
+
+def compute_boundary_mask(boundary_logprobs: torch.Tensor, boundary_threshold: str) -> torch.Tensor:
+    if boundary_threshold.startswith("sample:"):
+        _, temperature = boundary_threshold.split(":")
+        temperature = float(temperature)
+
+        if temperature == 0:
+            return (boundary_logprobs > math.log(0.5))
+        elif temperature == 1:
+            return torch.bernoulli(torch.exp(boundary_logprobs)).to(torch.bool)
+        else:
+            raise NotImplementedError("Temperatures outside {0,1} are not implemented yet.")
+    elif boundary_threshold.startswith("topk:"):
+        _, topk = boundary_threshold.split(":")
+        topk = int(topk)
+        thresholds = torch.quantile(boundary_logprobs, dim=1, q=1 - (topk / boundary_logprobs.shape[1]))
+        return (boundary_logprobs >= thresholds.unsqueeze(-1))
+    elif boundary_threshold.startswith("topk_percent:"):
+        _, topk_percent = boundary_threshold.split(":")
+        topk_percent = float(topk_percent)
+        assert 0 <= topk_percent <= 1
+        thresholds = torch.quantile(boundary_logprobs, dim=1, q=1 - topk_percent)
+        return (boundary_logprobs >= thresholds.unsqueeze(-1))
+    else:
+        raise ValueError(f"Unknown boundary threshold: {boundary_threshold}")
+
+
+def _pad(tensors: list[torch.Tensor], multiple_of: int, direction: str, value):
+    max_len = max(t.size(0) for t in tensors)
+    if multiple_of > 1:
+        # Round up max_len to the nearest multiple_of
+        max_len = ((max_len + multiple_of - 1) // multiple_of) * multiple_of
+    padded = []
+    for t in tensors:
+        if direction == "left":
+            pad_shape = (max_len - t.size(0), 0)
+        elif direction == "right":
+            pad_shape = (0, max_len - t.size(0))
+        else:
+            raise ValueError(f"Unknown direction: {direction}. Must be 'left' or 'right'.")
+        padded.append(F.pad(t, pad_shape, value=value))
+    return torch.stack(padded, dim=0)
+
+def pad_right(
+    tensors: list[torch.Tensor],
+    multiple_of: int = 128,
+    value=0,
+):
+    return _pad(tensors, multiple_of, direction="right", value=value)
+
+def pad_left(
+    tensors: list[torch.Tensor],
+    multiple_of: int = 128,
+    value=0,
+):
+    return _pad(tensors, multiple_of, direction="left", value=value)
+
+class MaskState:
+    def __init__(self, mask):
+        self.cpu_mask = mask.cpu()
+
+        self.mask = mask
+        self.inv_mask = ~mask
+        self._all = self.cpu_mask.all().item()
+        self._any = self.cpu_mask.any().item()
+
+    def any(self):
+        return self._any
+
+    def all(self):
+        return self._all
+
+    def selective_get(self, x, inv=False):
+        # try to avoid sync through nonzero on index
+        if inv:
+            if self.all():
+                return x[[]]
+            elif not self.any():
+                return x
+            else:
+                return x[self.inv_mask]
+        else:
+            if self.all():
+                return x
+            elif not self.any():
+                return x[[]]
+            else:
+                return x[self.mask]
+
+    def selective_put(self, x, out, inv=False):
+        # try to avoid sync through nonzero on index
+        if inv:
+            if self.all():
+                return
+            elif not self.any():
+                out.copy_(x)
+            else:
+                out[self.inv_mask] = x
+        else:
+            if self.all():
+                out.copy_(x)
+            elif not self.any():
+                return
+            else:
+                out[self.mask] = x
+
+    def selective_add(self, x, out, inv=False):
+        # try to avoid sync through nonzero on index
+        if inv:
+            if self.all():
+                return
+            elif not self.any():
+                out.add_(x)
+            else:
+                out[self.inv_mask] += x
+        else:
+            if self.all():
+                out.add_(x)
+            elif not self.any():
+                return
+            else:
+                out[self.mask] += x
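Finally, a small sketch of how these helpers fit together (again my own example with made-up shapes, assuming the file is importable locally). `compute_boundary_mask` turns per-byte boundary log-probabilities into a boolean patch-boundary mask under one of several threshold rules; `pad_right`/`pad_left` batch ragged sequences to a common length rounded up to a multiple; `MaskState` pays one CPU sync up front so that masked gathers can skip indexing entirely in the all-true/all-false cases:

```python
import torch
from utils_bolmo import compute_boundary_mask, pad_right, MaskState

# Fake per-byte boundary log-probs: batch of 2 sequences, 10 bytes each.
logprobs = torch.rand(2, 10).log()

m1 = compute_boundary_mask(logprobs, "sample:0")           # boundary where p > 0.5
m2 = compute_boundary_mask(logprobs, "topk:3")             # top 3 positions per sequence
m3 = compute_boundary_mask(logprobs, "topk_percent:0.25")  # top 25% per sequence

# Batch ragged tensors, padding on the right up to a multiple of 8.
batch = pad_right([torch.ones(5), torch.ones(9)], multiple_of=8, value=0)
print(batch.shape)  # torch.Size([2, 16])

# MaskState: one CPU sync in the constructor, then cheap fast paths.
state = MaskState(m1)
selected = state.selective_get(logprobs)  # logprobs[m1] unless m1 is all/none
```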