Files changed:

- src/accelerator.py +100 -12
- src/big_modeling.py +9 -3
- src/commands/config/config_args.py +7 -0
- src/commands/config/config_utils.py +1 -0
- src/commands/estimate.py +1 -0
- src/commands/launch.py +1 -0
- src/data_loader.py +45 -5
- src/hooks.py +22 -0
- src/launchers.py +1 -0
- src/local_sgd.py +5 -0
- src/logging.py +3 -0
- src/optimizer.py +18 -0
- src/scheduler.py +7 -0
- src/state.py +62 -5
- src/tracking.py +47 -0
- src/utils/dataclasses.py +31 -0
- src/utils/deepspeed.py +20 -0
- src/utils/imports.py +1 -0
- src/utils/launch.py +3 -0
- src/utils/megatron_lm.py +73 -15
- src/utils/memory.py +1 -0
- src/utils/modeling.py +2 -0
- src/utils/offload.py +8 -0
- src/utils/operations.py +18 -0
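As far as the rendered hunks below show, every change in this commit is whitespace-only: it restores the blank lines that separate methods, properties, and nested helper functions (the per-file counts above are consistent with that, and the handful of removed lines are `def` statements that reappear unchanged beneath a newly inserted blank line). Note that several restored blanks land between a decorator such as `@property` and its `def`; that is how the diff renders, and it is still valid Python. A minimal sketch of the recurring pattern, using a simplified stand-in for the real decorator (the actual method delegates to `PartialState`):

    # Before: the nested helper sits flush against the surrounding code.
    def on_main_process(function):
        def _inner(*args, **kwargs):
            return function(*args, **kwargs)
        return _inner


    # After: blank lines set the helper apart, as in the hunks below.
    def on_main_process(function):

        def _inner(*args, **kwargs):
            return function(*args, **kwargs)

        return _inner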
src/accelerator.py
CHANGED

@@ -87,6 +87,7 @@ class Accelerator:
         - **sync_gradients** (`bool`) -- Whether the gradients are currently being synced across all processes.
         - **use_distributed** (`bool`) -- Whether the current configuration is for distributed training.
     """
+
     def __init__(
         self,
         device_placement: bool = True,

@@ -296,44 +297,56 @@ class Accelerator:
         self.flag_tensor = None
         check_os_kernel()
     @property
+
     def use_distributed(self):
         """
         Whether the Accelerator is configured for distributed training
         """
         return self.state.use_distributed
     @property
+
     def distributed_type(self):
         return self.state.distributed_type
     @property
+
     def num_processes(self):
         return self.state.num_processes
     @property
+
     def process_index(self):
         return self.state.process_index
     @property
+
     def local_process_index(self):
         return self.state.local_process_index
     @property
+
     def device(self):
         return self.state.device
     @property
+
     def project_dir(self):
         return self.project_configuration.project_dir
     @property
+
     def logging_dir(self):
         return self.project_configuration.logging_dir
     @property
+
     def save_iteration(self):
         return self.project_configuration.iteration
     @property
+
     def is_main_process(self):
         """True for one process only."""
         return self.state.is_main_process
     @property
+
     def is_local_main_process(self):
         """True for one process per server."""
         return self.state.is_local_main_process
     @property
+
     def use_fp16(self):
         warnings.warn(
             "The `use_fp16` property is deprecated and will be removed in version 1.0 of Accelerate use "

@@ -342,12 +355,15 @@ class Accelerator:
         )
         return self.mixed_precision != "no"
     @property
+
     def is_last_process(self):
         return self.process_index == self.num_processes - 1
     @property
+
     def mixed_precision(self):
         return self.state.mixed_precision
     @contextmanager
+
     def split_between_processes(self, inputs: list | tuple | dict | torch.Tensor, apply_padding: bool = False):
         """
         Splits `input` between `self.num_processes` quickly and can be then used on that process. Useful when doing

@@ -382,6 +398,7 @@ class Accelerator:
         """
         with PartialState().split_between_processes(inputs, apply_padding=apply_padding) as inputs:
             yield inputs
+
     def on_main_process(self, function: Callable[..., Any] = None):
         """
         A decorator that will run the decorated function on the main process only. Can also be called using the

@@ -407,9 +424,11 @@ class Accelerator:
                 raise ValueError(
                     "The `on_main_process` decorator must be called with a function on an instantiated `Accelerator` object."
                 )
-        def _inner(*args, **kwargs):
+
+        def _inner(*args, **kwargs):
             return PartialState().on_main_process(function)(*args, **kwargs)
         return _inner
+
     def on_local_main_process(self, function: Callable[..., Any] = None):
         """
         A decorator that will run the decorated function on the local main process only. Can also be called using the

@@ -422,7 +441,8 @@ class Accelerator:
         from accelerate import Accelerator
         accelerator = Accelerator()
         @accelerator.on_local_main_process
-        def print_something():
+
+        def print_something():
             print("This will be printed by process 0 only on each server.")
         print_something()
         # On server 1:

@@ -439,9 +459,11 @@ class Accelerator:
                 raise ValueError(
                     "The `on_local_main_process` decorator must be called with a function on an instantiated `Accelerator` object."
                 )
-        def _inner(*args, **kwargs):
+
+        def _inner(*args, **kwargs):
             return PartialState().on_local_main_process(function)(*args, **kwargs)
         return _inner
+
     def on_last_process(self, function: Callable[..., Any]):
         """
         A decorator that will run the decorated function on the last process only. Can also be called using the

@@ -454,7 +476,8 @@ class Accelerator:
         from accelerate import Accelerator
         accelerator = Accelerator()
         @accelerator.on_last_process
-        def print_something():
+
+        def print_something():
             print(f"Printed on process {accelerator.process_index}")
         print_something()
         "Printed on process 3"

@@ -468,9 +491,11 @@ class Accelerator:
                 raise ValueError(
                     "The `on_last_process` decorator must be called with a function on an instantiated `Accelerator` object."
                 )
-        def _inner(*args, **kwargs):
+
+        def _inner(*args, **kwargs):
             return PartialState().on_last_process(function)(*args, **kwargs)
         return _inner
+
     def on_process(self, function: Callable[..., Any] = None, process_index: int = None):
         """
         A decorator that will run the decorated function on a given process index only. Can also be called using the

@@ -486,7 +511,8 @@ class Accelerator:
         from accelerate import Accelerator
         accelerator = Accelerator()
         @accelerator.on_process(process_index=2)
-        def print_something():
+
+        def print_something():
             print(f"Printed on process {accelerator.process_index}")
         print_something()
         "Printed on process 2"

@@ -503,9 +529,11 @@ class Accelerator:
                 raise ValueError(
                     "The `on_main_process` decorator must be called with a function on an instantiated `Accelerator` object."
                 )
-        def _inner(*args, **kwargs):
+
+        def _inner(*args, **kwargs):
             return PartialState().on_process(function, process_index)(*args, **kwargs)
         return _inner
+
     def on_local_process(self, function: Callable[..., Any] = None, local_process_index: int = None):
         """
         A decorator that will run the decorated function on a given local process index only. Can also be called using

@@ -521,7 +549,8 @@ class Accelerator:
         from accelerate import Accelerator
         accelerator = Accelerator()
         @accelerator.on_local_process(local_process_index=2)
-        def print_something():
+
+        def print_something():
             print(f"Printed on process {accelerator.local_process_index}")
         print_something()
         # On server 1:

@@ -541,10 +570,12 @@ class Accelerator:
                 raise ValueError(
                     "The `on_main_process` decorator must be called with a function on an instantiated `Accelerator` object."
                 )
-        def _inner(*args, **kwargs):
+
+        def _inner(*args, **kwargs):
             return PartialState().on_local_process(function, local_process_index)(*args, **kwargs)
         return _inner
     @contextmanager
+
     def main_process_first(self):
         """
         Lets the main process go first inside a with block.

@@ -562,6 +593,7 @@ class Accelerator:
         with self.state.main_process_first():
             yield
     @contextmanager
+
     def local_main_process_first(self):
         """
         Lets the local main process go inside a with block.

@@ -579,6 +611,7 @@ class Accelerator:
         with self.state.local_main_process_first():
             yield
     @contextmanager
+
     def no_sync(self, model):
         """
         A context manager to disable gradient synchronizations across DDP processes by calling

@@ -613,6 +646,7 @@ class Accelerator:
             yield
     @staticmethod
     @contextmanager
+
     def trigger_sync_in_backward(model):
         """Trigger the sync of the gradients in the next backward pass of the model after multiple forward passes under
         `Accelerator.no_sync` (only applicable in multi-GPU scenarios).

@@ -651,6 +685,7 @@ class Accelerator:
         finally:
             model.require_backward_grad_sync = old_require_backward_grad_sync
             model.require_forward_param_sync = old_require_forward_param_sync
+
     def _do_sync(self):
         "Sets the right `sync_gradients` context and either resets or increases `self.step`"
         if self.gradient_state.sync_with_dataloader and self.gradient_state.end_of_dataloader:

@@ -660,18 +695,23 @@ class Accelerator:
             self.step += 1
             self.gradient_state._set_sync_gradients((self.step % self.gradient_state.num_steps) == 0)
     @property
+
     def sync_gradients(self):
         return self.gradient_state.sync_gradients
     @sync_gradients.setter
+
     def sync_gradients(self, sync_gradients):
         self.gradient_state.sync_gradients = sync_gradients
     @property
+
     def gradient_accumulation_steps(self):
         return self.gradient_state.num_steps
     @gradient_accumulation_steps.setter
+
     def gradient_accumulation_steps(self, gradient_accumulation_steps):
         self.gradient_state.plugin_kwargs.update({"num_steps": gradient_accumulation_steps})
     @contextmanager
+
     def accumulate(self, *models):
         """
         A context manager that will lightly wrap around and perform gradient accumulation automatically

@@ -700,6 +740,7 @@ class Accelerator:
                 cm_stack.enter_context(contextlib.nullcontext() if self.sync_gradients else self.no_sync(m))
             yield
     @contextmanager
+
     def join_uneven_inputs(self, joinables, even_batches=None):
         """
         A context manager that facilitates distributed training or evaluation on uneven inputs, which acts as a wrapper

@@ -766,6 +807,7 @@ class Accelerator:
                 )
             with contextlib.nullcontext(joinables):
                 yield
+
     def print(self, *args, **kwargs):
         """
         Drop in replacement of `print()` to only print once per server.

@@ -777,6 +819,7 @@ class Accelerator:
         ```
         """
         self.state.print(*args, **kwargs)
+
     def _prepare_one(self, obj, first_pass=False, device_placement=None):
         # First pass of preparation: DataLoader, model, optimizer
         if first_pass:

@@ -793,6 +836,7 @@ class Accelerator:
             return scheduler
         # Return the unprocessed object if previous criteria was not met
         return obj
+
     def prepare(self, *args, device_placement=None):
         """
         Prepare all objects passed in `args` for distributed training and mixed precision, then return them in the same

@@ -909,6 +953,7 @@ class Accelerator:
             ):
                 setattr(item, "_is_accelerate_prepared", True)
         return result if len(result) > 1 else result[0]
+
     def prepare_model(self, model: torch.nn.Module, device_placement: bool = None, evaluation_mode: bool = False):
         """
         Prepares a PyTorch model for training in any distributed setup. It is recommended to use

@@ -1061,6 +1106,7 @@ class Accelerator:
                 raise ValueError("Using `torch.compile` requires PyTorch 2.0 or higher.")
             model = torch.compile(model, **self.state.dynamo_plugin.to_kwargs())
         return model
+
     def _prepare_deepspeed(self, *args):
         import deepspeed
         deepspeed_plugin = self.state.deepspeed_plugin

@@ -1250,6 +1296,7 @@ class Accelerator:
                     "You can't use same `Accelerator()` instance with multiple models when using DeepSpeed"
                 )
         return tuple(result)
+
     def _prepare_megatron_lm(self, *args):
         megatron_lm_plugin = self.state.megatron_lm_plugin
         if not megatron_lm_plugin.megatron_dataset_flag:

@@ -1342,6 +1389,7 @@ class Accelerator:
                     "You can't use same `Accelerator()` instance with multiple models when using Megatron-LM"
                 )
         return tuple(result)
+
     def _prepare_ipex(self, *args):
         if not is_ipex_available():
             raise ImportError(

@@ -1373,6 +1421,7 @@ class Accelerator:
             elif isinstance(result[i], (torch.optim.Optimizer)):
                 result[i] = optimizer
         return tuple(result)
+
     def _prepare_msamp(self, *args):
         if not is_msamp_available():
             raise ImportError(

@@ -1407,6 +1456,7 @@ class Accelerator:
             elif isinstance(result[i], (torch.optim.Optimizer)):
                 result[i] = optimizer
         return tuple(result)
+
     def prepare_data_loader(
         self, data_loader: torch.utils.data.DataLoader, device_placement=None, slice_fn_for_dispatch=None
     ):

@@ -1453,6 +1503,7 @@ class Accelerator:
         )
         self._dataloaders.append(prepared_data_loader)
         return prepared_data_loader
+
     def prepare_optimizer(self, optimizer: torch.optim.Optimizer, device_placement=None):
         """
         Prepares a PyTorch Optimizer for training in any distributed setup. It is recommended to use

@@ -1481,6 +1532,7 @@ class Accelerator:
         optimizer = AcceleratedOptimizer(optimizer, device_placement=device_placement, scaler=self.scaler)
         self._optimizers.append(optimizer)
         return optimizer
+
     def prepare_scheduler(self, scheduler: LRScheduler):
         """
         Prepares a PyTorch Scheduler for training in any distributed setup. It is recommended to use

@@ -1517,6 +1569,7 @@ class Accelerator:
         )
         self._schedulers.append(scheduler)
         return scheduler
+
     def backward(self, loss, **kwargs):
         """
         Scales the gradients in accordance to the `GradientAccumulationPlugin` and calls the correct `backward()` based

@@ -1542,6 +1595,7 @@ class Accelerator:
             self.scaler.scale(loss).backward(**kwargs)
         else:
             loss.backward(**kwargs)
+
     def set_trigger(self):
         """
         Sets the internal trigger tensor to 1 on the current process. A latter check should follow using this which

@@ -1563,6 +1617,7 @@ class Accelerator:
         ```
         """
         self.flag_tensor = torch.tensor(1, device=self.device)
+
     def check_trigger(self):
         """
         Checks if the internal trigger tensor has been set to 1 in any of the processes. If so, will return `True` and

@@ -1591,6 +1646,7 @@ class Accelerator:
             self.flag_tensor = torch.tensor(0, device=self.device)
             return True
         return False
+
     def unscale_gradients(self, optimizer=None):
         """
         Unscale the gradients in mixed precision training with AMP. This is a noop in all other settings.

@@ -1624,6 +1680,7 @@ class Accelerator:
                     gradients = xm._fetch_gradients(opt)
                     self.reduce(gradients, scale=1.0 / self.num_processes)
                 self.scaler.unscale_(opt)
+
     def clip_grad_norm_(self, parameters, max_norm, norm_type=2):
         """
         Should be used in place of `torch.nn.utils.clip_grad_norm_`.

@@ -1656,6 +1713,7 @@ class Accelerator:
             return None
         self.unscale_gradients()
         return torch.nn.utils.clip_grad_norm_(parameters, max_norm, norm_type=norm_type)
+
     def clip_grad_value_(self, parameters, clip_value):
         """
         Should be used in place of `torch.nn.utils.clip_grad_value_`.

@@ -1678,6 +1736,7 @@ class Accelerator:
             raise Exception("DeepSpeed and FSDP do not support `clip_grad_value_`. Use `clip_grad_norm_` instead.")
         self.unscale_gradients()
         torch.nn.utils.clip_grad_value_(parameters, clip_value)
+
     def gather(self, tensor):
         """
         Gather the values in *tensor* across all processes and concatenate them on the first dimension. Useful to

@@ -1703,6 +1762,7 @@ class Accelerator:
         ```
         """
         return gather(tensor)
+
     def gather_for_metrics(self, input_data):
         """
         Gathers `input_data` and potentially drops duplicates in the last batch if on a distributed system. Should be

@@ -1744,7 +1804,8 @@ class Accelerator:
                         return data
                     elif self.gradient_state.remainder > 0:
                         # Last batch needs to be truncated on distributed systems as it contains additional samples
-                        def _adjust_samples(tensor):
+
+                        def _adjust_samples(tensor):
                             return tensor[: self.gradient_state.remainder]
                         return recursively_apply(_adjust_samples, data)
                     else:  # remainder is 0

@@ -1756,6 +1817,7 @@ class Accelerator:
         except Exception:
             # Dataset had no length or raised an error
             return data
+
     def reduce(self, tensor, reduction="sum", scale=1.0):
         """
         Reduce the values in *tensor* across all processes based on *reduction*.

@@ -1785,6 +1847,7 @@ class Accelerator:
         ```
         """
         return reduce(tensor, reduction, scale)
+
     def pad_across_processes(self, tensor, dim=0, pad_index=0, pad_first=False):
         """
         Recursively pad the tensors in a nested list/tuple/dictionary of tensors from all devices to the same size so

@@ -1814,6 +1877,7 @@ class Accelerator:
         ```
         """
         return pad_across_processes(tensor, dim=dim, pad_index=pad_index, pad_first=pad_first)
+
     def unwrap_model(self, model, keep_fp32_wrapper: bool = True):
         """
         Unwraps the `model` from the additional layer possible added by [`~Accelerator.prepare`]. Useful before saving

@@ -1840,6 +1904,7 @@ class Accelerator:
         ```
         """
         return extract_model_from_parallel(model, keep_fp32_wrapper)
+
     def wait_for_everyone(self):
         """
         Will stop the execution of the current process until every other process has reached that point (so this does

@@ -1861,6 +1926,7 @@ class Accelerator:
         """
         wait_for_everyone()
     @on_main_process
+
     def init_trackers(self, project_name: str, config: dict | None = None, init_kwargs: dict | None = {}):
         """
         Initializes a run for all trackers stored in `self.log_with`, potentially with starting configurations

@@ -1902,6 +1968,7 @@ class Accelerator:
         if config is not None:
             for tracker in self.trackers:
                 tracker.store_init_configuration(config)
+
     def get_tracker(self, name: str, unwrap: bool = False):
         """
         Returns a `tracker` from `self.trackers` based on `name` on the main process only.

@@ -1929,6 +1996,7 @@ class Accelerator:
         # Handle tracker only made on main process
         return GeneralTracker(_blank=True)
     @on_main_process
+
     def log(self, values: dict, step: int | None = None, log_kwargs: dict | None = {}):
         """
         Logs `values` to all stored trackers in `self.trackers` on the main process only.

@@ -1954,6 +2022,7 @@ class Accelerator:
         for tracker in self.trackers:
             tracker.log(values, step=step, **log_kwargs.get(tracker.name, {}))
     @on_main_process
+
     def end_training(self):
         """
         Runs any special end training behaviors, such as stopping trackers on the main process only. Should always be

@@ -1969,6 +2038,7 @@ class Accelerator:
         """
         for tracker in self.trackers:
             tracker.finish()
+
     def save(self, obj, f, safe_serialization=False):
         """
         Save the object passed to disk once per machine. Use in place of `torch.save`.

@@ -1993,6 +2063,7 @@ class Accelerator:
             save_on_each_node=self.project_configuration.save_on_each_node,
             safe_serialization=safe_serialization,
         )
+
     def save_model(
         self,
         model: torch.nn.Module,

@@ -2081,6 +2152,7 @@ class Accelerator:
                 f"split in {len(shards)} checkpoint shards. You can find where each parameters has been saved in the "
                 f"index located at {save_index_file}."
             )
+
     def register_save_state_pre_hook(self, hook: Callable[..., None]) -> hooks.RemovableHandle:
         """
         Registers a pre hook to be run before `save_checkpoint` is called in [`Accelerator.save_state`].

@@ -2104,6 +2176,7 @@ class Accelerator:
         handle = hooks.RemovableHandle(self._save_model_state_pre_hook)
         self._save_model_state_pre_hook[handle.id] = hook
         return handle
+
     def save_state(self, output_dir: str = None, safe_serialization: bool = True, **save_model_func_kwargs):
         """
         Saves the current states of the model, optimizer, scaler, RNG generators, and registered objects to a folder.

@@ -2143,7 +2216,8 @@ class Accelerator:
                 and (len(folders) + 1 > self.project_configuration.total_limit)
                 and self.is_main_process
             ):
-                def _inner(folder):
+
+                def _inner(folder):
                     return list(map(int, re.findall(r"[\/]?([0-9]+)(?=[^\/]*$)", folder)))[0]
                 folders.sort(key=_inner)
                 logger.warning(

@@ -2219,6 +2293,7 @@ class Accelerator:
             save_custom_state(obj, output_dir, i, save_on_each_node=self.project_configuration.save_on_each_node)
         self.project_configuration.iteration += 1
         return save_location
+
     def register_load_state_pre_hook(self, hook: Callable[..., None]) -> hooks.RemovableHandle:
         """
         Registers a pre hook to be run before [`load_checkpoint`] is called in [`Accelerator.load_state`].

@@ -2241,6 +2316,7 @@ class Accelerator:
         handle = hooks.RemovableHandle(self._load_model_state_pre_hook)
         self._load_model_state_pre_hook[handle.id] = hook
         return handle
+
     def load_state(self, input_dir: str = None, **load_model_func_kwargs):
         """
         Loads the current states of the model, optimizer, scaler, RNG generators, and registered objects.

@@ -2274,7 +2350,8 @@ class Accelerator:
             # Pick up from automatic checkpoint naming
             input_dir = os.path.join(self.project_dir, "checkpoints")
             folders = [os.path.join(input_dir, folder) for folder in os.listdir(input_dir)]
-            def _inner(folder):
+
+            def _inner(folder):
                 return list(map(int, re.findall(r"[\/]?([0-9]+)(?=[^\/]*$)", folder)))[0]
             folders.sort(key=_inner)
             input_dir = folders[-1]

@@ -2356,6 +2433,7 @@ class Accelerator:
             logger.info(f"Loading in {len(custom_checkpoints)} custom states")
             for index, obj in enumerate(self._custom_objects):
                 load_custom_state(obj, input_dir, index)
+
     def free_memory(self):
         """
         Will release all references to the internal objects stored and call the garbage collector. You should call this

@@ -2377,6 +2455,7 @@ class Accelerator:
         self.deepspeed_engine_wrapped = None
         self.step = 0
         release_memory()
+
     def clear(self):
         """
         Alias for [`Accelerate.free_memory`], releases all references to the internal objects stored and call the

@@ -2392,6 +2471,7 @@ class Accelerator:
         ```
         """
         self.free_memory()
+
     def _get_named_parameters(self, *args):
         named_parameters = {}
         for obj in args:

@@ -2399,6 +2479,7 @@ class Accelerator:
                 obj = extract_model_from_parallel(obj)
                 named_parameters.update({n: p for n, p in obj.named_parameters()})
         return named_parameters
+
     def _get_devices(self, *args):
         model_device = None
         optimizer_device = None

@@ -2415,6 +2496,7 @@ class Accelerator:
                         optimizer_device = param_group["params"][0].device
                         break
         return (model_device, optimizer_device)
+
     def get_state_dict(self, model, unwrap=True):
         """
         Returns the state dictionary of a model sent through [`Accelerator.prepare`] potentially without full

@@ -2461,6 +2543,7 @@ class Accelerator:
             model = self.unwrap_model(model)
             state_dict = model.state_dict()
         return state_dict
+
     def register_for_checkpointing(self, *objects):
         """
         Makes note of `objects` and will save or load them in during `save_state` or `load_state`.

@@ -2490,6 +2573,7 @@ class Accelerator:
             raise ValueError(err)
         self._custom_objects.extend(objects)
     @contextmanager
+
     def autocast(self, cache_enabled: bool = False, autocast_handler: AutocastKwargs = None):
         """
         Will apply automatic mixed-precision inside the block inside this context manager, if it is enabled. Nothing

@@ -2521,6 +2605,7 @@ class Accelerator:
         yield
         autocast_context.__exit__(*sys.exc_info())
     @property
+
     def optimizer_step_was_skipped(self):
         """
         Whether or not the optimizer update was skipped (because of gradient overflow in mixed precision), in which

@@ -2530,6 +2615,7 @@ class Accelerator:
             if optimizer.step_was_skipped:
                 return True
         return False
+
     def skip_first_batches(self, dataloader, num_batches: int = 0):
         """
         Creates a new `torch.utils.data.DataLoader` that will efficiently skip the first `num_batches`.

@@ -2556,9 +2642,11 @@ class Accelerator:
         ```
         """
         return skip_first_batches(dataloader, num_batches=num_batches)
+
     def __deepcopy__(self, memo):
         logger.info("Deep copying the `Accelerator` object, note that this will point to the same original object.")
         return self
+
     def verify_device_map(self, model: torch.nn.Module) -> bool:
         """
         Verifies that `model` has not been prepared with big model inference with a device-map resembling `auto`.
src/big_modeling.py
CHANGED

@@ -51,12 +51,14 @@ def init_on_device(device: torch.device, include_buffers: bool = None):
     old_register_parameter = nn.Module.register_parameter
     if include_buffers:
         old_register_buffer = nn.Module.register_buffer
+
     def register_empty_parameter(module, name, param):
         old_register_parameter(module, name, param)
         if param is not None:
             param_cls = type(module._parameters[name])
             kwargs = module._parameters[name].__dict__
             module._parameters[name] = param_cls(module._parameters[name].to(device), **kwargs)
+
     def register_empty_buffer(module, name, buffer, persistent=True):
         old_register_buffer(module, name, buffer, persistent=persistent)
         if buffer is not None:

@@ -69,8 +71,10 @@ def init_on_device(device: torch.device, include_buffers: bool = None):
         }
     else:
         tensor_constructors_to_patch = {}
+
     def patch_tensor_constructor(fn):
-        def wrapper(*args, **kwargs):
+
+        def wrapper(*args, **kwargs):
             kwargs["device"] = device
             return fn(*args, **kwargs)
         return wrapper

@@ -318,9 +322,11 @@ def dispatch_model(
         # Attaching the hook may break tied weights, so we retie them
         retie_parameters(model, tied_params)
         # add warning to cuda and to method
-        def add_warning(fn, model):
+
+        def add_warning(fn, model):
             @wraps(fn)
-            def wrapper(*args, **kwargs):
+
+            def wrapper(*args, **kwargs):
                 logger.warning("You shouldn't move a model when it is dispatched on multiple devices.")
                 for param in model.parameters():
                     if param.device == torch.device("meta"):
src/commands/config/config_args.py
CHANGED

@@ -47,6 +47,7 @@ class BaseConfig:
     mixed_precision: str
     use_cpu: bool
     debug: bool
+
     def to_dict(self):
         result = self.__dict__
         # For serialization, it's best to convert Enums to strings (or their underlying value type).
@@ -58,6 +59,7 @@ class BaseConfig:
         result = {k: v for k, v in result.items() if v is not None}
         return result
     @classmethod
+
     def from_json_file(cls, json_file=None):
         json_file = default_json_config_file if json_file is None else json_file
         with open(json_file, "r", encoding="utf-8") as f:
@@ -82,11 +84,13 @@ class BaseConfig:
                 " version or fix (and potentially remove) these keys from your config file."
             )
         return cls(**config_dict)
+
     def to_json_file(self, json_file):
         with open(json_file, "w", encoding="utf-8") as f:
             content = json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
             f.write(content)
     @classmethod
+
     def from_yaml_file(cls, yaml_file=None):
         yaml_file = default_yaml_config_file if yaml_file is None else yaml_file
         with open(yaml_file, "r", encoding="utf-8") as f:
@@ -113,9 +117,11 @@ class BaseConfig:
                 " version or fix (and potentially remove) these keys from your config file."
             )
         return cls(**config_dict)
+
     def to_yaml_file(self, yaml_file):
         with open(yaml_file, "w", encoding="utf-8") as f:
             yaml.safe_dump(self.to_dict(), f)
+
     def __post_init__(self):
         if isinstance(self.compute_environment, str):
             self.compute_environment = ComputeEnvironment(self.compute_environment)
@@ -158,6 +164,7 @@ class ClusterConfig(BaseConfig):
     tpu_env: List[str] = None
     # args for dynamo
     dynamo_config: dict = None
+
     def __post_init__(self):
         if self.deepspeed_config is None:
             self.deepspeed_config = {}
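The `to_dict` hunk above matters because Enum members are not directly JSON- or YAML-serializable. A minimal, self-contained sketch of that conversion — `MyEnv` is a stand-in for `ComputeEnvironment`, not the library's class:

from dataclasses import dataclass
from enum import Enum

class MyEnv(str, Enum):
    LOCAL_MACHINE = "LOCAL_MACHINE"

@dataclass
class Config:
    compute_environment: MyEnv

    def to_dict(self):
        result = self.__dict__.copy()
        for key, value in result.items():
            if isinstance(value, Enum):
                result[key] = value.value  # serialize the underlying string
        return {k: v for k, v in result.items() if v is not None}

print(Config(MyEnv.LOCAL_MACHINE).to_dict())  # {'compute_environment': 'LOCAL_MACHINE'}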
src/commands/config/config_utils.py
CHANGED

@@ -49,6 +49,7 @@ class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
     """
     A custom formatter that will remove the usage line from the help message for subcommands.
     """
+
     def _format_usage(self, usage, actions, groups, prefix):
         usage = super()._format_usage(usage, actions, groups, prefix)
         usage = usage.replace("<command> [<args>] ", "")
src/commands/estimate.py
CHANGED

@@ -100,6 +100,7 @@ def create_ascii_table(headers: list, rows: list, title: str):
     formats = [f"%{column_widths[i]}s" for i in range(len(rows[0]))]
     pattern = f"{sep_char}{sep_char.join(formats)}{sep_char}"
     diff = 0
+
     def make_row(left_char, middle_char, right_char):
         return f"{left_char}{middle_char.join([in_between * n for n in column_widths])}{in_between * diff}{right_char}"
     separator = make_row("├", "┼", "┤")
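For reference, the nested `make_row` helper composes a table border from the enclosing scope's `column_widths`, `in_between`, and `diff`; the values below are made-up stand-ins:

column_widths, in_between, diff = [5, 3], "─", 0

def make_row(left_char, middle_char, right_char):
    return f"{left_char}{middle_char.join([in_between * n for n in column_widths])}{in_between * diff}{right_char}"

print(make_row("├", "┼", "┤"))  # ├─────┼───┤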
src/commands/launch.py
CHANGED

@@ -17,6 +17,7 @@ class _CustomHelpAction(argparse._HelpAction):
     called. This is useful for the case where the user is using a specific platform and only wants to see the arguments
     for that platform.
     """
+
     def __call__(self, parser, namespace, values, option_string=None):
         if "accelerate" in sys.argv[0] and "launch" in sys.argv[1:]:
             args = sys.argv[2:]
src/data_loader.py
CHANGED

@@ -29,10 +29,12 @@ class SeedableRandomSampler(RandomSampler):
     If a custom `generator` is passed, it will rely on its initial seed as well as the current iteration it is on
     (stored in `self.epoch`).
     """
+
     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
         self.epoch = 0
         self.seed = torch.random.initial_seed()
+
     def __iter__(self):
         if self.generator is None:
             self.generator = torch.Generator()
@@ -43,6 +45,7 @@ class SeedableRandomSampler(RandomSampler):
         self.generator.manual_seed(seed)
         yield from super().__iter__()
         self.set_epoch(self.epoch + 1)
+
     def set_epoch(self, epoch: int):
         "Sets the current iteration of the sampler."
         self.epoch = epoch
@@ -74,6 +77,7 @@ class BatchSamplerShard(BatchSampler):
     `BatchSampler`s with varying batch sizes are not enabled by default. To enable this behaviour, set `even_batches`
     equal to `False`
     </Tip>"""
+
     def __init__(
         self,
         batch_sampler: BatchSampler,
@@ -100,8 +104,10 @@ class BatchSamplerShard(BatchSampler):
                 "are not calling this method directly, set `accelerator.even_batches=False` instead."
             )
     @property
+
     def total_length(self):
         return len(self.batch_sampler)
+
     def __len__(self):
         if self.split_batches:
             # Split batches does not change the length of the batch sampler
@@ -119,8 +125,10 @@ class BatchSamplerShard(BatchSampler):
         else:
             # Otherwise it depends on the process index.
             return length + 1 if self.process_index < len(self.batch_sampler) % self.num_processes else length
+
     def __iter__(self):
         return self._iter_with_split() if self.split_batches else self._iter_with_no_split()
+
     def _iter_with_split(self):
         initial_data = []
         batch_length = self.batch_sampler.batch_size // self.num_processes
@@ -141,6 +149,7 @@ class BatchSamplerShard(BatchSampler):
                 initial_data += initial_data
             batch = batch + initial_data
             yield batch[batch_length * self.process_index : batch_length * (self.process_index + 1)]
+
     def _iter_with_no_split(self):
         initial_data = []
         batch_to_yield = []
@@ -212,6 +221,7 @@ class IterableDatasetShard(IterableDataset):
           - the shard on process 0 to yield `[0, 1, 4, 5]` and the sampler on process 1 to yield `[2, 3, 6, 7]` if
             this argument is set to `True`.
     """
+
     def __init__(
         self,
         dataset: IterableDataset,
@@ -232,16 +242,19 @@ class IterableDatasetShard(IterableDataset):
         self.num_processes = num_processes
         self.process_index = process_index
         self.split_batches = split_batches
+
     def set_epoch(self, epoch):
         self.epoch = epoch
         if hasattr(self.dataset, "set_epoch"):
             self.dataset.set_epoch(epoch)
+
     def __len__(self):
         # We will just raise the downstream error if the underlying dataset is not sized
         if self.drop_last:
             return (len(self.dataset) // (self.batch_size * self.num_processes)) * self.batch_size
         else:
             return math.ceil(len(self.dataset) / (self.batch_size * self.num_processes)) * self.batch_size
+
     def __iter__(self):
         if (
             not hasattr(self.dataset, "set_epoch")
@@ -281,12 +294,15 @@ class DataLoaderStateMixin:
         - **remainder** (`int`) -- The number of items that are remaining in the last batch, relative to the total
           batch size
     """
+
     def __init_subclass__(cls, **kwargs):
         cls.end_of_dataloader = False
         cls.remainder = -1
+
     def reset(self):
         self.end_of_dataloader = False
         self.remainder = -1
+
     def begin(self):
         "Prepares the gradient state for the current dataloader"
         self.reset()
@@ -295,6 +311,7 @@ class DataLoaderStateMixin:
                 length = getattr(self.dataset, "total_dataset_length", len(self.dataset))
                 self.remainder = length % self.total_batch_size
         self.gradient_state._add_dataloader(self)
+
     def end(self):
         "Cleans up the gradient state after exiting the dataloader"
         self.gradient_state._remove_dataloader(self)
@@ -325,6 +342,7 @@ class DataLoaderShard(DataLoader, DataLoaderStateMixin):
           number of processes
         - **total_dataset_length** (`int`) -- Total length of the inner dataset across all processes.
     """
+
     def __init__(
         self,
         dataset,
@@ -343,6 +361,7 @@ class DataLoaderShard(DataLoader, DataLoaderStateMixin):
         self.gradient_state = GradientState()
         self._drop_last = _drop_last
         self.iteration = 0
+
     def __iter__(self):
         if self.rng_types is not None:
             synchronize_rng_states(self.rng_types, self.synchronized_generator)
@@ -372,6 +391,7 @@ class DataLoaderShard(DataLoader, DataLoaderStateMixin):
                 break
         self.iteration += 1
         self.end()
+
     def set_epoch(self, epoch: int):
         # In case it is manually passed in, the user can set it to what they like
         if self.iteration != epoch:
@@ -383,6 +403,7 @@ class DataLoaderShard(DataLoader, DataLoaderStateMixin):
         elif hasattr(self.dataset, "set_epoch"):
             self.dataset.set_epoch(epoch)
     @property
+
     def total_batch_size(self):
         batch_sampler = self.sampler if isinstance(self.sampler, BatchSampler) else self.batch_sampler
         return (
@@ -391,6 +412,7 @@ class DataLoaderShard(DataLoader, DataLoaderStateMixin):
             else (batch_sampler.batch_size * getattr(batch_sampler, "num_processes", 1))
         )
     @property
+
     def total_dataset_length(self):
         if hasattr(self.dataset, "total_length"):
             return self.dataset.total_length
@@ -410,22 +432,27 @@ if is_tpu_available(check_device=False):
               number of processes
             - **total_dataset_length** (`int`) -- Total length of the inner dataset across all processes.
         """
-        def __init__(self, dataloader: DataLoaderShard, device: torch.device):
+
+        def __init__(self, dataloader: DataLoaderShard, device: torch.device):
            super().__init__(dataloader, device)
            self._rng_types = self._loader.rng_types
            self._loader.rng_types = None
-        def __iter__(self):
+
+        def __iter__(self):
            if self._rng_types is not None:
                synchronize_rng_states(self._rng_types, self._loader.synchronized_generator)
            return super().__iter__()
        @property
-        def total_batch_size(self):
+
+        def total_batch_size(self):
            return self._loader.total_batch_size
        @property
-        def total_dataset_length(self):
+
+        def total_dataset_length(self):
            return self._loader.total_dataset_length
        @property
-        def batch_sampler(self):
+
+        def batch_sampler(self):
            return self._loader.batch_sampler
 class DataLoaderDispatcher(DataLoader, DataLoaderStateMixin):
     """
@@ -447,6 +474,7 @@ class DataLoaderDispatcher(DataLoader, DataLoaderStateMixin):
           number of processes
         - **total_dataset_length** (`int`) -- Total length of the inner dataset across all processes.
     """
+
     def __init__(
         self, dataset, split_batches: bool = False, skip_batches=0, _drop_last: bool = False, slice_fn=None, **kwargs
     ):
@@ -466,6 +494,7 @@ class DataLoaderDispatcher(DataLoader, DataLoaderStateMixin):
         self.skip_batches = skip_batches
         self.slice_fn = slice_tensors if slice_fn is None else slice_fn
         self.iteration = 0
+
     def _fetch_batches(self, iterator):
         batches, batch = None, None
         # On process 0, we gather the batch to dispatch.
@@ -502,6 +531,7 @@ class DataLoaderDispatcher(DataLoader, DataLoaderStateMixin):
                 batch_info = [None, True]
             broadcast_object_list(batch_info)
         return batch, batch_info
+
     def __iter__(self):
         self.begin()
         self.set_epoch(self.iteration)
@@ -568,6 +598,7 @@ class DataLoaderDispatcher(DataLoader, DataLoaderStateMixin):
             batch_index += 1
         self.iteration += 1
         self.end()
+
     def set_epoch(self, epoch: int):
         # In case it is manually passed in, the user can set it to what they like
         if self.iteration != epoch:
@@ -576,6 +607,7 @@ class DataLoaderDispatcher(DataLoader, DataLoaderStateMixin):
             self.batch_sampler.sampler.set_epoch(epoch)
         elif hasattr(self.dataset, "set_epoch"):
             self.dataset.set_epoch(epoch)
+
     def __len__(self):
         whole_length = super().__len__()
         if self.split_batches:
@@ -585,11 +617,13 @@ class DataLoaderDispatcher(DataLoader, DataLoaderStateMixin):
         else:
             return math.ceil(whole_length / self.state.num_processes)
     @property
+
     def total_batch_size(self):
         return (
             self.dataset.batch_size if self.split_batches else (self.dataset.batch_size * self.dataset.num_processes)
         )
     @property
+
     def total_dataset_length(self):
         return len(self.dataset)
 def prepare_data_loader(
@@ -783,16 +817,20 @@ class SkipBatchSampler(BatchSampler):
     """
     A `torch.utils.data.BatchSampler` that skips the first `n` batches of another `torch.utils.data.BatchSampler`.
     """
+
     def __init__(self, batch_sampler, skip_batches=0):
         self.batch_sampler = batch_sampler
         self.skip_batches = skip_batches
+
     def __iter__(self):
         for index, samples in enumerate(self.batch_sampler):
             if index >= self.skip_batches:
                 yield samples
     @property
+
     def total_length(self):
         return len(self.batch_sampler)
+
     def __len__(self):
         return len(self.batch_sampler) - self.skip_batches
 class SkipDataLoader(DataLoader):
@@ -806,9 +844,11 @@ class SkipDataLoader(DataLoader):
     kwargs:
         All other keyword arguments to pass to the regular `DataLoader` initialization.
     """
+
     def __init__(self, dataset, skip_batches=0, **kwargs):
         super().__init__(dataset, **kwargs)
         self.skip_batches = skip_batches
+
     def __iter__(self):
         for index, batch in enumerate(super().__iter__()):
             if index >= self.skip_batches:
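The `SkipDataLoader` hunks above are small enough to demo end to end; this standalone sketch reproduces just the batch-skipping behavior shown in the diff:

import torch
from torch.utils.data import DataLoader, TensorDataset

class SkipDataLoader(DataLoader):
    def __init__(self, dataset, skip_batches=0, **kwargs):
        super().__init__(dataset, **kwargs)
        self.skip_batches = skip_batches

    def __iter__(self):
        # Iterate like a regular DataLoader, dropping the first `skip_batches` batches
        for index, batch in enumerate(super().__iter__()):
            if index >= self.skip_batches:
                yield batch

dataset = TensorDataset(torch.arange(8))
loader = SkipDataLoader(dataset, skip_batches=2, batch_size=2)
print([batch[0].tolist() for batch in loader])  # [[4, 5], [6, 7]]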
src/hooks.py
CHANGED

@@ -7,6 +7,7 @@ class ModelHook:
     the `torch.no_grad()` context manager.
     """
     no_grad = False
+
     def init_hook(self, module):
         """
         To be executed when the hook is attached to the module.
@@ -14,6 +15,7 @@ class ModelHook:
             module (`torch.nn.Module`): The module attached to this hook.
         """
         return module
+
     def pre_forward(self, module, *args, **kwargs):
         """
         To be executed just before the forward method of the model.
@@ -25,6 +27,7 @@ class ModelHook:
             `Tuple[Tuple[Any], Dict[Str, Any]]`: A tuple with the treated `args` and `kwargs`.
         """
         return args, kwargs
+
     def post_forward(self, module, output):
         """
         To be executed just after the forward method of the model.
@@ -35,6 +38,7 @@ class ModelHook:
             `Any`: The processed `output`.
         """
         return output
+
     def detach_hook(self, module):
         """
         To be executed when the hook is detached from a module.
@@ -46,20 +50,25 @@ class SequentialHook(ModelHook):
     """
     A hook that can contain several hooks and iterates through them at each event.
     """
+
     def __init__(self, *hooks):
         self.hooks = hooks
+
     def init_hook(self, module):
         for hook in self.hooks:
             module = hook.init_hook(module)
         return module
+
     def pre_forward(self, module, *args, **kwargs):
         for hook in self.hooks:
             args, kwargs = hook.pre_forward(module, *args, **kwargs)
         return args, kwargs
+
     def post_forward(self, module, output):
         for hook in self.hooks:
             output = hook.post_forward(module, output)
         return output
+
     def detach_hook(self, module):
         for hook in self.hooks:
             module = hook.detach_hook(module)
@@ -95,6 +104,7 @@ def add_hook_to_module(module: nn.Module, hook: ModelHook, append: bool = False)
         module._old_forward = old_forward
     module = hook.init_hook(module)
     module._hf_hook = hook
+
     def new_forward(module, *args, **kwargs):
         args, kwargs = module._hf_hook.pre_forward(module, *args, **kwargs)
         if module._hf_hook.no_grad:
@@ -143,6 +153,7 @@ class AlignDevicesHook(ModelHook):
         place_submodules (`bool`, *optional*, defaults to `False`):
             Whether to place the submodules on `execution_device` during the `init_hook` event.
     """
+
     def __init__(
         self,
         execution_device: Optional[Union[int, str, torch.device]] = None,
@@ -164,12 +175,14 @@ class AlignDevicesHook(ModelHook):
         self.input_device = None
         self.param_original_devices = {}
         self.buffer_original_devices = {}
+
     def __repr__(self):
         return (
             f"AlignDevicesHook(execution_device={self.execution_device}, offload={self.offload}, "
             f"io_same_device={self.io_same_device}, offload_buffers={self.offload_buffers}, "
             f"place_submodules={self.place_submodules}, skip_keys={repr(self.skip_keys)})"
         )
+
     def init_hook(self, module):
         if not self.offload and self.execution_device is not None:
             for name, _ in named_module_tensors(module, recurse=self.place_submodules):
@@ -196,6 +209,7 @@ class AlignDevicesHook(ModelHook):
             for name in get_non_persistent_buffers(module, recurse=self.place_submodules):
                 set_module_tensor_to_device(module, name, self.execution_device)
         return module
+
     def pre_forward(self, module, *args, **kwargs):
         if self.io_same_device:
             self.input_device = find_device([args, kwargs])
@@ -216,6 +230,7 @@ class AlignDevicesHook(ModelHook):
         return send_to_device(args, self.execution_device), send_to_device(
             kwargs, self.execution_device, skip_keys=self.skip_keys
         )
+
     def post_forward(self, module, output):
         if self.offload:
             for name, _ in named_module_tensors(
@@ -231,6 +246,7 @@ class AlignDevicesHook(ModelHook):
         if self.io_same_device and self.input_device is not None:
             output = send_to_device(output, self.input_device, skip_keys=self.skip_keys)
         return output
+
     def detach_hook(self, module):
         if self.offload:
             for name, device in self.original_devices.items():
@@ -462,6 +478,7 @@ class CpuOffload(ModelHook):
             passed, its offload method will be called just before the forward of the model to which this hook is
             attached.
     """
+
     def __init__(
         self,
         execution_device: Optional[Union[str, int, torch.device]] = None,
@@ -469,8 +486,10 @@ class CpuOffload(ModelHook):
     ):
         self.prev_module_hook = prev_module_hook
         self.execution_device = execution_device if execution_device is not None else PartialState().default_device
+
     def init_hook(self, module):
         return module.to("cpu")
+
     def pre_forward(self, module, *args, **kwargs):
         if self.prev_module_hook is not None:
             self.prev_module_hook.offload()
@@ -481,10 +500,13 @@ class UserCpuOffloadHook:
     A simple hook grouping a model and a `ModelHook`, which provides easy APIs for to call the init method of the hook
     or remove it entirely.
     """
+
     def __init__(self, model, hook):
         self.model = model
         self.hook = hook
+
     def offload(self):
         self.hook.init_hook(self.model)
+
     def remove(self):
         remove_hook_from_module(self.model)
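A simplified sketch of the forward-wrapping idea behind `add_hook_to_module` — not the library's exact implementation, which also handles `no_grad`, preserves `functools` metadata, and restores `_old_forward` on removal:

import torch
import torch.nn as nn

class ScaleOutputHook:
    def pre_forward(self, module, *args, **kwargs):
        return args, kwargs  # inputs pass through unchanged here

    def post_forward(self, module, output):
        return output * 2  # post-process the module output

def attach(module: nn.Module, hook):
    old_forward = module.forward

    def new_forward(*args, **kwargs):
        # The hook sees inputs before `forward` and the output after it
        args, kwargs = hook.pre_forward(module, *args, **kwargs)
        output = old_forward(*args, **kwargs)
        return hook.post_forward(module, output)

    module.forward = new_forward
    return module

layer = attach(nn.Identity(), ScaleOutputHook())
print(layer(torch.ones(2)))  # tensor([2., 2.])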
src/launchers.py
CHANGED

@@ -43,6 +43,7 @@ def notebook_launcher(
     ```python
     # Assume this is defined in a Jupyter Notebook on an instance with two GPUs
     from accelerate import notebook_launcher
+
     def train(*args):
         # Your training function here
         ...
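A hypothetical completion of the docstring example above, launching the notebook-defined `train` function on both GPUs:

from accelerate import notebook_launcher

def train(*args):
    ...

notebook_launcher(train, args=(), num_processes=2)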
src/local_sgd.py
CHANGED

@@ -12,16 +12,19 @@ class LocalSGD:
     Stich, Sebastian Urban. ["Local SGD Converges Fast and Communicates Little." ICLR 2019-International Conference on
     Learning Representations. No. CONF. 2019.](https://arxiv.org/abs/1805.09767)
     """
+
     def __enter__(self):
         if self.enabled:
             self.model_sync_obj = self.model.no_sync()
             self.model_sync_obj.__enter__()
         return self
+
     def __exit__(self, type, value, tb):
         if self.enabled:
             # Average all models on exit
             self._sync_and_avg_model_params()
             self.model_sync_obj.__exit__(type, value, tb)
+
     def __init__(self, accelerator: Accelerator, model: torch.nn.Module, local_sgd_steps: int, enabled: bool = True):
         """
         Constructor.
@@ -47,6 +50,7 @@ class LocalSGD:
         self.accelerator = accelerator
         self.model = model
         self.local_sgd_steps = local_sgd_steps
+
     def step(self):
         """
         This function makes a "step" and synchronizes model parameters if necessary.
@@ -56,6 +60,7 @@ class LocalSGD:
             return
         if self.num_steps % self.local_sgd_steps == 0:
             self._sync_and_avg_model_params()
+
     def _sync_and_avg_model_params(self):
         """
         Synchronize + Average model parameters across all GPUs
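A hedged usage sketch for the class above, assuming this `src/` tree mirrors the `accelerate` package layout (so `LocalSGD` is importable from `accelerate.local_sgd`) and the script runs under a distributed launcher; gradients stay local for `local_sgd_steps` steps, then parameters are averaged:

import torch
from accelerate import Accelerator
from accelerate.local_sgd import LocalSGD

accelerator = Accelerator()
model = torch.nn.Linear(4, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
data = [(torch.randn(8, 4), torch.randn(8, 1)) for _ in range(16)]
model, optimizer = accelerator.prepare(model, optimizer)

with LocalSGD(accelerator=accelerator, model=model, local_sgd_steps=8, enabled=True) as local_sgd:
    for inputs, targets in data:
        optimizer.zero_grad()
        loss = torch.nn.functional.mse_loss(model(inputs), targets)
        accelerator.backward(loss)
        optimizer.step()
        local_sgd.step()  # counts steps and triggers the periodic parameter sync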
src/logging.py
CHANGED

@@ -6,10 +6,12 @@ class MultiProcessAdapter(logging.LoggerAdapter):
     Does not require an `Accelerator` object to be created first.
     """
     @staticmethod
+
     def _should_log(main_process_only):
         "Check if log should be performed"
         state = PartialState()
         return not main_process_only or (main_process_only and state.is_main_process)
+
     def log(self, level, msg, *args, **kwargs):
         """
         Delegates logger call after checking if we should log.
@@ -38,6 +40,7 @@ class MultiProcessAdapter(logging.LoggerAdapter):
                     self.logger.log(level, msg, *args, **kwargs)
                 state.wait_for_everyone()
     @functools.lru_cache(None)
+
     def warning_once(self, *args, **kwargs):
         """
         This method is identical to `logger.warning()`, but will emit the warning with the same message only once
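Typical use goes through accelerate's `get_logger` helper, which wraps a standard logger in the `MultiProcessAdapter` above; a minimal sketch, assuming the package-level import path:

from accelerate.logging import get_logger

logger = get_logger(__name__)
logger.info("Logged once, from the main process only")  # main_process_only defaults to True
logger.debug("Logged by every process", main_process_only=False)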
src/optimizer.py
CHANGED

@@ -20,6 +20,7 @@ class AcceleratedOptimizer(torch.optim.Optimizer):
         scaler (`torch.cuda.amp.grad_scaler.GradScaler`, *optional*):
             The scaler to use in the step function if training with mixed precision.
     """
+
     def __init__(self, optimizer, device_placement=True, scaler=None):
         self.optimizer = optimizer
         self.scaler = scaler
@@ -40,31 +41,41 @@ class AcceleratedOptimizer(torch.optim.Optimizer):
                 state_dict = move_to_device(state_dict, self.accelerator_state.device)
             self.optimizer.load_state_dict(state_dict)
     @property
+
     def state(self):
         return self.optimizer.state
     @state.setter
+
     def state(self, state):
         self.optimizer.state = state
     @property
+
     def param_groups(self):
         return self.optimizer.param_groups
     @param_groups.setter
+
     def param_groups(self, param_groups):
         self.optimizer.param_groups = param_groups
     @property
+
     def defaults(self):
         return self.optimizer.defaults
     @defaults.setter
+
     def defaults(self, defaults):
         self.optimizer.defaults = defaults
+
     def add_param_group(self, param_group):
         self.optimizer.add_param_group(param_group)
+
     def load_state_dict(self, state_dict):
         if self.accelerator_state.distributed_type == DistributedType.TPU and self.device_placement:
             xm.send_cpu_data_to_device(state_dict, self.accelerator_state.device)
         self.optimizer.load_state_dict(state_dict)
+
     def state_dict(self):
         return self.optimizer.state_dict()
+
     def zero_grad(self, set_to_none=None):
         if self.gradient_state.sync_gradients:
             accept_arg = "set_to_none" in inspect.signature(self.optimizer.zero_grad).parameters
@@ -76,6 +87,7 @@ class AcceleratedOptimizer(torch.optim.Optimizer):
                 if set_to_none is not None:
                     raise ValueError("`set_to_none` for Optimizer.zero_grad` is not supported by this optimizer.")
                 self.optimizer.zero_grad()
+
     def step(self, closure=None):
         if self.gradient_state.sync_gradients:
             if self.accelerator_state.distributed_type == DistributedType.TPU:
@@ -96,10 +108,12 @@ class AcceleratedOptimizer(torch.optim.Optimizer):
                 self._accelerate_step_called = False
             else:
                 self.optimizer.step(closure)
+
     def _switch_parameters(self, parameters_map):
         for param_group in self.optimizer.param_groups:
             param_group["params"] = [parameters_map.get(p, p) for p in param_group["params"]]
     @property
+
     def is_overflow(self):
         """Whether or not the optimizer step was done, or skipped because of gradient overflow."""
         warnings.warn(
@@ -109,9 +123,11 @@ class AcceleratedOptimizer(torch.optim.Optimizer):
         )
         return self._is_overflow
     @property
+
     def step_was_skipped(self):
         """Whether or not the optimizer step was skipped."""
         return self._is_overflow
+
     def __getstate__(self):
         _ignored_keys = [
             "_accelerate_step_called",
@@ -119,6 +135,7 @@ class AcceleratedOptimizer(torch.optim.Optimizer):
             "_optimizer_patched_step_method",
         ]
         return {k: v for k, v in self.__dict__.items() if k not in _ignored_keys}
+
     def __setstate__(self, state):
         self.__dict__.update(state)
         if self.scaler is not None:
@@ -126,6 +143,7 @@ class AcceleratedOptimizer(torch.optim.Optimizer):
             self._optimizer_original_step_method = self.optimizer.step
             self._optimizer_patched_step_method = patch_optimizer_step(self, self.optimizer.step)
 def patch_optimizer_step(accelerated_optimizer: AcceleratedOptimizer, method):
+
     def patched_step(*args, **kwargs):
         accelerated_optimizer._accelerate_step_called = True
         return method(*args, **kwargs)
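The `patch_optimizer_step` hunk at the end exists so the wrapper can tell whether the underlying optimizer's `step` actually ran (e.g., versus being skipped on gradient overflow). A minimal standalone sketch of the trick, with dummy stand-in classes:

class Optimizer:
    def step(self):
        print("real step")

class Accelerated:
    def __init__(self, optimizer):
        self.optimizer = optimizer
        self._accelerate_step_called = False
        # Replace the bound `step` with a wrapper that records the call
        self.optimizer.step = self._patch(self.optimizer.step)

    def _patch(self, method):
        def patched_step(*args, **kwargs):
            self._accelerate_step_called = True  # record that a step really ran
            return method(*args, **kwargs)
        return patched_step

opt = Accelerated(Optimizer())
opt.optimizer.step()                 # prints "real step"
print(opt._accelerate_step_called)   # True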
src/scheduler.py
CHANGED

@@ -18,12 +18,14 @@ class AcceleratedScheduler:
         regardless of the number of processes) or create batches on each process (so batch size is the original
         batch size multiplied by the number of processes).
     """
+
     def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
         self.scheduler = scheduler
         self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
         self.split_batches = split_batches
         self.step_with_optimizer = step_with_optimizer
         self.gradient_state = GradientState()
+
     def step(self, *args, **kwargs):
         if not self.step_with_optimizer:
             # No link between scheduler and optimizer -> just step
@@ -52,13 +54,18 @@ class AcceleratedScheduler:
             else:
                 self.scheduler.step(*args, **kwargs)
     # Passthroughs
+
     def get_last_lr(self):
         return self.scheduler.get_last_lr()
+
     def state_dict(self):
         return self.scheduler.state_dict()
+
     def load_state_dict(self, state_dict):
         self.scheduler.load_state_dict(state_dict)
+
     def get_lr(self):
         return self.scheduler.get_lr()
+
     def print_lr(self, *args, **kwargs):
         return self.scheduler.print_lr(*args, **kwargs)
src/state.py
CHANGED
|
@@ -20,10 +20,13 @@ class ThreadLocalSharedDict(threading.local):
|
|
| 20 |
This is required for using PyTorch/XLA with PJRT in multithreaded mode (required for TPU v2 and v3).
|
| 21 |
See https://github.com/pytorch/xla/blob/r2.0/docs/pjrt.md#multithreading-on-tpu-v2v3
|
| 22 |
"""
|
|
|
|
| 23 |
def __init__(self, thread_local: bool = False):
|
| 24 |
self._storage = {}
|
|
|
|
| 25 |
def __get__(self, obj, objtype=None):
|
| 26 |
return self._storage
|
|
|
|
| 27 |
def __set__(self, obj, value):
|
| 28 |
self._storage = value
|
| 29 |
# Prefer global shared dictionary, except when using TPU.
|
|
@@ -49,6 +52,7 @@ class PartialState:
|
|
| 49 |
- **debug** (`bool`) -- Whether or not the current script is being run in debug mode.
|
| 50 |
"""
|
| 51 |
_shared_state = SharedDict()
|
|
|
|
| 52 |
def __init__(self, cpu: bool = False, **kwargs):
|
| 53 |
self.__dict__ = self._shared_state
|
| 54 |
if not self.initialized:
|
|
@@ -240,6 +244,7 @@ class PartialState:
|
|
| 240 |
if self.device is None:
|
| 241 |
self.device = torch.device("cpu") if cpu else self.default_device
|
| 242 |
self.fork_launched = parse_flag_from_env("FORK_LAUNCHED", 0)
|
|
|
|
| 243 |
def __repr__(self) -> str:
|
| 244 |
return (
|
| 245 |
f"Distributed environment: {self.distributed_type}{(' Backend: ' + self.backend) if self.backend else ''}\n"
|
|
@@ -249,30 +254,36 @@ class PartialState:
|
|
| 249 |
f"Device: {self.device}\n"
|
| 250 |
)
|
| 251 |
@staticmethod
|
|
|
|
| 252 |
def _reset_state():
|
| 253 |
"Resets `_shared_state`, is used internally and should not be called"
|
| 254 |
PartialState._shared_state.clear()
|
| 255 |
@property
|
|
|
|
| 256 |
def initialized(self) -> bool:
|
| 257 |
"Returns whether the `PartialState` has been initialized"
|
| 258 |
return self._shared_state != {}
|
| 259 |
@property
|
|
|
|
| 260 |
def use_distributed(self):
|
| 261 |
"""
|
| 262 |
Whether the Accelerator is configured for distributed training
|
| 263 |
"""
|
| 264 |
return self.distributed_type != DistributedType.NO and self.num_processes > 1
|
| 265 |
@property
|
|
|
|
| 266 |
def is_last_process(self) -> bool:
|
| 267 |
"Returns whether the current process is the last one"
|
| 268 |
return self.process_index == self.num_processes - 1
|
| 269 |
@property
|
|
|
|
| 270 |
def is_main_process(self) -> bool:
|
| 271 |
"Returns whether the current process is the main process"
|
| 272 |
return (
|
| 273 |
self.process_index == 0 if self.distributed_type != DistributedType.MEGATRON_LM else self.is_last_process
|
| 274 |
)
|
| 275 |
@property
|
|
|
|
| 276 |
def is_local_main_process(self) -> bool:
|
| 277 |
"Returns whether the current process is the main process on the local node"
|
| 278 |
return (
|
|
@@ -280,6 +291,7 @@ class PartialState:
|
|
| 280 |
if self.distributed_type != DistributedType.MEGATRON_LM
|
| 281 |
else self.is_last_process
|
| 282 |
)
|
|
|
|
| 283 |
def wait_for_everyone(self):
|
| 284 |
"""
|
| 285 |
Will stop the execution of the current process until every other process has reached that point (so this does
|
|
@@ -310,6 +322,7 @@ class PartialState:
|
|
| 310 |
torch.distributed.barrier()
|
| 311 |
elif self.distributed_type == DistributedType.TPU:
|
| 312 |
xm.rendezvous("accelerate.utils.wait_for_everyone")
|
|
|
|
| 313 |
def _goes_first(self, is_main: bool):
|
| 314 |
if not is_main:
|
| 315 |
self.wait_for_everyone()
|
|
@@ -317,6 +330,7 @@ class PartialState:
|
|
| 317 |
if is_main:
|
| 318 |
self.wait_for_everyone()
|
| 319 |
@contextmanager
|
|
|
|
| 320 |
def split_between_processes(self, inputs: list | tuple | dict | torch.Tensor, apply_padding: bool = False):
|
| 321 |
"""
|
| 322 |
Splits `input` between `self.num_processes` quickly and can be then used on that process. Useful when doing
|
|
@@ -362,7 +376,8 @@ class PartialState:
|
|
| 362 |
end_index = start_index + num_samples_per_process
|
| 363 |
if (len(inputs) % self.num_processes != 0) and (self.process_index == self.num_processes - 1):
|
| 364 |
end_index = length
|
| 365 |
-
|
|
|
|
| 366 |
if isinstance(inputs, (list, tuple, torch.Tensor)):
|
| 367 |
if start_index >= len(inputs):
|
| 368 |
result = inputs[-1:]
|
|
@@ -385,6 +400,7 @@ class PartialState:
|
|
| 385 |
return inputs
|
| 386 |
yield _split_values(inputs, start_index, end_index)
|
| 387 |
@contextmanager
|
|
|
|
| 388 |
def main_process_first(self):
|
| 389 |
"""
|
| 390 |
Lets the main process go first inside a with block.
|
|
@@ -401,6 +417,7 @@ class PartialState:
|
|
| 401 |
"""
|
| 402 |
yield from self._goes_first(self.is_main_process)
|
| 403 |
@contextmanager
|
|
|
|
| 404 |
def local_main_process_first(self):
|
| 405 |
"""
|
| 406 |
Lets the local main process go inside a with block.
|
|
@@ -416,6 +433,7 @@ class PartialState:
|
|
| 416 |
```
|
| 417 |
"""
|
| 418 |
yield from self._goes_first(self.is_local_main_process)
    def on_main_process(self, function: Callable[..., Any] = None):
        """
        Decorator that only runs the decorated function on the main process.
@@ -437,6 +455,7 @@ class PartialState:
        if self.is_main_process or not self.use_distributed:
            return function
        return do_nothing

    def on_local_main_process(self, function: Callable[..., Any] = None):
        """
        Decorator that only runs the decorated function on the local main process.
@@ -448,7 +467,8 @@ class PartialState:
        from accelerate.state import PartialState
        state = PartialState()
        @state.on_local_main_process
        def print_something():
            print("This will be printed by process 0 only on each server.")
        print_something()
        # On server 1:
@@ -460,6 +480,7 @@ class PartialState:
        if self.is_local_main_process or not self.use_distributed:
            return function
        return do_nothing

    def on_last_process(self, function: Callable[..., Any]):
        """
        Decorator that only runs the decorated function on the last process.
@@ -471,7 +492,8 @@ class PartialState:
        from accelerate.state import PartialState
        state = PartialState()
        @state.on_last_process
        def print_something():
            print(f"Printed on process {state.process_index}")
        print_something()
        "Printed on process 3"
@@ -480,6 +502,7 @@ class PartialState:
        if self.is_last_process or not self.use_distributed:
            return function
        return do_nothing

    def on_process(self, function: Callable[..., Any] = None, process_index: int = None):
        """
        Decorator that only runs the decorated function on the process with the given index.
@@ -494,7 +517,8 @@ class PartialState:
        from accelerate.state import PartialState
        state = PartialState()
        @state.on_process(process_index=2)
        def print_something():
            print(f"Printed on process {state.process_index}")
        print_something()
        "Printed on process 2"
@@ -505,6 +529,7 @@ class PartialState:
        if (self.process_index == process_index) or (not self.use_distributed):
            return function
        return do_nothing

    def on_local_process(self, function: Callable[..., Any] = None, local_process_index: int = None):
        """
        Decorator that only runs the decorated function on the process with the given index on the current node.
@@ -519,7 +544,8 @@ class PartialState:
        from accelerate import Accelerator
        accelerator = Accelerator()
        @accelerator.on_local_process(local_process_index=2)
        def print_something():
            print(f"Printed on process {accelerator.local_process_index}")
        print_something()
        # On server 1:
@@ -533,10 +559,12 @@ class PartialState:
        if (self.local_process_index == local_process_index) or (not self.use_distributed):
            return function
        return do_nothing
    def print(self, *args, **kwargs):
        if self.is_local_main_process:
            print(*args, **kwargs)

    @property
    def default_device(self) -> torch.device:
        """
        Returns the default device which is:
@@ -575,6 +603,7 @@ class AcceleratorState:
    - **debug** (`bool`) -- Whether or not the current script is being run in debug mode.
    """
    _shared_state = SharedDict()

    def __init__(
        self,
        mixed_precision: str = None,
@@ -669,13 +698,16 @@ class AcceleratorState:
                torch.backends.cuda.matmul.allow_tf32 = True
        PartialState._shared_state["distributed_type"] = self.distributed_type

    @property
    def initialized(self) -> bool:
        return self._shared_state != PartialState._shared_state

    def __repr__(self):
        repr = PartialState().__repr__() + f"\nMixed precision type: {self.mixed_precision}\n"
        if self.distributed_type == DistributedType.DEEPSPEED:
            repr += f"ds_config: {self.deepspeed_plugin.deepspeed_config}\n"
        return repr

    def _check_initialized(self, mixed_precision=None, cpu=None):
        "Checks if a modification is trying to be made and the `AcceleratorState` has already been initialized"
        if self.initialized:
@@ -690,6 +722,7 @@ class AcceleratorState:
            raise ValueError(err.format(flag=f"mixed_precision='{mixed_precision}'"))

    # For backward compatibility
    @property
    def use_fp16(self):
        warnings.warn(
            "The `use_fp16` property is deprecated and will be removed in version 1.0 of Accelerate use "
@@ -698,6 +731,7 @@ class AcceleratorState:
        )
        return self._mixed_precision != "no"

    @property
    def mixed_precision(self):
        if self.distributed_type == DistributedType.DEEPSPEED:
            config = self.deepspeed_plugin.deepspeed_config
@@ -711,32 +745,39 @@ class AcceleratorState:
            mixed_precision = self._mixed_precision
        return mixed_precision

    @staticmethod
    def _reset_state(reset_partial_state: bool = False):
        "Resets `_shared_state`, is used internally and should not be called"
        AcceleratorState._shared_state.clear()
        if reset_partial_state:
            PartialState._reset_state()

    @property
    def use_distributed(self):
        """
        Whether the Accelerator is configured for distributed training
        """
        return PartialState().use_distributed

    @property
    def is_last_process(self) -> bool:
        "Returns whether the current process is the last one"
        return PartialState().is_last_process

    @property
    def is_main_process(self) -> bool:
        "Returns whether the current process is the main process"
        return PartialState().is_main_process

    @property
    def is_local_main_process(self) -> bool:
        "Returns whether the current process is the main process on the local node"
        return PartialState().is_local_main_process

    def wait_for_everyone(self):
        PartialState().wait_for_everyone()
    @contextmanager
    def split_between_processes(self, inputs: list | tuple | dict | torch.Tensor, apply_padding: bool = False):
        """
        Splits `input` between `self.num_processes` quickly and can be then used on that process. Useful when doing
@@ -771,6 +812,7 @@ class AcceleratorState:
        with PartialState().split_between_processes(inputs, apply_padding=apply_padding) as inputs:
            yield inputs

    @contextmanager
    def main_process_first(self):
        """
        Lets the main process go first inside a with block.
@@ -779,6 +821,7 @@ class AcceleratorState:
        with PartialState().main_process_first():
            yield

    @contextmanager
    def local_main_process_first(self):
        """
        Lets the local main process go first inside a with block.
@@ -786,6 +829,7 @@ class AcceleratorState:
        """
        with PartialState().local_main_process_first():
            yield

    def print(self, *args, **kwargs):
        PartialState().print(*args, **kwargs)
class GradientState:
@@ -805,6 +849,7 @@ class GradientState:
    iteration and the number of total steps reset
    """
    _shared_state = SharedDict()

    def __init__(self, gradient_accumulation_plugin: Optional[GradientAccumulationPlugin] = None):
        self.__dict__ = self._shared_state
        if not self.initialized:
@@ -818,33 +863,40 @@ class GradientState:
        if gradient_accumulation_plugin is not None and self.plugin_kwargs != gradient_accumulation_plugin.to_kwargs():
            self.plugin_kwargs = gradient_accumulation_plugin.to_kwargs()

    @property
    def num_steps(self) -> int:
        "Returns the number of steps to accumulate over"
        return self.plugin_kwargs.get("num_steps", 1)

    @property
    def adjust_scheduler(self) -> bool:
        "Returns whether the scheduler should be adjusted"
        return self.plugin_kwargs.get("adjust_scheduler", False)

    @property
    def sync_with_dataloader(self) -> bool:
        "Returns whether the gradients should be synced at the end of the dataloader iteration and the number of total steps reset"
        return self.plugin_kwargs.get("sync_with_dataloader", True)

    @property
    def initialized(self) -> bool:
        "Returns whether the `GradientState` has been initialized"
        return GradientState._shared_state != {}

    @property
    def end_of_dataloader(self) -> bool:
        "Returns whether we have reached the end of the current dataloader"
        if not self.in_dataloader:
            return False
        return self.active_dataloader.end_of_dataloader

    @property
    def remainder(self) -> int:
        "Returns the number of extra samples that were added from padding the dataloader"
        if not self.in_dataloader:
            return -1
        return self.active_dataloader.remainder
    def __repr__(self):
        return (
            f"Sync Gradients: {self.sync_gradients}\n"
@@ -852,22 +904,27 @@ class GradientState:
            f"Extra samples added: {self.remainder}\n"
            f"Gradient accumulation plugin: {self.plugin_kwargs}\n"
        )

    def _set_sync_gradients(self, sync_gradients):
        "Private function that sets whether gradients should be synchronized. Users should not have to call this."
        self.sync_gradients = sync_gradients

    def _add_dataloader(self, dataloader):
        "Private function that adds a dataloader to `self.dataloader_references` and sets `in_dataloader` to `True`. Users should not have to call this."
        self.active_dataloader = dataloader
        self.dataloader_references.append(self.active_dataloader)

    def _remove_dataloader(self, dataloader):
        "Private function that removes a dataloader from `self.dataloader_references` and sets `in_dataloader` to `False` if there are no more dataloaders. Users should not have to call this."
        self.dataloader_references.remove(dataloader)
        self.active_dataloader = self.dataloader_references[-1]

    @property
    def in_dataloader(self) -> bool:
        "Returns whether the current process is in a dataloader"
        return self.active_dataloader is not None

    @staticmethod
    def _reset_state():
        "Resets `_shared_state`, is used internally and should not be called"
        GradientState._shared_state.clear()
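`GradientState` is what `Accelerator.accumulate` and the prepared dataloaders consult under the hood; user code normally only sees it through `accelerator.sync_gradients`. A minimal sketch of that interaction, not part of this patch (`model`, `optimizer`, and `dataloader` are placeholders):

from accelerate import Accelerator

accelerator = Accelerator(gradient_accumulation_steps=4)
model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

for batch in dataloader:
    with accelerator.accumulate(model):
        loss = model(**batch).loss
        accelerator.backward(loss)
        # True only on the step where gradients are actually synced and applied
        if accelerator.sync_gradients:
            accelerator.clip_grad_norm_(model.parameters(), 1.0)
        optimizer.step()
        optimizer.zero_grad()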
    This is required for using PyTorch/XLA with PJRT in multithreaded mode (required for TPU v2 and v3).
    See https://github.com/pytorch/xla/blob/r2.0/docs/pjrt.md#multithreading-on-tpu-v2v3
    """

    def __init__(self, thread_local: bool = False):
        self._storage = {}

    def __get__(self, obj, objtype=None):
        return self._storage

    def __set__(self, obj, value):
        self._storage = value

# Prefer global shared dictionary, except when using TPU.

    - **debug** (`bool`) -- Whether or not the current script is being run in debug mode.
    """
    _shared_state = SharedDict()

    def __init__(self, cpu: bool = False, **kwargs):
        self.__dict__ = self._shared_state
        if not self.initialized:

            if self.device is None:
                self.device = torch.device("cpu") if cpu else self.default_device
        self.fork_launched = parse_flag_from_env("FORK_LAUNCHED", 0)

    def __repr__(self) -> str:
        return (
            f"Distributed environment: {self.distributed_type}{('  Backend: ' + self.backend) if self.backend else ''}\n"
            f"Device: {self.device}\n"
        )

    @staticmethod
    def _reset_state():
        "Resets `_shared_state`, is used internally and should not be called"
        PartialState._shared_state.clear()

    @property
    def initialized(self) -> bool:
        "Returns whether the `PartialState` has been initialized"
        return self._shared_state != {}
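Because every instance binds its `__dict__` to the class-level `SharedDict`, constructing `PartialState` twice yields two objects sharing one storage (the borg pattern). A quick illustration, not part of this patch:

from accelerate import PartialState

a = PartialState()
b = PartialState()
assert a.__dict__ is b.__dict__            # one shared storage
assert a.process_index == b.process_index  # so every attribute agrees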
src/tracking.py
CHANGED
@@ -24,6 +24,7 @@ def on_main_process(function):
        `PartialState`.
    """

    @wraps(function)
    def execute_on_main_process(self, *args, **kwargs):
        if getattr(self, "main_process_only", False):
            return PartialState().on_main_process(function)(self, *args, **kwargs)
@@ -46,6 +47,7 @@ class GeneralTracker:
        other functions should occur on the main process or across all processes (by default will use `True`)
    """
    main_process_only = True

    def __init__(self, _blank=False):
        if not _blank:
            err = ""
@@ -66,6 +68,7 @@ class GeneralTracker:
                f"required attributes. Please define them in the class definition: "
                f"{err}"
            )

    def store_init_configuration(self, values: dict):
        """
        Logs `values` as hyperparameters for the run. Implementations should use the experiment configuration
@@ -76,6 +79,7 @@ class GeneralTracker:
            `str`, `float`, `int`, or `None`.
        """
        pass

    def log(self, values: dict, step: Optional[int], **kwargs):
        """
        Logs `values` to the current run. Base `log` implementations of a tracking API should go in here, along with
@@ -87,6 +91,7 @@ class GeneralTracker:
            The run step. If included, the log will be affiliated with this step.
        """
        pass

    def finish(self):
        """
        Should run any finalizing functions within the tracking API. If the API should not have one, just don't
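`GeneralTracker` is the extension point for custom logging backends: a subclass needs the `name` and `requires_logging_directory` attributes, a `tracker` property, and the methods above. A minimal sketch, not part of this patch (the CSV format and file name are illustrative):

import csv
from typing import Optional

from accelerate.tracking import GeneralTracker, on_main_process


class CSVTracker(GeneralTracker):
    name = "csv"
    requires_logging_directory = False

    @on_main_process
    def __init__(self, run_name: str):
        super().__init__()
        self._file = open(f"{run_name}.csv", "w", newline="")
        self._writer = csv.writer(self._file)

    @property
    def tracker(self):
        return self._writer

    @on_main_process
    def store_init_configuration(self, values: dict):
        # One row per hyperparameter.
        self._writer.writerows(values.items())

    @on_main_process
    def log(self, values: dict, step: Optional[int] = None, **kwargs):
        for key, value in values.items():
            self._writer.writerow([step, key, value])

    @on_main_process
    def finish(self):
        self._file.close()

An instance can then be passed directly as `Accelerator(log_with=CSVTracker("run1"))`.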
@@ -107,6 +112,7 @@ class TensorBoardTracker(GeneralTracker):
    name = "tensorboard"
    requires_logging_directory = True

    @on_main_process
    def __init__(self, run_name: str, logging_dir: Union[str, os.PathLike], **kwargs):
        try:
            from torch.utils import tensorboard
@@ -121,9 +127,11 @@ class TensorBoardTracker(GeneralTracker):
            "Make sure to log any initial configurations with `self.store_init_configuration` before training!"
        )

    @property
    def tracker(self):
        return self.writer

    @on_main_process
    def store_init_configuration(self, values: dict):
        """
        Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment. Stores the
@@ -146,6 +154,7 @@ class TensorBoardTracker(GeneralTracker):
            raise
        logger.debug("Stored initial configuration hyperparameters to TensorBoard and hparams yaml file")

    @on_main_process
    def log(self, values: dict, step: Optional[int] = None, **kwargs):
        """
        Logs `values` to the current run.
@@ -170,6 +179,7 @@ class TensorBoardTracker(GeneralTracker):
        self.writer.flush()
        logger.debug("Successfully logged to TensorBoard")

    @on_main_process
    def log_images(self, values: dict, step: Optional[int], **kwargs):
        """
        Logs `images` to the current run.
@@ -185,6 +195,7 @@ class TensorBoardTracker(GeneralTracker):
            self.writer.add_images(k, v, global_step=step, **kwargs)
        logger.debug("Successfully logged images to TensorBoard")

    @on_main_process
    def finish(self):
        """
        Closes `TensorBoard` writer
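For reference, these built-in trackers are normally not instantiated by hand; they are selected by name through the `Accelerator`. A usage sketch, not part of this patch (project name and metric values are illustrative):

from accelerate import Accelerator

accelerator = Accelerator(log_with="tensorboard", project_dir="runs")
accelerator.init_trackers("my_experiment", config={"learning_rate": 3e-4})
accelerator.log({"train_loss": 0.42}, step=1)
accelerator.end_training()  # calls finish() on every active tracker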
@@ -204,6 +215,7 @@ class WandBTracker(GeneralTracker):
    requires_logging_directory = False
    main_process_only = False

    @on_main_process
    def __init__(self, run_name: str, **kwargs):
        super().__init__()
        self.run_name = run_name
@@ -214,9 +226,11 @@ class WandBTracker(GeneralTracker):
            "Make sure to log any initial configurations with `self.store_init_configuration` before training!"
        )

    @property
    def tracker(self):
        return self.run

    @on_main_process
    def store_init_configuration(self, values: dict):
        """
        Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment.
@@ -229,6 +243,7 @@ class WandBTracker(GeneralTracker):
        wandb.config.update(values, allow_val_change=True)
        logger.debug("Stored initial configuration hyperparameters to WandB")

    @on_main_process
    def log(self, values: dict, step: Optional[int] = None, **kwargs):
        """
        Logs `values` to the current run.
@@ -244,6 +259,7 @@ class WandBTracker(GeneralTracker):
        self.run.log(values, step=step, **kwargs)
        logger.debug("Successfully logged to WandB")

    @on_main_process
    def log_images(self, values: dict, step: Optional[int] = None, **kwargs):
        """
        Logs `images` to the current run.
@@ -260,6 +276,7 @@ class WandBTracker(GeneralTracker):
        self.log({k: [wandb.Image(image) for image in v]}, step=step, **kwargs)
        logger.debug("Successfully logged images to WandB")

    @on_main_process
    def log_table(
        self,
        table_name: str,
@@ -288,6 +305,7 @@ class WandBTracker(GeneralTracker):
        values = {table_name: wandb.Table(columns=columns, data=data, dataframe=dataframe)}
        self.log(values, step=step, **kwargs)

    @on_main_process
    def finish(self):
        """
        Closes `wandb` writer
@@ -307,6 +325,7 @@ class CometMLTracker(GeneralTracker):
    name = "comet_ml"
    requires_logging_directory = False

    @on_main_process
    def __init__(self, run_name: str, **kwargs):
        super().__init__()
        self.run_name = run_name
@@ -317,9 +336,11 @@ class CometMLTracker(GeneralTracker):
            "Make sure to log any initial configurations with `self.store_init_configuration` before training!"
        )

    @property
    def tracker(self):
        return self.writer

    @on_main_process
    def store_init_configuration(self, values: dict):
        """
        Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment.
@@ -331,6 +352,7 @@ class CometMLTracker(GeneralTracker):
        self.writer.log_parameters(values)
        logger.debug("Stored initial configuration hyperparameters to CometML")

    @on_main_process
    def log(self, values: dict, step: Optional[int] = None, **kwargs):
        """
        Logs `values` to the current run.
@@ -355,6 +377,7 @@ class CometMLTracker(GeneralTracker):
                self.writer.log_metrics(v, step=step, **kwargs)
        logger.debug("Successfully logged to CometML")

    @on_main_process
    def finish(self):
        """
        Closes `comet-ml` writer
@@ -373,6 +396,7 @@ class AimTracker(GeneralTracker):
    name = "aim"
    requires_logging_directory = True

    @on_main_process
    def __init__(self, run_name: str, logging_dir: Optional[Union[str, os.PathLike]] = ".", **kwargs):
        self.run_name = run_name

        from aim import Run
@@ -383,9 +407,11 @@ class AimTracker(GeneralTracker):
            "Make sure to log any initial configurations with `self.store_init_configuration` before training!"
        )

    @property
    def tracker(self):
        return self.writer

    @on_main_process
    def store_init_configuration(self, values: dict):
        """
        Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment.
@@ -395,6 +421,7 @@ class AimTracker(GeneralTracker):
        """
        self.writer["hparams"] = values

    @on_main_process
    def log(self, values: dict, step: Optional[int], **kwargs):
        """
        Logs `values` to the current run.
@@ -410,6 +437,7 @@ class AimTracker(GeneralTracker):
        for key, value in values.items():
            self.writer.track(value, name=key, step=step, **kwargs)

    @on_main_process
    def log_images(self, values: dict, step: Optional[int] = None, kwargs: Optional[Dict[str, dict]] = None):
        """
        Logs `images` to the current run.
@@ -437,6 +465,7 @@ class AimTracker(GeneralTracker):
            aim_image = aim.Image(img, caption=caption, **aim_image_kw)
            self.writer.track(aim_image, name=key, step=step, **track_kw)

    @on_main_process
    def finish(self):
        """
        Closes `aim` writer
@@ -471,6 +500,7 @@ class MLflowTracker(GeneralTracker):
    name = "mlflow"
    requires_logging_directory = False

    @on_main_process
    def __init__(
        self,
        experiment_name: str = None,
@@ -512,9 +542,11 @@ class MLflowTracker(GeneralTracker):
            "Make sure to log any initial configurations with `self.store_init_configuration` before training!"
        )

    @property
    def tracker(self):
        return self.active_run

    @on_main_process
    def store_init_configuration(self, values: dict):
        """
        Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment.
@@ -537,6 +569,7 @@ class MLflowTracker(GeneralTracker):
            mlflow.log_params(dict(values_list[i : i + mlflow.utils.validation.MAX_PARAMS_TAGS_PER_BATCH]))
        logger.debug("Stored initial configuration hyperparameters to MLflow")

    @on_main_process
    def log(self, values: dict, step: Optional[int]):
        """
        Logs `values` to the current run.
@@ -559,6 +592,7 @@ class MLflowTracker(GeneralTracker):
        mlflow.log_metrics(metrics, step=step)
        logger.debug("Successfully logged to mlflow")

    @on_main_process
    def finish(self):
        """
        End the active MLflow run.
@@ -578,6 +612,7 @@ class ClearMLTracker(GeneralTracker):
    name = "clearml"
    requires_logging_directory = False

    @on_main_process
    def __init__(self, run_name: str = None, **kwargs):
        from clearml import Task

        current_task = Task.current_task()
@@ -590,9 +625,11 @@ class ClearMLTracker(GeneralTracker):
            kwargs.setdefault("task_name", os.environ.get("CLEARML_TASK", run_name))
            self.task = Task.init(**kwargs)

    @property
    def tracker(self):
        return self.task

    @on_main_process
    def store_init_configuration(self, values: dict):
        """
        Connect configuration dictionary to the Task object. Should be run at the beginning of your experiment.
@@ -602,6 +639,7 @@ class ClearMLTracker(GeneralTracker):
        """
        return self.task.connect_configuration(values)

    @on_main_process
    def log(self, values: Dict[str, Union[int, float]], step: Optional[int] = None, **kwargs):
        """
        Logs `values` dictionary to the current run. The dictionary keys must be strings. The dictionary values must be
@@ -634,6 +672,7 @@ class ClearMLTracker(GeneralTracker):
            title, series = ClearMLTracker._get_title_series(k)
            clearml_logger.report_scalar(title=title, series=series, value=v, iteration=step, **kwargs)

    @on_main_process
    def log_images(self, values: dict, step: Optional[int] = None, **kwargs):
        """
        Logs `images` to the current run.
@@ -650,6 +689,7 @@ class ClearMLTracker(GeneralTracker):
            title, series = ClearMLTracker._get_title_series(k)
            clearml_logger.report_image(title=title, series=series, iteration=step, image=v, **kwargs)

    @on_main_process
    def log_table(
        self,
        table_name: str,
@@ -686,6 +726,7 @@ class ClearMLTracker(GeneralTracker):
        title, series = ClearMLTracker._get_title_series(table_name)
        self.task.get_logger().report_table(title=title, series=series, table_plot=to_report, iteration=step, **kwargs)

    @on_main_process
    def finish(self):
        """
        Close the ClearML task. If the task was initialized externally (e.g. by manually calling `Task.init`), this
@@ -694,6 +735,7 @@ class ClearMLTracker(GeneralTracker):
        if self.task and not self._initialized_externally:
            self.task.close()

    @staticmethod
    def _get_title_series(name):
        for prefix in ["eval", "test", "train"]:
            if name.startswith(prefix + "_"):
@@ -717,14 +759,17 @@ class DVCLiveTracker(GeneralTracker):
    name = "dvclive"
    requires_logging_directory = False

    @on_main_process
    def __init__(self, run_name: Optional[str] = None, live: Optional[Any] = None, **kwargs):
        from dvclive import Live

        super().__init__()
        self.live = live if live is not None else Live(**kwargs)

    @property
    def tracker(self):
        return self.live

    @on_main_process
    def store_init_configuration(self, values: dict):
        """
        Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment. Stores the
@@ -736,6 +781,7 @@ class DVCLiveTracker(GeneralTracker):
        """
        self.live.log_params(values)

    @on_main_process
    def log(self, values: dict, step: Optional[int] = None, **kwargs):
        """
        Logs `values` to the current run.
@@ -761,6 +807,7 @@ class DVCLiveTracker(GeneralTracker):
            "is incorrect so we dropped this attribute."
        )

    @on_main_process
    def finish(self):
        """
        Closes `dvclive.Live()`.
src/utils/dataclasses.py
CHANGED
@@ -5,8 +5,10 @@ class KwargsHandler:
    """
    Internal mixin that implements a `to_kwargs()` method for a dataclass.
    """

    def to_dict(self):
        return copy.deepcopy(self.__dict__)

    def to_kwargs(self):
        """
        Returns a dictionary containing the attributes with values different from the default of this class.
@@ -159,6 +161,7 @@ class FP8RecipeKwargs(KwargsHandler):
    amax_history_len: int = 1
    amax_compute_algo: AmaxComputeAlgorithm = "most_recent"
    override_linear_precision: Tuple[bool, bool, bool] = (False, False, False)

    def __post_init__(self):
        self.backend = self.backend.upper()
        if self.backend not in get_args(Backend):
@@ -175,6 +178,7 @@ class FP8RecipeKwargs(KwargsHandler):
            raise ValueError(f"`optimization_level` must be one of {' or '.join(get_args(OptLevel))}")


class EnumWithContains(enum.EnumMeta):
    "A metaclass that adds the ability to check if `self` contains an item with the `in` operator"

    def __contains__(cls, item):
        try:
            cls(item)
@@ -183,9 +187,11 @@ class EnumWithContains(enum.EnumMeta):
        return True


class BaseEnum(enum.Enum, metaclass=EnumWithContains):
    "An enum class that can get the value of an item with `str(Enum.key)`"

    def __str__(self):
        return self.value

    @classmethod
    def list(cls):
        "Method to list all the possible items in `cls`"
        return list(map(str, cls))
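The mixin is what makes handlers such as `DistributedDataParallelKwargs` cheap to pass around: only fields that differ from their defaults survive `to_kwargs()`, so the wrapped API's own defaults are never overridden by accident. A quick sketch, not part of this patch:

from accelerate.utils import DistributedDataParallelKwargs

handler = DistributedDataParallelKwargs(find_unused_parameters=True)
# Only the field that differs from its default is kept.
assert handler.to_kwargs() == {"find_unused_parameters": True}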
@@ -354,11 +360,13 @@ class ProjectConfiguration:
            )
        },
    )

    def set_directories(self, project_dir: str = None):
        "Sets `self.project_dir` and `self.logging_dir` to the appropriate values."
        self.project_dir = project_dir
        if self.logging_dir is None:
            self.logging_dir = project_dir

    def __post_init__(self):
        self.set_directories(self.project_dir)


@dataclass
@@ -395,6 +403,7 @@ class TorchDynamoPlugin(KwargsHandler):
    dynamic: bool = field(default=None, metadata={"help": "Whether to use dynamic shape for tracing"})
    options: Any = field(default=None, metadata={"help": "A dictionary of options to pass to the backend."})
    disable: bool = field(default=False, metadata={"help": "Turn torch.compile() into a no-op for testing"})

    def __post_init__(self):
        prefix = "ACCELERATE_DYNAMO_"
        if self.backend is None:
@@ -406,6 +415,7 @@ class TorchDynamoPlugin(KwargsHandler):
            self.fullgraph = str_to_bool(os.environ.get(prefix + "USE_FULLGRAPH", "False")) == 1
        if self.dynamic is None:
            self.dynamic = str_to_bool(os.environ.get(prefix + "USE_DYNAMIC", "False")) == 1

    def to_dict(self):
        dynamo_config = copy.deepcopy(self.__dict__)
        dynamo_config["backend"] = dynamo_config["backend"].value.lower()
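Because of this `__post_init__`, the plugin can be driven entirely from the environment, which is how `accelerate launch` forwards config-file choices. A sketch, not part of this patch (values illustrative; the backend variable name is an assumption about the branch elided above):

import os

# Mirrors the env vars consumed in __post_init__ above; ACCELERATE_DYNAMO_BACKEND
# is assumed to be read in the elided `self.backend is None` branch.
os.environ["ACCELERATE_DYNAMO_BACKEND"] = "inductor"
os.environ["ACCELERATE_DYNAMO_USE_FULLGRAPH"] = "False"
os.environ["ACCELERATE_DYNAMO_USE_DYNAMIC"] = "True"

from accelerate.utils import TorchDynamoPlugin

plugin = TorchDynamoPlugin()  # fields left unset are filled from the environment
print(plugin.to_dict())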
|
@@ -463,6 +473,7 @@ class DeepSpeedPlugin:
         default=None,
         metadata={"help": "Flag to indicate whether to save 16-bit model. Only applicable with ZeRO Stage-3."},
     )
+
     def __post_init__(self):
         from .deepspeed import HfDeepSpeedConfig
         if self.gradient_accumulation_steps is None:
@@ -554,6 +565,7 @@ class DeepSpeedPlugin:
         if self.zero3_init_flag and not self.hf_ds_config.is_zero3():
             warnings.warn("DeepSpeed Zero3 Init flag is only applicable for ZeRO Stage 3. Setting it to False.")
             self.zero3_init_flag = False
+
     def fill_match(self, ds_key_long, mismatches=None, must_match=True, **kwargs):
         mismatches = [] if mismatches is None else mismatches
         config, ds_key = self.hf_ds_config.find_config_node(ds_key_long)
@@ -575,6 +587,7 @@ class DeepSpeedPlugin:
         if ds_val is not None and ds_key_long in kwargs:
             if ds_val != kwargs[ds_key_long]:
                 mismatches.append(f"- ds {ds_key_long}={ds_val} vs arg {ds_key_long}={kwargs[ds_key_long]}")
+
     def deepspeed_config_process(self, prefix="", mismatches=None, config=None, must_match=True, **kwargs):
         """Process the DeepSpeed config with the values from the kwargs."""
         mismatches = [] if mismatches is None else mismatches
@@ -593,6 +606,7 @@ class DeepSpeedPlugin:
                 "Please correct the following DeepSpeed config values that mismatch kwargs "
                 f" values:\n{mismatches_msg}\nThe easiest method is to set these DeepSpeed config values to 'auto'."
             )
+
     def set_mixed_precision(self, mixed_precision):
         ds_config = self.deepspeed_config
         kwargs = {
@@ -616,6 +630,7 @@ class DeepSpeedPlugin:
             ds_config[dtype] = {"enabled": False}
         self.fill_match("fp16.enabled", must_match=False, **kwargs)
         self.fill_match("bf16.enabled", must_match=False, **kwargs)
+
     def set_deepspeed_weakref(self):
         from .imports import is_transformers_available
         if self.zero3_init_flag:
@@ -639,9 +654,11 @@ class DeepSpeedPlugin:
             else:
                 from transformers.integrations import HfDeepSpeedConfig
             self.dschf = HfDeepSpeedConfig(ds_config)  # keep this object alive # noqa
+
     def is_zero3_init_enabled(self):
         return self.zero3_init_flag
     @contextmanager
+
     def zero3_init_context_manager(self, enable=False):
         old = self.zero3_init_flag
         if old == enable:
@@ -654,6 +671,7 @@ class DeepSpeedPlugin:
         self.zero3_init_flag = old
         self.dschf = None
         self.set_deepspeed_weakref()
+
     def _deepspeed_config_checks(self):
         env_variable_names_to_ignore = [
            "ACCELERATE_GRADIENT_ACCUMULATION_STEPS",
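`zero3_init_context_manager` temporarily flips `zero3_init_flag` and rebuilds the transformers weakref, which lets a caller opt a single model load out of (or into) `zero.Init` partitioning. A hedged usage sketch, assuming an already-configured `accelerator`:

from transformers import AutoModelForCausalLM

deepspeed_plugin = accelerator.state.deepspeed_plugin
with deepspeed_plugin.zero3_init_context_manager(enable=False):
    # weights materialize normally here instead of being sharded at init
    reference_model = AutoModelForCausalLM.from_pretrained("gpt2")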
@@ -784,6 +802,7 @@ class FullyShardedDataParallelPlugin:
             "for reduced memory usage."
         },
     )
+
     def __post_init__(self):
         from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch, CPUOffload, ShardingStrategy
         prefix = "FSDP_"
@@ -824,6 +843,7 @@ class FullyShardedDataParallelPlugin:
             )
             self.param_init_fn = lambda x: x.to_empty(device=device, recurse=False)
     @staticmethod
+
     def get_module_class_from_name(module, name):
         """
         Gets a class from a module by its name.
@@ -841,6 +861,7 @@ class FullyShardedDataParallelPlugin:
             module_class = FullyShardedDataParallelPlugin.get_module_class_from_name(child_module, name)
             if module_class is not None:
                 return module_class
+
     def set_auto_wrap_policy(self, model):
         from torch.distributed.fsdp.wrap import size_based_auto_wrap_policy, transformer_auto_wrap_policy
         default_transformer_cls_names_to_wrap = (
@@ -870,6 +891,7 @@ class FullyShardedDataParallelPlugin:
             self.auto_wrap_policy = functools.partial(
                 size_based_auto_wrap_policy, min_num_params=min_num_params
             )
+
     def set_mixed_precision(self, mixed_precision):
         if mixed_precision == "fp16":
             dtype = torch.float16
@@ -880,6 +902,7 @@ class FullyShardedDataParallelPlugin:
         from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
         if self.mixed_precision_policy is None:
             self.mixed_precision_policy = MixedPrecision(param_dtype=dtype, reduce_dtype=dtype, buffer_dtype=dtype)
+
     def set_state_dict_type(self, state_dict_type_policy):
         from torch.distributed.fsdp.fully_sharded_data_parallel import (
             FullOptimStateDictConfig,
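`set_mixed_precision` maps the string setting onto a single `MixedPrecision` policy that uses the same dtype for parameters, gradient reduction, and buffers. An equivalent standalone sketch:

import torch
from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision

dtype = torch.float16  # "fp16"; "bf16" would map to torch.bfloat16
policy = MixedPrecision(param_dtype=dtype, reduce_dtype=dtype, buffer_dtype=dtype)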
@@ -1047,6 +1070,7 @@ class MegatronLMPlugin:
         default=None,
         metadata={"help": "Other Megatron-LM arguments. Please refer Megatron-LM"},
     )
+
     def __post_init__(self):
         prefix = "MEGATRON_LM_"
         if self.tp_degree is None:
@@ -1100,6 +1124,7 @@ class MegatronLMPlugin:
             self.set_tensorboard_logging_options()
         if self.other_megatron_args is not None:
             self.megatron_lm_default_args.update(self.other_megatron_args)
+
     def set_network_size_args(self, model, batch_data=None):
         # Check if the model is either BERT, GPT or T5 else raise error
         # set 'num_layers', 'hidden_size', 'num_attention_heads', 'max_position_embeddings'
@@ -1180,6 +1205,7 @@ class MegatronLMPlugin:
             self.megatron_lm_default_args["model_return_dict"] = model.config.return_dict
         if model_type_name == "bert":
             self.megatron_lm_default_args["num_labels"] = num_labels
+
     def set_mixed_precision(self, mixed_precision):
         if mixed_precision == "fp16":
             self.megatron_lm_default_args["fp16"] = True
@@ -1187,6 +1213,7 @@ class MegatronLMPlugin:
             self.megatron_lm_default_args["bf16"] = True
             self.DDP_impl = "local"
             self.megatron_lm_default_args["DDP_impl"] = self.DDP_impl
+
     def set_training_args(self, micro_batch_size, dp_degree):
         self.data_parallel_size = dp_degree
         self.micro_batch_size = micro_batch_size
@@ -1194,6 +1221,7 @@ class MegatronLMPlugin:
         self.megatron_lm_default_args["data_parallel_size"] = self.data_parallel_size
         self.megatron_lm_default_args["micro_batch_size"] = self.micro_batch_size
         self.megatron_lm_default_args["global_batch_size"] = self.global_batch_size
+
     def set_optimizer_type(self, optimizer):
         optimizer_name = optimizer.__class__.__name__.lower()
         if "adam" in optimizer_name:
@@ -1208,6 +1236,7 @@ class MegatronLMPlugin:
             raise ValueError(f"Optimizer {optimizer_name} is not supported by Megatron-LM")
         self.megatron_lm_default_args["lr"] = optimizer.defaults["lr"]
         self.megatron_lm_default_args["weight_decay"] = optimizer.defaults["weight_decay"]
+
     def set_scheduler_args(self, scheduler):
         if self.train_iters is None:
             self.train_iters = scheduler.total_num_steps // self.megatron_lm_default_args["data_parallel_size"]
@@ -1235,6 +1264,7 @@ class MegatronLMPlugin:
         self.megatron_lm_default_args["start_weight_decay"] = self.start_weight_decay
         self.megatron_lm_default_args["end_weight_decay"] = self.end_weight_decay
         self.megatron_lm_default_args["min_lr"] = self.min_lr
+
     def set_tensorboard_logging_options(self):
         from megatron.arguments import _add_logging_args
         parser = argparse.ArgumentParser()
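`set_training_args` records the data-parallel degree and per-GPU micro-batch size in `megatron_lm_default_args` alongside the global batch size. The usual Megatron-LM relation between these quantities, with the gradient-accumulation factor assumed for illustration (the exact formula is not shown in this hunk):

micro_batch_size = 2
dp_degree = 8                    # data_parallel_size
gradient_accumulation_steps = 4  # assumed value, for illustration only
global_batch_size = micro_batch_size * dp_degree * gradient_accumulation_steps  # 64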
@@ -1292,6 +1322,7 @@ class BnbQuantizationConfig:
         default=None,
         metadata={"help": "an explicit list of the modules that we don't quantize. We keep them in `torch.float32`."},
     )
+
     def __post_init__(self):
         """
         Safety checker that arguments are correct - also replaces some NoneType arguments with their default values.
The new-side listing for this file also covers three earlier hunks; consolidated here in unified form (hunk headers derived from the displayed line numbers):
@@ -5,8 +5,10 @@
     """
     Internal mixin that implements a `to_kwargs()` method for a dataclass.
     """
+
     def to_dict(self):
         return copy.deepcopy(self.__dict__)
+
     def to_kwargs(self):
         """
         Returns a dictionary containing the attributes with values different from the default of this class.
@@ -159,6 +161,7 @@
     amax_history_len: int = 1
     amax_compute_algo: AmaxComputeAlgorithm = "most_recent"
     override_linear_precision: Tuple[bool, bool, bool] = (False, False, False)
+
     def __post_init__(self):
         self.backend = self.backend.upper()
         if self.backend not in get_args(Backend):
@@ -175,6 +178,7 @@
         raise ValueError(f"`optimization_level` must be one of {' or '.join(get_args(OptLevel))}")
 class EnumWithContains(enum.EnumMeta):
     "A metaclass that adds the ability to check if `self` contains an item with the `in` operator"
+
     def __contains__(cls, item):
         try:
             cls(item)
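The `KwargsHandler` hunk above is the mixin behind all of these dataclasses: `to_kwargs` diffs an instance against a default-constructed one. A sketch with a hypothetical handler (the class name and fields are illustrative, not part of the diff):

from dataclasses import dataclass

@dataclass
class ExampleKwargs(KwargsHandler):  # hypothetical subclass
    foo: int = 1
    bar: str = "x"

assert ExampleKwargs(foo=2).to_kwargs() == {"foo": 2}  # only non-default values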
src/utils/deepspeed.py
CHANGED
@@ -10,6 +10,7 @@ class HfDeepSpeedConfig:
     Args:
         config_file_or_dict (`Union[str, Dict]`): path to DeepSpeed config file or dict.
     """
+
     def __init__(self, config_file_or_dict):
         if isinstance(config_file_or_dict, dict):
             # Don't modify user's data should they want to reuse it (e.g. in tests), because once we
@@ -28,6 +29,7 @@ class HfDeepSpeedConfig:
             )
         self.config = config
         self.set_stage_and_offload()
+
     def set_stage_and_offload(self):
         # zero stage - this is done as early as possible, before model is created, to allow
         # ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object
@@ -45,6 +47,7 @@ class HfDeepSpeedConfig:
         )
         if len(offload_devices & offload_devices_valid) > 0:
             self._offload = True
+
     def find_config_node(self, ds_key_long):
         config = self.config
         # find the config node of interest if it exists
@@ -55,6 +58,7 @@ class HfDeepSpeedConfig:
         if config is None:
             return None, ds_key
         return config, ds_key
+
     def get_value(self, ds_key_long, default=None):
         """
         Returns the set value or `default` if no value is set
@@ -63,6 +67,7 @@ class HfDeepSpeedConfig:
         if config is None:
             return default
         return config.get(ds_key, default)
+
     def del_config_sub_tree(self, ds_key_long, must_exist=False):
         """
         Deletes a sub-section of the config file if it's found.
@@ -82,6 +87,7 @@ class HfDeepSpeedConfig:
         # if found remove it
         if parent_config is not None:
             parent_config.pop(node)
+
     def is_true(self, ds_key_long):
         """
         Returns `True`/``False` only if the value is set, always `False` otherwise. So use this method to ask the very
@@ -89,6 +95,7 @@ class HfDeepSpeedConfig:
         """
         value = self.get_value(ds_key_long)
         return False if value is None else bool(value)
+
     def is_false(self, ds_key_long):
         """
         Returns `True`/``False` only if the value is set, always `False` otherwise. So use this method to ask the very
@@ -96,10 +103,13 @@ class HfDeepSpeedConfig:
         """
         value = self.get_value(ds_key_long)
         return False if value is None else not bool(value)
+
     def is_zero2(self):
         return self._stage == 2
+
     def is_zero3(self):
         return self._stage == 3
+
     def is_offload(self):
         return self._offload
 class DeepSpeedEngineWrapper:
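`HfDeepSpeedConfig` addresses the nested DeepSpeed config with dot-separated keys, which is what `find_config_node` and `get_value` implement. A hedged sketch:

ds_config = {"zero_optimization": {"stage": 3, "offload_param": {"device": "cpu"}}}
hf_ds_config = HfDeepSpeedConfig(ds_config)
assert hf_ds_config.is_zero3()          # stage read early, before the model exists
assert hf_ds_config.is_offload()        # "cpu" is a valid offload device
assert hf_ds_config.get_value("zero_optimization.offload_param.device") == "cpu"
assert hf_ds_config.get_value("zero_optimization.missing_key", default=7) == 7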
@@ -108,8 +118,10 @@ class DeepSpeedEngineWrapper:
     Args:
         engine (deepspeed.runtime.engine.DeepSpeedEngine): deepspeed engine to wrap
     """
+
     def __init__(self, engine):
         self.engine = engine
+
     def backward(self, loss, **kwargs):
         # runs backpropagation and handles mixed precision
         self.engine.backward(loss, **kwargs)
@@ -131,14 +143,18 @@ class DeepSpeedOptimizerWrapper(AcceleratedOptimizer):
         optimizer (`torch.optim.optimizer.Optimizer`):
             The optimizer to wrap.
     """
+
     def __init__(self, optimizer):
         super().__init__(optimizer, device_placement=False, scaler=None)
         self.__has_overflow__ = hasattr(self.optimizer, "overflow")
+
     def zero_grad(self, set_to_none=None):
         pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
+
     def step(self):
         pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
     @property
+
     def step_was_skipped(self):
         """Whether or not the optimizer step was done, or skipped because of gradient overflow."""
         if self.__has_overflow__:
@@ -152,8 +168,10 @@ class DeepSpeedSchedulerWrapper(AcceleratedScheduler):
             The scheduler to wrap.
         optimizers (one or a list of `torch.optim.Optimizer`):
     """
+
     def __init__(self, scheduler, optimizers):
         super().__init__(scheduler, optimizers)
+
     def step(self):
         pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
 class DummyOptim:
@@ -170,6 +188,7 @@ class DummyOptim:
         **kwargs:
             Other arguments.
     """
+
     def __init__(self, params, lr=0.001, weight_decay=0, **kwargs):
         self.params = params
         self.lr = lr
@@ -191,6 +210,7 @@ class DummyScheduler:
         **kwargs:
             Other arguments.
     """
+
     def __init__(self, optimizer, total_num_steps=None, warmup_num_steps=0, lr_scheduler_callable=None, **kwargs):
         self.optimizer = optimizer
         self.total_num_steps = total_num_steps
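`DummyOptim` and `DummyScheduler` only record their arguments; they stand in for an optimizer and scheduler that the DeepSpeed config itself defines. A hedged usage sketch, assuming a `model` and an `accelerator` configured with such a config:

optimizer = DummyOptim(model.parameters(), lr=3e-4)
scheduler = DummyScheduler(optimizer, total_num_steps=1000, warmup_num_steps=100)
model, optimizer, scheduler = accelerator.prepare(model, optimizer, scheduler)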
src/utils/imports.py
CHANGED
@@ -149,6 +149,7 @@ def is_mlflow_available():
 def is_mps_available():
     return is_torch_version(">=", "1.12") and torch.backends.mps.is_available() and torch.backends.mps.is_built()
 def is_ipex_available():
+
 def get_major_and_minor_from_version(full_version):
     return str(version.parse(full_version).major) + "." + str(version.parse(full_version).minor)
 _torch_version = importlib.metadata.version("torch")
src/utils/launch.py
CHANGED
@@ -297,6 +297,7 @@ def _convert_nargs_to_dict(nargs: List[str]) -> Dict[str, str]:
     if len(nargs) < 0:
         return {}
     # helper function to infer type for argsparser
+
     def _infer_type(s):
         try:
             s = float(s)
@@ -449,10 +450,12 @@ class PrepareForLaunch:
         debug (`bool`, *optional*, defaults to `False`):
             Whether or not this is a debug launch.
     """
+
     def __init__(self, launcher, distributed_type="NO", debug=False):
         self.launcher = launcher
         self.distributed_type = DistributedType(distributed_type)
         self.debug = debug
+
     def __call__(self, index, *args):
         if self.debug:
             world_size = int(os.environ.get("WORLD_SIZE"))
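`PrepareForLaunch` wraps a training function so each spawned process can set up its distributed environment before calling it. A hedged sketch of how a launcher might use it; `training_function` and the process count are assumptions:

import torch

launcher = PrepareForLaunch(training_function, distributed_type="MULTI_GPU")
torch.multiprocessing.start_processes(launcher, args=(), nprocs=8, start_method="fork")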
src/utils/megatron_lm.py
CHANGED
@@ -63,6 +63,7 @@ class MegatronLMDummyDataLoader:
     Args:
         **dataset_kwargs: Megatron data arguments.
     """
+
     def __init__(self, **dataset_kwargs):
         parser = argparse.ArgumentParser()
         parser = _add_data_args(parser)
@@ -71,12 +72,15 @@ class MegatronLMDummyDataLoader:
         self.dataset_args = vars(data_args[0])
         self.dataset_args.update(dataset_kwargs)
         self.dataset_args["megatron_dataset_flag"] = True
+
     def set_megatron_data_args(self):
         args = get_args()
         for key, value in self.dataset_args.items():
             setattr(args, key, value)
+
     def get_train_valid_test_datasets_provider(self):
-
+
+        def train_valid_test_datasets_provider(train_val_test_num_samples):
             """Build train, valid, and test datasets."""
             args = get_args()
             dataset_args = {
@@ -121,6 +125,7 @@ class MegatronLMDummyDataLoader:
             train_ds, valid_ds, test_ds = build_train_valid_test_datasets(**dataset_args)
             return train_ds, valid_ds, test_ds
         return train_valid_test_datasets_provider
+
     def build_pretraining_data_loader(self, dataset, consumed_samples):
         if dataset is None:
             return None
@@ -151,8 +156,10 @@ class MegatronLMDummyDataLoader:
         return torch.utils.data.DataLoader(
             dataset, batch_sampler=batch_sampler, num_workers=args.num_workers, pin_memory=True
         )
+
     def build_train_valid_test_data_iterators(self):
-
+
+        def cyclic_iter(iter):
             while True:
                 for x in iter:
                     yield x
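`MegatronLMDummyDataLoader` carries Megatron data arguments until `prepare` swaps it for the real pretraining iterators. A hedged usage sketch; the dataset kwargs are illustrative and `accelerator`, `model`, and `optimizer` are assumed to exist:

megatron_dataloader = MegatronLMDummyDataLoader(
    data_path=["my-gpt2_text_document"],  # illustrative corpus name
    seq_length=1024,
)
model, optimizer, megatron_dataloader = accelerator.prepare(model, optimizer, megatron_dataloader)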
@@ -275,13 +282,17 @@ def prepare_data_loader(accelerator, dataloader):
     return train_data_iterator, valid_data_iterator, test_data_iterator
 # optimizer utilities
 class MegatronLMOptimizerWrapper(AcceleratedOptimizer):
+
     def __init__(self, optimizer):
         super().__init__(optimizer, device_placement=False, scaler=None)
+
     def zero_grad(self, set_to_none=None):
         pass  # `model(**batch)` is doing that automatically. Therefore, it's implementation is not needed
+
     def step(self):
         pass  # `model(**batch)` is doing that automatically. Therefore, it's implementation is not needed
     @property
+
     def step_was_skipped(self):
         """Whether or not the optimizer step was done, or skipped because of gradient overflow."""
         return self.optimizer.skipped_iter
@@ -305,14 +316,17 @@ class MegatronLMDummyScheduler:
         **kwargs:
             Other arguments.
     """
+
     def __init__(self, optimizer, total_num_steps=None, warmup_num_steps=0, **kwargs):
         self.optimizer = optimizer
         self.total_num_steps = total_num_steps
         self.warmup_num_steps = warmup_num_steps
         self.kwargs = kwargs
 class MegatronLMSchedulerWrapper(AcceleratedScheduler):
+
     def __init__(self, scheduler, optimizers):
         super().__init__(scheduler, optimizers)
+
     def step(self, *args, **kwargs):
         return  # `model(**batch)` is doing that automatically. Therefore, it's implementation is not needed
 def prepare_scheduler(accelerator, optimizer, scheduler):
@@ -321,13 +335,17 @@ def prepare_scheduler(accelerator, optimizer, scheduler):
     return scheduler
 class AbstractTrainStep(ABC):
     """Abstract class for batching, forward pass and loss handler."""
+
     def __init__(self, name):
         super().__init__()
         self.name = name
+
     def get_batch_func(self):
         pass
+
     def get_forward_step_func(self):
         pass
+
     def get_loss_func(self):
         pass
 class BertTrainStep(AbstractTrainStep):
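Because Megatron's engine performs backward and the optimizer/scheduler steps inside `model(**batch)`, the wrappers above deliberately turn the usual training calls into no-ops, so the training loop keeps its familiar shape:

loss = model(**batch)  # forward + backward + optimizer step inside Megatron-LM
optimizer.step()       # no-op (MegatronLMOptimizerWrapper)
optimizer.zero_grad()  # no-op
scheduler.step()       # no-op (MegatronLMSchedulerWrapper)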
@@ -336,6 +354,7 @@ class BertTrainStep(AbstractTrainStep):
     Args:
         args (`argparse.Namespace`): Megatron-LM arguments.
     """
+
     def __init__(self, args):
         super().__init__("BertTrainStep")
         self.get_batch = self.get_batch_func(args.megatron_dataset_flag)
@@ -345,8 +364,10 @@ class BertTrainStep(AbstractTrainStep):
             self.model_output_class = None
         else:
             self.model_output_class = SequenceClassifierOutput
+
     def get_batch_func(self, megatron_dataset_flag):
-
+
+        def get_batch_megatron(data_iterator):
             """Build the batch."""
             # Items and their type.
             keys = ["text", "types", "labels", "is_random", "loss_mask", "padding_mask"]
@@ -365,7 +386,8 @@ class BertTrainStep(AbstractTrainStep):
             lm_labels = data_b["labels"].long()
             padding_mask = data_b["padding_mask"].long()
             return tokens, types, sentence_order, loss_mask, lm_labels, padding_mask
-
+
+        def get_batch_transformer(data_iterator):
             """Build the batch."""
             data = next(data_iterator)
             data = send_to_device(data, torch.cuda.current_device())
@@ -391,8 +413,10 @@ class BertTrainStep(AbstractTrainStep):
             return get_batch_megatron
         else:
             return get_batch_transformer
+
     def get_loss_func(self, pretraining_flag, num_labels):
-
+
+        def loss_func_pretrain(loss_mask, sentence_order, output_tensor):
             lm_loss_, sop_logits = output_tensor
             lm_loss_ = lm_loss_.float()
             loss_mask = loss_mask.float()
@@ -407,7 +431,8 @@ class BertTrainStep(AbstractTrainStep):
             loss = lm_loss
             averaged_losses = average_losses_across_data_parallel_group([lm_loss])
             return loss, {"lm loss": averaged_losses[0]}
-
+
+        def loss_func_finetune(labels, logits):
             if num_labels == 1:
                 # We are doing regression
                 loss_fct = MSELoss()
@@ -424,8 +449,10 @@ class BertTrainStep(AbstractTrainStep):
             return loss_func_pretrain
         else:
             return loss_func_finetune
+
     def get_forward_step_func(self, pretraining_flag, bert_binary_head):
-
+
+        def forward_step(data_iterator, model):
             """Forward step."""
             tokens, types, sentence_order, loss_mask, labels, padding_mask = self.get_batch(data_iterator)
             if not bert_binary_head:
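`loss_func_finetune` picks the criterion from `num_labels`, mirroring the standard transformers convention. A standalone sketch of that branch:

from torch.nn import CrossEntropyLoss, MSELoss

num_labels = 1  # illustrative value
loss_fct = MSELoss() if num_labels == 1 else CrossEntropyLoss()  # regression vs. classification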
@@ -444,6 +471,7 @@ class GPTTrainStep(AbstractTrainStep):
     Args:
         args (`argparse.Namespace`): Megatron-LM arguments.
     """
+
     def __init__(self, args):
         super().__init__("GPTTrainStep")
         self.get_batch = self.get_batch_func(args.megatron_dataset_flag)
@@ -460,8 +488,10 @@ class GPTTrainStep(AbstractTrainStep):
             self.model_output_class = None
         else:
             self.model_output_class = CausalLMOutputWithCrossAttentions
+
     def get_batch_func(self, megatron_dataset_flag):
-
+
+        def get_batch_megatron(data_iterator):
             """Generate a batch"""
             # Items and their type.
             keys = ["text"]
@@ -481,7 +511,8 @@ class GPTTrainStep(AbstractTrainStep):
                 tokens, self.eod_token, self.reset_position_ids, self.reset_attention_mask, self.eod_mask_loss
             )
             return tokens, labels, loss_mask, attention_mask, position_ids
-
+
+        def get_batch_transformer(data_iterator):
             data = next(data_iterator)
             data = {"input_ids": data["input_ids"]}
             data = send_to_device(data, torch.cuda.current_device())
@@ -499,9 +530,11 @@ class GPTTrainStep(AbstractTrainStep):
             return get_batch_megatron
         else:
             return get_batch_transformer
+
     def get_loss_func(self):
         args = get_args()
-
+
+        def loss_func(loss_mask, output_tensor):
             if args.return_logits:
                 losses, logits = output_tensor
             else:
@@ -516,8 +549,10 @@ class GPTTrainStep(AbstractTrainStep):
             output_dict.update({"logits": logits})
             return loss, output_dict
         return loss_func
+
     def get_forward_step_func(self):
-
+
+        def forward_step(data_iterator, model):
             """Forward step."""
             # Get the batch.
             tokens, labels, loss_mask, attention_mask, position_ids = self.get_batch(data_iterator)
@@ -530,6 +565,7 @@ class T5TrainStep(AbstractTrainStep):
     Args:
         args (`argparse.Namespace`): Megatron-LM arguments.
     """
+
     def __init__(self, args):
         super().__init__("T5TrainStep")
         self.get_batch = self.get_batch_func(args.megatron_dataset_flag)
@@ -540,6 +576,7 @@ class T5TrainStep(AbstractTrainStep):
         else:
             self.model_output_class = Seq2SeqLMOutput
     @staticmethod
+
     def attn_mask_postprocess(attention_mask):
         # We create a 3D attention mask from a 2D tensor mask.
         # [b, 1, s]
@@ -552,11 +589,13 @@ class T5TrainStep(AbstractTrainStep):
         extended_attention_mask = attention_mask_bss < 0.5
         return extended_attention_mask
     @staticmethod
+
     def get_decoder_mask(seq_length, device):
         attention_mask = torch.tril(torch.ones((1, seq_length, seq_length), device=device))
         attention_mask = attention_mask < 0.5
         return attention_mask
     @staticmethod
+
     def get_enc_dec_mask(attention_mask, dec_seq_length, device):
         batch_size, _ = attention_mask.shape
         # We create a 3D attention mask from a 2D tensor mask.
@@ -567,8 +606,10 @@ class T5TrainStep(AbstractTrainStep):
         attention_mask_bss = attention_mask_bs1 * attention_mask_b1s
         extended_attention_mask = attention_mask_bss < 0.5
         return extended_attention_mask
+
     def get_batch_func(self, megatron_dataset_flag):
-
+
+        def get_batch_megatron(data_iterator):
             """Build the batch."""
             keys = ["text_enc", "text_dec", "labels", "loss_mask", "enc_mask", "dec_mask", "enc_dec_mask"]
             datatype = torch.int64
@@ -587,7 +628,8 @@ class T5TrainStep(AbstractTrainStep):
             dec_mask = data_b["dec_mask"] < 0.5
             enc_dec_mask = data_b["enc_dec_mask"] < 0.5
             return tokens_enc, tokens_dec, loss_mask, labels, enc_mask, dec_mask, enc_dec_mask
-
+
+        def get_batch_transformer(data_iterator):
             """Build the batch."""
             data = next(data_iterator)
             data = send_to_device(data, torch.cuda.current_device())
@@ -611,16 +653,20 @@ class T5TrainStep(AbstractTrainStep):
             return get_batch_megatron
         else:
             return get_batch_transformer
+
     def get_loss_func(self):
-
+
+        def loss_func(loss_mask, output_tensor):
             lm_loss_ = output_tensor.float()
             lm_loss = torch.sum(lm_loss_.view(-1) * loss_mask.reshape(-1)) / loss_mask.sum()
             loss = lm_loss
             averaged_losses = average_losses_across_data_parallel_group([lm_loss])
             return loss, {"lm loss": averaged_losses[0]}
         return loss_func
+
     def get_forward_step_func(self):
-
+
+        def forward_step(data_iterator, model):
             """Forward step."""
             # Get the batch.
             tokens_enc, tokens_dec, loss_mask, lm_labels, enc_mask, dec_mask, enc_dec_mask = self.get_batch(
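Two recurring idioms in these train steps are worth spelling out: the `< 0.5` comparisons turn float masks into boolean "positions to ignore" masks, and the loss is a masked mean over tokens. A standalone sketch with illustrative shapes:

import torch

seq_length = 4
decoder_mask = torch.tril(torch.ones((1, seq_length, seq_length))) < 0.5  # True above the diagonal

lm_loss_ = torch.randn(2, seq_length)  # per-token losses
loss_mask = torch.tensor([[1., 1., 0., 0.], [1., 1., 1., 0.]])
lm_loss = torch.sum(lm_loss_.view(-1) * loss_mask.reshape(-1)) / loss_mask.sum()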
@@ -658,6 +704,7 @@ def initialize(accelerator, extra_args_provider=None, args_defaults={}):
     # tensorboard-writer, and timers.
     set_global_variables(args)
     # torch.distributed initialization
+
     def finish_mpu_init():
         args = get_args()
         # Pytorch distributed.
@@ -710,6 +757,7 @@ class MegatronEngine(torch.nn.Module):
         optimizer: Megatron-LM optimizer
         lr_scheduler: Megatron-LM lr scheduler
     """
+
     def __init__(self, accelerator, model, optimizer, scheduler):
         super(MegatronEngine, self).__init__()
         self.module = model
@@ -737,13 +785,16 @@ class MegatronEngine(torch.nn.Module):
         self.report_memory_flag = True
         if args.tensorboard_dir is not None:
             write_args_to_tensorboard()
+
     def train(self):
         for model_module in self.module:
             model_module.train()
         self.log_eval_results()
+
     def eval(self):
         for model_module in self.module:
             model_module.eval()
+
     def train_step(self, **batch_data):
         """
         Training step for Megatron-LM
@@ -829,6 +880,7 @@ class MegatronEngine(torch.nn.Module):
                 loss_reduced[key] = torch.concat(losses_reduced_for_key)
             return loss_reduced, skipped_iter, grad_norm, num_zeros_in_grad
         return {}, skipped_iter, grad_norm, num_zeros_in_grad
+
     def eval_step(self, **batch_data):
         """
         Evaluation step for Megatron-LM
@@ -875,6 +927,7 @@ class MegatronEngine(torch.nn.Module):
             return loss_reduced
         else:
             return {}
+
     def forward(self, **batch_data):
         # During training, we use train_step()
         # model(**batch_data) performs following operations by delegating it to `self.train_step`:
@@ -933,6 +986,7 @@ class MegatronEngine(torch.nn.Module):
         if self.train_step_handler.model_output_class is not None:
             return self.train_step_handler.model_output_class(loss=loss, logits=logits)
         return loss
+
     def log_eval_results(self):
         args = get_args()
         if args.tensorboard_dir is None or self.iteration == 0:
@@ -957,6 +1011,7 @@ class MegatronEngine(torch.nn.Module):
             print_rank_last(string)
             print_rank_last("-" * length)
         self.eval_total_loss_dict = {}
+
     def save_checkpoint(self, output_dir):
         self.log_eval_results()
         args = get_args()
@@ -964,6 +1019,7 @@ class MegatronEngine(torch.nn.Module):
         torch.distributed.barrier()
         save_checkpoint(self.iteration, self.module, self.optimizer, self.scheduler)
         torch.distributed.barrier()
+
     def load_checkpoint(self, input_dir):
         args = get_args()
         args.load = input_dir
@@ -975,6 +1031,7 @@ class MegatronEngine(torch.nn.Module):
         self.iteration = iteration
         if args.fp16 and self.iteration == 0:
             self.optimizer.reload_model_params()
+
     def megatron_generate(
         self,
         inputs,
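`MegatronEngine.forward` dispatches on the module's training mode, so the same `model(**batch_data)` call either trains or evaluates:

model.train()
train_output = model(**batch_data)  # routed to train_step(): forward, backward, optimizer step
model.eval()
eval_output = model(**batch_data)   # routed to eval_step(): forward only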
@@ -1143,6 +1200,7 @@ def gather_across_data_parallel_groups(tensor):
         tensor (nested list/tuple/dictionary of `torch.Tensor`):
             The data to gather across data parallel ranks.
     """
+
     def _gpu_gather_one(tensor):
         if tensor.ndim == 0:
             tensor = tensor.clone()[None]
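`_gpu_gather_one` first promotes zero-dim tensors to shape `(1,)` so the gathered pieces can be concatenated afterwards:

import torch

t = torch.tensor(3.14)  # ndim == 0
t = t.clone()[None]     # shape (1,)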
src/utils/memory.py
CHANGED
@@ -69,6 +69,7 @@ def find_executable_batch_size(function: callable = None, starting_batch_size: i
     if function is None:
         return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)
     batch_size = starting_batch_size
+
     def decorator(*args, **kwargs):
         nonlocal batch_size
         gc.collect()
| 69 |
if function is None:
|
| 70 |
return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)
|
| 71 |
batch_size = starting_batch_size
|
| 72 |
+
|
| 73 |
def decorator(*args, **kwargs):
|
| 74 |
nonlocal batch_size
|
| 75 |
gc.collect()
|
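The added blank line above is cosmetic, but the function it touches is worth a note: `find_executable_batch_size` retries its wrapped function with a halved batch size whenever a CUDA out-of-memory error escapes it. A minimal usage sketch (the body is a stand-in, not part of the diff):

    from accelerate.utils import find_executable_batch_size

    @find_executable_batch_size(starting_batch_size=128)
    def training_loop(batch_size):
        # Anything here that raises an out-of-memory error makes the
        # decorator halve batch_size and call training_loop again.
        print(f"trying batch_size={batch_size}")

    training_loop()  # called with no arguments; batch_size is injected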
src/utils/modeling.py
CHANGED
@@ -352,8 +352,10 @@ class FindTiedParametersResult(list):
     This is a subclass of a list to handle backward compatibility for Transformers. Do not rely on the fact this is not
     a list or on the `values` method as in the future this will be removed.
     """
+
     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
+
     def values(self):
         # TODO: at the next Transformers release (4.28.0) issue a deprecation warning here.
         return sum([x[1:] for x in self], [])
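For context on `values()`: each element of a `FindTiedParametersResult` is a group of tied parameter names, and `sum([x[1:] for x in self], [])` flattens every group minus its first (canonical) entry. A small sketch with made-up parameter names:

    from accelerate.utils.modeling import FindTiedParametersResult

    tied = FindTiedParametersResult([
        ["embed.weight", "lm_head.weight"],
        ["block.0.bias", "block.1.bias", "block.2.bias"],
    ])
    print(tied.values())
    # ['lm_head.weight', 'block.1.bias', 'block.2.bias']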
src/utils/offload.py
CHANGED
@@ -68,13 +68,17 @@ class PrefixedDataset(Mapping):
         dataset (`Mapping`): Any map with string keys.
         prefix (`str`): A prefix to add when trying to access any element in the underlying dataset.
     """
+
     def __init__(self, dataset: Mapping, prefix: str):
         self.dataset = dataset
         self.prefix = prefix
+
     def __getitem__(self, key):
         return self.dataset[f"{self.prefix}{key}"]
+
     def __iter__(self):
         return iter([key for key in self.dataset if key.startswith(self.prefix)])
+
     def __len__(self):
         return len(self.dataset)
 class OffloadedWeightsLoader(Mapping):
@@ -89,6 +93,7 @@ class OffloadedWeightsLoader(Mapping):
         A dictionary from weight name to their information (`dtype`/ `shape` or safetensors filename). Will default
         to the index saved in `save_folder`.
     """
+
     def __init__(
         self,
         state_dict: Dict[str, torch.Tensor] = None,
@@ -107,6 +112,7 @@ class OffloadedWeightsLoader(Mapping):
         self.all_keys = list(self.state_dict.keys())
         self.all_keys.extend([key for key in self.index if key not in self.all_keys])
         self.device = device
+
     def __getitem__(self, key: str):
         # State dict gets priority
         if key in self.state_dict:
@@ -129,8 +135,10 @@ class OffloadedWeightsLoader(Mapping):
             return tensor
         weight_file = os.path.join(self.save_folder, f"{key}.dat")
         return load_offloaded_weight(weight_file, weight_info)
+
     def __iter__(self):
         return iter(self.all_keys)
+
     def __len__(self):
         return len(self.all_keys)
 def extract_submodules_state_dict(state_dict: Dict[str, torch.Tensor], submodule_names: List[str]):
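`PrefixedDataset` namespaces lookups into an underlying mapping: `__getitem__` prepends the prefix, `__iter__` yields only the keys that carry it, while `__len__` reports the size of the underlying dataset. A behavior sketch with a plain dict standing in for an offloaded state dict:

    from accelerate.utils.offload import PrefixedDataset

    weights = {"encoder.w": 1, "encoder.b": 2, "decoder.w": 3}
    ds = PrefixedDataset(weights, prefix="encoder.")

    print(ds["w"])   # 1 -- resolved as weights["encoder.w"]
    print(list(ds))  # ['encoder.w', 'encoder.b'] -- only prefixed keys
    print(len(ds))   # 3 -- the underlying dataset's length, not the view's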
src/utils/operations.py
CHANGED
@@ -130,6 +130,7 @@ def get_data_structure(data):
     Returns:
         The same data structure as `data` with [`~utils.TensorInformation`] instead of tensors.
     """
+
     def _get_data_structure(tensor):
         return TensorInformation(shape=tensor.shape, dtype=tensor.dtype)
     return recursively_apply(_get_data_structure, data)
@@ -142,6 +143,7 @@ def get_shape(data):
     Returns:
         The same data structure as `data` with lists of tensor shapes instead of tensors.
     """
+
     def _get_shape(tensor):
         return list(tensor.shape)
     return recursively_apply(_get_shape, data)
@@ -151,6 +153,7 @@ def initialize_tensors(data_structure):
     Returns:
         The same data structure as `data` with tensors instead of [`~utils.TensorInformation`].
     """
+
     def _initialize_tensor(tensor_info):
         return torch.empty(*tensor_info.shape, dtype=tensor_info.dtype)
     return recursively_apply(_initialize_tensor, data_structure, test_type=is_tensor_information)
@@ -180,6 +183,7 @@ def listify(data):
     Returns:
         The same data structure as `data` with lists of numbers instead of `torch.Tensor`.
     """
+
     def _convert_to_list(tensor):
         tensor = tensor.detach().cpu()
         if tensor.dtype == torch.bfloat16:
@@ -190,6 +194,7 @@ def listify(data):
         return tensor.tolist()
     return recursively_apply(_convert_to_list, data)
 def _tpu_gather(tensor):
+
     def _tpu_gather_one(tensor):
         if tensor.ndim == 0:
             tensor = tensor.clone()[None]
@@ -206,6 +211,7 @@ def _gpu_gather(tensor):
         gather_op = torch.distributed.all_gather_into_tensor
     else:
         gather_op = torch.distributed._all_gather_base
+
     def _gpu_gather_one(tensor):
         if tensor.ndim == 0:
             tensor = tensor.clone()[None]
@@ -243,6 +249,7 @@ def verify_operation(function):
     Verifies that `tensor` is the same shape across all processes. Only ran if `PartialState().debug` is `True`.
     """
     @wraps(function)
+
     def wrapper(*args, **kwargs):
         if PartialState().distributed_type == DistributedType.NO or not PartialState().debug:
             return function(*args, **kwargs)
@@ -275,6 +282,7 @@ def chained_operation(function):
     `DistributedOperationException`.
     """
     @wraps(function)
+
     def wrapper(*args, **kwargs):
         try:
             return function(*args, **kwargs)
@@ -321,6 +329,7 @@ def gather_object(object: Any):
     else:
         return object
 def _gpu_broadcast(data, src=0):
+
     def _gpu_broadcast_one(tensor, src=0):
         torch.distributed.broadcast(tensor, src=src)
         return tensor
@@ -377,6 +386,7 @@ def slice_tensors(data, tensor_slice, process_index=None, num_processes=None):
     Returns:
         The same data structure as `data` with all the tensors slices.
     """
+
     def _slice_tensor(tensor, tensor_slice):
         return tensor[tensor_slice]
     return recursively_apply(_slice_tensor, data, tensor_slice)
@@ -415,6 +425,7 @@ def pad_across_processes(tensor, dim=0, pad_index=0, pad_first=False):
         pad_first (`bool`, *optional*, defaults to `False`):
             Whether to pad at the beginning or the end.
     """
+
     def _pad_across_processes(tensor, dim=0, pad_index=0, pad_first=False):
         if getattr(tensor, "is_nested", False):
             warnings.warn(
@@ -461,6 +472,7 @@ def reduce(tensor, reduction="mean", scale=1.0):
     Returns:
         The same data structure as `data` with all the tensors reduced.
     """
+
     def _reduce_across_processes(tensor, reduction="mean", scale=1.0):
         state = PartialState()
         cloned_tensor = tensor.clone()
@@ -485,8 +497,10 @@ def convert_to_fp32(tensor):
     Returns:
         The same data structure as `tensor` with all tensors that were in FP16/BF16 precision converted to FP32.
     """
+
     def _convert_to_fp32(tensor):
         return tensor.float()
+
     def _is_fp16_bf16_tensor(tensor):
         return hasattr(tensor, "dtype") and tensor.dtype in (torch.float16, torch.bfloat16)
     return recursively_apply(_convert_to_fp32, tensor, test_type=_is_fp16_bf16_tensor)
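Nearly every hunk in this file follows the same shape: a public helper defines a private per-tensor closure and hands it to `recursively_apply`, which walks nested lists, tuples, and dicts and applies the closure to each tensor leaf. A simplified, self-contained sketch of that dispatch pattern (the real `recursively_apply` also handles namedtuples, `test_type` filters, and extra arguments):

    import torch

    def recursively_apply_sketch(func, data):
        # Walk nested containers, applying func to every tensor leaf.
        if isinstance(data, (list, tuple)):
            return type(data)(recursively_apply_sketch(func, d) for d in data)
        if isinstance(data, dict):
            return {k: recursively_apply_sketch(func, v) for k, v in data.items()}
        if isinstance(data, torch.Tensor):
            return func(data)
        return data  # non-tensor leaves pass through unchanged

    nested = {"logits": [torch.ones(2, dtype=torch.float16)], "step": 3}
    print(recursively_apply_sketch(lambda t: t.float(), nested))
    # {'logits': [tensor([1., 1.])], 'step': 3}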
@@ -500,17 +514,21 @@ class ConvertOutputsToFp32:
     Returns:
         The same function as `model_forward` but with converted outputs.
     """
+
     def __init__(self, model_forward):
         self.model_forward = model_forward
         update_wrapper(self, model_forward)
+
     def __call__(self, *args, **kwargs):
         return convert_to_fp32(self.model_forward(*args, **kwargs))
+
     def __getstate__(self):
         raise pickle.PicklingError(
             "Cannot pickle a prepared model with automatic mixed precision, please unwrap the model with `Accelerator.unwrap_model(model)` before pickling it."
         )
 def convert_outputs_to_fp32(model_forward):
     model_forward = ConvertOutputsToFp32(model_forward)
+
     def forward(*args, **kwargs):
         return model_forward(*args, **kwargs)
     # To act like a decorator so that it can be popped when doing `extract_model_from_parallel`