# Copyright 2024 EPFL and Apple Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# --------------------------------------------------------
# Based on the timm code base
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# --------------------------------------------------------
import torch


class NativeScalerWithGradNormCount:
    """Wraps torch.cuda.amp.GradScaler and optionally reports or clips the gradient norm."""

    state_dict_key = "amp_scaler"

    def __init__(self, enabled=True):
        self._scaler = torch.cuda.amp.GradScaler(enabled=enabled)

    def __call__(self, loss, optimizer, clip_grad=None, skip_grad=None, parameters=None,
                 create_graph=False, update_grad=True, compute_grad_norm=True):
        # Scale the loss and backpropagate with scaled gradients
        self._scaler.scale(loss).backward(create_graph=create_graph)
        if update_grad:
            if clip_grad is not None:
                assert parameters is not None
                # Unscale the gradients of the optimizer's assigned params in-place
                self._scaler.unscale_(optimizer)
                norm = torch.nn.utils.clip_grad_norm_(parameters, clip_grad)
            elif skip_grad is not None:
                assert parameters is not None
                self._scaler.unscale_(optimizer)
                norm = get_grad_norm_(parameters)
                if norm >= skip_grad:
                    # Gradient norm too large: skip the optimizer step for this batch
                    self._scaler.update()
                    return norm
            else:
                self._scaler.unscale_(optimizer)
                norm = get_grad_norm_(parameters) if compute_grad_norm else None
            self._scaler.step(optimizer)
            self._scaler.update()
        else:
            # Gradient accumulation step: backward only, no optimizer update
            norm = None
        return norm

    def state_dict(self):
        return self._scaler.state_dict()

    def load_state_dict(self, state_dict):
        self._scaler.load_state_dict(state_dict)
def get_grad_norm_(parameters, norm_type: float = 2.0) -> torch.Tensor:
    """Compute the total norm of the gradients of the given parameters."""
    if isinstance(parameters, torch.Tensor):
        parameters = [parameters]
    parameters = [p for p in parameters if p.grad is not None]
    norm_type = float(norm_type)
    if len(parameters) == 0:
        return torch.tensor(0.)
    device = parameters[0].grad.device
    total_norm = torch.norm(
        torch.stack([torch.norm(p.grad.detach(), norm_type).to(device) for p in parameters]),
        norm_type,
    )
    return total_norm
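

# --------------------------------------------------------
# Usage sketch (illustrative, not part of the original file): how the loss
# scaler is typically invoked inside an AMP training step. The model,
# optimizer, and data below are placeholders chosen only for this example.
# --------------------------------------------------------
if __name__ == "__main__":
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = torch.nn.Linear(8, 4).to(device)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
    loss_scaler = NativeScalerWithGradNormCount(enabled=torch.cuda.is_available())

    x = torch.randn(16, 8, device=device)
    target = torch.randn(16, 4, device=device)

    # Forward pass under autocast (no-op on CPU when disabled)
    with torch.cuda.amp.autocast(enabled=torch.cuda.is_available()):
        loss = torch.nn.functional.mse_loss(model(x), target)

    optimizer.zero_grad()
    # Backward + clipped optimizer step; returns the gradient norm
    grad_norm = loss_scaler(loss, optimizer, clip_grad=1.0, parameters=model.parameters())
    print(f"grad norm: {grad_norm}")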