id (int64): 0 – 190k
prompt (string, lengths 21 – 13.4M)
docstring (string, lengths 1 – 12k)
10,454
import os import sys import json import base64 import argparse import subprocess import collections from copy import deepcopy import signal import time import torch.cuda from .multinode_runner import PDSHRunner, OpenMPIRunner, MVAPICHRunner, SlurmRunner from .constants import PDSH_LAUNCHER, OPENMPI_LAUNCHER, MVAPICH_LAUNCHER, SLURM_LAUNCHER from ..constants import TORCH_DISTRIBUTED_DEFAULT_PORT from ..nebula.constants import NEBULA_EXPORT_ENVS from ..utils import logger from ..autotuning import Autotuner def parse_resource_filter(host_info, include_str="", exclude_str=""): def parse_inclusion_exclusion(resource_pool, inclusion, exclusion): active_resources = collections.OrderedDict() for hostname, slots in resource_pool.items(): active_resources[hostname] = list(range(slots)) return parse_resource_filter(active_resources, include_str=inclusion, exclude_str=exclusion)
null
10,455
import os import sys import json import base64 import argparse import subprocess import collections from copy import deepcopy import signal import time import torch.cuda from .multinode_runner import PDSHRunner, OpenMPIRunner, MVAPICHRunner, SlurmRunner from .constants import PDSH_LAUNCHER, OPENMPI_LAUNCHER, MVAPICH_LAUNCHER, SLURM_LAUNCHER from ..constants import TORCH_DISTRIBUTED_DEFAULT_PORT from ..nebula.constants import NEBULA_EXPORT_ENVS from ..utils import logger from ..autotuning import Autotuner def encode_world_info(world_info): world_info_json = json.dumps(world_info).encode('utf-8') world_info_base64 = base64.urlsafe_b64encode(world_info_json).decode('utf-8') return world_info_base64
null
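A quick round-trip sketch of the encode_world_info helper in the row above; the decode step mirrors the JSON/base64 encoding and is an assumption, not part of the dataset row.

import json
import base64

world_info = {"worker-0": [0, 1], "worker-1": [0, 1]}

# Encode exactly as encode_world_info does: JSON, then URL-safe base64.
encoded = base64.urlsafe_b64encode(json.dumps(world_info).encode('utf-8')).decode('utf-8')

# The inverse transformation recovers the original dictionary.
decoded = json.loads(base64.urlsafe_b64decode(encoded))
assert decoded == world_info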
10,456
import os import sys import json import base64 import argparse import subprocess import collections from copy import deepcopy import signal import time import torch.cuda from .multinode_runner import PDSHRunner, OpenMPIRunner, MVAPICHRunner, SlurmRunner from .constants import PDSH_LAUNCHER, OPENMPI_LAUNCHER, MVAPICH_LAUNCHER, SLURM_LAUNCHER from ..constants import TORCH_DISTRIBUTED_DEFAULT_PORT from ..nebula.constants import NEBULA_EXPORT_ENVS from ..utils import logger from ..autotuning import Autotuner def run_autotuning(args, active_resources): tuner = Autotuner(args, active_resources) logger.info("[Start] Running autotuning") tuner.tune() tuner.print_tuning_results() logger.info("[End] Running autotuning") tuner.write_optimal_config() if args.autotuning == "run": tuner.run_after_tuning()
null
10,457
import os import sys import json import base64 import argparse import subprocess import collections from copy import deepcopy import signal import time import torch.cuda from .multinode_runner import PDSHRunner, OpenMPIRunner, MVAPICHRunner, SlurmRunner from .constants import PDSH_LAUNCHER, OPENMPI_LAUNCHER, MVAPICH_LAUNCHER, SLURM_LAUNCHER from ..constants import TORCH_DISTRIBUTED_DEFAULT_PORT from ..nebula.constants import NEBULA_EXPORT_ENVS from ..utils import logger from ..autotuning import Autotuner def parse_num_nodes(str_num_nodes: str, elastic_training: bool): node_list = str_num_nodes.split(":") if len(node_list) == 1: min_nodes, max_nodes = int(node_list[0]), -1 elif len(node_list) == 2 and elastic_training: min_nodes, max_nodes = int(node_list[0]), int(node_list[1]) elif len(node_list) == 2 and not elastic_training: raise RuntimeError("MIN:MAX format is only supported in elastic training") else: raise RuntimeError("num_nodes {} is not in MIN:MAX format".format(str_num_nodes)) return min_nodes, max_nodes
null
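A minimal usage sketch for parse_num_nodes in the row above; the expected tuples follow directly from the code shown.

# Plain node count: max_nodes defaults to -1 (no elastic upper bound).
assert parse_num_nodes("4", elastic_training=False) == (4, -1)

# MIN:MAX form is only accepted when elastic training is enabled.
assert parse_num_nodes("2:8", elastic_training=True) == (2, 8)

# parse_num_nodes("2:8", elastic_training=False) raises RuntimeError.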
10,458
import sys import subprocess import os import json import base64 import time import signal import psutil from collections import defaultdict from typing import Dict from argparse import ArgumentParser, REMAINDER from ..constants import TORCH_DISTRIBUTED_DEFAULT_PORT from ..nebula.constants import DLTS_POD_ENV_PATH from ..utils import logger from ..elasticity import is_torch_elastic_compatible from .constants import ELASTIC_TRAINING_ID_DEFAULT TORCH_DISTRIBUTED_DEFAULT_PORT = 29500 def parse_args(): parser = ArgumentParser(description="DeepSpeed distributed training launch" " utility that creates multiple distributed" " processes on a single node") # Optional arguments for the launch helper parser.add_argument("--node_rank", type=int, default=0, help="The rank of the node for multi-node distributed " "training") parser.add_argument("--master_addr", default="127.0.0.1", type=str, help="Master node (rank 0)'s address, should be either" " the IP address or the hostname of node 0, for" " single node multi-proc training, the" " --master_addr can simply be 127.0.0.1") parser.add_argument("--master_port", default=TORCH_DISTRIBUTED_DEFAULT_PORT, type=int, help="Master node (rank 0)'s free port that needs to " "be used for communication during distributed " "training") parser.add_argument("--world_info", default="None", type=str, help="world info base64 encoded dictionary") parser.add_argument("--module", action="store_true", help="Change each process to interpret the launch " "script as a Python module, executing with the same " "behavior as 'python -m'.") parser.add_argument("--no_python", action="store_true", help="Skip prepending the training script with " "'python' - just execute it directly.") parser.add_argument("--enable_elastic_training", action="store_true", help="Enable elastic training support.") parser.add_argument("--min_elastic_nodes", type=int, default=-1, help="Min number of nodes in elastic training.") parser.add_argument("--max_elastic_nodes", type=int, default=-1, help="Max number of nodes in elastic training.") parser.add_argument("--no_local_rank", action="store_true", help="Do not pass local_rank as an argument when calling " "the user's training script.") parser.add_argument("--save_pid", type=int, default=0, help="main launching process pid, for internal pid tracking") parser.add_argument( "--enable_each_rank_log", default="None", type=str, help="redirect the stdout and stderr from each rank into different log files") # positional parser.add_argument("training_script", type=str, help="The full path to the single GPU training " "program/script to be launched in parallel, " "followed by all the arguments for the " "training script") # rest from the training program parser.add_argument('training_script_args', nargs=REMAINDER) return parser.parse_args()
null
10,459
import sys import subprocess import os import json import base64 import time import signal import psutil from collections import defaultdict from typing import Dict from argparse import ArgumentParser, REMAINDER from ..constants import TORCH_DISTRIBUTED_DEFAULT_PORT from ..nebula.constants import DLTS_POD_ENV_PATH from ..utils import logger from ..elasticity import is_torch_elastic_compatible from .constants import ELASTIC_TRAINING_ID_DEFAULT def terminate_process_tree(pid): process = psutil.Process(pid) children = process.children(recursive=True) children.append(process) for child in children: try: child.terminate() except psutil.NoSuchProcess: pass gone, alive = psutil.wait_procs(children, timeout=30) for p in alive: p.kill()
null
10,460
import torch import math from torch import nn from torch.nn import init import deepspeed.comm as dist from .utils import TopKBinarizer, SymQuantizer, AsymQuantizer, TernaryQuantizer, BinaryQuantizer from deepspeed.utils import logger g_mpu = None The provided code snippet includes necessary dependencies for implementing the `_reduce` function. Write a Python function `def _reduce(input_)` to solve the following problem: All-reduce the input tensor across the model parallel group. Here is the function: def _reduce(input_): """All-reduce the input tensor across the model parallel group.""" group = g_mpu.get_model_parallel_group() # Bypass the function if we are using only 1 GPU. if dist.get_world_size(group=group) == 1: return input_ # All-reduce. dist.all_reduce(input_, group=group) return input_
All-reduce the input tensor across the model parallel group.
10,461
import torch import math from torch import nn from torch.nn import init import deepspeed.comm as dist from .utils import TopKBinarizer, SymQuantizer, AsymQuantizer, TernaryQuantizer, BinaryQuantizer from deepspeed.utils import logger g_mpu = None def split_tensor_along_last_dim(tensor, num_partitions, contiguous_split_chunks=False): """Split a tensor along its last dimension. Arguments: tensor: input tensor. num_partitions: number of partitions to split the tensor contiguous_split_chunks: If True, make each chunk contiguous in memory. """ # Get the size and dimension. last_dim = tensor.dim() - 1 assert tensor.size()[last_dim] % num_partitions == 0 last_dim_size = tensor.size()[last_dim] // num_partitions # Split. tensor_list = torch.split(tensor, last_dim_size, dim=last_dim) # Note: torch.split does not create contiguous tensors by default. if contiguous_split_chunks: return tuple(chunk.contiguous() for chunk in tensor_list) return tensor_list The provided code snippet includes necessary dependencies for implementing the `_split` function. Write a Python function `def _split(input_)` to solve the following problem: Split the tensor along its last dimension and keep the corresponding slice. Here is the function: def _split(input_): """Split the tensor along its last dimension and keep the corresponding slice.""" group = g_mpu.get_model_parallel_group() # Bypass the function if we are using only 1 GPU. if dist.get_world_size(group=group) == 1: return input_ # Split along last dimension. world_size = dist.get_world_size(group=group) input_list = split_tensor_along_last_dim(input_, world_size) # Note: torch.split does not create contiguous tensors by default. rank = dist.get_rank(group=group) output = input_list[rank].contiguous() return output
Split the tensor along its last dimension and keep the corresponding slice.
10,462
import torch import math from torch import nn from torch.nn import init import deepspeed.comm as dist from .utils import TopKBinarizer, SymQuantizer, AsymQuantizer, TernaryQuantizer, BinaryQuantizer from deepspeed.utils import logger g_mpu = None The provided code snippet includes necessary dependencies for implementing the `_gather` function. Write a Python function `def _gather(input_)` to solve the following problem: Gather tensors and concatenate along the last dimension. Here is the function: def _gather(input_): """Gather tensors and concatenate along the last dimension.""" group = g_mpu.get_model_parallel_group() # Bypass the function if we are using only 1 GPU. if dist.get_world_size(group=group) == 1: return input_ # Size and dimension. last_dim = input_.dim() - 1 rank = dist.get_rank(group=group) world_size = dist.get_world_size(group=group) tensor_list = [torch.empty_like(input_) for _ in range(world_size)] tensor_list[rank] = input_ dist.all_gather(tensor_list, input_, group=group) # Note: torch.cat already creates a contiguous tensor. output = torch.cat(tensor_list, dim=last_dim).contiguous() return output
Gather tensors and concatenate along the last dimension.
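A single-process sketch of the split-then-gather round trip that _split and _gather implement across ranks; torch.split and torch.cat stand in for the collectives, so no process group is required.

import torch

world_size = 4
x = torch.arange(16.0).reshape(2, 8)  # last dimension divisible by world_size

# What _split hands each rank: one contiguous chunk of the last dimension.
chunks = [c.contiguous() for c in torch.split(x, x.size(-1) // world_size, dim=-1)]

# What _gather reconstructs: the chunks concatenated back along the last dimension.
rebuilt = torch.cat(chunks, dim=-1)
assert torch.equal(x, rebuilt)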
10,463
import torch import math from torch import nn from torch.nn import init import deepspeed.comm as dist from .utils import TopKBinarizer, SymQuantizer, AsymQuantizer, TernaryQuantizer, BinaryQuantizer from deepspeed.utils import logger class _CopyToModelParallelRegion(torch.autograd.Function): """Pass the input to the model parallel region.""" def forward(ctx, input_): return input_ def backward(ctx, grad_output): return _reduce(grad_output) def copy_to_model_parallel_region(input_): return _CopyToModelParallelRegion.apply(input_)
null
10,464
import torch import math from torch import nn from torch.nn import init import deepspeed.comm as dist from .utils import TopKBinarizer, SymQuantizer, AsymQuantizer, TernaryQuantizer, BinaryQuantizer from deepspeed.utils import logger class _ReduceFromModelParallelRegion(torch.autograd.Function): """All-reduce the input from the model parallel region.""" def forward(ctx, input_): return _reduce(input_) def backward(ctx, grad_output): return grad_output def reduce_from_model_parallel_region(input_): return _ReduceFromModelParallelRegion.apply(input_)
null
10,465
import torch import math from torch import nn from torch.nn import init import deepspeed.comm as dist from .utils import TopKBinarizer, SymQuantizer, AsymQuantizer, TernaryQuantizer, BinaryQuantizer from deepspeed.utils import logger class _ScatterToModelParallelRegion(torch.autograd.Function): """Split the input and keep only the chunk corresponding to the rank.""" def forward(ctx, input_): return _split(input_) def backward(ctx, grad_output): return _gather(grad_output) def scatter_to_model_parallel_region(input_): return _ScatterToModelParallelRegion.apply(input_)
null
10,466
import torch import math from torch import nn from torch.nn import init import deepspeed.comm as dist from .utils import TopKBinarizer, SymQuantizer, AsymQuantizer, TernaryQuantizer, BinaryQuantizer from deepspeed.utils import logger class _GatherFromModelParallelRegion(torch.autograd.Function): """Gather the input from the model parallel region and concatenate.""" def forward(ctx, input_): return _gather(input_) def backward(ctx, grad_output): return _split(grad_output) def gather_from_model_parallel_region(input_): return _GatherFromModelParallelRegion.apply(input_)
null
10,467
import re from .helper import compression_preparation, fix_compression, recursive_getattr, is_module_compressible from .config import get_compression_config from ..runtime.config_utils import dict_raise_error_on_duplicate_keys from .constants import * import os import json def check_deepspeed_config(config): if isinstance(config, dict): return config elif os.path.exists(config): return json.load(open(config, "r"), object_pairs_hook=dict_raise_error_on_duplicate_keys) else: raise ValueError( f"Expected a string path to an existing deepspeed config, or a dictionary. Received: {config}" ) def get_compress_methods(model, compress_methods, mpu=None): # extract the compression module for each method in compress_methods layer_added_compress_methods = [] for method, method_content in compress_methods.items(): if LAYER_REDUCTION in method: continue # for loop different methods, i.e., weight quantization, activation quantization etc exist_module_name = set() shared_parameters = method_content[ SHARED_PARAMETERS] # get all the shared parameters for group_name, method_parameters in method_content[DIFFERENT_GROUPS].items(): # for loop different groups, i.e., weight quantization group 1, weight quantization group 2 etc module_name_list = [] related_module_name_list = [] if method_parameters[DIFFERENT_GROUPS_RELATED_MODULE_SCOPE]: # this is used for head/row/channel pruning, if users provide the related module scope, we can shrink the layer dim for them # otherwise we just mask those as zeros for key_word, related_key_words in zip(method_parameters[DIFFERENT_GROUPS_MODULE_SCOPE], method_parameters[DIFFERENT_GROUPS_RELATED_MODULE_SCOPE]): module_name, exist_module_name = get_module_name(group_name, model, key_word, exist_module_name, mpu=mpu) module_name_list.append(module_name) tmp_related_module_name_list = [] for rkw in related_key_words: # related key word can be a list, for instance the QKV for O matrix in Attention module_name, _ = get_module_name(group_name, model, rkw, set(), mpu=mpu) tmp_related_module_name_list.append(module_name) related_module_name_list.append(tmp_related_module_name_list) else: for key_word in method_parameters[DIFFERENT_GROUPS_MODULE_SCOPE]: module_name, exist_module_name = get_module_name(group_name, model, key_word, exist_module_name, mpu=mpu) module_name_list.append(module_name) if module_name_list: # combine shared parameters with each group combined_method_parameters = { **(method_parameters.copy().pop(DIFFERENT_GROUPS_PARAMETERS)), **shared_parameters } compression_item = [ module_name_list, related_module_name_list, { method: combined_method_parameters } ] layer_added_compress_methods.append(compression_item) return layer_added_compress_methods def student_initialization(student_model, teacher_model, deepspeed_config): ''' Given a student model and a teacher model, select the Args: student_model (`torch.nn.Module`) The model we will update weight teacher_model (`torch.nn.Module`) The model guide the student to learn deepspeed_config (`DeepSpeedConfig`) The path of ds_config ''' config = get_compression_config(check_deepspeed_config(deepspeed_config)) compress_methods = config[LAYER_REDUCTION] module_name_prefix = compress_methods[MODULE_NAME_PREFIX] teacher_layer = compress_methods[TEACHER_LAYER] student_layer = [i for i in range(len(teacher_layer))] other_module_name = compress_methods[OTHER_MODULE_NAME] ''' name_prefix (`str`) The prefix name before the layer #. 
Example 1: bert.encoder.layer, for BERT_base model's prefix name Example 2: transformer.h, for GPT-2 hugging face prefix name teacher_layer (`list of intergers`) The layer of teacher will be used for student's reinitializedion Example 1: [1,3,5,7,9], means we want to matches the 2nd/4th/6th/8th/10th layer of teacher to the first 5 layers of student student_layer (`list` or None) The layer of student need to be re-intiialized Example 1: None, means we want to reinitialize all the layers Example 1: [0,1,2,3,4], means we want to reinitialize the first 5 layers other_module_name (`list of string`) The modules will be used for student's reinitializedion Example 1: ['bert.pooler', 'bert.embeddings', 'classifier'], means we want to apply the weight in teacher's embedding/pooler/classier module to the student Example 2: ['transformer.w', 'transformer.ln_f', 'lm_head'], means we want to apply the weight in teacher's embeddingn layers module to the student Note that teacher_layer should matches student layer ''' assert len(student_layer) == len(teacher_layer) for s_name, t_name in zip(student_layer, teacher_layer): s_module = recursive_getattr(student_model, module_name_prefix + '.' + str(s_name)) t_module = recursive_getattr(teacher_model, module_name_prefix + '.' + str(t_name)) for s_param, t_param in zip(s_module.parameters(), t_module.parameters()): s_param.data.copy_(t_param.data) for name in other_module_name: s_module = recursive_getattr(student_model, name) t_module = recursive_getattr(teacher_model, name) print(name) for s_param, t_param in zip(s_module.parameters(), t_module.parameters()): s_param.data.copy_(t_param.data) def compression_preparation(model, compression_techinique_list, mpu): """ Prepare the compression techniques of a model. Args: model (`torch.nn.Module`) The model to prepare the compression techniques of. compression_techinique_list (`list`) The list of compression techniques to prepare the model to. list[] """ # Here we first replace all module with our linear wrapper for module_name, module in model.named_modules(): if is_module_compressible(module, mpu): module_replacement(model, module_name, mpu=mpu) for module_name_lists, _, compression_technique in compression_techinique_list: for mnl in module_name_lists: for module_name in mnl: module_replacement(model, module_name, compression_technique) return model def get_compression_config(param_dict): # output = {} if COMPRESSION_TRAINING not in param_dict.keys(): param_dict[COMPRESSION_TRAINING] = {} sub_param_dict = param_dict[COMPRESSION_TRAINING] output[WEIGHT_QUANTIZATION] = get_weight_quantization(sub_param_dict) output[ACTIVATION_QUANTIZATION] = get_activation_quantization(sub_param_dict) output[SPARSE_PRUNING] = get_sparse_pruning(sub_param_dict) output[ROW_PRUNING] = get_row_pruning(sub_param_dict) output[HEAD_PRUNING] = get_head_pruning(sub_param_dict) output[CHANNEL_PRUNING] = get_channel_pruning(sub_param_dict) output[LAYER_REDUCTION] = get_layer_reduction(sub_param_dict) return output LAYER_REDUCTION = "layer_reduction" LAYER_REDUCTION_ENABLED = "enabled" The provided code snippet includes necessary dependencies for implementing the `init_compression` function. Write a Python function `def init_compression(model, deepspeed_config, teacher_model=None, mpu=None)` to solve the following problem: Compress a model: replace linear/conv2d layer with deepspeed compression-aware modules Args: model (`torch.nn.Module`) The model to compress. 
deepspeed_config (`DeepSpeedConfig`) The path of ds_config mpu The mpu module for Row/Column parallelism Here is the function: def init_compression(model, deepspeed_config, teacher_model=None, mpu=None): """ Compress a model: replace linear/conv2d layer with deepspeed compression-aware modules Args: model (`torch.nn.Module`) The model to compress. deepspeed_config (`DeepSpeedConfig`) The path of ds_config mpu The mpu module for Row/Column parallelism """ compress_methods = get_compression_config(check_deepspeed_config(deepspeed_config)) if hasattr(model, 'module'): c_model = model.module else: c_model = model # For layer reduction if compress_methods[LAYER_REDUCTION][LAYER_REDUCTION_ENABLED]: assert teacher_model is not None, "Teacher model is required for layer reduction" student_initialization(c_model, teacher_model, deepspeed_config) layer_added_compress_methods = get_compress_methods(c_model, compress_methods, mpu=mpu) compression_preparation(c_model, layer_added_compress_methods, mpu) return model
Compress a model: replace linear/conv2d layer with deepspeed compression-aware modules Args: model (`torch.nn.Module`) The model to compress. deepspeed_config (`DeepSpeedConfig`) The path of ds_config mpu The mpu module for Row/Column parallelism
10,468
import re from .helper import compression_preparation, fix_compression, recursive_getattr, is_module_compressible from .config import get_compression_config from ..runtime.config_utils import dict_raise_error_on_duplicate_keys from .constants import * import os import json def check_deepspeed_config(config): if isinstance(config, dict): return config elif os.path.exists(config): return json.load(open(config, "r"), object_pairs_hook=dict_raise_error_on_duplicate_keys) else: raise ValueError( f"Expected a string path to an existing deepspeed config, or a dictionary. Received: {config}" ) def get_compress_methods(model, compress_methods, mpu=None): # extract the compression module for each method in compress_methods layer_added_compress_methods = [] for method, method_content in compress_methods.items(): if LAYER_REDUCTION in method: continue # for loop different methods, i.e., weight quantization, activation quantization etc exist_module_name = set() shared_parameters = method_content[ SHARED_PARAMETERS] # get all the shared parameters for group_name, method_parameters in method_content[DIFFERENT_GROUPS].items(): # for loop different groups, i.e., weight quantization group 1, weight quantization group 2 etc module_name_list = [] related_module_name_list = [] if method_parameters[DIFFERENT_GROUPS_RELATED_MODULE_SCOPE]: # this is used for head/row/channel pruning, if users provide the related module scope, we can shrink the layer dim for them # otherwise we just mask those as zeros for key_word, related_key_words in zip(method_parameters[DIFFERENT_GROUPS_MODULE_SCOPE], method_parameters[DIFFERENT_GROUPS_RELATED_MODULE_SCOPE]): module_name, exist_module_name = get_module_name(group_name, model, key_word, exist_module_name, mpu=mpu) module_name_list.append(module_name) tmp_related_module_name_list = [] for rkw in related_key_words: # related key word can be a list, for instance the QKV for O matrix in Attention module_name, _ = get_module_name(group_name, model, rkw, set(), mpu=mpu) tmp_related_module_name_list.append(module_name) related_module_name_list.append(tmp_related_module_name_list) else: for key_word in method_parameters[DIFFERENT_GROUPS_MODULE_SCOPE]: module_name, exist_module_name = get_module_name(group_name, model, key_word, exist_module_name, mpu=mpu) module_name_list.append(module_name) if module_name_list: # combine shared parameters with each group combined_method_parameters = { **(method_parameters.copy().pop(DIFFERENT_GROUPS_PARAMETERS)), **shared_parameters } compression_item = [ module_name_list, related_module_name_list, { method: combined_method_parameters } ] layer_added_compress_methods.append(compression_item) return layer_added_compress_methods def fix_compression(model, module_name, compression_technique, mask=None, dim_reduction=False): """ Fix the compression technique of a module. Args: model (`torch.nn.Module`) The model to fix the compression technique of. module_name (`str`) The name of the module to fix the compression technique of. compression_technique (`str`) The compression technique to fix the module to. 
""" # Here we can make things much simpler by just replacing the module module = recursive_getattr(model, module_name) for k, v in compression_technique.items(): if k == WEIGHT_QUANTIZATION and v[WEIGHT_QUANTIZE_IN_FORWARD_ENABLED] and v[ WEIGHT_QUANTIZE_ENABLED]: return module.fix_weight_quantization() elif k == SPARSE_PRUNING and v[SPARSE_PRUNING_ENABLED]: return module.fix_sparse_pruning_helper() elif k == ROW_PRUNING and (v[ROW_PRUNING_ENABLED] or mask is not None): return module.fix_row_col_pruning_helper(mask, dim_reduction=dim_reduction) elif k == HEAD_PRUNING and (v[HEAD_PRUNING_ENABLED] or mask is not None): return module.fix_head_pruning_helper(mask, v[HEAD_PRUNING_NUM_HEADS], dim_reduction=dim_reduction) elif k == CHANNEL_PRUNING and (v[CHANNEL_PRUNING_ENABLED] or mask is not None): return module.fix_channel_pruning_helper(mask, dim_reduction=dim_reduction) def get_compression_config(param_dict): # output = {} if COMPRESSION_TRAINING not in param_dict.keys(): param_dict[COMPRESSION_TRAINING] = {} sub_param_dict = param_dict[COMPRESSION_TRAINING] output[WEIGHT_QUANTIZATION] = get_weight_quantization(sub_param_dict) output[ACTIVATION_QUANTIZATION] = get_activation_quantization(sub_param_dict) output[SPARSE_PRUNING] = get_sparse_pruning(sub_param_dict) output[ROW_PRUNING] = get_row_pruning(sub_param_dict) output[HEAD_PRUNING] = get_head_pruning(sub_param_dict) output[CHANNEL_PRUNING] = get_channel_pruning(sub_param_dict) output[LAYER_REDUCTION] = get_layer_reduction(sub_param_dict) return output WEIGHT_QUANTIZATION = "weight_quantization" ACTIVATION_QUANTIZATION = "activation_quantization" SPARSE_PRUNING = "sparse_pruning" ROW_PRUNING = "row_pruning" HEAD_PRUNING = "head_pruning" CHANNEL_PRUNING = "channel_pruning" The provided code snippet includes necessary dependencies for implementing the `redundancy_clean` function. Write a Python function `def redundancy_clean(model, deepspeed_config, mpu=None)` to solve the following problem: Remove the redundancy of a model Args: model (`torch.nn.Module`) The model to compress. deepspeed_config (`DeepSpeedConfig`) The path of ds_config mpu The mpu module for Row/Column parallelism Here is the function: def redundancy_clean(model, deepspeed_config, mpu=None): """ Remove the redundancy of a model Args: model (`torch.nn.Module`) The model to compress. 
deepspeed_config (`DeepSpeedConfig`) The path of ds_config mpu The mpu module for Row/Column parallelism """ compress_methods = get_compression_config(check_deepspeed_config(deepspeed_config)) if hasattr(model, 'module'): c_model = model.module else: c_model = model layer_added_compress_methods_tmp = get_compress_methods(c_model, compress_methods, mpu=mpu) # sort methods order_list = [ WEIGHT_QUANTIZATION, SPARSE_PRUNING, ROW_PRUNING, HEAD_PRUNING, CHANNEL_PRUNING, ACTIVATION_QUANTIZATION ] layer_added_compress_methods = sorted( layer_added_compress_methods_tmp, key=lambda x: order_list.index(list(x[2].keys())[0])) for module_name_lists, related_module_name_lists, compression_technique in layer_added_compress_methods: stored_mask = [] need_mask = True if related_module_name_lists else False for i, mnl in enumerate(module_name_lists): for module_name in mnl: mask = fix_compression(c_model, module_name, compression_technique, dim_reduction=need_mask) if need_mask: stored_mask.append(mask) if need_mask: for rmnl in related_module_name_lists[i]: for j, module_name in enumerate(rmnl): mask = fix_compression(c_model, module_name, compression_technique, mask=stored_mask[j], dim_reduction=True) return model
Remove the redundancy of a model Args: model (`torch.nn.Module`) The model to compress. deepspeed_config (`DeepSpeedConfig`) The path of ds_config mpu The mpu module for Row/Column parallelism
10,469
import torch from .basic_layer import Embedding_Compress, LinearLayer_Compress, Conv2dLayer_Compress, BNLayer_Compress, ColumnParallelLinear_Compress, RowParallelLinear_Compress from .constants import * def recursive_getattr(model, module_name): """ Recursively get the attribute of a module. Args: model (`torch.nn.Module`) The model to get the attribute from. module_name (`str`) The name of the module to get the attribute from. """ split_list = module_name.split('.') output = model for name in split_list: output = getattr(output, name) return output def recursive_setattr(model, module_name, module): """ Recursively set the attribute of a module. Args: model (`torch.nn.Module`) The model to set the attribute in. module_name (`str`) The name of the module to set the attribute in. module (`torch.nn.Module`) The module to set the attribute to. """ split_list = module_name.split('.') output = model for name in split_list[:-1]: output = getattr(output, name) output.__setattr__(split_list[-1], module) The provided code snippet includes necessary dependencies for implementing the `convert_conv1d_to_linear` function. Write a Python function `def convert_conv1d_to_linear(model, convert_type)` to solve the following problem: This is a help function to convert conv1d to linear (e.g., convert GPT2 from HF) Here is the function: def convert_conv1d_to_linear(model, convert_type): ''' This is a help function to convert conv1d to linear (e.g., convert GPT2 from HF) ''' if hasattr(model, 'module'): c_model = model.module else: c_model = model for name, module in c_model.named_modules(): if isinstance(module, convert_type): old_module = recursive_getattr(c_model, name) new_module = torch.nn.Linear( old_module.weight.data.size(0), old_module.weight.data.size(1), bias=True if old_module.bias is not None else False) new_module.weight.data = old_module.weight.data.t() if new_module.bias is not None: new_module.bias.data = old_module.bias.data.view(-1) recursive_setattr(c_model, name, new_module) return model
This is a helper function to convert Conv1D layers to Linear (e.g., to convert GPT-2 from Hugging Face).
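A hedged usage sketch for convert_conv1d_to_linear above; the import path for Hugging Face's Conv1D class is an assumption (it has moved between transformers versions), and GPT2Model is used only because GPT-2 relies on Conv1D projections.

from transformers import GPT2Model
from transformers.pytorch_utils import Conv1D  # assumed import path

model = GPT2Model.from_pretrained("gpt2")
model = convert_conv1d_to_linear(model, Conv1D)
# Every Conv1D module is now an nn.Linear carrying the transposed Conv1D weight.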
10,470
from .abstract_accelerator import DeepSpeedAccelerator ds_accelerator = None def _validate_accelerator(accel_obj): assert isinstance(accel_obj, DeepSpeedAccelerator), \ f'{accel_obj.__class__.__name__} accelerator is not subclass of DeepSpeedAccelerator' # TODO: turn off is_available test since this breaks tests #assert accel_obj.is_available(), \ # f'{accel_obj.__class__.__name__} accelerator fails is_available() test' class CUDA_Accelerator(DeepSpeedAccelerator): def __init__(self): self._name = 'cuda' self._communication_backend_name = 'nccl' # Device APIs def device_name(self, device_index=None): if device_index == None: return 'cuda' return 'cuda:{}'.format(device_index) def device(self, device_index=None): return torch.cuda.device(device_index) def set_device(self, device_index): torch.cuda.set_device(device_index) def current_device(self): return torch.cuda.current_device() def current_device_name(self): return 'cuda:{}'.format(torch.cuda.current_device()) def device_count(self): return torch.cuda.device_count() def synchronize(self, device_index=None): return torch.cuda.synchronize(device_index) # RNG APIs def random(self): return torch.random def set_rng_state(self, new_state, device_index=None): if device_index is None: return torch.cuda.set_rng_state(new_state) return torch.cuda.set_rng_state(new_state, device_index) def get_rng_state(self, device_index=None): if device_index is None: return torch.cuda.get_rng_state() return torch.cuda.get_rng_state(device_index) def manual_seed(self, seed): return torch.cuda.manual_seed(seed) def manual_seed_all(self, seed): return torch.cuda.manual_seed_all(seed) def initial_seed(self, seed): return torch.cuda.initial_seed(seed) def default_generator(self, device_index): return torch.cuda.default_generators[device_index] # Streams/Events def Stream(self, device=None, priority=0, **kwargs): return torch.cuda.Stream(device, priority, **kwargs) def StreamContext(self, stream): return torch.cuda.StreamContext(stream) def stream(self, stream): return torch.cuda.stream(stream) def current_stream(self, device_index=None): return torch.cuda.current_stream(device_index) def default_stream(self, device_index=None): return torch.cuda.default_stream(device_index) def Event(self, **kwargs): return torch.cuda.Event(**kwargs) # Memory management def empty_cache(self): return torch.cuda.empty_cache() def memory_allocated(self, device_index=None): return torch.cuda.memory_allocated(device_index) def max_memory_allocated(self, device_index=None): return torch.cuda.max_memory_allocated(device_index) def reset_max_memory_allocated(self, device_index=None): return torch.cuda.reset_max_memory_allocated(device_index) def memory_cached(self, device_index=None): return torch.cuda.memory_cached(device_index) def max_memory_cached(self, device_index=None): return torch.cuda.max_memory_cached(device_index) def reset_max_memory_cached(self, device_index=None): return torch.cuda.reset_max_memory_cached(device_index) def memory_stats(self, device_index=None): if hasattr(torch.cuda, 'memory_stats'): return torch.cuda.memory_stats(device_index) def reset_peak_memory_stats(self, device_index=None): if hasattr(torch.cuda, 'reset_peak_memory_stats'): return torch.cuda.reset_peak_memory_stats(device_index) def memory_reserved(self, device_index=None): if hasattr(torch.cuda, 'memory_reserved'): return torch.cuda.memory_reserved(device_index) def max_memory_reserved(self, device_index=None): if hasattr(torch.cuda, 'max_memory_reserved'): return 
torch.cuda.max_memory_reserved(device_index) def total_memory(self, device_index=None): return torch.cuda.get_device_properties(device_index).total_memory # Data types def is_bf16_supported(self): return torch.cuda.is_bf16_supported() def is_fp16_supported(self): major, _ = torch.cuda.get_device_capability() if major >= 7: return True else: return False # Misc def amp(self): if hasattr(torch.cuda, 'amp'): return torch.cuda.amp return None def is_available(self): return torch.cuda.is_available() def range_push(self, msg): if hasattr(torch.cuda.nvtx, 'range_push'): return torch.cuda.nvtx.range_push(msg) def range_pop(self): if hasattr(torch.cuda.nvtx, 'range_pop'): return torch.cuda.nvtx.range_pop() def lazy_call(self, callback): return torch.cuda._lazy_call(callback) def communication_backend_name(self): return self._communication_backend_name # Tensor operations def BFloat16Tensor(self): return torch.cuda.BFloat16Tensor def ByteTensor(self): return torch.cuda.ByteTensor def DoubleTensor(self): return torch.cuda.DoubleTensor def FloatTensor(self): return torch.cuda.FloatTensor def HalfTensor(self): return torch.cuda.HalfTensor def IntTensor(self): return torch.cuda.IntTensor def LongTensor(self): return torch.cuda.LongTensor def pin_memory(self, tensor): return tensor.pin_memory() def on_accelerator(self, tensor): device_str = str(tensor.device) if device_str.startswith('cuda:'): return True else: return False def op_builder_dir(self): return "deepspeed.ops.op_builder" def create_op_builder(self, class_name): from deepspeed.ops.op_builder import AsyncIOBuilder, CPUAdagradBuilder, CPUAdamBuilder, FusedAdamBuilder, FusedLambBuilder, QuantizerBuilder, SparseAttnBuilder, StochasticTransformerBuilder, TransformerBuilder, InferenceBuilder, UtilsBuilder from deepspeed.ops.op_builder.builder_names import AsyncIOBuilder as AsyncIOBuilderName from deepspeed.ops.op_builder.builder_names import CPUAdagradBuilder as CPUAdagradBuilderName from deepspeed.ops.op_builder.builder_names import CPUAdamBuilder as CPUAdamBuilderName from deepspeed.ops.op_builder.builder_names import FusedAdamBuilder as FusedAdamBuilderName from deepspeed.ops.op_builder.builder_names import FusedLambBuilder as FusedLambBuilderName from deepspeed.ops.op_builder.builder_names import QuantizerBuilder as QuantizerBuilderName from deepspeed.ops.op_builder.builder_names import SparseAttnBuilder as SparseAttnBuilderName from deepspeed.ops.op_builder.builder_names import StochasticTransformerBuilder as StochasticTransformerBuilderName from deepspeed.ops.op_builder.builder_names import TransformerBuilder as TransformerBuilderName from deepspeed.ops.op_builder.builder_names import InferenceBuilder as InferenceBuilderName from deepspeed.ops.op_builder.builder_names import UtilsBuilder as UtilsBuilderName if class_name == AsyncIOBuilderName: return AsyncIOBuilder() elif class_name == CPUAdagradBuilderName: return CPUAdagradBuilder() elif class_name == CPUAdamBuilderName: return CPUAdamBuilder() elif class_name == FusedAdamBuilderName: return FusedAdamBuilder() elif class_name == FusedLambBuilderName: return FusedLambBuilder() elif class_name == QuantizerBuilderName: return QuantizerBuilder() elif class_name == SparseAttnBuilderName: return SparseAttnBuilder() elif class_name == StochasticTransformerBuilderName: return StochasticTransformerBuilder() elif class_name == TransformerBuilderName: return TransformerBuilder() elif class_name == InferenceBuilderName: return InferenceBuilder() elif class_name == UtilsBuilderName: return UtilsBuilder() 
else: return None def build_extension(self): from torch.utils.cpp_extension import BuildExtension return BuildExtension def get_accelerator(): global ds_accelerator if ds_accelerator is None: try: from intel_extension_for_deepspeed import XPU_Accelerator except ImportError as e: pass else: ds_accelerator = XPU_Accelerator() _validate_accelerator(ds_accelerator) return ds_accelerator from deepspeed.accelerator.cuda_accelerator import CUDA_Accelerator ds_accelerator = CUDA_Accelerator() _validate_accelerator(ds_accelerator) return ds_accelerator
null
10,471
from .abstract_accelerator import DeepSpeedAccelerator ds_accelerator = None def _validate_accelerator(accel_obj): assert isinstance(accel_obj, DeepSpeedAccelerator), \ f'{accel_obj.__class__.__name__} accelerator is not subclass of DeepSpeedAccelerator' # TODO: turn off is_available test since this breaks tests #assert accel_obj.is_available(), \ # f'{accel_obj.__class__.__name__} accelerator fails is_available() test' def set_accelerator(accel_obj): global ds_accelerator _validate_accelerator(accel_obj) ds_accelerator = accel_obj
null
10,472
import logging import sys import os logger = LoggerFactory.create_logger(name="DeepSpeed", level=logging.INFO) import logging The provided code snippet includes necessary dependencies for implementing the `log_dist` function. Write a Python function `def log_dist(message, ranks=None, level=logging.INFO)` to solve the following problem: Log message when one of following condition meets + not dist.is_initialized() + dist.get_rank() in ranks if ranks is not None or ranks = [-1] Args: message (str) ranks (list) level (int) Here is the function: def log_dist(message, ranks=None, level=logging.INFO): from deepspeed import comm as dist """Log message when one of following condition meets + not dist.is_initialized() + dist.get_rank() in ranks if ranks is not None or ranks = [-1] Args: message (str) ranks (list) level (int) """ should_log = not dist.is_initialized() ranks = ranks or [] my_rank = dist.get_rank() if dist.is_initialized() else -1 if ranks and not should_log: should_log = ranks[0] == -1 should_log = should_log or (my_rank in set(ranks)) if should_log: final_message = "[Rank {}] {}".format(my_rank, message) logger.log(level, final_message)
Log message when one of following condition meets + not dist.is_initialized() + dist.get_rank() in ranks if ranks is not None or ranks = [-1] Args: message (str) ranks (list) level (int)
10,473
import logging import sys import os The provided code snippet includes necessary dependencies for implementing the `print_json_dist` function. Write a Python function `def print_json_dist(message, ranks=None, path=None)` to solve the following problem: Print message when one of following condition meets + not dist.is_initialized() + dist.get_rank() in ranks if ranks is not None or ranks = [-1] Args: message (str) ranks (list) path (str) Here is the function: def print_json_dist(message, ranks=None, path=None): from deepspeed import comm as dist """Print message when one of following condition meets + not dist.is_initialized() + dist.get_rank() in ranks if ranks is not None or ranks = [-1] Args: message (str) ranks (list) path (str) """ should_log = not dist.is_initialized() ranks = ranks or [] my_rank = dist.get_rank() if dist.is_initialized() else -1 if ranks and not should_log: should_log = ranks[0] == -1 should_log = should_log or (my_rank in set(ranks)) if should_log: message['rank'] = my_rank import json with open(path, 'w') as outfile: json.dump(message, outfile) os.fsync(outfile)
Print message when one of following condition meets + not dist.is_initialized() + dist.get_rank() in ranks if ranks is not None or ranks = [-1] Args: message (str) ranks (list) path (str)
10,474
import logging import sys import os log_levels = { "debug": logging.DEBUG, "info": logging.INFO, "warning": logging.WARNING, "error": logging.ERROR, "critical": logging.CRITICAL, } def get_current_level(): """ Return logger's current log level """ return logger.getEffectiveLevel() The provided code snippet includes necessary dependencies for implementing the `should_log_le` function. Write a Python function `def should_log_le(max_log_level_str)` to solve the following problem: Args: max_log_level_str: maximum log level as a string Returns ``True`` if the current log_level is less or equal to the specified log level. Otherwise ``False``. Example: ``should_log_le("info")`` will return ``True`` if the current log level is either ``logging.INFO`` or ``logging.DEBUG`` Here is the function: def should_log_le(max_log_level_str): """ Args: max_log_level_str: maximum log level as a string Returns ``True`` if the current log_level is less or equal to the specified log level. Otherwise ``False``. Example: ``should_log_le("info")`` will return ``True`` if the current log level is either ``logging.INFO`` or ``logging.DEBUG`` """ if not isinstance(max_log_level_str, str): raise ValueError(f"{max_log_level_str} is not a string") max_log_level_str = max_log_level_str.lower() if max_log_level_str not in log_levels: raise ValueError(f"{max_log_level_str} is not one of the `logging` levels") return get_current_level() <= log_levels[max_log_level_str]
Args: max_log_level_str: maximum log level as a string Returns ``True`` if the current log_level is less or equal to the specified log level. Otherwise ``False``. Example: ``should_log_le("info")`` will return ``True`` if the current log level is either ``logging.INFO`` or ``logging.DEBUG``
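A short usage sketch for should_log_le above, assuming the DeepSpeed logger sits at its default INFO level.

import logging

logger.setLevel(logging.INFO)

assert should_log_le("info") is True    # INFO <= INFO
assert should_log_le("debug") is False  # INFO is above DEBUG
# should_log_le("verbose") raises ValueError: not one of the `logging` levels.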
10,475
import argparse import torch import glob import math import os import re from collections import OrderedDict from deepspeed.utils import logger from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS, FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES) def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None): """ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example via a model hub. Args: - ``checkpoint_dir``: path to the desired checkpoint folder - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14`` Returns: - pytorch ``state_dict`` Note: this approach may not work if your application doesn't have sufficient free CPU memory and you may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with the checkpoint. A typical usage might be :: from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint # do the training and checkpoint saving state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu model = model.cpu() # move to cpu model.load_state_dict(state_dict) # submit to model hub or save the model to share with others In this example the ``model`` will no longer be usable in the deepspeed context of the same application. i.e. you will need to re-initialize the deepspeed engine, since ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it. If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead. """ if tag is None: latest_path = os.path.join(checkpoint_dir, 'latest') if os.path.isfile(latest_path): with open(latest_path, 'r') as fd: tag = fd.read().strip() else: raise ValueError(f"Unable to find 'latest' file at {latest_path}") ds_checkpoint_dir = os.path.join(checkpoint_dir, tag) if not os.path.isdir(ds_checkpoint_dir): raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist") return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir) try: import torch _torch_available = True except ImportError: _torch_available = False try: from torch.hub import _get_torch_home torch_cache_home = _get_torch_home() except ImportError: torch_cache_home = os.path.expanduser( os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch")) ) The provided code snippet includes necessary dependencies for implementing the `convert_zero_checkpoint_to_fp32_state_dict` function. Write a Python function `def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, output_file, tag=None)` to solve the following problem: Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed. Args: - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``) - ``output_file``: path to the pytorch fp32 state_dict output file (e.g. path/pytorch_model.bin) - ``tag``: checkpoint tag used as a unique identifier for checkpoint. 
If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14`` Here is the function: def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, output_file, tag=None): """ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed. Args: - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``) - ``output_file``: path to the pytorch fp32 state_dict output file (e.g. path/pytorch_model.bin) - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14`` """ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag) print(f"Saving fp32 state dict to {output_file}") torch.save(state_dict, output_file)
Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed. Args: - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``) - ``output_file``: path to the pytorch fp32 state_dict output file (e.g. path/pytorch_model.bin) - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
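A one-call usage sketch for the converter above; the checkpoint directory, output file, and tag are hypothetical.

# Consolidate a sharded ZeRO-2/3 checkpoint into a single fp32 state_dict file.
convert_zero_checkpoint_to_fp32_state_dict("checkpoints/my_run",
                                            "pytorch_model_fp32.bin",
                                            tag="global_step14")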
10,476
import argparse import torch import glob import math import os import re from collections import OrderedDict from deepspeed.utils import logger from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS, FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES) def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None): """ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example via a model hub. Args: - ``checkpoint_dir``: path to the desired checkpoint folder - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14`` Returns: - pytorch ``state_dict`` Note: this approach may not work if your application doesn't have sufficient free CPU memory and you may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with the checkpoint. A typical usage might be :: from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint # do the training and checkpoint saving state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu model = model.cpu() # move to cpu model.load_state_dict(state_dict) # submit to model hub or save the model to share with others In this example the ``model`` will no longer be usable in the deepspeed context of the same application. i.e. you will need to re-initialize the deepspeed engine, since ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it. If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead. """ if tag is None: latest_path = os.path.join(checkpoint_dir, 'latest') if os.path.isfile(latest_path): with open(latest_path, 'r') as fd: tag = fd.read().strip() else: raise ValueError(f"Unable to find 'latest' file at {latest_path}") ds_checkpoint_dir = os.path.join(checkpoint_dir, tag) if not os.path.isdir(ds_checkpoint_dir): raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist") return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir) The provided code snippet includes necessary dependencies for implementing the `load_state_dict_from_zero_checkpoint` function. Write a Python function `def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None)` to solve the following problem: 1. Put the provided model to cpu 2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` 3. Load it into the provided model Args: - ``model``: the model object to update - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``) - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14`` Returns: - ``model`: modified model Make sure you have plenty of CPU memory available before you call this function. If you don't have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it conveniently placed for you in the checkpoint folder. 
A typical usage might be :: from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir) # submit to model hub or save the model to share with others Note, that once this was run, the ``model`` will no longer be usable in the deepspeed context of the same application. i.e. you will need to re-initialize the deepspeed engine, since ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it. Here is the function: def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None): """ 1. Put the provided model to cpu 2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` 3. Load it into the provided model Args: - ``model``: the model object to update - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``) - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14`` Returns: - ``model`: modified model Make sure you have plenty of CPU memory available before you call this function. If you don't have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it conveniently placed for you in the checkpoint folder. A typical usage might be :: from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir) # submit to model hub or save the model to share with others Note, that once this was run, the ``model`` will no longer be usable in the deepspeed context of the same application. i.e. you will need to re-initialize the deepspeed engine, since ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it. """ logger.info(f"Extracting fp32 weights") state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag) logger.info(f"Overwriting model with fp32 weights") model = model.cpu() model.load_state_dict(state_dict, strict=False) return model
1. Put the provided model to cpu 2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` 3. Load it into the provided model Args: - ``model``: the model object to update - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``) - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14`` Returns: - ``model`: modified model Make sure you have plenty of CPU memory available before you call this function. If you don't have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it conveniently placed for you in the checkpoint folder. A typical usage might be :: from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir) # submit to model hub or save the model to share with others Note, that once this was run, the ``model`` will no longer be usable in the deepspeed context of the same application. i.e. you will need to re-initialize the deepspeed engine, since ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
10,477
module_names = {} param_names = {} def debug_extract_module_and_param_names(model): # extract the fully qualified names as soon as the model is acquired global module_names global param_names # XXX: can probably make a map of param2module and vice-versa module_names = {module: name for name, module in model.named_modules()} param_names = {param: name for name, param in model.named_parameters()}
null
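A tiny sketch of how the two name maps above get populated; the toy model is hypothetical.

import torch.nn as nn

model = nn.Sequential(nn.Linear(4, 4), nn.ReLU())
debug_extract_module_and_param_names(model)

# module_names maps module objects back to their qualified names ('', '0', '1'),
# and param_names maps parameters to names such as '0.weight' and '0.bias'.
print(param_names[model[0].weight])  # -> '0.weight'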
10,478
def debug_module2name(module): if module in module_names: return module_names[module] else: return "unknown" def debug_module2name_id(module): return f"name={debug_module2name(module)} id={module.id}"
null
10,479
def debug_module2name(module): if module in module_names: return module_names[module] else: return "unknown" def debug_module2name_class(module): return f"name={debug_module2name(module)} {module.__class__.__name__}"
null
10,480
def debug_param2name(param): if param in param_names: return param_names[param] else: return "unknown" def debug_param2name_id(param): return f"name={debug_param2name(param)} id={param.ds_id}"
null
10,481
def debug_param2name(param): if param in param_names: return param_names[param] else: return "unknown" def debug_param2name_id_shape(param): return f"name={debug_param2name(param)} id={param.ds_id} shape={param.data.shape}"
null
10,482
def debug_param2name(param): if param in param_names: return param_names[param] else: return "unknown" def debug_param2name_id_shape_device(param): return f"name={debug_param2name(param)} id={param.ds_id} shape={param.data.shape} device={param.device}"
null
10,483
def debug_param2name(param): if param in param_names: return param_names[param] else: return "unknown" def debug_param2name_id_numel(param): return f"name={debug_param2name(param)} id={param.ds_id} numel={param.numel()}"
null
10,484
def debug_param2name(param): if param in param_names: return param_names[param] else: return "unknown" def debug_param2name_id_shape_status(param): return f"name={debug_param2name(param)} id={param.ds_id} shape={param.data.shape} status={param.ds_status}"
null
10,485
fcntl = None fh = None The provided code snippet includes necessary dependencies for implementing the `printflock` function. Write a Python function `def printflock(*msgs)` to solve the following problem: For printing messages for all concurrent gpus w/o getting interleaved text. This is useful when debugging issues where multi-gpus don't sync. 1. Enable the force debug in say partitioning and zero3 files 2. Override the usual versions with :: def print_rank_0(message, debug=False, force=False): rank = deepspeed.comm.get_rank() printflock(f"[{rank}] {message}") 3. run the program and you get both logs non-interleaved But this makes it very difficult to make sense of the output, so the ``log_rank_file`` helper function might be more useful, as it's easier to send each log stream into a separate file and then compare those. Here is the function: def printflock(*msgs): """ For printing messages for all concurrent gpus w/o getting interleaved text. This is useful when debugging issues where multi-gpus don't sync. 1. Enable the force debug in say partitioning and zero3 files 2. Override the usual versions with :: def print_rank_0(message, debug=False, force=False): rank = deepspeed.comm.get_rank() printflock(f"[{rank}] {message}") 3. run the program and you get both logs non-interleaved But this makes it very difficult to make sense of the output, so the ``log_rank_file`` helper function might be more useful, as it's easier to send each log stream into a separate file and then compare those. """ global fcntl if fcntl == None: import fcntl with open(__file__, "r") as fh: fcntl.flock(fh, fcntl.LOCK_EX) try: print(*msgs) finally: fcntl.flock(fh, fcntl.LOCK_UN)
For printing messages for all concurrent gpus w/o getting interleaved text. This is useful when debugging issues where multi-gpus don't sync. 1. Enable the force debug in say partitioning and zero3 files 2. Override the usual versions with :: def print_rank_0(message, debug=False, force=False): rank = deepspeed.comm.get_rank() printflock(f"[{rank}] {message}") 3. run the program and you get both logs non-interleaved But this makes it very difficult to make sense of the output, so the ``log_rank_file`` helper function might be more useful, as it's easier to send each log stream into a separate file and then compare those.
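A minimal sketch of the override described above (it assumes deepspeed.comm has already been initialized by the training script; printflock is the helper defined in this row):

import deepspeed

def print_rank_0(message, debug=False, force=False):
    # every rank prints, and the file lock keeps the lines from interleaving
    rank = deepspeed.comm.get_rank()
    printflock(f"[{rank}] {message}")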
10,486
fh = None The provided code snippet includes necessary dependencies for implementing the `log_rank_file` function. Write a Python function `def log_rank_file(rank, *msgs)` to solve the following problem: Print to a log file of the given rank This is useful for debugging hanging in sync processes. Here is a possible workflow: 1. Enable the force debug in say partitioning and zero3 files 2. Override the usual versions of print_rank_0 in those files with :: def print_rank_0(message, debug=False, force=False): rank = deepspeed.comm.get_rank() log_rank_file(rank, message) 3. run the program 4. fix up the expected differences, e.g. different cuda numbers :: perl -pi -e 's|cuda:1|cuda:0|' log_rank_* 5. now diff and see where names and ids diverge - you will find where the gpus don't do the same work (e.g. when some layers get conditionally skipped on one gpu but not all) diff -u log_rank_0.txt log_rank_1.txt | less Here is the function: def log_rank_file(rank, *msgs): """ Print to a log file of the given rank This is useful for debugging hanging in sync processes. Here is a possible workflow: 1. Enable the force debug in say partitioning and zero3 files 2. Override the usual versions of print_rank_0 in those files with :: def print_rank_0(message, debug=False, force=False): rank = deepspeed.comm.get_rank() log_rank_file(rank, message) 3. run the program 4. fix up the expected differences, e.g. different cuda numbers :: perl -pi -e 's|cuda:1|cuda:0|' log_rank_* 5. now diff and see where names and ids diverge - you will find where the gpus don't do the same work (e.g. when some layers get conditionally skipped on one gpu but not all) diff -u log_rank_0.txt log_rank_1.txt | less """ global fh if fh is None: fh = open(f"log_rank_{rank}.txt", "w") for m in msgs: fh.write(f"{m}\n") fh.flush()
Print to a log file of the given rank This is useful for debugging hanging in sync processes. Here is a possible workflow: 1. Enable the force debug in say partitioning and zero3 files 2. Override the usual versions of print_rank_0 in those files with :: def print_rank_0(message, debug=False, force=False): rank = deepspeed.comm.get_rank() log_rank_file(rank, message) 3. run the program 4. fix up the expected differences, e.g. different cuda numbers :: perl -pi -e 's|cuda:1|cuda:0|' log_rank_* 5. now diff and see where names and ids diverge - you will find where the gpus don't do the same work (e.g. when some layers get conditionally skipped on one gpu but not all) diff -u log_rank_0.txt log_rank_1.txt | less
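The same override, routed to per-rank files as the workflow above suggests (again assuming deepspeed.comm is already initialized); the shell commands afterwards are the ones from the docstring:

import deepspeed

def print_rank_0(message, debug=False, force=False):
    # each rank appends to its own log_rank_<rank>.txt
    rank = deepspeed.comm.get_rank()
    log_rank_file(rank, message)

# afterwards, normalize device names and diff the rank logs:
#   perl -pi -e 's|cuda:1|cuda:0|' log_rank_*
#   diff -u log_rank_0.txt log_rank_1.txt | less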
10,487
def print_backward_tensors(tensor): def _print_bwd_tensors(grad_fn): print(f"Backward tensors in {grad_fn}") for funcs in grad_fn.next_functions: if funcs[0]: try: tensor = getattr(funcs[0], 'variable') print(funcs[0]) print( f"Tensor - id: {id(tensor)}, shape: {tensor.shape}, data: {tensor}, grad: {tensor.grad}" ) except AttributeError as e: _print_bwd_tensors(funcs[0]) if hasattr(tensor, 'grad_fn'): _print_bwd_tensors(tensor.grad_fn)
null
10,488
import math
from deepspeed.utils import log_dist

def convert_size(size_bytes):
    if size_bytes == 0:
        return "0B"
    size_name = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
    i = int(math.floor(math.log(size_bytes, 1024)))
    p = math.pow(1024, i)
    s = round(size_bytes / p, 2)
    return "%s %s" % (s, size_name[i])
null
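A quick illustration of the helper above (plain Python, nothing assumed beyond the function itself):

print(convert_size(0))            # "0B"
print(convert_size(1536))         # "1.5 KB"
print(convert_size(3 * 1024**3))  # "3.0 GB"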
10,489
import math from deepspeed.utils import log_dist def calc_bw_log(comm_op, size, duration): import deepspeed.comm as dist n = dist.get_world_size() tput = 0 busbw = 0 if comm_op == "all_to_all_single": tput = (size / duration) busbw = (size / duration) * ((n - 1) / n) elif comm_op == "all_gather" or comm_op == "all_gather_base" or comm_op == "reduce_scatter" or comm_op == "reduce_scatter_base": size *= n tput = (size / duration) busbw = (size / duration) * ((n - 1) / n) elif comm_op == "all_reduce": tput = (size * 2 / duration) busbw = (size / duration) * (2 * (n - 1) / n) elif comm_op == "send" or comm_op == "recv" or comm_op == "isend" or comm_op == "irecv" or comm_op == "broadcast" or comm_op == "reduce" or comm_op == "gather" or comm_op == "scatter" or comm_op == "barrier": tput = (size / duration) busbw = tput else: print_rank_0("wrong comm_op specified") # noqa: F821 exit(0) # convert to Gbps tput *= 8 busbw *= 8 tput /= 1e6 busbw /= 1e6 return tput, busbw
null
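A worked example of the all_reduce branch, assuming size is in bytes and duration in milliseconds (which is what makes the final *8 / 1e6 scaling come out in Gbps): a 1 GB payload on 16 ranks finishing in 50 ms.

size, duration, n = 1e9, 50.0, 16
tput = (size * 2 / duration) * 8 / 1e6                   # algorithmic bandwidth: 320.0 Gbps
busbw = (size / duration) * (2 * (n - 1) / n) * 8 / 1e6  # bus bandwidth: 300.0 Gbps
print(tput, busbw)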
10,490
import time import torch from numpy import mean from deepspeed.utils.logging import log_dist from deepspeed import comm as dist The provided code snippet includes necessary dependencies for implementing the `trim_mean` function. Write a Python function `def trim_mean(data, trim_percent)` to solve the following problem: Compute the trimmed mean of a list of numbers. Args: data (list): List of numbers. trim_percent (float): Percentage of data to trim. Returns: float: Trimmed mean. Here is the function: def trim_mean(data, trim_percent): """Compute the trimmed mean of a list of numbers. Args: data (list): List of numbers. trim_percent (float): Percentage of data to trim. Returns: float: Trimmed mean. """ assert trim_percent >= 0.0 and trim_percent <= 1.0 n = len(data) # Account for edge case of empty list if len(data) == 0: return 0 data.sort() k = int(round(n * (trim_percent))) return mean(data[k:n - k])
Compute the trimmed mean of a list of numbers. Args: data (list): List of numbers. trim_percent (float): Percentage of data to trim. Returns: float: Trimmed mean.
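A tiny usage example (note that the function sorts, and therefore mutates, the list it is given):

data = [1.0, 2.0, 3.0, 4.0, 100.0]
# k = round(5 * 0.2) = 1, so the lowest and highest value are dropped
print(trim_mean(data, 0.2))  # mean of [2.0, 3.0, 4.0] == 3.0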
10,491
from deepspeed import comm as dist from deepspeed.utils import log_dist from deepspeed.utils.exceptions import DeprecatedException class DeprecatedException(Exception): pass The provided code snippet includes necessary dependencies for implementing the `initialize` function. Write a Python function `def initialize(ep_size=1, mpu=None)` to solve the following problem: Deprecated function. Retained to inform the users. Here is the function: def initialize(ep_size=1, mpu=None): """ Deprecated function. Retained to inform the users.""" raise DeprecatedException( "Please do not use the groups.initialize() API as it is deprecated. Instead, pass the desired ep_size to deepspeed.moe.layer.MoE(..,ep_size,..)" )
Deprecated function. Retained to inform the users.
10,492
from deepspeed import comm as dist from deepspeed.utils import log_dist from deepspeed.utils.exceptions import DeprecatedException def _ensure_divisibility(numerator, denominator): """Ensure that numerator is divisible by the denominator.""" assert numerator % denominator == 0, '{} is not divisible by {}'.format( numerator, denominator) The provided code snippet includes necessary dependencies for implementing the `_create_model_parallel` function. Write a Python function `def _create_model_parallel(model_parallel_size_)` to solve the following problem: Initialize model data parallel groups. Arguments: model_parallel_size: number of GPUs used to parallelize model. Returns: Tuple of data parallel group and model parallel group Let's say we have a total of 8 GPUs denoted by g0 ... g7 and we use 2 GPUs to parallelize the model. The present function will create 4 model parallel groups and 2 data parallel groups as: 4 model parallel groups: [g0, g1], [g2, g3], [g4, g5], [g6, g7] 2 data parallel groups: [g0, g2, g4, g6], [g1, g3, g5, g7] Note that for efficiency, the caller should make sure adjacent ranks are on the same DGX box. For example if we are using 2 DGX-1 boxes with a total of 16 GPUs, rank 0 to 7 belong to the first box and ranks 8 to 15 belong to the second box. Here is the function: def _create_model_parallel(model_parallel_size_): """ Initialize model data parallel groups. Arguments: model_parallel_size: number of GPUs used to parallelize model. Returns: Tuple of data parallel group and model parallel group Let's say we have a total of 8 GPUs denoted by g0 ... g7 and we use 2 GPUs to parallelize the model. The present function will create 4 model parallel groups and 2 data parallel groups as: 4 model parallel groups: [g0, g1], [g2, g3], [g4, g5], [g6, g7] 2 data parallel groups: [g0, g2, g4, g6], [g1, g3, g5, g7] Note that for efficiency, the caller should make sure adjacent ranks are on the same DGX box. For example if we are using 2 DGX-1 boxes with a total of 16 GPUs, rank 0 to 7 belong to the first box and ranks 8 to 15 belong to the second box. """ log_dist(f'Creating model parallel group with size {model_parallel_size_}', ranks=[0]) # Get world size and rank. Ensure some consistencies. assert dist.is_initialized() world_size = dist.get_world_size() model_parallel_size = min(model_parallel_size_, world_size) _ensure_divisibility(world_size, model_parallel_size) rank = dist.get_rank() _DATA_PARALLEL_GROUP = None _MODEL_PARALLEL_GROUP = None # Build the data parallel groups. for i in range(model_parallel_size): ranks = range(i, world_size, model_parallel_size) group = dist.new_group(ranks) if i == (rank % model_parallel_size): _DATA_PARALLEL_GROUP = group # Build the model parallel groups. for i in range(world_size // model_parallel_size): ranks = range(i * model_parallel_size, (i + 1) * model_parallel_size) group = dist.new_group(ranks) if i == (rank // model_parallel_size): _MODEL_PARALLEL_GROUP = group return _DATA_PARALLEL_GROUP, _MODEL_PARALLEL_GROUP
Initialize model data parallel groups. Arguments: model_parallel_size: number of GPUs used to parallelize model. Returns: Tuple of data parallel group and model parallel group Let's say we have a total of 8 GPUs denoted by g0 ... g7 and we use 2 GPUs to parallelize the model. The present function will create 4 model parallel groups and 2 data parallel groups as: 4 model parallel groups: [g0, g1], [g2, g3], [g4, g5], [g6, g7] 2 data parallel groups: [g0, g2, g4, g6], [g1, g3, g5, g7] Note that for efficiency, the caller should make sure adjacent ranks are on the same DGX box. For example if we are using 2 DGX-1 boxes with a total of 16 GPUs, rank 0 to 7 belong to the first box and ranks 8 to 15 belong to the second box.
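The grouping rule can be checked without torch.distributed; this reproduces the 8-GPU, model-parallel-size-2 layout from the docstring:

world_size, model_parallel_size = 8, 2
data_parallel_groups = [list(range(i, world_size, model_parallel_size))
                        for i in range(model_parallel_size)]
model_parallel_groups = [list(range(i * model_parallel_size, (i + 1) * model_parallel_size))
                         for i in range(world_size // model_parallel_size)]
print(data_parallel_groups)   # [[0, 2, 4, 6], [1, 3, 5, 7]]
print(model_parallel_groups)  # [[0, 1], [2, 3], [4, 5], [6, 7]]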
10,493
from deepspeed import comm as dist from deepspeed.utils import log_dist from deepspeed.utils.exceptions import DeprecatedException _EXPERT_PARALLEL_GROUP = {} _EXPERT_DATA_PARALLEL_GROUP = {} def _ensure_divisibility(numerator, denominator): """Ensure that numerator is divisible by the denominator.""" assert numerator % denominator == 0, '{} is not divisible by {}'.format( numerator, denominator) The provided code snippet includes necessary dependencies for implementing the `_create_expert_and_data_parallel` function. Write a Python function `def _create_expert_and_data_parallel(expert_parallel_size_)` to solve the following problem: Create expert and data parallel groups. Note: Caller of this function is responsible to check if the groups already exist. Example - E + D parallel world_size = 16 expert_parallel_size = 2 # number of experts in same group expert_data_parallel_group = [0,2,4,6,8,10,12,14], [1,3,5,7,9,11,13,15] - all reduce is only on MoE params expert_parallel_group = [0, 1], [2,3], [4,5], [6,7], [8,9] - no all reduce, but all to all data_parallel_group = [0,1,...,15] - all reduce is only on non-MoE Here is the function: def _create_expert_and_data_parallel(expert_parallel_size_): """ Create expert and data parallel groups. Note: Caller of this function is responsible to check if the groups already exist. Example - E + D parallel world_size = 16 expert_parallel_size = 2 # number of experts in same group expert_data_parallel_group = [0,2,4,6,8,10,12,14], [1,3,5,7,9,11,13,15] - all reduce is only on MoE params expert_parallel_group = [0, 1], [2,3], [4,5], [6,7], [8,9] - no all reduce, but all to all data_parallel_group = [0,1,...,15] - all reduce is only on non-MoE """ assert dist.is_initialized() log_dist( f'Creating expert and data parallel groups with size {expert_parallel_size_}', ranks=[0]) world_size = dist.get_world_size() rank = dist.get_rank() _ensure_divisibility(world_size, expert_parallel_size_) group_name = f"ep_size_{expert_parallel_size_}" # Build the expert data parallel groups. global _EXPERT_DATA_PARALLEL_GROUP # Only create group if it does not already exist if group_name not in _EXPERT_DATA_PARALLEL_GROUP: for i in range(expert_parallel_size_): ranks = range(i, world_size, expert_parallel_size_) group = dist.new_group(ranks) log_dist( f'Creating expert data parallel process group named {group_name} with ranks: {list(ranks)}', [0]) if i == (rank % expert_parallel_size_): _EXPERT_DATA_PARALLEL_GROUP[group_name] = group # Build the expert parallel groups. global _EXPERT_PARALLEL_GROUP # Only create group if it does not already exist if group_name not in _EXPERT_PARALLEL_GROUP: for i in range(world_size // expert_parallel_size_): ranks = range(i * expert_parallel_size_, (i + 1) * expert_parallel_size_) group = dist.new_group(ranks) log_dist( f'creating expert parallel process group named {group_name} with ranks: {list(ranks)}', [0]) if i == (rank // expert_parallel_size_): _EXPERT_PARALLEL_GROUP[group_name] = group
Create expert and data parallel groups. Note: Caller of this function is responsible to check if the groups already exist. Example - E + D parallel world_size = 16 expert_parallel_size = 2 # number of experts in same group expert_data_parallel_group = [0,2,4,6,8,10,12,14], [1,3,5,7,9,11,13,15] - all reduce is only on MoE params expert_parallel_group = [0, 1], [2,3], [4,5], [6,7], [8,9] - no all reduce, but all to all data_parallel_group = [0,1,...,15] - all reduce is only on non-MoE
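The rank-to-group tests inside the two loops above reduce to modulo / floor-division arithmetic; for example, with world_size 16 and ep_size 2, rank 5 lands in these groups:

world_size, ep_size, rank = 16, 2, 5
expert_data_parallel_ranks = list(range(rank % ep_size, world_size, ep_size))
expert_parallel_ranks = list(range((rank // ep_size) * ep_size,
                                   (rank // ep_size) * ep_size + ep_size))
print(f"ep_size_{ep_size}", expert_data_parallel_ranks, expert_parallel_ranks)
# ep_size_2 [1, 3, 5, 7, 9, 11, 13, 15] [4, 5]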
10,494
from deepspeed import comm as dist from deepspeed.utils import log_dist from deepspeed.utils.exceptions import DeprecatedException _EXPERT_PARALLEL_GROUP = {} _EXPERT_DATA_PARALLEL_GROUP = {} expert_tensor_parallel_world_size = 1 def _ensure_divisibility(numerator, denominator): """Ensure that numerator is divisible by the denominator.""" assert numerator % denominator == 0, '{} is not divisible by {}'.format( numerator, denominator) def _get_expert_parallel_ranks(world_size, model_parallel_size_, expert_parallel_size_): """Generate expert parallel and expert data parallel group ranks list. Example - E + M + D parallel world_size = 16 model_degree = 2 expert_degree = 4 # number of experts in same group mp_group = [0, 1], [2,3], [4,5] ... data_parallel_group =[0,2,4,6,8,10, 12,14], [1,3,5,7,9,11,13,15] expert_parallel_group = [0,2,4,6], [8,10,12,14] [1,3,5,7], [9,11,13,15] expert_data_parallel_group = [0,8],[2,10],[4,12],[6,14], [1,9],[3,11],[5,13],[7,15] Args: world_size (int): Distributed world size. model_parallel_size_ (int): Model parallel group size. expert_parallel_size_ (int): Expert parallel group size. Returns: Expert parallel group ranks and Expert data parallel group ranks list. """ _ensure_divisibility(world_size, model_parallel_size_) dp_world_size = world_size // model_parallel_size_ _ensure_divisibility(dp_world_size, expert_parallel_size_) # Generate data parallel groups data_parallel_groups = [] dp_group_size = model_parallel_size_ for i in range(dp_group_size): data_parallel_groups.append(list(range(i, world_size, dp_group_size))) expert_parallel_groups = [] expert_data_parallel_groups = [] for dp_ranks in data_parallel_groups: # partition of expert parallel groups, e.g. [0,2,4,6], [8,10,12,14] part_ep_groups = [] for i in range(0, dp_world_size, expert_parallel_size_): part_ep_groups.append(dp_ranks[i:i + expert_parallel_size_]) expert_parallel_groups.extend(part_ep_groups) # zip part_ep_groups get expert data parallel ranks, e.g [0,8],[2,10],[4,12],[6,14] for expert_dp_ranks in zip(*part_ep_groups): expert_data_parallel_groups.append(list(expert_dp_ranks)) return expert_parallel_groups, expert_data_parallel_groups The provided code snippet includes necessary dependencies for implementing the `_create_expert_data_and_model_parallel` function. Write a Python function `def _create_expert_data_and_model_parallel(expert_parallel_size_, mpu)` to solve the following problem: Create expert and data parallel groups based on MPU (model parallel) group. Note: Caller of this function is responsible to check if the groups already exist. Example - E + M + D parallel world_size = 16 model_degree = 2 expert_degree = 4 # number of experts in same group mp_group = [0, 1], [2,3], [4,5] ... data_parallel_group =[0,2,4,6,8,10, 12,14], [1,3,5,7,9,11,13,15] expert_parallel_group = [0,2,4,6], [8,10,12,14] [1,3,5,7], [9,11,13,15] expert_data_parallel_group = [0,8],[2,10],[4,12],[6,14], [1,9],[3,11],[5,13],[7,15] Here is the function: def _create_expert_data_and_model_parallel(expert_parallel_size_, mpu): """ Create expert and data parallel groups based on MPU (model parallel) group. Note: Caller of this function is responsible to check if the groups already exist. Example - E + M + D parallel world_size = 16 model_degree = 2 expert_degree = 4 # number of experts in same group mp_group = [0, 1], [2,3], [4,5] ... 
data_parallel_group =[0,2,4,6,8,10, 12,14], [1,3,5,7,9,11,13,15] expert_parallel_group = [0,2,4,6], [8,10,12,14] [1,3,5,7], [9,11,13,15] expert_data_parallel_group = [0,8],[2,10],[4,12],[6,14], [1,9],[3,11],[5,13],[7,15] """ assert dist.is_initialized(), "dist is not initialized" model_parallel_size_ = mpu.get_model_parallel_world_size() global expert_tensor_parallel_world_size expert_tensor_parallel_world_size = model_parallel_size_ world_size = dist.get_world_size() rank = dist.get_rank() dp_world_size = mpu.get_data_parallel_world_size() dp_rank = mpu.get_data_parallel_rank() _ensure_divisibility(world_size, model_parallel_size_) _ensure_divisibility(dp_world_size, expert_parallel_size_) log_dist( f"Creating deepspeed groups with model parallel size {model_parallel_size_}, expert parallel size {expert_parallel_size_}, world size {world_size}, dp world size {dp_world_size}", [0]) global _EXPERT_PARALLEL_GROUP, _EXPERT_DATA_PARALLEL_GROUP # Get world size and rank. Ensure some consistencies. _DATA_PARALLEL_GROUP = mpu.get_data_parallel_group() _MODEL_PARALLEL_GROUP = mpu.get_model_parallel_group() group_name = f"ep_size_{expert_parallel_size_}" # Only create groups if they don't already exist # Need to check conditions outside the group creation loop because of the way torch.dist group creation works if group_name not in _EXPERT_DATA_PARALLEL_GROUP and group_name not in _EXPERT_PARALLEL_GROUP: expert_parallel_groups, expert_data_parallel_groups = _get_expert_parallel_ranks( world_size, model_parallel_size_, expert_parallel_size_) for ranks in expert_parallel_groups: group = dist.new_group(ranks) if rank in list(ranks): _EXPERT_PARALLEL_GROUP[group_name] = group for ranks in expert_data_parallel_groups: group = dist.new_group(ranks) if rank in list(ranks): _EXPERT_DATA_PARALLEL_GROUP[group_name] = group
Create expert and data parallel groups based on MPU (model parallel) group. Note: Caller of this function is responsible to check if the groups already exist. Example - E + M + D parallel world_size = 16 model_degree = 2 expert_degree = 4 # number of experts in same group mp_group = [0, 1], [2,3], [4,5] ... data_parallel_group =[0,2,4,6,8,10, 12,14], [1,3,5,7,9,11,13,15] expert_parallel_group = [0,2,4,6], [8,10,12,14] [1,3,5,7], [9,11,13,15] expert_data_parallel_group = [0,8],[2,10],[4,12],[6,14], [1,9],[3,11],[5,13],[7,15]
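_get_expert_parallel_ranks (included in the dependency block above) is pure Python, so the E + M + D example from the docstring can be reproduced directly:

ep_groups, ep_dp_groups = _get_expert_parallel_ranks(16, 2, 4)
print(ep_groups)     # [[0, 2, 4, 6], [8, 10, 12, 14], [1, 3, 5, 7], [9, 11, 13, 15]]
print(ep_dp_groups)  # [[0, 8], [2, 10], [4, 12], [6, 14], [1, 9], [3, 11], [5, 13], [7, 15]]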
10,495
from deepspeed import comm as dist from deepspeed.utils import log_dist from deepspeed.utils.exceptions import DeprecatedException def _get_max_expert_size_name(): """Get the name of the group with max. ep_size""" return f'ep_size_{_get_max_expert_size()}' def _get_expert_parallel_group(group_name): """Get the expert parallel group the caller rank belongs to.""" assert group_name in _EXPERT_PARALLEL_GROUP, \ 'expert parallel group is not initialized' return _EXPERT_PARALLEL_GROUP[group_name] The provided code snippet includes necessary dependencies for implementing the `_get_max_expert_parallel_group` function. Write a Python function `def _get_max_expert_parallel_group()` to solve the following problem: Get the max expert parallel size. Here is the function: def _get_max_expert_parallel_group(): """Get the max expert parallel size.""" return _get_expert_parallel_group(_get_max_expert_size_name())
Get the max expert parallel size.
10,496
from deepspeed import comm as dist from deepspeed.utils import log_dist from deepspeed.utils.exceptions import DeprecatedException _EXPERT_PARALLEL_GROUP = {} The provided code snippet includes necessary dependencies for implementing the `_get_expert_parallel_group_dict` function. Write a Python function `def _get_expert_parallel_group_dict()` to solve the following problem: Get the expert parallel group dict. Here is the function: def _get_expert_parallel_group_dict(): """Get the expert parallel group dict.""" return _EXPERT_PARALLEL_GROUP
Get the expert parallel group dict.
10,497
from deepspeed import comm as dist from deepspeed.utils import log_dist from deepspeed.utils.exceptions import DeprecatedException _EXPERT_DATA_PARALLEL_GROUP = {} The provided code snippet includes necessary dependencies for implementing the `_get_expert_data_parallel_group_dict` function. Write a Python function `def _get_expert_data_parallel_group_dict()` to solve the following problem: Get the expert data parallel group dict. Here is the function: def _get_expert_data_parallel_group_dict(): """Get the expert data parallel group dict.""" return _EXPERT_DATA_PARALLEL_GROUP
Get the expert data parallel group dict.
10,498
from deepspeed import comm as dist from deepspeed.utils import log_dist from deepspeed.utils.exceptions import DeprecatedException def _get_data_parallel_group(): def _get_broadcast_src_rank(): return dist.get_global_rank(_get_data_parallel_group(), 0)
null
10,499
from deepspeed import comm as dist from deepspeed.utils import log_dist from deepspeed.utils.exceptions import DeprecatedException def _get_expert_data_parallel_group(group_name): """Get the expert data parallel group the caller rank belongs to.""" assert group_name in _EXPERT_DATA_PARALLEL_GROUP, \ 'expert data parallel group is not initialized' return _EXPERT_DATA_PARALLEL_GROUP[group_name] def _get_expert_broadcast_src_rank(group_name): return dist.get_global_rank(_get_expert_data_parallel_group(group_name), 0)
null
10,500
from deepspeed import comm as dist from deepspeed.utils import log_dist from deepspeed.utils.exceptions import DeprecatedException def _get_expert_data_parallel_group(group_name): """Get the expert data parallel group the caller rank belongs to.""" assert group_name in _EXPERT_DATA_PARALLEL_GROUP, \ 'expert data parallel group is not initialized' return _EXPERT_DATA_PARALLEL_GROUP[group_name] The provided code snippet includes necessary dependencies for implementing the `_get_expert_data_parallel_world_size` function. Write a Python function `def _get_expert_data_parallel_world_size(group_name)` to solve the following problem: Return world size for the expert data parallel group. Here is the function: def _get_expert_data_parallel_world_size(group_name): """Return world size for the expert data parallel group.""" return dist.get_world_size(group=_get_expert_data_parallel_group(group_name))
Return world size for the expert data parallel group.
10,501
from deepspeed import comm as dist from deepspeed.utils import log_dist from deepspeed.utils.exceptions import DeprecatedException def _get_expert_parallel_group(group_name): """Get the expert parallel group the caller rank belongs to.""" assert group_name in _EXPERT_PARALLEL_GROUP, \ 'expert parallel group is not initialized' return _EXPERT_PARALLEL_GROUP[group_name] The provided code snippet includes necessary dependencies for implementing the `_get_expert_parallel_rank` function. Write a Python function `def _get_expert_parallel_rank(group_name)` to solve the following problem: Return my rank for the expert parallel group. Here is the function: def _get_expert_parallel_rank(group_name): """Return my rank for the expert parallel group.""" return dist.get_rank(group=_get_expert_parallel_group(group_name))
Return my rank for the expert parallel group.
10,502
from deepspeed import comm as dist from deepspeed.utils import log_dist from deepspeed.utils.exceptions import DeprecatedException def _get_expert_parallel_world_size(group_name): """Return world size for the expert parallel group.""" return dist.get_world_size(group=_get_expert_parallel_group(group_name)) The provided code snippet includes necessary dependencies for implementing the `_get_expert_parallel_src_rank` function. Write a Python function `def _get_expert_parallel_src_rank(group_name)` to solve the following problem: Calculate the global rank corresponding to a local rank zero in the expert parallel group. Here is the function: def _get_expert_parallel_src_rank(group_name): """Calculate the global rank corresponding to a local rank zero in the expert parallel group.""" global_rank = dist.get_rank() local_world_size = _get_expert_parallel_world_size(group_name) return (global_rank // local_world_size) * local_world_size
Calculate the global rank corresponding to a local rank zero in the expert parallel group.
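The same arithmetic shown standalone: with an expert-parallel world size of 4, ranks 0-3 map to src rank 0, ranks 4-7 to src rank 4, and so on.

local_world_size = 4
for global_rank in (0, 3, 4, 7, 9):
    print(global_rank, "->", (global_rank // local_world_size) * local_world_size)
# 0 -> 0, 3 -> 0, 4 -> 4, 7 -> 4, 9 -> 8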
10,503
from deepspeed import comm as dist from deepspeed.utils import log_dist from deepspeed.utils.exceptions import DeprecatedException def _get_expert_data_parallel_group(group_name): """Get the expert data parallel group the caller rank belongs to.""" assert group_name in _EXPERT_DATA_PARALLEL_GROUP, \ 'expert data parallel group is not initialized' return _EXPERT_DATA_PARALLEL_GROUP[group_name] The provided code snippet includes necessary dependencies for implementing the `_get_expert_data_parallel_rank` function. Write a Python function `def _get_expert_data_parallel_rank(group_name)` to solve the following problem: Return my rank for the expert data parallel group. Here is the function: def _get_expert_data_parallel_rank(group_name): """Return my rank for the expert data parallel group.""" return dist.get_rank(group=_get_expert_data_parallel_group(group_name))
Return my rank for the expert data parallel group.
10,504
from deepspeed import comm as dist from deepspeed.utils import log_dist from deepspeed.utils.exceptions import DeprecatedException mpu = None def _get_data_parallel_group(): """Get the data parallel group the caller rank belongs to.""" assert dist.is_initialized(), \ 'dist is not initialized' global mpu if mpu is not None: return mpu.get_data_parallel_group() # Return the clone of dist world group return _clone_world_group() The provided code snippet includes necessary dependencies for implementing the `_get_data_parallel_world_size` function. Write a Python function `def _get_data_parallel_world_size()` to solve the following problem: Return world size for the data parallel group. Here is the function: def _get_data_parallel_world_size(): """Return world size for the data parallel group.""" global mpu if mpu is not None: return mpu.get_data_parallel_world_size() return dist.get_world_size(group=_get_data_parallel_group())
Return world size for the data parallel group.
10,505
from deepspeed import comm as dist from deepspeed.utils import log_dist from deepspeed.utils.exceptions import DeprecatedException mpu = None The provided code snippet includes necessary dependencies for implementing the `_get_model_parallel_world_size` function. Write a Python function `def _get_model_parallel_world_size()` to solve the following problem: Return world size for the model parallel group. Here is the function: def _get_model_parallel_world_size(): """Return world size for the model parallel group.""" global mpu if mpu is not None: return mpu.get_model_parallel_world_size() return 1
Return world size for the model parallel group.
10,506
from deepspeed import comm as dist from deepspeed.utils import log_dist from deepspeed.utils.exceptions import DeprecatedException mpu = None def _get_data_parallel_group(): """Get the data parallel group the caller rank belongs to.""" assert dist.is_initialized(), \ 'dist is not initialized' global mpu if mpu is not None: return mpu.get_data_parallel_group() # Return the clone of dist world group return _clone_world_group() The provided code snippet includes necessary dependencies for implementing the `_get_data_parallel_rank` function. Write a Python function `def _get_data_parallel_rank()` to solve the following problem: Return my rank for the data parallel group. Here is the function: def _get_data_parallel_rank(): """Return my rank for the data parallel group.""" global mpu if mpu is not None: return mpu.get_data_parallel_rank() return dist.get_rank(group=_get_data_parallel_group())
Return my rank for the data parallel group.
10,507
from deepspeed import comm as dist from deepspeed.utils import log_dist from deepspeed.utils.exceptions import DeprecatedException expert_tensor_parallel_world_size = 1 def _get_expert_model_parallel_world_size(): global expert_tensor_parallel_world_size return expert_tensor_parallel_world_size
null
10,508
import types from deepspeed.utils import get_full_hp_param, get_hp_fragment_mapping def _init_lp_to_hp_mapping(lp_param_list, partition_start, partition_size, dp_group): def link_hp_params(lp_param_list, flat_hp_partition, partition_start, partition_size, partition_optimizer_state, dp_group): local_lp_param_and_offset = _init_lp_to_hp_mapping(lp_param_list, partition_start, partition_size, dp_group) for lp_param, lp_start in local_lp_param_and_offset: lp_param._hp_mapping = get_hp_fragment_mapping(lp_param, lp_start, flat_hp_partition, partition_start, partition_size, partition_optimizer_state)
null
10,509
import torch try: import torch _torch_available = True except ImportError: _torch_available = False try: from torch.hub import _get_torch_home torch_cache_home = _get_torch_home() except ImportError: torch_cache_home = os.path.expanduser( os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch")) ) The provided code snippet includes necessary dependencies for implementing the `instrument_w_nvtx` function. Write a Python function `def instrument_w_nvtx(func)` to solve the following problem: decorator that causes an NVTX range to be recorded for the duration of the function call. Here is the function: def instrument_w_nvtx(func): """decorator that causes an NVTX range to be recorded for the duration of the function call.""" if hasattr(torch.cuda.nvtx, "range"): def wrapped_fn(*args, **kwargs): with torch.cuda.nvtx.range(func.__qualname__): return func(*args, **kwargs) return wrapped_fn else: return func
decorator that causes an NVTX range to be recorded for the duration of the function call.
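A hedged usage sketch: any function wrapped this way shows up as an NVTX range (named after its qualified name) in an Nsight Systems timeline, and the decorator degrades to a plain pass-through when torch.cuda.nvtx.range is unavailable.

import torch

@instrument_w_nvtx
def fetch_params():
    # the work done here is attributed to an NVTX range called "fetch_params"
    return torch.zeros(1024)

fetch_params()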
10,510
import torch from dataclasses import dataclass from deepspeed import comm as dist try: import torch _torch_available = True except ImportError: _torch_available = False try: from torch.hub import _get_torch_home torch_cache_home = _get_torch_home() except ImportError: torch_cache_home = os.path.expanduser( os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch")) ) def get_full_hp_param(self, optim_state_key=None): reduce_buffer = torch.zeros_like(self, dtype=torch.float32).flatten() if self._hp_mapping is not None: lp_frag_address = self._hp_mapping.lp_fragment_address reduce_fragment = torch.narrow(reduce_buffer, 0, lp_frag_address.start, lp_frag_address.numel) if optim_state_key is None: hp_fragment = self._hp_mapping.hp_fragment else: hp_fragment = self._hp_mapping.get_optim_state_fragment(optim_state_key) reduce_fragment.data.copy_(hp_fragment.data) dist.all_reduce(reduce_buffer, group=self._dp_group) return reduce_buffer.reshape_as(self)
null
10,511
import torch from dataclasses import dataclass from deepspeed import comm as dist class fragment_address: class tensor_fragment: def update_hp(self): def update_lp(self): def get_optim_state_fragment(self, key): def get_hp_fragment_address(self): def get_optim_state_keys(self): try: import torch _torch_available = True except ImportError: _torch_available = False try: from torch.hub import _get_torch_home torch_cache_home = _get_torch_home() except ImportError: torch_cache_home = os.path.expanduser( os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch")) ) def get_hp_fragment_mapping(lp_param, lp_start, flat_hp_partition, partition_start, partition_size, optimizer_state_dict): lp_end = lp_param.numel() + lp_start hp_start = partition_start hp_end = partition_start + partition_size fragment_start = max(lp_start, hp_start) fragment_end = min(lp_end, hp_end) # print( # f'{self.dp_rank=} {lp_start=} {lp_end-lp_start=} {hp_start=} {hp_end-hp_start=} {fragment_start=} {fragment_end-fragment_start=}' # ) assert fragment_start < fragment_end, \ f'fragment start {fragment_start} should be < fragment_end {fragment_end}' fragment_numel = fragment_end - fragment_start hp_frag_address = fragment_address(start=fragment_start - hp_start, numel=fragment_numel) hp_fragment_tensor = flat_hp_partition.narrow(0, hp_frag_address.start, hp_frag_address.numel) optim_fragment = { key: value.narrow(0, hp_frag_address.start, hp_frag_address.numel) for key, value in optimizer_state_dict.items() if torch.is_tensor(value) and value.shape == flat_hp_partition.shape } lp_frag_address = fragment_address(start=fragment_start - lp_start, numel=fragment_numel) lp_fragment_tensor = lp_param.flatten().narrow(0, lp_frag_address.start, lp_frag_address.numel) return tensor_fragment(lp_fragment=lp_fragment_tensor, lp_fragment_address=lp_frag_address, hp_fragment=hp_fragment_tensor, hp_fragment_address=hp_frag_address, optim_fragment=optim_fragment)
null
10,512
import os import torch import time import deepspeed import argparse from transformers import pipeline def print_latency(latency_set, title, warmup=3): # trim warmup queries latency_set = list(latency_set) latency_set = latency_set[warmup:] count = len(latency_set) if count > 0: latency_set.sort() n50 = (count - 1) * 0.5 + 1 n90 = (count - 1) * 0.9 + 1 n95 = (count - 1) * 0.95 + 1 n99 = (count - 1) * 0.99 + 1 n999 = (count - 1) * 0.999 + 1 avg = sum(latency_set) / count p50 = latency_set[int(n50) - 1] p90 = latency_set[int(n90) - 1] p95 = latency_set[int(n95) - 1] p99 = latency_set[int(n99) - 1] p999 = latency_set[int(n999) - 1] print(f"====== latency stats {title} ======") print("\tAvg Latency: {0:8.2f} ms".format(avg * 1000)) print("\tP50 Latency: {0:8.2f} ms".format(p50 * 1000)) print("\tP90 Latency: {0:8.2f} ms".format(p90 * 1000)) print("\tP95 Latency: {0:8.2f} ms".format(p95 * 1000)) print("\tP99 Latency: {0:8.2f} ms".format(p99 * 1000)) print("\t999 Latency: {0:8.2f} ms".format(p999 * 1000))
null
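A small illustration with synthetic latencies in seconds; the first three samples are discarded as warmup before the percentiles are computed:

import random

random.seed(0)
latencies = [0.05 + random.random() * 0.01 for _ in range(13)]
print_latency(latencies, "toy-run")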
10,513
import os import re import argparse import pandas as pd def get_branch(file_path): match = re.match(r".*\/(.*)\.log", file_path) if match is None: return False else: return match.groups()[0]
null
10,514
import os import re import argparse import pandas as pd def get_benchmark_params(root_dir, file_path): match = re.match( rf"{root_dir}\/(.+?)_(fp\d+)_(true|false)_(true|false)_(\d+)gpus_v(\d+)\/", file_path, ) if match is None: return False else: model, dtype, graphs, kernel, gpus, version = match.groups() bool_dict = {"true": True, "false": False} return { "model": model, "dtype": dtype, "graphs": bool_dict[graphs.lower()], "kernel": bool_dict[kernel.lower()], "gpus": int(gpus), "version": int(version), }
null
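The regex above expects directories named <model>_<dtype>_<graphs>_<kernel>_<N>gpus_v<V> under the results root; the path below is purely hypothetical and only illustrates that naming scheme:

params = get_benchmark_params("results", "results/gpt-neo_fp16_true_false_1gpus_v2/output.log")
print(params)
# {'model': 'gpt-neo', 'dtype': 'fp16', 'graphs': True, 'kernel': False, 'gpus': 1, 'version': 2}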
10,515
import os
import re
import argparse
import pandas as pd

def get_perf_data(file_content):
    matches = re.findall(r"\s+(.+?)\sLatency:\s+(\d+\.\d+)\sms", file_content)
    # re.findall returns a list; `matches is []` is always False, so test emptiness instead
    if not matches:
        return False
    else:
        return {f"latency-{key}": float(val) for key, val in matches}
null
10,516
import os import re import argparse import pandas as pd def get_generated_text(file_content, gen_text_n): file_content = file_content.replace("\n", " ") file_content = file_content.replace("\t", " ") matches = re.findall(r"RESPONSE\s(\d+):\s+[-]{30}\s+(.+?)\s+[-]{30}", file_content) if len(matches) != gen_text_n: return False else: return {f"generated-text-{key}": val for key, val in matches}
null
10,517
import os
import re
import argparse
import pandas as pd

def get_error(file_content):
    matches = re.findall(r"Error:\s+(.+?)\n", file_content)
    # re.findall returns a list; `matches is []` is always False, so test emptiness instead
    if not matches:
        return False
    else:
        # note: the constant key means only the last matched error string is kept
        return {"error": val for val in matches}
null
10,518
import torch import time import deepspeed import argparse from transformers import pipeline print(args.model, args.max_tokens, args.dtype) print(responses[0:3]) def print_latency(latency_set, title, warmup=3): # trim warmup queries latency_set = latency_set[warmup:] count = len(latency_set) if count > 0: latency_set.sort() n50 = (count - 1) * 0.5 + 1 n90 = (count - 1) * 0.9 + 1 n95 = (count - 1) * 0.95 + 1 n99 = (count - 1) * 0.99 + 1 n999 = (count - 1) * 0.999 + 1 avg = sum(latency_set) / count p50 = latency_set[int(n50) - 1] p90 = latency_set[int(n90) - 1] p95 = latency_set[int(n95) - 1] p99 = latency_set[int(n99) - 1] p999 = latency_set[int(n999) - 1] print(f"====== latency stats {title} ======") print("\tAvg Latency: {0:8.2f} ms".format(avg * 1000)) print("\tP50 Latency: {0:8.2f} ms".format(p50 * 1000)) print("\tP90 Latency: {0:8.2f} ms".format(p90 * 1000)) print("\tP95 Latency: {0:8.2f} ms".format(p95 * 1000)) print("\tP99 Latency: {0:8.2f} ms".format(p99 * 1000)) print("\t999 Latency: {0:8.2f} ms".format(p999 * 1000))
null
10,519
import torch import os import math import argparse from benchmarks.communication.constants import * global dist def init_torch_distributed(backend): global dist import torch.distributed as dist torch.distributed.init_process_group(backend) local_rank = int(os.environ['LOCAL_RANK']) torch.cuda.set_device(local_rank) def init_deepspeed_comm(backend): global dist import deepspeed import deepspeed.comm as dist deepspeed.init_distributed(dist_backend=backend) local_rank = int(os.environ['LOCAL_RANK']) torch.cuda.set_device(local_rank) def print_rank_0(message): if dist.get_rank() == 0: print(message) def init_processes(local_rank, args): if args.dist == 'deepspeed': init_deepspeed_comm(args.backend) elif args.dist == 'torch': init_torch_distributed(args.backend) else: print_rank_0(f"distributed framework {args.dist} not supported") exit(0)
null
10,520
import torch import os import math import argparse from benchmarks.communication.constants import * global dist def print_rank_0(message): if dist.get_rank() == 0: print(message) def print_header(args, comm_op): if comm_op == 'pt2pt': world_size = 2 else: world_size = dist.get_world_size() tput = f'Throughput ({args.bw_unit})' busbw = f'BusBW ({args.bw_unit})' header = f"\n---- Performance of {comm_op} on {world_size} devices ---------------------------------------------------------\n" duration_str = 'Duration' if args.raw: duration_str += ' (us)' header += f"{'Size (Bytes)':20s} {'Description':25s} {duration_str:20s} {tput:20s} {busbw:20s}\n" header += "----------------------------------------------------------------------------------------------------" print_rank_0(header)
null
10,521
import torch import os import math import argparse from benchmarks.communication.constants import * global dist def print_rank_0(message): if dist.get_rank() == 0: print(message) def get_bw(comm_op, size, duration, args): n = dist.get_world_size() tput = 0 busbw = 0 if comm_op == "all_to_all": tput = (size / duration) busbw = (size / duration) * ((n - 1) / n) elif comm_op == "all_gather": size *= n tput = (size / duration) busbw = (size / duration) * ((n - 1) / n) elif comm_op == "all_reduce": tput = (size * 2 / duration) busbw = (size / duration) * (2 * (n - 1) / n) elif comm_op == "pt2pt" or comm_op == "broadcast": tput = (size / duration) busbw = tput else: print_rank_0("wrong comm_op specified") exit(0) if args.bw_unit == 'Gbps': tput *= 8 busbw *= 8 return tput, busbw
null
10,522
import torch import os import math import argparse from benchmarks.communication.constants import * def get_metric_strings(args, tput, busbw, duration): duration_ms = duration * 1e3 duration_us = duration * 1e6 tput = f'{tput / 1e9:.3f}' busbw = f'{busbw /1e9:.3f}' if duration_us < 1e3 or args.raw: duration = f'{duration_us:.3f}' if not args.raw: duration += ' us' else: duration = f'{duration_ms:.3f} ms' return tput, busbw, duration
null
10,523
import torch import os import math import argparse from benchmarks.communication.constants import * global dist def sync_all(): torch.cuda.synchronize() dist.barrier()
null
10,524
import torch import os import math import argparse from benchmarks.communication.constants import * global dist def _element_size(dtype): """ Returns the element size for a dtype, in bytes """ if not isinstance(dtype, torch.dtype): raise RuntimeError(f'expected torch.dtype, but got {type(dtype)}') if dtype.is_complex: return torch.finfo(dtype).bits >> 2 elif dtype.is_floating_point: return torch.finfo(dtype).bits >> 3 elif dtype == torch.bool: # NOTE: torch.bool is not supported in torch.iinfo() return 1 else: return torch.iinfo(dtype).bits >> 3 def max_numel(comm_op, dtype, mem_factor, local_rank, args): dtype_size = _element_size(dtype) max_memory_per_gpu = torch.cuda.get_device_properties( local_rank).total_memory * mem_factor if comm_op == 'all_reduce' or comm_op == 'pt2pt' or comm_op == 'broadcast': elements_per_gpu = int(max_memory_per_gpu // dtype_size) elif comm_op == 'all_gather': # all_gather performance is lower for non-powers of two, and the output buffer size scales with world size # Therefore, divide by world size and round down to nearest power of 2 elements_per_gpu = int(max_memory_per_gpu // dtype_size // dist.get_world_size()) elements_per_gpu = int(pow(2, int(math.log(elements_per_gpu, 2)))) elif comm_op == 'all_to_all': # Number of elements must be divisible by world_size # all_to_all performance is lower for non-powers of two. Round down like all_gather. elements_per_gpu = int(max_memory_per_gpu // dtype_size) elements_per_gpu = int(dist.get_world_size() * round(elements_per_gpu / dist.get_world_size())) elements_per_gpu = int(pow(2, int(math.log(elements_per_gpu, 2)))) else: print(f"This communication operation: {comm_op} is not supported yet") exit(0) return elements_per_gpu
null
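Back-of-the-envelope version of the all_gather sizing branch above, assuming a 32 GiB GPU, fp16 elements (2 bytes each), mem_factor 0.4 and a world size of 8:

import math

max_mem = 32 * 1024**3 * 0.4
elements = int(max_mem // 2 // 8)                    # divide by dtype size and world size
elements = int(pow(2, int(math.log(elements, 2))))   # round down to a power of two
print(elements)  # 536870912 elements per rank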
10,525
import torch import os import math import argparse from benchmarks.communication.constants import * def convert_size(size_bytes): if size_bytes == 0: return "0B" size_name = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB") i = int(math.floor(math.log(size_bytes, 1024))) p = math.pow(1024, i) s = round(size_bytes / p, 2) return "%s %s" % (s, size_name[i])
null
10,526
import torch import os import math import argparse from benchmarks.communication.constants import * def benchmark_parser(): parser = argparse.ArgumentParser() parser.add_argument("--local_rank", type=int) parser.add_argument("--trials", type=int, default=DEFAULT_TRIALS, help='Number of timed iterations') parser.add_argument("--warmups", type=int, default=DEFAULT_WARMUPS, help='Number of warmup (non-timed) iterations') parser.add_argument("--maxsize", type=int, default=24, help='Max message size as a power of 2') parser.add_argument("--async-op", action="store_true", help='Enables non-blocking communication') parser.add_argument("--bw-unit", type=str, default=DEFAULT_UNIT, choices=['Gbps', 'GBps']) parser.add_argument("--backend", type=str, default=DEFAULT_BACKEND, choices=['nccl'], help='Communication library to use') parser.add_argument("--dist", type=str, default=DEFAULT_DIST, choices=['deepspeed', 'torch'], help='Distributed DL framework to use') parser.add_argument("--scan", action="store_true", help='Enables scanning all message sizes') parser.add_argument("--raw", action="store_true", help='Print the message size and latency without units') parser.add_argument("--all-reduce", action="store_true", help='Run all_reduce') parser.add_argument("--all-gather", action="store_true", help='Run all_gather') parser.add_argument("--all-to-all", action="store_true", help='Run all_to_all') parser.add_argument("--pt2pt", action="store_true", help='Run pt2pt') parser.add_argument("--broadcast", action="store_true", help='Run broadcast') parser.add_argument("--dtype", type=str, default=DEFAULT_TYPE, help='PyTorch tensor dtype') parser.add_argument( "--mem-factor", type=float, default=.4, help='Proportion of max available GPU memory to use for single-size evals') parser.add_argument("--debug", action="store_true", help='Enables all_to_all debug prints') return parser
null
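A sketch of driving the parser programmatically (assuming the benchmarks.communication.constants import in the snippet above resolves); the flag names come straight from the parser definition:

parser = benchmark_parser()
args = parser.parse_args(["--scan", "--all-reduce", "--dist", "deepspeed", "--bw-unit", "Gbps"])
print(args.scan, args.all_reduce, args.dist, args.bw_unit)  # True True deepspeed Gbps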
10,527
import torch from benchmarks.communication.utils import * from benchmarks.communication.constants import * import time def timed_broadcast(input, args): if args.dist == 'torch': import torch.distributed as dist elif args.dist == 'deepspeed': import deepspeed.comm as dist sync_all() # Warmups, establish connections, etc. for i in range(args.warmups): dist.broadcast(input, 0, async_op=args.async_op) sync_all() # time the actual comm op trials times and average it pre = time.perf_counter() for i in range(args.trials): dist.broadcast(input, 0, async_op=args.async_op) sync_all() duration = time.perf_counter() - pre # maintain and clean performance data avg_duration = duration / args.trials size = input.element_size() * input.nelement() n = dist.get_world_size() tput, busbw = get_bw('broadcast', size, avg_duration, args) tput_str, busbw_str, duration_str = get_metric_strings(args, tput, busbw, avg_duration) desc = f'{input.nelement()}x{input.element_size()}' if not args.raw: size = convert_size(size) print_rank_0( f"{size:<20} {desc:25s} {duration_str:20s} {tput_str:20s} {busbw_str:20s}") def run_broadcast(local_rank, args): if args.dist == 'torch': import torch.distributed as dist elif args.dist == 'deepspeed': import deepspeed.comm as dist # Prepare benchmark header print_header(args, 'broadcast') world_size = dist.get_world_size() global_rank = dist.get_rank() if args.scan: M_LIST = [] for x in (2**p for p in range(1, args.maxsize)): M_LIST.append(x) sync_all() # loop over various tensor sizes for M in M_LIST: global_rank = dist.get_rank() try: mat = torch.ones(world_size, M, dtype=getattr(torch, args.dtype)).cuda(local_rank) sync_all() input = ((mat.mul_(float(global_rank))).view(-1)) except RuntimeError as e: if 'out of memory' in str(e): if dist.get_rank() == 0: print('WARNING: Ran out of GPU memory. Exiting comm op.') sync_all() break sync_all() timed_broadcast(input, args) else: # Send the biggest message size our GPUs can fit. If you're facing OOM errors, reduce the mem_factor # Don't need output tensor, so we double mem_factor elements_per_gpu = max_numel(comm_op='broadcast', dtype=getattr(torch, args.dtype), mem_factor=args.mem_factor * 2, local_rank=local_rank, args=args) try: mat = torch.ones(elements_per_gpu, dtype=getattr(torch, args.dtype)).cuda(local_rank) input = ((mat.mul_(float(global_rank))).view(-1)) except RuntimeError as e: if 'out of memory' in str(e): if dist.get_rank() == 0: print( 'WARNING: Ran out of GPU memory. Try to reduce the --mem-factor argument!' ) sync_all() return sync_all() timed_broadcast(input, args)
null
10,528
from benchmarks.communication.utils import * from benchmarks.communication.constants import * import time def timed_all_reduce(input, args): if args.dist == 'torch': import torch.distributed as dist elif args.dist == 'deepspeed': import deepspeed.comm as dist sync_all() # Warmups, establish connections, etc. for i in range(args.warmups): dist.all_reduce(input, async_op=args.async_op) sync_all() # time the actual comm op trials times and average it pre = time.perf_counter() for i in range(args.trials): dist.all_reduce(input, async_op=args.async_op) sync_all() duration = time.perf_counter() - pre # maintain and clean performance data avg_duration = duration / args.trials size = input.element_size() * input.nelement() n = dist.get_world_size() tput, busbw = get_bw('all_reduce', size, avg_duration, args) tput_str, busbw_str, duration_str = get_metric_strings(args, tput, busbw, avg_duration) desc = f'{input.nelement()}x{input.element_size()}' if not args.raw: size = convert_size(size) print_rank_0( f"{size:<20} {desc:25s} {duration_str:20s} {tput_str:20s} {busbw_str:20s}") def run_all_reduce(local_rank, args): if args.dist == 'torch': import torch.distributed as dist elif args.dist == 'deepspeed': import deepspeed.comm as dist # Prepare benchmark header print_header(args, 'all_reduce') world_size = dist.get_world_size() global_rank = dist.get_rank() if args.scan: M_LIST = [] for x in (2**p for p in range(1, args.maxsize)): M_LIST.append(x) sync_all() # loop over various tensor sizes for M in M_LIST: global_rank = dist.get_rank() try: mat = torch.ones(world_size, M, dtype=getattr(torch, args.dtype)).cuda(local_rank) sync_all() input = ((mat.mul_(float(global_rank))).view(-1)) except RuntimeError as e: if 'out of memory' in str(e): if dist.get_rank() == 0: print('WARNING: Ran out of GPU memory. Exiting comm op.') sync_all() break sync_all() timed_all_reduce(input, args) else: # Send the biggest message size our GPUs can fit. If you're facing OOM errors, reduce the mem_factor # Don't need output tensor, so we double mem_factor elements_per_gpu = max_numel(comm_op='all_reduce', dtype=getattr(torch, args.dtype), mem_factor=args.mem_factor * 2, local_rank=local_rank, args=args) try: mat = torch.ones(elements_per_gpu, dtype=getattr(torch, args.dtype)).cuda(local_rank) input = ((mat.mul_(float(global_rank))).view(-1)) except RuntimeError as e: if 'out of memory' in str(e): if dist.get_rank() == 0: print( 'WARNING: Ran out of GPU memory. Try to reduce the --mem-factor argument!' ) sync_all() return sync_all() timed_all_reduce(input, args)
null
10,529
from benchmarks.communication.utils import * from benchmarks.communication.constants import * import time def timed_pt2pt(input, args): if args.dist == 'torch': import torch.distributed as dist elif args.dist == 'deepspeed': import deepspeed.comm as dist sync_all() # Warmups, establish connections, etc. for i in range(args.warmups): if dist.get_rank() == 0: if args.async_op: dist.isend(input, 1) else: dist.send(input, 1) if dist.get_rank() == 1: if args.async_op: dist.irecv(input, src=0) else: dist.recv(input, src=0) sync_all() # time the actual comm op trials times and average it pre = time.perf_counter() for i in range(args.trials): if dist.get_rank() == 0: if args.async_op: dist.isend(input, 1) else: dist.send(input, 1) if dist.get_rank() == 1: if args.async_op: dist.irecv(input, src=0) else: dist.recv(input, src=0) sync_all() duration = time.perf_counter() - pre # maintain and clean performance data avg_duration = duration / args.trials size = input.element_size() * input.nelement() n = dist.get_world_size() tput, busbw = get_bw('pt2pt', size, avg_duration, args) tput_str, busbw_str, duration_str = get_metric_strings(args, tput, busbw, avg_duration) desc = f'{input.nelement()}x{input.element_size()}' if not args.raw: size = convert_size(size) print_rank_0( f"{size:<20} {desc:25s} {duration_str:20s} {tput_str:20s} {busbw_str:20s}") def run_pt2pt(local_rank, args): if args.dist == 'torch': import torch.distributed as dist elif args.dist == 'deepspeed': import deepspeed.comm as dist # Prepare benchmark header print_header(args, 'pt2pt') global_rank = dist.get_rank() world_size = dist.get_world_size() if args.scan: # Create list of message sizes M_LIST = [] for x in (2**p for p in range(1, args.maxsize)): M_LIST.append(x) sync_all() # loop over various tensor sizes for M in M_LIST: global_rank = dist.get_rank() try: mat = torch.ones(world_size, M, dtype=getattr(torch, args.dtype)).cuda(local_rank) sync_all() input = ((mat.mul_(float(global_rank))).view(-1)) except RuntimeError as e: if 'out of memory' in str(e): if dist.get_rank() == 0: print('WARNING: Ran out of GPU memory. Exiting comm op.') sync_all() break sync_all() timed_pt2pt(input, args) else: # Send the biggest message size our GPUs can fit. If you're facing OOM errors, reduce the mem_factor # Don't need output tensor, so double mem_factor elements_per_gpu = max_numel(comm_op='pt2pt', dtype=getattr(torch, args.dtype), mem_factor=args.mem_factor * 2, local_rank=local_rank, args=args) try: mat = torch.ones(elements_per_gpu, dtype=getattr(torch, args.dtype)).cuda(local_rank) input = ((mat.mul_(float(global_rank))).view(-1)) except RuntimeError as e: if 'out of memory' in str(e): if dist.get_rank() == 0: print( 'WARNING: Ran out of GPU memory. Try to reduce the --mem-factor argument!' ) sync_all() return sync_all() timed_pt2pt(input, args)
null
10,530
from benchmarks.communication.utils import * from benchmarks.communication.constants import * import time def timed_all_to_all(input, output, args): if args.dist == 'torch': import torch.distributed as dist elif args.dist == 'deepspeed': import deepspeed.comm as dist sync_all() # Warmups, establish connections, etc. for i in range(args.warmups): dist.all_to_all_single(output, input, async_op=args.async_op) sync_all() # time the actual comm op trials times and average it pre = time.perf_counter() for i in range(args.trials): dist.all_to_all_single(output, input, async_op=args.async_op) sync_all() duration = time.perf_counter() - pre # maintain and clean performance data avg_duration = duration / args.trials size = input.element_size() * input.nelement() n = dist.get_world_size() tput, busbw = get_bw('all_to_all', size, avg_duration, args) tput_str, busbw_str, duration_str = get_metric_strings(args, tput, busbw, avg_duration) desc = f'{input.nelement()}x{input.element_size()}' if not args.raw: size = convert_size(size) print_rank_0( f"{size:<20} {desc:25s} {duration_str:20s} {tput_str:20s} {busbw_str:20s}") def run_all_to_all(local_rank, args): if args.dist == 'torch': import torch.distributed as dist elif args.dist == 'deepspeed': import deepspeed.comm as dist world_size = dist.get_world_size() global_rank = dist.get_rank() # Prepare benchmark header print_header(args, 'all_to_all') if args.scan: M_LIST = [] for x in (2**p for p in range(1, args.maxsize)): M_LIST.append(x) sync_all() # loop over various tensor sizes for M in M_LIST: global_rank = dist.get_rank() try: mat = torch.ones(world_size, M, dtype=getattr(torch, args.dtype)).cuda(local_rank) assert mat.numel() % world_size == 0, f"tensor cannot be divided in {world_size} chunks" sync_all() input = ((mat.mul_(float(global_rank))).view(-1)) output = (mat.clone().view(-1)) except RuntimeError as e: if 'out of memory' in str(e): if dist.get_rank() == 0: print('WARNING: Ran out of GPU memory. Exiting comm op.') sync_all() break sync_all() timed_all_to_all(input, output, args) else: # Send the biggest message size our GPUs can fit. If you're facing OOM errors, reduce the mem_factor elements_per_gpu = max_numel(comm_op='all_to_all', dtype=getattr(torch, args.dtype), mem_factor=args.mem_factor, local_rank=local_rank, args=args) try: mat = torch.ones(elements_per_gpu, dtype=getattr(torch, args.dtype)).cuda(local_rank) assert mat.numel() % world_size == 0, f"tensor with {mat.numel()} elements cannot be divided in {world_size} chunks" input = ((mat.mul_(float(global_rank))).view(-1)) # Delete original mat to avoid OOM del mat torch.cuda.empty_cache() output = torch.zeros(elements_per_gpu, dtype=getattr(torch, args.dtype)).cuda(local_rank) except RuntimeError as e: if 'out of memory' in str(e): if dist.get_rank() == 0: print( 'WARNING: Ran out of GPU memory. Try to reduce the --mem-factor argument!' ) sync_all() return sync_all() if args.debug: for i in range(world_size): if i == global_rank: print(f"Before AllToAll Input List at rank {global_rank}: {input}") dist.barrier() timed_all_to_all(input, output, args) if args.debug: for i in range(world_size): if i == global_rank: print(f"AllToAll Results at rank {global_rank}: {output}") dist.barrier()
null
10,531
from benchmarks.communication.utils import * from benchmarks.communication.constants import * import time def timed_all_gather(input, output, args): if args.dist == 'torch': import torch.distributed as dist elif args.dist == 'deepspeed': import deepspeed.comm as dist sync_all() # Warmups, establish connections, etc. for i in range(args.warmups): # use all_gather_base if available if args.dist == 'torch': if hasattr(torch.distributed, "_all_gather_base"): dist._all_gather_base(output, input, group=None, async_op=args.async_op) else: output_tensors = list( torch.chunk(output_tensor, cdb.get_world_size(group))) dist.all_gather(output_tensors, input_tensor, group=group, async_op=True) elif args.dist == 'deepspeed': dist.allgather_fn(output, input, group=None, async_op=args.async_op) sync_all() # time the actual comm op trials times and average it pre = time.perf_counter() for i in range(args.trials): # use all_gather_base if available if args.dist == 'torch': if hasattr(torch.distributed, "_all_gather_base"): dist._all_gather_base(output, input, group=None, async_op=args.async_op) else: output_tensors = list( torch.chunk(output_tensor, cdb.get_world_size(group))) dist.all_gather(output_tensors, input_tensor, group=group, async_op=True) elif args.dist == 'deepspeed': dist.allgather_fn(output, input, group=None, async_op=args.async_op) sync_all() duration = time.perf_counter() - pre # maintain and clean performance data avg_duration = duration / args.trials size = input.element_size() * input.nelement() n = dist.get_world_size() tput, busbw = get_bw('all_gather', size, avg_duration, args) tput_str, busbw_str, duration_str = get_metric_strings(args, tput, busbw, avg_duration) desc = f'{input.nelement()}x{input.element_size()}' if not args.raw: size = convert_size(size) print_rank_0( f"{size:<20} {desc:25s} {duration_str:20s} {tput_str:20s} {busbw_str:20s}") def run_all_gather(local_rank, args): if args.dist == 'torch': import torch.distributed as dist elif args.dist == 'deepspeed': import deepspeed.comm as dist # Prepare benchmark header print_header(args, 'all_gather') global_rank = dist.get_rank() world_size = dist.get_world_size() if args.scan: # Create list of message sizes M_LIST = [] for x in (2**p for p in range(1, args.maxsize)): M_LIST.append(x) sync_all() # loop over various tensor sizes for M in M_LIST: global_rank = dist.get_rank() try: mat = torch.ones(world_size, M, dtype=getattr(torch, args.dtype)).cuda(local_rank) sync_all() input = ((mat.mul_(float(global_rank))).view(-1)) # Delete original mat to avoid OOM del mat torch.cuda.empty_cache() output = torch.zeros(input.nelement() * world_size, dtype=getattr(torch, args.dtype)).cuda(local_rank) except RuntimeError as e: if 'out of memory' in str(e): if dist.get_rank() == 0: print('WARNING: Ran out of GPU memory. Exiting comm op.') sync_all() break sync_all() timed_all_gather(input, output, args) else: # all_gather_base saves memory if (args.dist == 'torch' and hasattr(torch.distributed, "_all_gather_base")) or (args.dist == 'deepspeed' and dist.has_allgather_base): mem_factor = args.mem_factor + 0.2 else: mem_factor = args.mem_factor # Send the biggest message size our GPUs can fit. 
If you're facing OOM errors, reduce the mem_factor sync_all() elements_per_gpu = max_numel(comm_op='all_gather', dtype=getattr(torch, args.dtype), mem_factor=mem_factor, local_rank=local_rank, args=args) try: mat = torch.ones(elements_per_gpu, dtype=getattr(torch, args.dtype)).cuda(local_rank) # multiply each GPU's tensor by the rank to ease debugging input = ((mat.mul_(float(global_rank))).view(-1)) # Delete original mat to avoid OOM del mat torch.cuda.empty_cache() output = torch.zeros(elements_per_gpu * world_size, dtype=getattr(torch, args.dtype)).cuda(local_rank) except RuntimeError as e: if 'out of memory' in str(e): if dist.get_rank() == 0: print( 'WARNING: Ran out of GPU memory. Try to reduce the --mem-factor argument!' ) sync_all() return sync_all() timed_all_gather(input, output, args)
null
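One thing worth flagging in the row above: in `timed_all_gather`, the fallback branch taken when `_all_gather_base` is unavailable refers to names that are never defined in that function (`output_tensor`, `input_tensor`, `cdb`, `group`), so it would raise a `NameError` if ever reached. The sketch below shows what that branch presumably intends, with the variables that are actually in scope substituted in. It is my reading, not the original code, and it assumes an already-initialized `torch.distributed` process group.

```python
import torch
import torch.distributed as dist

def all_gather_once(output, input, async_op=False):
    """Gather `input` from every rank into the flat, preallocated `output` buffer."""
    if hasattr(dist, "_all_gather_base"):
        # Flat (base) variant used by the benchmark when available.
        return dist._all_gather_base(output, input, group=None, async_op=async_op)
    # Fallback: split the flat output into one chunk per rank and use the list-based API.
    output_tensors = list(torch.chunk(output, dist.get_world_size()))
    return dist.all_gather(output_tensors, input, async_op=async_op)
```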
10,534
from __future__ import annotations import subprocess import sys def err(s: str) -> None: print(s, file=sys.stderr)
null
10,535
import os import re import shutil from distutils.core import Command from pathlib import Path from setuptools import find_packages, setup deps = {b: a for a, b in (re.findall(r"^(([^!=<>~ ]+)(?:[!=<>~ ].*)?$)", x)[0] for x in _deps)} def deps_list(*pkgs): return [deps[pkg] for pkg in pkgs]
null
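The dict comprehension in the row above relies on a regex to map each requirement string in `_deps` (defined elsewhere in setup.py) to its bare package name, so that `deps_list("pkg")` can recover the full pin. A self-contained illustration with a made-up `_deps` list shows what the mapping produces:

```python
# The regex captures (full requirement, bare name) for each entry; the comprehension
# keys the dict by the bare name. `_deps` here is a stand-in for the real list.
import re

_deps = ["black~=23.1", "datasets!=2.5.0", "filelock"]
deps = {b: a for a, b in (re.findall(r"^(([^!=<>~ ]+)(?:[!=<>~ ].*)?$)", x)[0] for x in _deps)}
print(deps)  # {'black': 'black~=23.1', 'datasets': 'datasets!=2.5.0', 'filelock': 'filelock'}
```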
10,536
import functools import gc import inspect import json import os import pickle import re import warnings from collections.abc import Mapping from pathlib import Path from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Union import h5py import numpy as np import tensorflow as tf from tensorflow.python.keras import backend as K from tensorflow.python.keras.engine import data_adapter from tensorflow.python.keras.engine.keras_tensor import KerasTensor from tensorflow.python.keras.saving import hdf5_format from huggingface_hub import Repository, list_repo_files from keras.saving.hdf5_format import save_attributes_to_hdf5_group from transformers.utils.hub import convert_file_size_to_int, get_checkpoint_shard_files from . import DataCollatorWithPadding, DefaultDataCollator from .activations_tf import get_tf_activation from .configuration_utils import PretrainedConfig from .dynamic_module_utils import custom_object_save from .generation_tf_utils import TFGenerationMixin from .tf_utils import shape_list from .utils import ( DUMMY_INPUTS, SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, TF2_WEIGHTS_INDEX_NAME, TF2_WEIGHTS_NAME, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ModelOutput, PushToHubMixin, cached_file, download_url, find_labels, has_file, is_offline_mode, is_remote_url, is_safetensors_available, logging, requires_backends, working_or_temp_dir, ) def dummy_loss(y_true, y_pred): return tf.reduce_mean(y_pred)
null
10,537
import functools import gc import inspect import json import os import pickle import re import warnings from collections.abc import Mapping from pathlib import Path from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Union import h5py import numpy as np import tensorflow as tf from tensorflow.python.keras import backend as K from tensorflow.python.keras.engine import data_adapter from tensorflow.python.keras.engine.keras_tensor import KerasTensor from tensorflow.python.keras.saving import hdf5_format from huggingface_hub import Repository, list_repo_files from keras.saving.hdf5_format import save_attributes_to_hdf5_group from transformers.utils.hub import convert_file_size_to_int, get_checkpoint_shard_files from . import DataCollatorWithPadding, DefaultDataCollator from .activations_tf import get_tf_activation from .configuration_utils import PretrainedConfig from .dynamic_module_utils import custom_object_save from .generation_tf_utils import TFGenerationMixin from .tf_utils import shape_list from .utils import ( DUMMY_INPUTS, SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, TF2_WEIGHTS_INDEX_NAME, TF2_WEIGHTS_NAME, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ModelOutput, PushToHubMixin, cached_file, download_url, find_labels, has_file, is_offline_mode, is_remote_url, is_safetensors_available, logging, requires_backends, working_or_temp_dir, ) class PretrainedConfig(PushToHubMixin): r""" Base class for all configuration classes. Handles a few parameters common to all models' configurations as well as methods for loading/downloading/saving configurations. <Tip> A configuration file can be loaded and saved to disk. Loading the configuration file and using this file to initialize a model does **not** load the model weights. It only affects the model's configuration. </Tip> Class attributes (overridden by derived classes): - **model_type** (`str`) -- An identifier for the model type, serialized into the JSON file, and used to recreate the correct object in [`~transformers.AutoConfig`]. - **is_composition** (`bool`) -- Whether the config class is composed of multiple sub-configs. In this case the config has to be initialized from two or more configs of type [`~transformers.PretrainedConfig`] like: [`~transformers.EncoderDecoderConfig`] or [`~RagConfig`]. - **keys_to_ignore_at_inference** (`List[str]`) -- A list of keys to ignore by default when looking at dictionary outputs of the model during inference. - **attribute_map** (`Dict[str, str]`) -- A dict that maps model specific attribute names to the standardized naming of attributes. Common attributes (present in all subclasses): - **vocab_size** (`int`) -- The number of tokens in the vocabulary, which is also the first dimension of the embeddings matrix (this attribute may be missing for models that don't have a text modality like ViT). - **hidden_size** (`int`) -- The hidden size of the model. - **num_attention_heads** (`int`) -- The number of attention heads used in the multi-head attention layers of the model. - **num_hidden_layers** (`int`) -- The number of blocks in the model. Arg: name_or_path (`str`, *optional*, defaults to `""`): Store the string that was passed to [`PreTrainedModel.from_pretrained`] or [`TFPreTrainedModel.from_pretrained`] as `pretrained_model_name_or_path` if the configuration was created with such a method. output_hidden_states (`bool`, *optional*, defaults to `False`): Whether or not the model should return all hidden-states. 
output_attentions (`bool`, *optional*, defaults to `False`): Whether or not the model should returns all attentions. return_dict (`bool`, *optional*, defaults to `True`): Whether or not the model should return a [`~transformers.utils.ModelOutput`] instead of a plain tuple. is_encoder_decoder (`bool`, *optional*, defaults to `False`): Whether the model is used as an encoder/decoder or not. is_decoder (`bool`, *optional*, defaults to `False`): Whether the model is used as decoder or not (in which case it's used as an encoder). cross_attention_hidden_size** (`bool`, *optional*): The hidden size of the cross-attention layer in case the model is used as a decoder in an encoder-decoder setting and the cross-attention hidden dimension differs from `self.config.hidden_size`. add_cross_attention (`bool`, *optional*, defaults to `False`): Whether cross-attention layers should be added to the model. Note, this option is only relevant for models that can be used as decoder models within the [`EncoderDecoderModel`] class, which consists of all models in `AUTO_MODELS_FOR_CAUSAL_LM`. tie_encoder_decoder (`bool`, *optional*, defaults to `False`): Whether all encoder weights should be tied to their equivalent decoder weights. This requires the encoder and decoder model to have the exact same parameter names. prune_heads (`Dict[int, List[int]]`, *optional*, defaults to `{}`): Pruned heads of the model. The keys are the selected layer indices and the associated values, the list of heads to prune in said layer. For instance `{1: [0, 2], 2: [2, 3]}` will prune heads 0 and 2 on layer 1 and heads 2 and 3 on layer 2. chunk_size_feed_forward (`int`, *optional*, defaults to `0`): The chunk size of all feed forward layers in the residual attention blocks. A chunk size of `0` means that the feed forward layer is not chunked. A chunk size of n means that the feed forward layer processes `n` < sequence_length embeddings at a time. For more information on feed forward chunking, see [How does Feed Forward Chunking work?](../glossary.html#feed-forward-chunking). > Parameters for sequence generation max_length (`int`, *optional*, defaults to 20): Maximum length that will be used by default in the `generate` method of the model. min_length (`int`, *optional*, defaults to 10): Minimum length that will be used by default in the `generate` method of the model. do_sample (`bool`, *optional*, defaults to `False`): Flag that will be used by default in the `generate` method of the model. Whether or not to use sampling ; use greedy decoding otherwise. early_stopping (`bool`, *optional*, defaults to `False`): Flag that will be used by default in the `generate` method of the model. Whether to stop the beam search when at least `num_beams` sentences are finished per batch or not. num_beams (`int`, *optional*, defaults to 1): Number of beams for beam search that will be used by default in the `generate` method of the model. 1 means no beam search. num_beam_groups (`int`, *optional*, defaults to 1): Number of groups to divide `num_beams` into in order to ensure diversity among different groups of beams that will be used by default in the `generate` method of the model. 1 means no group beam search. diversity_penalty (`float`, *optional*, defaults to 0.0): Value to control diversity for group beam search. that will be used by default in the `generate` method of the model. 0 means no diversity penalty. The higher the penalty, the more diverse are the outputs. 
temperature (`float`, *optional*, defaults to 1): The value used to module the next token probabilities that will be used by default in the `generate` method of the model. Must be strictly positive. top_k (`int`, *optional*, defaults to 50): Number of highest probability vocabulary tokens to keep for top-k-filtering that will be used by default in the `generate` method of the model. top_p (`float`, *optional*, defaults to 1): Value that will be used by default in the `generate` method of the model for `top_p`. If set to float < 1, only the most probable tokens with probabilities that add up to `top_p` or higher are kept for generation. repetition_penalty (`float`, *optional*, defaults to 1): Parameter for repetition penalty that will be used by default in the `generate` method of the model. 1.0 means no penalty. length_penalty (`float`, *optional*, defaults to 1): Exponential penalty to the length that is used with beam-based generation. It is applied as an exponent to the sequence length, which in turn is used to divide the score of the sequence. Since the score is the log likelihood of the sequence (i.e. negative), `length_penalty` > 0.0 promotes longer sequences, while `length_penalty` < 0.0 encourages shorter sequences. no_repeat_ngram_size (`int`, *optional*, defaults to 0) -- Value that will be used by default in the `generate` method of the model for `no_repeat_ngram_size`. If set to int > 0, all ngrams of that size can only occur once. encoder_no_repeat_ngram_size (`int`, *optional*, defaults to 0) -- Value that will be used by default in the `generate` method of the model for `encoder_no_repeat_ngram_size`. If set to int > 0, all ngrams of that size that occur in the `encoder_input_ids` cannot occur in the `decoder_input_ids`. bad_words_ids (`List[int]`, *optional*): List of token ids that are not allowed to be generated that will be used by default in the `generate` method of the model. In order to get the tokens of the words that should not appear in the generated text, use `tokenizer.encode(bad_word, add_prefix_space=True)`. num_return_sequences (`int`, *optional*, defaults to 1): Number of independently computed returned sequences for each element in the batch that will be used by default in the `generate` method of the model. output_scores (`bool`, *optional*, defaults to `False`): Whether the model should return the logits when used for generation. return_dict_in_generate (`bool`, *optional*, defaults to `False`): Whether the model should return a [`~transformers.utils.ModelOutput`] instead of a `torch.LongTensor`. forced_bos_token_id (`int`, *optional*): The id of the token to force as the first generated token after the `decoder_start_token_id`. Useful for multilingual models like [mBART](../model_doc/mbart) where the first generated token needs to be the target language token. forced_eos_token_id (`int`, *optional*): The id of the token to force as the last generated token when `max_length` is reached. remove_invalid_values (`bool`, *optional*): Whether to remove possible _nan_ and _inf_ outputs of the model to prevent the generation method to crash. Note that using `remove_invalid_values` can slow down generation. > Parameters for fine-tuning tasks architectures (`List[str]`, *optional*): Model architectures that can be used with the model pretrained weights. finetuning_task (`str`, *optional*): Name of the task used to fine-tune the model. This can be used when converting from an original (TensorFlow or PyTorch) checkpoint. 
id2label (`Dict[int, str]`, *optional*): A map from index (for instance prediction index, or target index) to label. label2id (`Dict[str, int]`, *optional*): A map from label to index for the model. num_labels (`int`, *optional*): Number of labels to use in the last layer added to the model, typically for a classification task. task_specific_params (`Dict[str, Any]`, *optional*): Additional keyword arguments to store for the current task. problem_type (`str`, *optional*): Problem type for `XxxForSequenceClassification` models. Can be one of `"regression"`, `"single_label_classification"` or `"multi_label_classification"`. > Parameters linked to the tokenizer tokenizer_class (`str`, *optional*): The name of the associated tokenizer class to use (if none is set, will use the tokenizer associated to the model by default). prefix (`str`, *optional*): A specific prompt that should be added at the beginning of each text before calling the model. bos_token_id (`int`, *optional*): The id of the _beginning-of-stream_ token. pad_token_id (`int`, *optional*): The id of the _padding_ token. eos_token_id (`int`, *optional*): The id of the _end-of-stream_ token. decoder_start_token_id (`int`, *optional*): If an encoder-decoder model starts decoding with a different token than _bos_, the id of that token. sep_token_id (`int`, *optional*): The id of the _separation_ token. > PyTorch specific parameters torchscript (`bool`, *optional*, defaults to `False`): Whether or not the model should be used with Torchscript. tie_word_embeddings (`bool`, *optional*, defaults to `True`): Whether the model's input and output word embeddings should be tied. Note that this is only relevant if the model has a output word embedding layer. torch_dtype (`str`, *optional*): The `dtype` of the weights. This attribute can be used to initialize the model to a non-default `dtype` (which is normally `float32`) and thus allow for optimal storage allocation. For example, if the saved model is `float16`, ideally we want to load it back using the minimal amount of memory needed to load `float16` weights. Since the config object is stored in plain text, this attribute contains just the floating type string without the `torch.` prefix. For example, for `torch.float16` ``torch_dtype` is the `"float16"` string. This attribute is currently not being used during model loading time, but this may change in the future versions. But we can already start preparing for the future by saving the dtype with save_pretrained. > TensorFlow specific parameters use_bfloat16 (`bool`, *optional*, defaults to `False`): Whether or not the model should use BFloat16 scalars (only used by some TensorFlow models). tf_legacy_loss (`bool`, *optional*, defaults to `False`): Whether the model should use legacy TensorFlow losses. Legacy losses have variable output shapes and may not be XLA-compatible. This option is here for backward compatibility and will be removed in Transformers v5. 
""" model_type: str = "" is_composition: bool = False attribute_map: Dict[str, str] = {} _auto_class: Optional[str] = None def __setattr__(self, key, value): if key in super().__getattribute__("attribute_map"): key = super().__getattribute__("attribute_map")[key] super().__setattr__(key, value) def __getattribute__(self, key): if key != "attribute_map" and key in super().__getattribute__("attribute_map"): key = super().__getattribute__("attribute_map")[key] return super().__getattribute__(key) def __init__(self, **kwargs): # Attributes with defaults self.return_dict = kwargs.pop("return_dict", True) self.output_hidden_states = kwargs.pop("output_hidden_states", False) self.output_attentions = kwargs.pop("output_attentions", False) self.torchscript = kwargs.pop("torchscript", False) # Only used by PyTorch models self.torch_dtype = kwargs.pop("torch_dtype", None) # Only used by PyTorch models self.use_bfloat16 = kwargs.pop("use_bfloat16", False) self.tf_legacy_loss = kwargs.pop("tf_legacy_loss", False) # Only used by TensorFlow models self.pruned_heads = kwargs.pop("pruned_heads", {}) self.tie_word_embeddings = kwargs.pop( "tie_word_embeddings", True ) # Whether input and output word embeddings should be tied for all MLM, LM and Seq2Seq models. # Is decoder is used in encoder-decoder models to differentiate encoder from decoder self.is_encoder_decoder = kwargs.pop("is_encoder_decoder", False) self.is_decoder = kwargs.pop("is_decoder", False) self.cross_attention_hidden_size = kwargs.pop("cross_attention_hidden_size", None) self.add_cross_attention = kwargs.pop("add_cross_attention", False) self.tie_encoder_decoder = kwargs.pop("tie_encoder_decoder", False) # Parameters for sequence generation self.max_length = kwargs.pop("max_length", 20) self.min_length = kwargs.pop("min_length", 0) self.do_sample = kwargs.pop("do_sample", False) self.early_stopping = kwargs.pop("early_stopping", False) self.num_beams = kwargs.pop("num_beams", 1) self.num_beam_groups = kwargs.pop("num_beam_groups", 1) self.diversity_penalty = kwargs.pop("diversity_penalty", 0.0) self.temperature = kwargs.pop("temperature", 1.0) self.top_k = kwargs.pop("top_k", 50) self.top_p = kwargs.pop("top_p", 1.0) self.typical_p = kwargs.pop("typical_p", 1.0) self.repetition_penalty = kwargs.pop("repetition_penalty", 1.0) self.length_penalty = kwargs.pop("length_penalty", 1.0) self.no_repeat_ngram_size = kwargs.pop("no_repeat_ngram_size", 0) self.encoder_no_repeat_ngram_size = kwargs.pop("encoder_no_repeat_ngram_size", 0) self.bad_words_ids = kwargs.pop("bad_words_ids", None) self.num_return_sequences = kwargs.pop("num_return_sequences", 1) self.chunk_size_feed_forward = kwargs.pop("chunk_size_feed_forward", 0) self.output_scores = kwargs.pop("output_scores", False) self.return_dict_in_generate = kwargs.pop("return_dict_in_generate", False) self.forced_bos_token_id = kwargs.pop("forced_bos_token_id", None) self.forced_eos_token_id = kwargs.pop("forced_eos_token_id", None) self.remove_invalid_values = kwargs.pop("remove_invalid_values", False) self.exponential_decay_length_penalty = kwargs.pop("exponential_decay_length_penalty", None) self.suppress_tokens = kwargs.pop("suppress_tokens", None) self.begin_suppress_tokens = kwargs.pop("begin_suppress_tokens", None) # Fine-tuning task arguments self.architectures = kwargs.pop("architectures", None) self.finetuning_task = kwargs.pop("finetuning_task", None) self.id2label = kwargs.pop("id2label", None) self.label2id = kwargs.pop("label2id", None) if self.id2label is not None: num_labels = 
kwargs.pop("num_labels", None) if num_labels is not None and len(self.id2label) != num_labels: logger.warning( f"You passed along `num_labels={num_labels}` with an incompatible id to label map: " f"{self.id2label}. The number of labels wil be overwritten to {self.num_labels}." ) self.id2label = dict((int(key), value) for key, value in self.id2label.items()) # Keys are always strings in JSON so convert ids to int here. else: self.num_labels = kwargs.pop("num_labels", 2) if self.torch_dtype is not None and isinstance(self.torch_dtype, str): # we will start using self.torch_dtype in v5, but to be consistent with # from_pretrained's torch_dtype arg convert it to an actual torch.dtype object if is_torch_available(): import torch self.torch_dtype = getattr(torch, self.torch_dtype) # Tokenizer arguments TODO: eventually tokenizer and models should share the same config self.tokenizer_class = kwargs.pop("tokenizer_class", None) self.prefix = kwargs.pop("prefix", None) self.bos_token_id = kwargs.pop("bos_token_id", None) self.pad_token_id = kwargs.pop("pad_token_id", None) self.eos_token_id = kwargs.pop("eos_token_id", None) self.sep_token_id = kwargs.pop("sep_token_id", None) self.decoder_start_token_id = kwargs.pop("decoder_start_token_id", None) # task specific arguments self.task_specific_params = kwargs.pop("task_specific_params", None) # regression / multi-label classification self.problem_type = kwargs.pop("problem_type", None) allowed_problem_types = ("regression", "single_label_classification", "multi_label_classification") if self.problem_type is not None and self.problem_type not in allowed_problem_types: raise ValueError( f"The config parameter `problem_type` was not understood: received {self.problem_type} " "but only 'regression', 'single_label_classification' and 'multi_label_classification' are valid." ) # TPU arguments if kwargs.pop("xla_device", None) is not None: logger.warning( "The `xla_device` argument has been deprecated in v4.4.0 of Transformers. It is ignored and you can " "safely remove it from your `config.json` file." ) # Name or path to the pretrained checkpoint self._name_or_path = str(kwargs.pop("name_or_path", "")) # Config hash self._commit_hash = kwargs.pop("_commit_hash", None) # Drop the transformers version info self.transformers_version = kwargs.pop("transformers_version", None) # Deal with gradient checkpointing if kwargs.get("gradient_checkpointing", False): warnings.warn( "Passing `gradient_checkpointing` to a config initialization is deprecated and will be removed in v5 " "Transformers. Using `model.gradient_checkpointing_enable()` instead, or if you are using the " "`Trainer` API, pass `gradient_checkpointing=True` in your `TrainingArguments`." ) # Additional attributes without default values for key, value in kwargs.items(): try: setattr(self, key, value) except AttributeError as err: logger.error(f"Can't set {key} with value {value} for {self}") raise err def name_or_path(self) -> str: return getattr(self, "_name_or_path", None) def name_or_path(self, value): self._name_or_path = str(value) # Make sure that name_or_path is a string (for JSON encoding) def use_return_dict(self) -> bool: """ `bool`: Whether or not return [`~utils.ModelOutput`] instead of tuples. """ # If torchscript is set, force `return_dict=False` to avoid jit errors return self.return_dict and not self.torchscript def num_labels(self) -> int: """ `int`: The number of labels for classification models. 
""" return len(self.id2label) def num_labels(self, num_labels: int): if not hasattr(self, "id2label") or self.id2label is None or len(self.id2label) != num_labels: self.id2label = {i: f"LABEL_{i}" for i in range(num_labels)} self.label2id = dict(zip(self.id2label.values(), self.id2label.keys())) def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs): """ Save a configuration object to the directory `save_directory`, so that it can be re-loaded using the [`~PretrainedConfig.from_pretrained`] class method. Args: save_directory (`str` or `os.PathLike`): Directory where the configuration JSON file will be saved (will be created if it does not exist). push_to_hub (`bool`, *optional*, defaults to `False`): Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the repository you want to push to with `repo_id` (will default to the name of `save_directory` in your namespace). kwargs: Additional key word arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method. """ if os.path.isfile(save_directory): raise AssertionError(f"Provided path ({save_directory}) should be a directory, not a file") os.makedirs(save_directory, exist_ok=True) if push_to_hub: commit_message = kwargs.pop("commit_message", None) repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1]) repo_id, token = self._create_repo(repo_id, **kwargs) files_timestamps = self._get_files_timestamps(save_directory) # If we have a custom config, we copy the file defining it in the folder and set the attributes so it can be # loaded from the Hub. if self._auto_class is not None: custom_object_save(self, save_directory, config=self) # If we save using the predefined names, we can load using `from_pretrained` output_config_file = os.path.join(save_directory, CONFIG_NAME) self.to_json_file(output_config_file, use_diff=True) logger.info(f"Configuration saved in {output_config_file}") if push_to_hub: self._upload_modified_files( save_directory, repo_id, files_timestamps, commit_message=commit_message, token=token ) def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig": r""" Instantiate a [`PretrainedConfig`] (or a derived class) from a pretrained model configuration. Args: pretrained_model_name_or_path (`str` or `os.PathLike`): This can be either: - a string, the *model id* of a pretrained model configuration hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced under a user or organization name, like `dbmdz/bert-base-german-cased`. - a path to a *directory* containing a configuration file saved using the [`~PretrainedConfig.save_pretrained`] method, e.g., `./my_model_directory/`. - a path or url to a saved configuration JSON *file*, e.g., `./my_model_directory/configuration.json`. cache_dir (`str` or `os.PathLike`, *optional*): Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force to (re-)download the configuration files and override the cached versions if they exist. resume_download (`bool`, *optional*, defaults to `False`): Whether or not to delete incompletely received file. Attempts to resume the download if such a file exists. 
proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request. use_auth_token (`str` or `bool`, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, or not specified, will use the token generated when running `huggingface-cli login` (stored in `~/.huggingface`). revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier allowed by git. <Tip> To test a pull request you made on the Hub, you can pass `revision="refs/pr/<pr_number>". </Tip> return_unused_kwargs (`bool`, *optional*, defaults to `False`): If `False`, then this function returns just the final configuration object. If `True`, then this functions returns a `Tuple(config, unused_kwargs)` where *unused_kwargs* is a dictionary consisting of the key/value pairs whose keys are not configuration attributes: i.e., the part of `kwargs` which has not been used to update `config` and is otherwise ignored. subfolder (`str`, *optional*, defaults to `""`): In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can specify the folder name here. kwargs (`Dict[str, Any]`, *optional*): The values in kwargs of any keys which are configuration attributes will be used to override the loaded values. Behavior concerning key/value pairs whose keys are *not* configuration attributes is controlled by the `return_unused_kwargs` keyword parameter. Returns: [`PretrainedConfig`]: The configuration object instantiated from this pretrained model. Examples: ```python # We can't instantiate directly the base class *PretrainedConfig* so let's show the examples on a # derived class: BertConfig config = BertConfig.from_pretrained( "bert-base-uncased" ) # Download configuration from huggingface.co and cache. config = BertConfig.from_pretrained( "./test/saved_model/" ) # E.g. config (or model) was saved using *save_pretrained('./test/saved_model/')* config = BertConfig.from_pretrained("./test/saved_model/my_configuration.json") config = BertConfig.from_pretrained("bert-base-uncased", output_attentions=True, foo=False) assert config.output_attentions == True config, unused_kwargs = BertConfig.from_pretrained( "bert-base-uncased", output_attentions=True, foo=False, return_unused_kwargs=True ) assert config.output_attentions == True assert unused_kwargs == {"foo": False} ```""" config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs) if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type: logger.warning( f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." ) return cls.from_dict(config_dict, **kwargs) def get_config_dict( cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs ) -> Tuple[Dict[str, Any], Dict[str, Any]]: """ From a `pretrained_model_name_or_path`, resolve to a dictionary of parameters, to be used for instantiating a [`PretrainedConfig`] using `from_dict`. 
Parameters: pretrained_model_name_or_path (`str` or `os.PathLike`): The identifier of the pre-trained checkpoint from which we want the dictionary of parameters. Returns: `Tuple[Dict, Dict]`: The dictionary(ies) that will be used to instantiate the configuration object. """ original_kwargs = copy.deepcopy(kwargs) # Get config dict associated with the base config file config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) if "_commit_hash" in config_dict: original_kwargs["_commit_hash"] = config_dict["_commit_hash"] # That config file may point us toward another config file to use. if "configuration_files" in config_dict: configuration_file = get_configuration_file(config_dict["configuration_files"]) config_dict, kwargs = cls._get_config_dict( pretrained_model_name_or_path, _configuration_file=configuration_file, **original_kwargs ) return config_dict, kwargs def _get_config_dict( cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs ) -> Tuple[Dict[str, Any], Dict[str, Any]]: cache_dir = kwargs.pop("cache_dir", None) force_download = kwargs.pop("force_download", False) resume_download = kwargs.pop("resume_download", False) proxies = kwargs.pop("proxies", None) use_auth_token = kwargs.pop("use_auth_token", None) local_files_only = kwargs.pop("local_files_only", False) revision = kwargs.pop("revision", None) trust_remote_code = kwargs.pop("trust_remote_code", None) subfolder = kwargs.pop("subfolder", "") from_pipeline = kwargs.pop("_from_pipeline", None) from_auto_class = kwargs.pop("_from_auto", False) commit_hash = kwargs.pop("_commit_hash", None) if trust_remote_code is True: logger.warning( "The argument `trust_remote_code` is to be used with Auto classes. It has no effect here and is" " ignored." ) user_agent = {"file_type": "config", "from_auto_class": from_auto_class} if from_pipeline is not None: user_agent["using_pipeline"] = from_pipeline pretrained_model_name_or_path = str(pretrained_model_name_or_path) is_local = os.path.isdir(pretrained_model_name_or_path) if os.path.isfile(os.path.join(subfolder, pretrained_model_name_or_path)): # Special case when pretrained_model_name_or_path is a local file resolved_config_file = pretrained_model_name_or_path is_local = True elif is_remote_url(pretrained_model_name_or_path): configuration_file = pretrained_model_name_or_path resolved_config_file = download_url(pretrained_model_name_or_path) else: configuration_file = kwargs.pop("_configuration_file", CONFIG_NAME) try: # Load from local folder or from cache or download from model Hub and cache resolved_config_file = cached_file( pretrained_model_name_or_path, configuration_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, user_agent=user_agent, revision=revision, subfolder=subfolder, _commit_hash=commit_hash, ) commit_hash = extract_commit_hash(resolved_config_file, commit_hash) except EnvironmentError: # Raise any environment error raise by `cached_file`. It will have a helpful error message adapted to # the original exception. raise except Exception: # For any other exception, we throw a generic error. raise EnvironmentError( f"Can't load the configuration of '{pretrained_model_name_or_path}'. If you were trying to load it" " from 'https://huggingface.co/models', make sure you don't have a local directory with the same" f" name. 
Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory" f" containing a {configuration_file} file" ) try: # Load config dict config_dict = cls._dict_from_json_file(resolved_config_file) config_dict["_commit_hash"] = commit_hash except (json.JSONDecodeError, UnicodeDecodeError): raise EnvironmentError( f"It looks like the config file at '{resolved_config_file}' is not a valid JSON file." ) if is_local: logger.info(f"loading configuration file {resolved_config_file}") else: logger.info(f"loading configuration file {configuration_file} from cache at {resolved_config_file}") return config_dict, kwargs def from_dict(cls, config_dict: Dict[str, Any], **kwargs) -> "PretrainedConfig": """ Instantiates a [`PretrainedConfig`] from a Python dictionary of parameters. Args: config_dict (`Dict[str, Any]`): Dictionary that will be used to instantiate the configuration object. Such a dictionary can be retrieved from a pretrained checkpoint by leveraging the [`~PretrainedConfig.get_config_dict`] method. kwargs (`Dict[str, Any]`): Additional parameters from which to initialize the configuration object. Returns: [`PretrainedConfig`]: The configuration object instantiated from those parameters. """ return_unused_kwargs = kwargs.pop("return_unused_kwargs", False) # Those arguments may be passed along for our internal telemetry. # We remove them so they don't appear in `return_unused_kwargs`. kwargs.pop("_from_auto", None) kwargs.pop("_from_pipeline", None) # The commit hash might have been updated in the `config_dict`, we don't want the kwargs to erase that update. if "_commit_hash" in kwargs and "_commit_hash" in config_dict: kwargs["_commit_hash"] = config_dict["_commit_hash"] config = cls(**config_dict) if hasattr(config, "pruned_heads"): config.pruned_heads = dict((int(key), value) for key, value in config.pruned_heads.items()) # Update config with kwargs if needed if "num_labels" in kwargs and "id2label" in kwargs: num_labels = kwargs["num_labels"] id2label = kwargs["id2label"] if kwargs["id2label"] is not None else [] if len(id2label) != num_labels: raise ValueError( f"You passed along `num_labels={num_labels }` with an incompatible id to label map: " f"{kwargs['id2label']}. Since those arguments are inconsistent with each other, you should remove " "one of them." ) to_remove = [] for key, value in kwargs.items(): if hasattr(config, key): setattr(config, key, value) if key != "torch_dtype": to_remove.append(key) for key in to_remove: kwargs.pop(key, None) logger.info(f"Model config {config}") if return_unused_kwargs: return config, kwargs else: return config def from_json_file(cls, json_file: Union[str, os.PathLike]) -> "PretrainedConfig": """ Instantiates a [`PretrainedConfig`] from the path to a JSON file of parameters. Args: json_file (`str` or `os.PathLike`): Path to the JSON file containing the parameters. Returns: [`PretrainedConfig`]: The configuration object instantiated from that JSON file. 
""" config_dict = cls._dict_from_json_file(json_file) return cls(**config_dict) def _dict_from_json_file(cls, json_file: Union[str, os.PathLike]): with open(json_file, "r", encoding="utf-8") as reader: text = reader.read() return json.loads(text) def __eq__(self, other): return self.__dict__ == other.__dict__ def __repr__(self): return f"{self.__class__.__name__} {self.to_json_string()}" def to_diff_dict(self) -> Dict[str, Any]: """ Removes all attributes from config which correspond to the default config attributes for better readability and serializes to a Python dictionary. Returns: `Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance, """ config_dict = self.to_dict() # get the default config dict default_config_dict = PretrainedConfig().to_dict() # get class specific config dict class_config_dict = self.__class__().to_dict() if not self.is_composition else {} serializable_config_dict = {} # only serialize values that differ from the default config for key, value in config_dict.items(): if ( key not in default_config_dict or key == "transformers_version" or value != default_config_dict[key] or (key in class_config_dict and value != class_config_dict[key]) ): serializable_config_dict[key] = value self.dict_torch_dtype_to_str(serializable_config_dict) return serializable_config_dict def to_dict(self) -> Dict[str, Any]: """ Serializes this instance to a Python dictionary. Returns: `Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance. """ output = copy.deepcopy(self.__dict__) if hasattr(self.__class__, "model_type"): output["model_type"] = self.__class__.model_type if "_auto_class" in output: del output["_auto_class"] if "_commit_hash" in output: del output["_commit_hash"] # Transformers version when serializing the model output["transformers_version"] = __version__ self.dict_torch_dtype_to_str(output) return output def to_json_string(self, use_diff: bool = True) -> str: """ Serializes this instance to a JSON string. Args: use_diff (`bool`, *optional*, defaults to `True`): If set to `True`, only the difference between the config instance and the default `PretrainedConfig()` is serialized to JSON string. Returns: `str`: String containing all the attributes that make up this configuration instance in JSON format. """ if use_diff is True: config_dict = self.to_diff_dict() else: config_dict = self.to_dict() return json.dumps(config_dict, indent=2, sort_keys=True) + "\n" def to_json_file(self, json_file_path: Union[str, os.PathLike], use_diff: bool = True): """ Save this instance to a JSON file. Args: json_file_path (`str` or `os.PathLike`): Path to the JSON file in which this configuration instance's parameters will be saved. use_diff (`bool`, *optional*, defaults to `True`): If set to `True`, only the difference between the config instance and the default `PretrainedConfig()` is serialized to JSON file. """ with open(json_file_path, "w", encoding="utf-8") as writer: writer.write(self.to_json_string(use_diff=use_diff)) def update(self, config_dict: Dict[str, Any]): """ Updates attributes of this class with attributes from `config_dict`. Args: config_dict (`Dict[str, Any]`): Dictionary of attributes that should be updated for this class. """ for key, value in config_dict.items(): setattr(self, key, value) def update_from_string(self, update_str: str): """ Updates attributes of this class with attributes from `update_str`. The expected format is ints, floats and strings as is, and for booleans use `true` or `false`. 
For example: "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index" The keys to change have to already exist in the config object. Args: update_str (`str`): String with attributes that should be updated for this class. """ d = dict(x.split("=") for x in update_str.split(",")) for k, v in d.items(): if not hasattr(self, k): raise ValueError(f"key {k} isn't in the original config dict") old_v = getattr(self, k) if isinstance(old_v, bool): if v.lower() in ["true", "1", "y", "yes"]: v = True elif v.lower() in ["false", "0", "n", "no"]: v = False else: raise ValueError(f"can't derive true or false from {v} (key {k})") elif isinstance(old_v, int): v = int(v) elif isinstance(old_v, float): v = float(v) elif not isinstance(old_v, str): raise ValueError( f"You can only update int, float, bool or string values in the config, got {v} for key {k}" ) setattr(self, k, v) def dict_torch_dtype_to_str(self, d: Dict[str, Any]) -> None: """ Checks whether the passed dictionary and its nested dicts have a *torch_dtype* key and if it's not None, converts torch.dtype to a string of just the type. For example, `torch.float32` get converted into *"float32"* string, which can then be stored in the json format. """ if d.get("torch_dtype", None) is not None and not isinstance(d["torch_dtype"], str): d["torch_dtype"] = str(d["torch_dtype"]).split(".")[1] for value in d.values(): if isinstance(value, dict): self.dict_torch_dtype_to_str(value) def register_for_auto_class(cls, auto_class="AutoConfig"): """ Register this class with a given auto class. This should only be used for custom configurations as the ones in the library are already mapped with `AutoConfig`. <Tip warning={true}> This API is experimental and may have some slight breaking changes in the next releases. </Tip> Args: auto_class (`str` or `type`, *optional*, defaults to `"AutoConfig"`): The auto class to register this new configuration with. """ if not isinstance(auto_class, str): auto_class = auto_class.__name__ import transformers.models.auto as auto_module if not hasattr(auto_module, auto_class): raise ValueError(f"{auto_class} is not a valid auto class.") cls._auto_class = auto_class PretrainedConfig.push_to_hub = copy_func(PretrainedConfig.push_to_hub) PretrainedConfig.push_to_hub.__doc__ = PretrainedConfig.push_to_hub.__doc__.format( object="config", object_class="AutoConfig", object_files="configuration file" ) The provided code snippet includes necessary dependencies for implementing the `keras_serializable` function. Write a Python function `def keras_serializable(cls)` to solve the following problem: Decorate a Keras Layer class to support Keras serialization. This is done by: 1. Adding a `transformers_config` dict to the Keras config dictionary in `get_config` (called by Keras at serialization time. 2. Wrapping `__init__` to accept that `transformers_config` dict (passed by Keras at deserialization time) and convert it to a config object for the actual layer initializer. 3. Registering the class as a custom object in Keras (if the Tensorflow version supports this), so that it does not need to be supplied in `custom_objects` in the call to `tf.keras.models.load_model`. Args: cls (a `tf.keras.layers.Layers subclass`): Typically a `TF.MainLayer` class in this project, in general must accept a `config` argument to its initializer. Returns: The same class object, with modifications for Keras deserialization. Here is the function: def keras_serializable(cls): """ Decorate a Keras Layer class to support Keras serialization. 
This is done by: 1. Adding a `transformers_config` dict to the Keras config dictionary in `get_config` (called by Keras at serialization time. 2. Wrapping `__init__` to accept that `transformers_config` dict (passed by Keras at deserialization time) and convert it to a config object for the actual layer initializer. 3. Registering the class as a custom object in Keras (if the Tensorflow version supports this), so that it does not need to be supplied in `custom_objects` in the call to `tf.keras.models.load_model`. Args: cls (a `tf.keras.layers.Layers subclass`): Typically a `TF.MainLayer` class in this project, in general must accept a `config` argument to its initializer. Returns: The same class object, with modifications for Keras deserialization. """ initializer = cls.__init__ config_class = getattr(cls, "config_class", None) if config_class is None: raise AttributeError("Must set `config_class` to use @keras_serializable") @functools.wraps(initializer) def wrapped_init(self, *args, **kwargs): config = args[0] if args and isinstance(args[0], PretrainedConfig) else kwargs.pop("config", None) if isinstance(config, dict): config = config_class.from_dict(config) initializer(self, config, *args, **kwargs) elif isinstance(config, PretrainedConfig): if len(args) > 0: initializer(self, *args, **kwargs) else: initializer(self, config, *args, **kwargs) else: raise ValueError("Must pass either `config` (PretrainedConfig) or `config` (dict)") self._config = config self._kwargs = kwargs cls.__init__ = wrapped_init if not hasattr(cls, "get_config"): raise TypeError("Only use @keras_serializable on tf.keras.layers.Layer subclasses") if hasattr(cls.get_config, "_is_default"): def get_config(self): cfg = super(cls, self).get_config() cfg["config"] = self._config.to_dict() cfg.update(self._kwargs) return cfg cls.get_config = get_config cls._keras_serializable = True if hasattr(tf.keras.utils, "register_keras_serializable"): cls = tf.keras.utils.register_keras_serializable()(cls) return cls
Decorate a Keras Layer class to support Keras serialization. This is done by: 1. Adding a `transformers_config` dict to the Keras config dictionary in `get_config` (called by Keras at serialization time). 2. Wrapping `__init__` to accept that `transformers_config` dict (passed by Keras at deserialization time) and converting it to a config object for the actual layer initializer. 3. Registering the class as a custom object in Keras (if the TensorFlow version supports this), so that it does not need to be supplied in `custom_objects` in the call to `tf.keras.models.load_model`. Args: cls (a `tf.keras.layers.Layer` subclass): Typically a `TF.MainLayer` class in this project; in general it must accept a `config` argument in its initializer. Returns: The same class object, with modifications for Keras deserialization.
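For context, a hedged usage sketch of the decorator described above: `MyConfig` and `TFMyMainLayer` are hypothetical names, and the snippet assumes a `transformers` install with TensorFlow available (the decorator lives in `transformers.modeling_tf_utils`). It only exercises construction and a forward call; the exact serialization behaviour depends on the Keras version.

```python
import tensorflow as tf
from transformers import PretrainedConfig
from transformers.modeling_tf_utils import keras_serializable

class MyConfig(PretrainedConfig):
    model_type = "my-toy-model"

    def __init__(self, hidden_size=32, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size

@keras_serializable
class TFMyMainLayer(tf.keras.layers.Layer):
    config_class = MyConfig  # required by @keras_serializable

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.dense = tf.keras.layers.Dense(config.hidden_size)

    def call(self, inputs):
        return self.dense(inputs)

# The wrapped __init__ accepts a config object (or a plain dict via config=...).
layer = TFMyMainLayer(MyConfig(hidden_size=64))
print(layer(tf.zeros((1, 8))).shape)  # (1, 64)
```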
10,538
import functools import gc import inspect import json import os import pickle import re import warnings from collections.abc import Mapping from pathlib import Path from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Union import h5py import numpy as np import tensorflow as tf from tensorflow.python.keras import backend as K from tensorflow.python.keras.engine import data_adapter from tensorflow.python.keras.engine.keras_tensor import KerasTensor from tensorflow.python.keras.saving import hdf5_format from huggingface_hub import Repository, list_repo_files from keras.saving.hdf5_format import save_attributes_to_hdf5_group from transformers.utils.hub import convert_file_size_to_int, get_checkpoint_shard_files from . import DataCollatorWithPadding, DefaultDataCollator from .activations_tf import get_tf_activation from .configuration_utils import PretrainedConfig from .dynamic_module_utils import custom_object_save from .generation_tf_utils import TFGenerationMixin from .tf_utils import shape_list from .utils import ( DUMMY_INPUTS, SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, TF2_WEIGHTS_INDEX_NAME, TF2_WEIGHTS_NAME, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ModelOutput, PushToHubMixin, cached_file, download_url, find_labels, has_file, is_offline_mode, is_remote_url, is_safetensors_available, logging, requires_backends, working_or_temp_dir, ) def input_processing(func, config, **kwargs): """ Process the input of each TensorFlow model including the booleans. In case of a list of symbolic inputs, each input has to be named accordingly to the parameters name, i.e. `input_ids = tf.keras.Input(shape=(128,), dtype='int32', name="input_ids")` otherwise the order of the tensors will not be guaranteed during the training. Args: func (`callable`): The callable function of the TensorFlow model. config ([`PretrainedConfig`]): The config of the running model. **kwargs: The inputs of the model. Returns: Two lists, one for the missing layers, and another one for the unexpected layers. 
""" signature = dict(inspect.signature(func).parameters) has_kwargs = bool(signature.pop("kwargs", None)) signature.pop("self", None) parameter_names = list(signature.keys()) main_input_name = parameter_names[0] main_input = kwargs.pop(main_input_name, None) output = {} allowed_types = (tf.Tensor, bool, int, ModelOutput, tuple, list, dict, np.ndarray, KerasTensor) if "inputs" in kwargs["kwargs_call"]: warnings.warn( "The `inputs` argument is deprecated and will be removed in a future version, use `input_ids` instead.", FutureWarning, ) output["input_ids"] = kwargs["kwargs_call"].pop("inputs") if "decoder_cached_states" in kwargs["kwargs_call"]: warnings.warn( "The `decoder_cached_states` argument is deprecated and will be removed in a future version, use" " `past_key_values` instead.", FutureWarning, ) output["past_key_values"] = kwargs["kwargs_call"].pop("decoder_cached_states") if "past" in kwargs["kwargs_call"] and "past_key_values" in parameter_names: warnings.warn( "The `past` argument is deprecated and will be removed in a future version, use `past_key_values`" " instead.", FutureWarning, ) kwargs["past_key_values"] = kwargs["kwargs_call"].pop("past") elif "past_key_values" in kwargs["kwargs_call"] and "past" in parameter_names: kwargs["past"] = kwargs["kwargs_call"].pop("past_key_values") if has_kwargs: output["kwargs"] = kwargs.pop("kwargs_call", {}) else: if len(kwargs["kwargs_call"]) > 0: raise ValueError( "The following keyword arguments are not supported by this model:" f" {list(kwargs['kwargs_call'].keys())}." ) kwargs.pop("kwargs_call") for k, v in kwargs.items(): if isinstance(v, allowed_types) or v is None: output[k] = v else: raise ValueError(f"Data of type {type(v)} is not allowed only {allowed_types} is accepted for {k}.") if isinstance(main_input, (tuple, list)): for i, input in enumerate(main_input): # EagerTensors don't allow to use the .name property so we check for a real Tensor if type(input) == tf.Tensor: # Tensor names have always the pattern `name:id` then we check only the # `name` part tensor_name = input.name.split(":")[0] if tensor_name in parameter_names: output[tensor_name] = input else: output[parameter_names[i]] = input elif isinstance(input, allowed_types) or input is None: output[parameter_names[i]] = input else: raise ValueError( f"Data of type {type(input)} is not allowed only {allowed_types} is accepted for" f" {parameter_names[i]}." ) elif isinstance(main_input, Mapping): if "inputs" in main_input: warnings.warn( "The `inputs` argument is deprecated and will be removed in a future version, use `input_ids`" " instead.", FutureWarning, ) output["input_ids"] = main_input.pop("inputs") if "decoder_cached_states" in main_input: warnings.warn( "The `decoder_cached_states` argument is deprecated and will be removed in a future version, use" " `past_key_values` instead.", FutureWarning, ) output["past_key_values"] = main_input.pop("decoder_cached_states") for k, v in dict(main_input).items(): if isinstance(v, allowed_types) or v is None: output[k] = v elif k not in parameter_names and "args" not in parameter_names: logger.warning( f"The parameter {k} does not belongs to the parameter list {parameter_names} and will be ignored." 
) continue else: raise ValueError(f"Data of type {type(v)} is not allowed only {allowed_types} is accepted for {k}.") else: if isinstance(main_input, (tf.Tensor, KerasTensor)) or main_input is None: output[main_input_name] = main_input else: raise ValueError( f"Data of type {type(main_input)} is not allowed only {allowed_types} is accepted for" f" {main_input_name}." ) # Populates any unspecified argument with their default value, according to the signature. for name in parameter_names: if name not in list(output.keys()) and name != "args": output[name] = kwargs.pop(name, signature[name].default) # When creating a SavedModel TF calls the method with LayerCall.__call__(args, **kwargs) # So to respect the proper output we have to add this exception if "args" in output: if output["args"] is not None and type(output["args"]) == tf.Tensor: tensor_name = output["args"].name.split(":")[0] output[tensor_name] = output["args"] else: # `args` in this case is always the first parameter, then `input_ids` output["input_ids"] = output["args"] del output["args"] if "kwargs" in output: del output["kwargs"] cast_output = dict() for key, val in output.items(): if isinstance(val, tf.Tensor) and val.dtype == tf.int32: cast_output[key] = tf.cast(val, tf.int64) elif isinstance(val, np.ndarray) and val.dtype == np.int32: cast_output[key] = val.astype(np.int64) else: cast_output[key] = val output = cast_output del cast_output if config is not None: boolean_dict = { k: v for k, v in output.items() if k in ["return_dict", "output_attentions", "output_hidden_states", "use_cache"] } output.update( booleans_processing( config=config, **boolean_dict, ) ) return output The provided code snippet includes necessary dependencies for implementing the `unpack_inputs` function. Write a Python function `def unpack_inputs(func)` to solve the following problem: Decorator that processes the inputs to a Keras layer, passing them to the layer as keyword arguments. This enables downstream use of the inputs by their variable name, even if they arrive packed as a dictionary in the first input (common case in Keras). Args: func (`callable`): The callable function of the TensorFlow model. Returns: A callable that wraps the original `func` with the behavior described above. Here is the function: def unpack_inputs(func): """ Decorator that processes the inputs to a Keras layer, passing them to the layer as keyword arguments. This enables downstream use of the inputs by their variable name, even if they arrive packed as a dictionary in the first input (common case in Keras). Args: func (`callable`): The callable function of the TensorFlow model. Returns: A callable that wraps the original `func` with the behavior described above. """ original_signature = inspect.signature(func) @functools.wraps(func) def run_call_with_unpacked_inputs(self, *args, **kwargs): # isolates the actual `**kwargs` for the decorated function kwargs_call = {key: val for key, val in kwargs.items() if key not in dict(original_signature.parameters)} fn_args_and_kwargs = {key: val for key, val in kwargs.items() if key not in kwargs_call} fn_args_and_kwargs.update({"kwargs_call": kwargs_call}) # move any arg into kwargs, if they exist fn_args_and_kwargs.update(dict(zip(func.__code__.co_varnames[1:], args))) # Encoder Decoder models delegate the application of the configuration options to their inner models. 
if "EncoderDecoder" in self.__class__.__name__: config = None else: config = self.config unpacked_inputs = input_processing(func, config, **fn_args_and_kwargs) return func(self, **unpacked_inputs) # Keras enforces the first layer argument to be passed, and checks it through `inspect.getfullargspec()`. This # function does not follow wrapper chains (i.e. ignores `functools.wraps()`), meaning that without the line below # Keras would attempt to check the first argument against the literal signature of the wrapper. run_call_with_unpacked_inputs.__signature__ = original_signature return run_call_with_unpacked_inputs
Decorator that processes the inputs to a Keras layer, passing them to the layer as keyword arguments. This enables downstream use of the inputs by their variable name, even if they arrive packed as a dictionary in the first input (common case in Keras). Args: func (`callable`): The callable function of the TensorFlow model. Returns: A callable that wraps the original `func` with the behavior described above.
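The decorator above is tied to transformers internals (`input_processing`, `self.config`), so a faithful standalone demo is hard to keep short. Below is a deliberately simplified toy re-implementation of the same idea, not the transformers function: inputs may arrive packed in a dict as the first positional argument (as Keras often passes them) or as plain keyword arguments, and the wrapper normalizes both into keyword arguments for `call`.

```python
import functools
import inspect

def unpack_dict_inputs(func):
    signature = inspect.signature(func)

    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        # If the first positional argument is a dict, spread it into keyword
        # arguments so the body can refer to the inputs by name.
        if args and isinstance(args[0], dict):
            kwargs = {**args[0], **kwargs}
            args = args[1:]
        bound = signature.bind(self, *args, **kwargs)
        bound.apply_defaults()
        return func(*bound.args, **bound.kwargs)

    return wrapper

class DummyModel:
    @unpack_dict_inputs
    def call(self, input_ids=None, attention_mask=None, training=False):
        return {"input_ids": input_ids, "attention_mask": attention_mask}

m = DummyModel()
# Both call styles resolve to the same keyword arguments inside `call`.
print(m.call({"input_ids": [1, 2], "attention_mask": [1, 1]}))
print(m.call(input_ids=[1, 2], attention_mask=[1, 1]))
```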
10,539
import functools import gc import inspect import json import os import pickle import re import warnings from collections.abc import Mapping from pathlib import Path from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Union import h5py import numpy as np import tensorflow as tf from tensorflow.python.keras import backend as K from tensorflow.python.keras.engine import data_adapter from tensorflow.python.keras.engine.keras_tensor import KerasTensor from tensorflow.python.keras.saving import hdf5_format from huggingface_hub import Repository, list_repo_files from keras.saving.hdf5_format import save_attributes_to_hdf5_group from transformers.utils.hub import convert_file_size_to_int, get_checkpoint_shard_files from . import DataCollatorWithPadding, DefaultDataCollator from .activations_tf import get_tf_activation from .configuration_utils import PretrainedConfig from .dynamic_module_utils import custom_object_save from .generation_tf_utils import TFGenerationMixin from .tf_utils import shape_list from .utils import ( DUMMY_INPUTS, SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, TF2_WEIGHTS_INDEX_NAME, TF2_WEIGHTS_NAME, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ModelOutput, PushToHubMixin, cached_file, download_url, find_labels, has_file, is_offline_mode, is_remote_url, is_safetensors_available, logging, requires_backends, working_or_temp_dir, ) def dtype_byte_size(dtype): """ Returns the size (in bytes) occupied by one parameter of type `dtype`. Example: ```py >>> dtype_byte_size(tf.float32) 4 ``` """ if dtype == tf.bool: return 1 / 8 bit_search = re.search(r"[^\d](\d+)$", dtype.name) if bit_search is None: raise ValueError(f"`dtype` is not a valid dtype: {dtype}.") bit_size = int(bit_search.groups()[0]) return bit_size // 8 TF2_WEIGHTS_NAME = "tf_model.h5" The provided code snippet includes necessary dependencies for implementing the `tf_shard_checkpoint` function. Write a Python function `def tf_shard_checkpoint(weights, max_shard_size="10GB")` to solve the following problem: Splits a model state dictionary in sub-checkpoints so that the final size of each sub-checkpoint does not exceed a given size. The sub-checkpoints are determined by iterating through the `state_dict` in the order of its keys, so there is no optimization made to make each sub-checkpoint as close as possible to the maximum size passed. For example, if the limit is 10GB and we have weights of sizes [6GB, 6GB, 2GB, 6GB, 2GB, 2GB] they will get sharded as [6GB], [6+2GB], [6+2+2GB] and not [6+2+2GB], [6+2GB], [6GB]. <Tip warning={true}> If one of the model's weight is bigger that `max_shard_size`, it will end up in its own sub-checkpoint which will have a size greater than `max_shard_size`. </Tip> Args: weights (`Dict[str, tf.RessourceVariable]`): The list of tf.RessourceVariable of a model to save. max_shard_size (`int` or `str`, *optional*, defaults to `"10GB"`): The maximum size of each sub-checkpoint. If expressed as a string, needs to be digits followed by a unit (like `"5MB"`). Here is the function: def tf_shard_checkpoint(weights, max_shard_size="10GB"): """ Splits a model state dictionary in sub-checkpoints so that the final size of each sub-checkpoint does not exceed a given size. The sub-checkpoints are determined by iterating through the `state_dict` in the order of its keys, so there is no optimization made to make each sub-checkpoint as close as possible to the maximum size passed. 
For example, if the limit is 10GB and we have weights of sizes [6GB, 6GB, 2GB, 6GB, 2GB, 2GB] they will get sharded as [6GB], [6+2GB], [6+2+2GB] and not [6+2+2GB], [6+2GB], [6GB]. <Tip warning={true}> If one of the model's weight is bigger that `max_shard_size`, it will end up in its own sub-checkpoint which will have a size greater than `max_shard_size`. </Tip> Args: weights (`Dict[str, tf.RessourceVariable]`): The list of tf.RessourceVariable of a model to save. max_shard_size (`int` or `str`, *optional*, defaults to `"10GB"`): The maximum size of each sub-checkpoint. If expressed as a string, needs to be digits followed by a unit (like `"5MB"`). """ max_shard_size = convert_file_size_to_int(max_shard_size) sharded_state_dicts = [] current_block = [] current_block_size = 0 total_size = 0 for item in weights: weight_size = item.numpy().size * dtype_byte_size(item.dtype) # If this weight is going to tip up over the maximal size, we split. if current_block_size + weight_size > max_shard_size: sharded_state_dicts.append(current_block) current_block = [] current_block_size = 0 current_block.append(item) current_block_size += weight_size total_size += weight_size # Add the last block sharded_state_dicts.append(current_block) # If we only have one shard, we return it if len(sharded_state_dicts) == 1: return {TF2_WEIGHTS_NAME: sharded_state_dicts[0]}, None # Otherwise, let's build the index weight_map = {} shards = {} for idx, shard in enumerate(sharded_state_dicts): shard_file = TF2_WEIGHTS_NAME.replace(".h5", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.h5") shards[shard_file] = shard for weight in shard: weight_name = weight.name weight_map[weight_name] = shard_file # Add the metadata metadata = {"total_size": total_size} index = {"metadata": metadata, "weight_map": weight_map} return shards, index
Splits a model state dictionary into sub-checkpoints so that the final size of each sub-checkpoint does not exceed a given size. The sub-checkpoints are determined by iterating through the `state_dict` in the order of its keys, so there is no optimization made to make each sub-checkpoint as close as possible to the maximum size passed. For example, if the limit is 10GB and we have weights of sizes [6GB, 6GB, 2GB, 6GB, 2GB, 2GB] they will get sharded as [6GB], [6+2GB], [6+2+2GB] and not [6+2+2GB], [6+2GB], [6GB]. <Tip warning={true}> If one of the model's weights is bigger than `max_shard_size`, it will end up in its own sub-checkpoint, which will have a size greater than `max_shard_size`. </Tip> Args: weights (`Dict[str, tf.ResourceVariable]`): The list of `tf.ResourceVariable` weights of a model to save. max_shard_size (`int` or `str`, *optional*, defaults to `"10GB"`): The maximum size of each sub-checkpoint. If expressed as a string, it needs to be digits followed by a unit (like `"5MB"`).
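A minimal usage sketch of the function above; the small Keras model and the 200MB shard limit are illustrative assumptions, not part of the original example:

```python
import json
import tensorflow as tf

# Illustrative model: any built Keras model whose `weights` list should be sharded.
model = tf.keras.Sequential([tf.keras.layers.Dense(512, input_shape=(1024,))])

shards, index = tf_shard_checkpoint(model.weights, max_shard_size="200MB")
for shard_file, shard_weights in shards.items():
    print(shard_file, [w.name for w in shard_weights])

# `index` is None when everything fits in a single shard; otherwise it maps
# weight names to shard files and records the total checkpoint size.
if index is not None:
    print(json.dumps(index["metadata"], indent=2))
```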
10,540
import functools import gc import inspect import json import os import pickle import re import warnings from collections.abc import Mapping from pathlib import Path from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Union import h5py import numpy as np import tensorflow as tf from tensorflow.python.keras import backend as K from tensorflow.python.keras.engine import data_adapter from tensorflow.python.keras.engine.keras_tensor import KerasTensor from tensorflow.python.keras.saving import hdf5_format from huggingface_hub import Repository, list_repo_files from keras.saving.hdf5_format import save_attributes_to_hdf5_group from transformers.utils.hub import convert_file_size_to_int, get_checkpoint_shard_files from . import DataCollatorWithPadding, DefaultDataCollator from .activations_tf import get_tf_activation from .configuration_utils import PretrainedConfig from .dynamic_module_utils import custom_object_save from .generation_tf_utils import TFGenerationMixin from .tf_utils import shape_list from .utils import ( DUMMY_INPUTS, SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, TF2_WEIGHTS_INDEX_NAME, TF2_WEIGHTS_NAME, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ModelOutput, PushToHubMixin, cached_file, download_url, find_labels, has_file, is_offline_mode, is_remote_url, is_safetensors_available, logging, requires_backends, working_or_temp_dir, ) def load_tf_shard(model, model_layer_map, resolved_archive_file, ignore_mismatched_sizes=False): """ Loads a shard from a sharded checkpoint file. Handles the missing keys and unexpected keys. Args: model (`tf.keras.models.Model`): Model in which the weights are loaded model_layer_map (`Dict`): A dictionnary mapping the layer name to the index of the layer in the model. resolved_archive_file (`str`): Path to the checkpoint file from which the weights will be loaded ignore_mismatched_sizes (`bool`, *optional*, defaults to `False`): Whether to ignore the mismatched keys Returns: `tf.keras.models.Model`: Three lists, one for the layers that were found and succesfully restored (from the shard file), one for the missmatched layers, and another one for the unexpected layers. """ saved_weight_names_set = set() saved_weights = {} missmatched_keys = set() unexpected_keys = set() # Read the H5 file try: with h5py.File(resolved_archive_file, "r") as sharded_checkpoint_file: # Retrieve the name of each layer from the H5 file saved_h5_model_layers_name = set( hdf5_format.load_attributes_from_hdf5_group(sharded_checkpoint_file, "layer_names") ) weight_value_tuples = [] # Compute missing and unexpected sub layers # Store the weights in list of tuples that looks like [(weight_object, value_of_weight),...] 
for layer_name in saved_h5_model_layers_name: h5_layer_object = sharded_checkpoint_file[layer_name] saved_weights[layer_name] = np.asarray(h5_layer_object) saved_weight_names_set.add(layer_name) if layer_name not in model_layer_map: unexpected_keys.add(layer_name) else: symbolic_weight = model.weights[model_layer_map[layer_name]] saved_weight_value = saved_weights[layer_name] # If the current weight is found if saved_weight_value is not None: # Check if the shape of the current weight and the one from the H5 file are different if K.int_shape(symbolic_weight) != saved_weight_value.shape: # If yes we reshape the weight from the H5 file accordingly to the current weight # If the two shapes are not compatible we raise an issue try: array = np.reshape(saved_weight_value, K.int_shape(symbolic_weight)) except ValueError as e: if ignore_mismatched_sizes: missmatched_keys.add( (layer_name, saved_weight_value.shape, K.int_shape(symbolic_weight)) ) continue else: raise e else: array = saved_weight_value # We create the tuple that will be loaded and add it to the final list weight_value_tuples.append((symbolic_weight, array)) K.batch_set_value(weight_value_tuples) return saved_weight_names_set, unexpected_keys, missmatched_keys except Exception as e: try: with open(resolved_archive_file) as f: if f.read().startswith("version"): raise OSError( "You seem to have cloned a repository without having git-lfs installed. Please install " "git-lfs and run `git lfs install` followed by `git lfs pull` in the folder " "you cloned." ) else: raise ValueError( f"Unable to locate the file {resolved_archive_file} which is necessary to load this pretrained" " model. Make sure you have saved the model properly." ) from e except (UnicodeDecodeError, ValueError): raise OSError( f"Unable to load weights from TF checkpoint file for '{resolved_archive_file}' " f"at '{resolved_archive_file}'. " "If you tried to load a TF model from a sharded checkpoint, you should try converting the model" "by loading it in pytorch and saving it localy. A convertion script should be realeased soon." ) The provided code snippet includes necessary dependencies for implementing the `load_tf_sharded_weights` function. Write a Python function `def load_tf_sharded_weights(model, shard_files, ignore_mismatched_sizes=False, strict=True)` to solve the following problem: This is the same as `load_tf_weights` but for a sharded checkpoint. Detect missing and unexpected layers and load the TF weights from the shard file accordingly to their names and shapes. This load is performed efficiently: each checkpoint shard is loaded one by one in RAM and deleted after being loaded in the model. Args: model (`tf.keras.models.Model`): The model in which to load the checkpoint. shard_files (`str` or `os.PathLike`): A list containing the sharded checkpoint names. ignore_mismatched_sizes`bool`, *optional`, defaults to `True`): Whether or not to ignore the mismatch between the sizes strict (`bool`, *optional*, defaults to `True`): Whether to strictly enforce that the keys in the model state dict match the keys in the sharded checkpoint. Returns: Three lists, one for the missing layers, another one for the unexpected layers, and a last one for the mismatched layers. Here is the function: def load_tf_sharded_weights(model, shard_files, ignore_mismatched_sizes=False, strict=True): """ This is the same as `load_tf_weights` but for a sharded checkpoint. Detect missing and unexpected layers and load the TF weights from the shard file accordingly to their names and shapes. 
This load is performed efficiently: each checkpoint shard is loaded one by one in RAM and deleted after being loaded in the model. Args: model (`tf.keras.models.Model`): The model in which to load the checkpoint. shard_files (`str` or `os.PathLike`): A list containing the sharded checkpoint names. ignore_mismatched_sizes`bool`, *optional`, defaults to `True`): Whether or not to ignore the mismatch between the sizes strict (`bool`, *optional*, defaults to `True`): Whether to strictly enforce that the keys in the model state dict match the keys in the sharded checkpoint. Returns: Three lists, one for the missing layers, another one for the unexpected layers, and a last one for the mismatched layers. """ # Load the index missing_keys = [] unexpected_keys = set() saved_keys = set() missmatched_keys = set() # Since TF adds the name of the class to its weights, and uses the index and not the name of the layer to load # the weight, we have to get rid of the first prefix of the name of the layer. model_keys = set() model_layer_map = dict() for i, k in enumerate(model.weights): if "model." in k.name or len(k.name.split("/")) == 1: layer_name = k.name else: layer_name = "/".join(k.name.split("/")[1:]) model_keys.add(layer_name) model_layer_map[layer_name] = i for shard_file in shard_files: state_dict = tf.io.read_file(shard_file) saved_weight_names_set, unexpected_keys_set, missmatched_keys_set = load_tf_shard( model, model_layer_map, shard_file, ignore_mismatched_sizes=ignore_mismatched_sizes ) saved_keys.update(saved_weight_names_set) unexpected_keys.update(unexpected_keys_set) missmatched_keys.update(missmatched_keys_set) del state_dict gc.collect() missing_keys = model_keys - saved_keys if strict and (len(missing_keys) > 0 or len(unexpected_keys) > 0): error_message = f"Error(s) in loading state_dict for {model.__class__.__name__}" if len(missing_keys) > 0: str_missing_keys = ",".join([f'"{k}"' for k in missing_keys]) error_message += f"\nMissing key(s): {str_missing_keys}." if len(unexpected_keys) > 0: str_unexpected_keys = ",".join([f'"{k}"' for k in unexpected_keys]) error_message += f"\nMissing key(s): {str_unexpected_keys}." raise RuntimeError(error_message) return missing_keys, unexpected_keys, missmatched_keys
This is the same as `load_tf_weights` but for a sharded checkpoint. It detects missing and unexpected layers and loads the TF weights from the shard files according to their names and shapes. This load is performed efficiently: each checkpoint shard is loaded one by one in RAM and deleted after being loaded into the model. Args: model (`tf.keras.models.Model`): The model in which to load the checkpoint. shard_files (`str` or `os.PathLike`): A list containing the sharded checkpoint names. ignore_mismatched_sizes (`bool`, *optional*, defaults to `False`): Whether or not to ignore mismatches between the sizes of the saved and expected weights. strict (`bool`, *optional*, defaults to `True`): Whether to strictly enforce that the keys in the model state dict match the keys in the sharded checkpoint. Returns: Three lists, one for the missing layers, another one for the unexpected layers, and a last one for the mismatched layers.
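A hedged sketch of how the loader above might be called; the shard file names are placeholders and `model` is assumed to be an already-built TF model whose architecture matches the checkpoint:

```python
# Placeholder shard names as produced by a sharded TF save; adjust to the real paths.
shard_files = ["tf_model-00001-of-00002.h5", "tf_model-00002-of-00002.h5"]

missing, unexpected, mismatched = load_tf_sharded_weights(
    model,                      # assumed: a built tf.keras model with matching layers
    shard_files,
    ignore_mismatched_sizes=True,
    strict=False,               # with strict=True a missing/unexpected key raises
)
print("missing:", missing)
print("unexpected:", unexpected)
print("mismatched:", mismatched)
```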
10,541
import functools import gc import inspect import json import os import pickle import re import warnings from collections.abc import Mapping from pathlib import Path from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Union import h5py import numpy as np import tensorflow as tf from tensorflow.python.keras import backend as K from tensorflow.python.keras.engine import data_adapter from tensorflow.python.keras.engine.keras_tensor import KerasTensor from tensorflow.python.keras.saving import hdf5_format from huggingface_hub import Repository, list_repo_files from keras.saving.hdf5_format import save_attributes_to_hdf5_group from transformers.utils.hub import convert_file_size_to_int, get_checkpoint_shard_files from . import DataCollatorWithPadding, DefaultDataCollator from .activations_tf import get_tf_activation from .configuration_utils import PretrainedConfig from .dynamic_module_utils import custom_object_save from .generation_tf_utils import TFGenerationMixin from .tf_utils import shape_list from .utils import ( DUMMY_INPUTS, SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, TF2_WEIGHTS_INDEX_NAME, TF2_WEIGHTS_NAME, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ModelOutput, PushToHubMixin, cached_file, download_url, find_labels, has_file, is_offline_mode, is_remote_url, is_safetensors_available, logging, requires_backends, working_or_temp_dir, ) def shape_list(tensor: Union[tf.Tensor, np.ndarray]) -> List[int]: """ Deal with dynamic shape in tensorflow cleanly. Args: tensor (`tf.Tensor` or `np.ndarray`): The tensor we want the shape of. Returns: `List[int]`: The shape of the tensor as a list. """ if isinstance(tensor, np.ndarray): return list(tensor.shape) dynamic = tf.shape(tensor) if tensor.shape == tf.TensorShape(None): return dynamic static = tensor.shape.as_list() return [dynamic[i] if s is None else s for i, s in enumerate(static)] The provided code snippet includes necessary dependencies for implementing the `init_copy_embeddings` function. Write a Python function `def init_copy_embeddings(old_embeddings, new_num_tokens)` to solve the following problem: r""" This function aims to reduce the embeddings in case new_num_tokens < old_num_tokens or to pad with -1 in case new_num_tokens > old_num_tokens. A mask is also computed in order to know which weight in the embeddings should be kept or not. Example: - if new_num_tokens=5 and old_num_tokens=4 and old_embeddings=[w1,w2,w3,w4] - mask=[True,True,True,True,False] and current_weights=[w1,w2,w3,w4,-1] - if new_num_tokens=4 and old_num_tokens=5 and old_embeddings=[w1,w2,w3,w4,w5] - mask=[True,True,True,True] and current_weights=[w1,w2,w3,w4] Here is the function: def init_copy_embeddings(old_embeddings, new_num_tokens): r""" This function aims to reduce the embeddings in case new_num_tokens < old_num_tokens or to pad with -1 in case new_num_tokens > old_num_tokens. A mask is also computed in order to know which weight in the embeddings should be kept or not. 
Example: - if new_num_tokens=5 and old_num_tokens=4 and old_embeddings=[w1,w2,w3,w4] - mask=[True,True,True,True,False] and current_weights=[w1,w2,w3,w4,-1] - if new_num_tokens=4 and old_num_tokens=5 and old_embeddings=[w1,w2,w3,w4,w5] - mask=[True,True,True,True] and current_weights=[w1,w2,w3,w4] """ old_num_tokens, old_embedding_dim = shape_list(old_embeddings) size_diff = new_num_tokens - old_num_tokens # initialize new embeddings # Copy token embeddings from the previous ones if tf.math.greater(size_diff, 0): # if the new size is greater than the old one, we extend the current embeddings with a padding until getting new size # and we create a mask to properly identify the padded values and be replaced by the values of the newly created # embeddings current_weights = tf.pad( old_embeddings.value(), tf.convert_to_tensor([[0, size_diff], [0, 0]]), constant_values=-1 ) num_tokens_to_copy = min(old_num_tokens, new_num_tokens) mask = tf.fill(tf.convert_to_tensor([num_tokens_to_copy, 1]), True) mask = tf.pad(mask, tf.convert_to_tensor([[0, size_diff], [0, 0]]), constant_values=False) else: # if the new size if lower than the old one, we take the current embeddings until the new size current_weights = tf.slice( old_embeddings.value(), tf.convert_to_tensor([0, 0]), tf.convert_to_tensor([new_num_tokens, old_embedding_dim]), ) mask = tf.fill(tf.convert_to_tensor([new_num_tokens, 1]), True) return mask, current_weights
r""" This function aims to reduce the embeddings in case new_num_tokens < old_num_tokens or to pad with -1 in case new_num_tokens > old_num_tokens. A mask is also computed in order to know which weight in the embeddings should be kept or not. Example: - if new_num_tokens=5 and old_num_tokens=4 and old_embeddings=[w1,w2,w3,w4] - mask=[True,True,True,True,False] and current_weights=[w1,w2,w3,w4,-1] - if new_num_tokens=4 and old_num_tokens=5 and old_embeddings=[w1,w2,w3,w4,w5] - mask=[True,True,True,True] and current_weights=[w1,w2,w3,w4]
10,542
import functools import gc import inspect import json import os import pickle import re import warnings from collections.abc import Mapping from pathlib import Path from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Union import h5py import numpy as np import tensorflow as tf from tensorflow.python.keras import backend as K from tensorflow.python.keras.engine import data_adapter from tensorflow.python.keras.engine.keras_tensor import KerasTensor from tensorflow.python.keras.saving import hdf5_format from huggingface_hub import Repository, list_repo_files from keras.saving.hdf5_format import save_attributes_to_hdf5_group from transformers.utils.hub import convert_file_size_to_int, get_checkpoint_shard_files from . import DataCollatorWithPadding, DefaultDataCollator from .activations_tf import get_tf_activation from .configuration_utils import PretrainedConfig from .dynamic_module_utils import custom_object_save from .generation_tf_utils import TFGenerationMixin from .tf_utils import shape_list from .utils import ( DUMMY_INPUTS, SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, TF2_WEIGHTS_INDEX_NAME, TF2_WEIGHTS_NAME, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ModelOutput, PushToHubMixin, cached_file, download_url, find_labels, has_file, is_offline_mode, is_remote_url, is_safetensors_available, logging, requires_backends, working_or_temp_dir, ) The provided code snippet includes necessary dependencies for implementing the `get_initializer` function. Write a Python function `def get_initializer(initializer_range: float = 0.02) -> tf.initializers.TruncatedNormal` to solve the following problem: Creates a `tf.initializers.TruncatedNormal` with the given range. Args: initializer_range (*float*, defaults to 0.02): Standard deviation of the initializer range. Returns: `tf.initializers.TruncatedNormal`: The truncated normal initializer. Here is the function: def get_initializer(initializer_range: float = 0.02) -> tf.initializers.TruncatedNormal: """ Creates a `tf.initializers.TruncatedNormal` with the given range. Args: initializer_range (*float*, defaults to 0.02): Standard deviation of the initializer range. Returns: `tf.initializers.TruncatedNormal`: The truncated normal initializer. """ return tf.keras.initializers.TruncatedNormal(stddev=initializer_range)
Creates a `tf.initializers.TruncatedNormal` with the given range. Args: initializer_range (*float*, defaults to 0.02): Standard deviation of the initializer range. Returns: `tf.initializers.TruncatedNormal`: The truncated normal initializer.
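A one-line usage sketch, assuming the usual pattern of passing the initializer to a Keras layer:

```python
import tensorflow as tf

dense = tf.keras.layers.Dense(64, kernel_initializer=get_initializer(initializer_range=0.02))
```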
10,543
import functools import importlib.util import json import numbers import os import pickle import shutil import sys import tempfile from dataclasses import asdict from pathlib import Path from typing import TYPE_CHECKING, Dict, Optional import numpy as np from . import __version__ as version from .utils import flatten_dict, is_datasets_available, is_torch_available, logging from .trainer_callback import ProgressCallback, TrainerCallback from .trainer_utils import PREFIX_CHECKPOINT_DIR, BestRun, IntervalStrategy from .training_args import ParallelMode from .utils import ENV_VARS_TRUE_VALUES, is_torch_tpu_available def is_fairscale_available(): return importlib.util.find_spec("fairscale") is not None
null
10,544
import functools import importlib.util import json import numbers import os import pickle import shutil import sys import tempfile from dataclasses import asdict from pathlib import Path from typing import TYPE_CHECKING, Dict, Optional import numpy as np from . import __version__ as version from .utils import flatten_dict, is_datasets_available, is_torch_available, logging from .trainer_callback import ProgressCallback, TrainerCallback from .trainer_utils import PREFIX_CHECKPOINT_DIR, BestRun, IntervalStrategy from .training_args import ParallelMode from .utils import ENV_VARS_TRUE_VALUES, is_torch_tpu_available def is_wandb_available(): def is_optuna_available(): def is_ray_tune_available(): def is_sigopt_available(): def is_wandb_available(): def hp_params(trial): if is_optuna_available(): import optuna if isinstance(trial, optuna.Trial): return trial.params if is_ray_tune_available(): if isinstance(trial, dict): return trial if is_sigopt_available(): if isinstance(trial, dict): return trial if is_wandb_available(): if isinstance(trial, dict): return trial raise RuntimeError(f"Unknown type for trial {trial.__class__}")
null
10,545
import functools import importlib.util import json import numbers import os import pickle import shutil import sys import tempfile from dataclasses import asdict from pathlib import Path from typing import TYPE_CHECKING, Dict, Optional import numpy as np from . import __version__ as version from .utils import flatten_dict, is_datasets_available, is_torch_available, logging from .trainer_callback import ProgressCallback, TrainerCallback from .trainer_utils import PREFIX_CHECKPOINT_DIR, BestRun, IntervalStrategy from .training_args import ParallelMode from .utils import ENV_VARS_TRUE_VALUES, is_torch_tpu_available def is_optuna_available(): return importlib.util.find_spec("optuna") is not None def is_ray_tune_available(): if not is_ray_available(): return False return importlib.util.find_spec("ray.tune") is not None def is_sigopt_available(): return importlib.util.find_spec("sigopt") is not None def default_hp_search_backend(): if is_optuna_available(): return "optuna" elif is_ray_tune_available(): return "ray" elif is_sigopt_available(): return "sigopt"
null
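A brief sketch of how the backend probe might be used; the error message is illustrative:

```python
backend = default_hp_search_backend()
if backend is None:
    raise RuntimeError(
        "No hyperparameter search backend found: install optuna, ray[tune], or sigopt."
    )
print(f"Hyperparameter search will use the '{backend}' backend.")
```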
10,546
import functools import importlib.util import json import numbers import os import pickle import shutil import sys import tempfile from dataclasses import asdict from pathlib import Path from typing import TYPE_CHECKING, Dict, Optional import numpy as np from . import __version__ as version from .utils import flatten_dict, is_datasets_available, is_torch_available, logging from .trainer_callback import ProgressCallback, TrainerCallback from .trainer_utils import PREFIX_CHECKPOINT_DIR, BestRun, IntervalStrategy from .training_args import ParallelMode from .utils import ENV_VARS_TRUE_VALUES, is_torch_tpu_available PREFIX_CHECKPOINT_DIR = "checkpoint" class BestRun(NamedTuple): """ The best run found by an hyperparameter search (see [`~Trainer.hyperparameter_search`]). Parameters: run_id (`str`): The id of the best run (if models were saved, the corresponding checkpoint will be in the folder ending with run-{run_id}). objective (`float`): The objective that was obtained for this run. hyperparameters (`Dict[str, Any]`): The hyperparameters picked to get this run. """ run_id: str objective: float hyperparameters: Dict[str, Any] class ParallelMode(Enum): NOT_PARALLEL = "not_parallel" NOT_DISTRIBUTED = "not_distributed" DISTRIBUTED = "distributed" SAGEMAKER_MODEL_PARALLEL = "sagemaker_model_parallel" SAGEMAKER_DATA_PARALLEL = "sagemaker_data_parallel" TPU = "tpu" def run_hp_search_optuna(trainer, n_trials: int, direction: str, **kwargs) -> BestRun: import optuna if trainer.args.process_index == 0: def _objective(trial, checkpoint_dir=None): checkpoint = None if checkpoint_dir: for subdir in os.listdir(checkpoint_dir): if subdir.startswith(PREFIX_CHECKPOINT_DIR): checkpoint = os.path.join(checkpoint_dir, subdir) trainer.objective = None if trainer.args.world_size > 1: if trainer.args.parallel_mode != ParallelMode.DISTRIBUTED: raise RuntimeError("only support DDP optuna HPO for ParallelMode.DISTRIBUTED currently.") trainer._hp_search_setup(trial) torch.distributed.broadcast_object_list(pickle.dumps(trainer.args), src=0) trainer.train(resume_from_checkpoint=checkpoint) else: trainer.train(resume_from_checkpoint=checkpoint, trial=trial) # If there hasn't been any evaluation during the training loop. if getattr(trainer, "objective", None) is None: metrics = trainer.evaluate() trainer.objective = trainer.compute_objective(metrics) return trainer.objective timeout = kwargs.pop("timeout", None) n_jobs = kwargs.pop("n_jobs", 1) study = optuna.create_study(direction=direction, **kwargs) study.optimize(_objective, n_trials=n_trials, timeout=timeout, n_jobs=n_jobs) best_trial = study.best_trial return BestRun(str(best_trial.number), best_trial.value, best_trial.params) else: for i in range(n_trials): trainer.objective = None args_main_rank = list(pickle.dumps(trainer.args)) if trainer.args.parallel_mode != ParallelMode.DISTRIBUTED: raise RuntimeError("only support DDP optuna HPO for ParallelMode.DISTRIBUTED currently.") torch.distributed.broadcast_object_list(args_main_rank, src=0) args = pickle.loads(bytes(args_main_rank)) for key, value in asdict(args).items(): if key != "local_rank": setattr(trainer.args, key, value) trainer.train(resume_from_checkpoint=None) # If there hasn't been any evaluation during the training loop. if getattr(trainer, "objective", None) is None: metrics = trainer.evaluate() trainer.objective = trainer.compute_objective(metrics) return None
null
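This function is normally reached through `Trainer.hyperparameter_search`; a hedged sketch of that entry point follows. The search space and trial count are illustrative, and `trainer` is assumed to be an already-constructed `Trainer`:

```python
def optuna_hp_space(trial):
    # Illustrative search space; the keys must match TrainingArguments field names.
    return {
        "learning_rate": trial.suggest_float("learning_rate", 1e-5, 5e-5, log=True),
        "per_device_train_batch_size": trial.suggest_categorical(
            "per_device_train_batch_size", [8, 16, 32]
        ),
    }

best_run = trainer.hyperparameter_search(
    hp_space=optuna_hp_space,
    backend="optuna",
    n_trials=10,
    direction="minimize",
)
print(best_run.run_id, best_run.objective, best_run.hyperparameters)
```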
10,547
import functools import importlib.util import json import numbers import os import pickle import shutil import sys import tempfile from dataclasses import asdict from pathlib import Path from typing import TYPE_CHECKING, Dict, Optional import numpy as np from . import __version__ as version from .utils import flatten_dict, is_datasets_available, is_torch_available, logging logger = logging.get_logger(__name__) from .trainer_callback import ProgressCallback, TrainerCallback from .trainer_utils import PREFIX_CHECKPOINT_DIR, BestRun, IntervalStrategy from .training_args import ParallelMode from .utils import ENV_VARS_TRUE_VALUES, is_torch_tpu_available class TensorBoardCallback(TrainerCallback): """ A [`TrainerCallback`] that sends the logs to [TensorBoard](https://www.tensorflow.org/tensorboard). Args: tb_writer (`SummaryWriter`, *optional*): The writer to use. Will instantiate one if not set. """ def __init__(self, tb_writer=None): has_tensorboard = is_tensorboard_available() if not has_tensorboard: raise RuntimeError( "TensorBoardCallback requires tensorboard to be installed. Either update your PyTorch version or" " install tensorboardX." ) if has_tensorboard: try: from torch.utils.tensorboard import SummaryWriter # noqa: F401 self._SummaryWriter = SummaryWriter except ImportError: try: from tensorboardX import SummaryWriter self._SummaryWriter = SummaryWriter except ImportError: self._SummaryWriter = None else: self._SummaryWriter = None self.tb_writer = tb_writer def _init_summary_writer(self, args, log_dir=None): log_dir = log_dir or args.logging_dir if self._SummaryWriter is not None: self.tb_writer = self._SummaryWriter(log_dir=log_dir) def on_train_begin(self, args, state, control, **kwargs): if not state.is_world_process_zero: return log_dir = None if state.is_hyper_param_search: trial_name = state.trial_name if trial_name is not None: log_dir = os.path.join(args.logging_dir, trial_name) if self.tb_writer is None: self._init_summary_writer(args, log_dir) if self.tb_writer is not None: self.tb_writer.add_text("args", args.to_json_string()) if "model" in kwargs: model = kwargs["model"] if hasattr(model, "config") and model.config is not None: model_config_json = model.config.to_json_string() self.tb_writer.add_text("model_config", model_config_json) # Version of TensorBoard coming from tensorboardX does not have this method. if hasattr(self.tb_writer, "add_hparams"): self.tb_writer.add_hparams(args.to_sanitized_dict(), metric_dict={}) def on_log(self, args, state, control, logs=None, **kwargs): if not state.is_world_process_zero: return if self.tb_writer is None: self._init_summary_writer(args) if self.tb_writer is not None: logs = rewrite_logs(logs) for k, v in logs.items(): if isinstance(v, (int, float)): self.tb_writer.add_scalar(k, v, state.global_step) else: logger.warning( "Trainer is attempting to log a value of " f'"{v}" of type {type(v)} for key "{k}" as a scalar. ' "This invocation of Tensorboard's writer.add_scalar() " "is incorrect so we dropped this attribute." ) self.tb_writer.flush() def on_train_end(self, args, state, control, **kwargs): if self.tb_writer: self.tb_writer.close() self.tb_writer = None class ProgressCallback(TrainerCallback): """ A [`TrainerCallback`] that displays the progress of training or evaluation. 
""" def __init__(self): self.training_bar = None self.prediction_bar = None def on_train_begin(self, args, state, control, **kwargs): if state.is_local_process_zero: self.training_bar = tqdm(total=state.max_steps) self.current_step = 0 def on_step_end(self, args, state, control, **kwargs): if state.is_local_process_zero: self.training_bar.update(state.global_step - self.current_step) self.current_step = state.global_step def on_prediction_step(self, args, state, control, eval_dataloader=None, **kwargs): if state.is_local_process_zero and has_length(eval_dataloader): if self.prediction_bar is None: self.prediction_bar = tqdm(total=len(eval_dataloader), leave=self.training_bar is None) self.prediction_bar.update(1) def on_evaluate(self, args, state, control, **kwargs): if state.is_local_process_zero: if self.prediction_bar is not None: self.prediction_bar.close() self.prediction_bar = None def on_predict(self, args, state, control, **kwargs): if state.is_local_process_zero: if self.prediction_bar is not None: self.prediction_bar.close() self.prediction_bar = None def on_log(self, args, state, control, logs=None, **kwargs): if state.is_local_process_zero and self.training_bar is not None: _ = logs.pop("total_flos", None) self.training_bar.write(str(logs)) def on_train_end(self, args, state, control, **kwargs): if state.is_local_process_zero: self.training_bar.close() self.training_bar = None PREFIX_CHECKPOINT_DIR = "checkpoint" class IntervalStrategy(ExplicitEnum): NO = "no" STEPS = "steps" EPOCH = "epoch" class BestRun(NamedTuple): """ The best run found by an hyperparameter search (see [`~Trainer.hyperparameter_search`]). Parameters: run_id (`str`): The id of the best run (if models were saved, the corresponding checkpoint will be in the folder ending with run-{run_id}). objective (`float`): The objective that was obtained for this run. hyperparameters (`Dict[str, Any]`): The hyperparameters picked to get this run. """ run_id: str objective: float hyperparameters: Dict[str, Any] class TrainerMemoryTracker: """ A helper class that tracks cpu and gpu memory. This class will silently skip unless `psutil` is available. Install with `pip install psutil`. When a stage completes, it can pass metrics dict to update with the memory metrics gathered during this stage. Example : ```python self._memory_tracker = TrainerMemoryTracker(self.args.skip_memory_metrics) self._memory_tracker.start() # code ... metrics = {"train_runtime": 10.5} self._memory_tracker.stop_and_update_metrics(metrics) ``` At the moment GPU tracking is only for `pytorch`, but can be extended to support `tensorflow`. To understand this class' intricacies please read the documentation of [`~Trainer.log_metrics`]. 
""" # map trainer methods to metrics prefix stages = { "__init__": "init", "train": "train", "_inner_training_loop": "train", "evaluate": "eval", "predict": "test", } def __init__(self, skip_memory_metrics=False): self.skip_memory_metrics = skip_memory_metrics if not is_psutil_available(): # soft dependency on psutil self.skip_memory_metrics = True if self.skip_memory_metrics: return import psutil # noqa if is_torch_cuda_available(): import torch self.torch = torch self.gpu = {} else: self.torch = None self.process = psutil.Process() self.cur_stage = None self.cpu = {} self.init_reported = False def derive_stage(self): """derives the stage/caller name automatically""" caller = inspect.currentframe().f_back.f_back.f_code.co_name if caller in self.stages: return self.stages[caller] else: raise ValueError( f"was called from {caller}, but only expect to be called from one of {self.stages.keys()}" ) def cpu_mem_used(self): """get resident set size memory for the current process""" return self.process.memory_info().rss def peak_monitor_func(self): self.cpu_mem_used_peak = -1 while True: self.cpu_mem_used_peak = max(self.cpu_mem_used(), self.cpu_mem_used_peak) # can't sleep or will not catch the peak right (this comment is here on purpose) # time.sleep(0.001) # 1msec if not self.peak_monitoring: break def start(self): """start tracking for the caller's stage""" if self.skip_memory_metrics: return stage = self.derive_stage() # deal with nested calls of eval during train - simply ignore those if self.cur_stage is not None and self.cur_stage != stage: return self.cur_stage = stage gc.collect() if self.torch is not None: self.torch.cuda.reset_peak_memory_stats() self.torch.cuda.empty_cache() # gpu if self.torch is not None: self.gpu_mem_used_at_start = self.torch.cuda.memory_allocated() # cpu self.cpu_mem_used_at_start = self.cpu_mem_used() self.peak_monitoring = True peak_monitor_thread = threading.Thread(target=self.peak_monitor_func) peak_monitor_thread.daemon = True peak_monitor_thread.start() def stop(self, stage): """stop tracking for the passed stage""" # deal with nested calls of eval during train - simply ignore those if self.cur_stage is not None and self.cur_stage != stage: return # this sends a signal to peak_monitor_func to complete its loop self.peak_monitoring = False # first ensure all objects get collected and their memory is freed gc.collect() if self.torch is not None: self.torch.cuda.empty_cache() # concepts: # - alloc_delta: the difference of allocated memory between the end and the start # - peaked_delta: the difference between the peak memory and the current memory # in order to know how much memory the measured code consumed one needs to sum these two # gpu if self.torch is not None: self.gpu_mem_used_now = self.torch.cuda.memory_allocated() self.gpu_mem_used_peak = self.torch.cuda.max_memory_allocated() self.gpu[self.cur_stage] = dict( begin=self.gpu_mem_used_at_start, end=self.gpu_mem_used_now, alloc=(self.gpu_mem_used_now - self.gpu_mem_used_at_start), peaked=max(0, self.gpu_mem_used_peak - self.gpu_mem_used_now), ) # cpu self.cpu_mem_used_now = self.cpu_mem_used() self.cpu[self.cur_stage] = dict( begin=self.cpu_mem_used_at_start, end=self.cpu_mem_used_now, alloc=(self.cpu_mem_used_now - self.cpu_mem_used_at_start), peaked=max(0, self.cpu_mem_used_peak - self.cpu_mem_used_now), ) # reset - cycle finished self.cur_stage = None def update_metrics(self, stage, metrics): """updates the metrics""" if self.skip_memory_metrics: return # deal with nested calls of eval during train 
- simply ignore those if self.cur_stage is not None and self.cur_stage != stage: return # since we don't have a way to return init metrics, we push them into the first of train/val/predict stages = [stage] if not self.init_reported: stages.insert(0, "init") self.init_reported = True for stage in stages: for t in ["alloc", "peaked"]: if stage in self.cpu and t in self.cpu[stage]: metrics[f"{stage}_mem_cpu_{t}_delta"] = self.cpu[stage][t] if self.torch is not None and stage in self.gpu and t in self.gpu[stage]: metrics[f"{stage}_mem_gpu_{t}_delta"] = self.gpu[stage][t] # if we need additional debug info, enable the following # for t in ["begin", "end"]: # if stage in self.cpu and t in self.cpu[stage]: # metrics[f"{stage}_mem_cpu_{t}"] = self.cpu[stage][t] # if self.torch is not None and stage in self.gpu and t in self.gpu[stage]: # metrics[f"{stage}_mem_gpu_{t}"] = self.gpu[stage][t] # since memory can be allocated before init, and it might be difficult to track overall # memory usage, in particular for GPU, let's report memory usage at the point init was called if stages[0] == "init": metrics["before_init_mem_cpu"] = self.cpu["init"]["begin"] if self.torch is not None: metrics["before_init_mem_gpu"] = self.gpu["init"]["begin"] # if we also wanted to report any additional memory allocations in between init and # whatever the next stage was we could also report this: # if self.cpu["init"]["end"] != self.cpu[stage]["begin"]: # metrics[f"after_init_mem_cpu_delta"] = self.cpu[stage]["begin"] - self.cpu["init"]["end"] # if self.torch is not None and self.gpu["init"]["end"] != self.gpu[stage]["begin"]: # metrics[f"after_init_mem_gpu_delta"] = self.gpu[stage]["begin"] - self.gpu["init"]["end"] def stop_and_update_metrics(self, metrics=None): """combine stop and metrics update in one call for simpler code""" if self.skip_memory_metrics: return stage = self.derive_stage() self.stop(stage) # init doesn't have metrics to update so we just save that data for later stages to retrieve if metrics is not None: self.update_metrics(stage, metrics) def run_hp_search_ray(trainer, n_trials: int, direction: str, **kwargs) -> BestRun: import ray def _objective(trial, local_trainer, checkpoint_dir=None): try: from transformers.utils.notebook import NotebookProgressCallback if local_trainer.pop_callback(NotebookProgressCallback): local_trainer.add_callback(ProgressCallback) except ModuleNotFoundError: pass checkpoint = None if checkpoint_dir: for subdir in os.listdir(checkpoint_dir): if subdir.startswith(PREFIX_CHECKPOINT_DIR): checkpoint = os.path.join(checkpoint_dir, subdir) local_trainer.objective = None local_trainer.train(resume_from_checkpoint=checkpoint, trial=trial) # If there hasn't been any evaluation during the training loop. if getattr(local_trainer, "objective", None) is None: metrics = local_trainer.evaluate() local_trainer.objective = local_trainer.compute_objective(metrics) local_trainer._tune_save_checkpoint() ray.tune.report(objective=local_trainer.objective, **metrics, done=True) if not trainer._memory_tracker.skip_memory_metrics: from .trainer_utils import TrainerMemoryTracker logger.warning( "Memory tracking for your Trainer is currently " "enabled. Automatically disabling the memory tracker " "since the memory tracker is not serializable." ) trainer._memory_tracker = TrainerMemoryTracker(skip_memory_metrics=True) # The model and TensorBoard writer do not pickle so we have to remove them (if they exists) # while doing the ray hp search. 
_tb_writer = trainer.pop_callback(TensorBoardCallback) trainer.model = None # Setup default `resources_per_trial`. if "resources_per_trial" not in kwargs: # Default to 1 CPU and 1 GPU (if applicable) per trial. kwargs["resources_per_trial"] = {"cpu": 1} if trainer.args.n_gpu > 0: kwargs["resources_per_trial"]["gpu"] = 1 resource_msg = "1 CPU" + (" and 1 GPU" if trainer.args.n_gpu > 0 else "") logger.info( "No `resources_per_trial` arg was passed into " "`hyperparameter_search`. Setting it to a default value " f"of {resource_msg} for each trial." ) # Make sure each trainer only uses GPUs that were allocated per trial. gpus_per_trial = kwargs["resources_per_trial"].get("gpu", 0) trainer.args._n_gpu = gpus_per_trial # Setup default `progress_reporter`. if "progress_reporter" not in kwargs: from ray.tune import CLIReporter kwargs["progress_reporter"] = CLIReporter(metric_columns=["objective"]) if "keep_checkpoints_num" in kwargs and kwargs["keep_checkpoints_num"] > 0: # `keep_checkpoints_num=0` would disabled checkpointing trainer.use_tune_checkpoints = True if kwargs["keep_checkpoints_num"] > 1: logger.warning( f"Currently keeping {kwargs['keep_checkpoints_num']} checkpoints for each trial. " "Checkpoints are usually huge, " "consider setting `keep_checkpoints_num=1`." ) if "scheduler" in kwargs: from ray.tune.schedulers import ASHAScheduler, HyperBandForBOHB, MedianStoppingRule, PopulationBasedTraining # Check if checkpointing is enabled for PopulationBasedTraining if isinstance(kwargs["scheduler"], PopulationBasedTraining): if not trainer.use_tune_checkpoints: logger.warning( "You are using PopulationBasedTraining but you haven't enabled checkpointing. " "This means your trials will train from scratch everytime they are exploiting " "new configurations. Consider enabling checkpointing by passing " "`keep_checkpoints_num=1` as an additional argument to `Trainer.hyperparameter_search`." ) # Check for `do_eval` and `eval_during_training` for schedulers that require intermediate reporting. if isinstance( kwargs["scheduler"], (ASHAScheduler, MedianStoppingRule, HyperBandForBOHB, PopulationBasedTraining) ) and (not trainer.args.do_eval or trainer.args.evaluation_strategy == IntervalStrategy.NO): raise RuntimeError( "You are using {cls} as a scheduler but you haven't enabled evaluation during training. " "This means your trials will not report intermediate results to Ray Tune, and " "can thus not be stopped early or used to exploit other trials parameters. " "If this is what you want, do not use {cls}. If you would like to use {cls}, " "make sure you pass `do_eval=True` and `evaluation_strategy='steps'` in the " "Trainer `args`.".format(cls=type(kwargs["scheduler"]).__name__) ) trainable = ray.tune.with_parameters(_objective, local_trainer=trainer) @functools.wraps(trainable) def dynamic_modules_import_trainable(*args, **kwargs): """ Wrapper around `tune.with_parameters` to ensure datasets_modules are loaded on each Actor. Without this, an ImportError will be thrown. See https://github.com/huggingface/transformers/issues/11565. Assumes that `_objective`, defined above, is a function. 
""" if is_datasets_available(): import datasets.load dynamic_modules_path = os.path.join(datasets.load.init_dynamic_modules(), "__init__.py") # load dynamic_modules from path spec = importlib.util.spec_from_file_location("datasets_modules", dynamic_modules_path) datasets_modules = importlib.util.module_from_spec(spec) sys.modules[spec.name] = datasets_modules spec.loader.exec_module(datasets_modules) return trainable(*args, **kwargs) # special attr set by tune.with_parameters if hasattr(trainable, "__mixins__"): dynamic_modules_import_trainable.__mixins__ = trainable.__mixins__ analysis = ray.tune.run( dynamic_modules_import_trainable, config=trainer.hp_space(None), num_samples=n_trials, **kwargs, ) best_trial = analysis.get_best_trial(metric="objective", mode=direction[:3], scope=trainer.args.ray_scope) best_run = BestRun(best_trial.trial_id, best_trial.last_result["objective"], best_trial.config) if _tb_writer is not None: trainer.add_callback(_tb_writer) return best_run
null
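For the Ray Tune path, a hedged sketch of the same `Trainer.hyperparameter_search` entry point, showing the per-trial resource and checkpoint kwargs this function consumes; the values are illustrative:

```python
best_run = trainer.hyperparameter_search(
    backend="ray",
    n_trials=8,
    direction="maximize",
    resources_per_trial={"cpu": 2, "gpu": 1},  # forwarded to ray.tune.run
    keep_checkpoints_num=1,                    # enables trainer.use_tune_checkpoints
)
```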
10,548
import functools import importlib.util import json import numbers import os import pickle import shutil import sys import tempfile from dataclasses import asdict from pathlib import Path from typing import TYPE_CHECKING, Dict, Optional import numpy as np from . import __version__ as version from .utils import flatten_dict, is_datasets_available, is_torch_available, logging logger = logging.get_logger(__name__) from .trainer_callback import ProgressCallback, TrainerCallback from .trainer_utils import PREFIX_CHECKPOINT_DIR, BestRun, IntervalStrategy from .training_args import ParallelMode from .utils import ENV_VARS_TRUE_VALUES, is_torch_tpu_available class BestRun(NamedTuple): class ParallelMode(Enum): def run_hp_search_sigopt(trainer, n_trials: int, direction: str, **kwargs) -> BestRun: import sigopt from transformers.utils.versions import importlib_metadata if trainer.args.process_index == 0: if importlib_metadata.version("sigopt") >= "8.0.0": sigopt.set_project("huggingface") experiment = sigopt.create_experiment( name="huggingface-tune", type="offline", parameters=trainer.hp_space(None), metrics=[dict(name="objective", objective=direction, strategy="optimize")], parallel_bandwidth=1, budget=n_trials, ) logger.info(f"created experiment: https://app.sigopt.com/experiment/{experiment.id}") for run in experiment.loop(): with run: trainer.objective = None if trainer.args.world_size > 1: if trainer.args.parallel_mode != ParallelMode.DISTRIBUTED: raise RuntimeError("only support DDP Sigopt HPO for ParallelMode.DISTRIBUTED currently.") trainer._hp_search_setup(run.run) torch.distributed.broadcast_object_list(pickle.dumps(trainer.args), src=0) trainer.train(resume_from_checkpoint=None) else: trainer.train(resume_from_checkpoint=None, trial=run.run) # If there hasn't been any evaluation during the training loop. if getattr(trainer, "objective", None) is None: metrics = trainer.evaluate() trainer.objective = trainer.compute_objective(metrics) run.log_metric("objective", trainer.objective) best = list(experiment.get_best_runs())[0] best_run = BestRun(best.id, best.values["objective"].value, best.assignments) else: from sigopt import Connection conn = Connection() proxies = kwargs.pop("proxies", None) if proxies is not None: conn.set_proxies(proxies) experiment = conn.experiments().create( name="huggingface-tune", parameters=trainer.hp_space(None), metrics=[dict(name="objective", objective=direction, strategy="optimize")], parallel_bandwidth=1, observation_budget=n_trials, project="huggingface", ) logger.info(f"created experiment: https://app.sigopt.com/experiment/{experiment.id}") while experiment.progress.observation_count < experiment.observation_budget: suggestion = conn.experiments(experiment.id).suggestions().create() trainer.objective = None if trainer.args.world_size > 1: if trainer.args.parallel_mode != ParallelMode.DISTRIBUTED: raise RuntimeError("only support DDP Sigopt HPO for ParallelMode.DISTRIBUTED currently.") trainer._hp_search_setup(suggestion) torch.distributed.broadcast_object_list(pickle.dumps(trainer.args), src=0) trainer.train(resume_from_checkpoint=None) else: trainer.train(resume_from_checkpoint=None, trial=suggestion) # If there hasn't been any evaluation during the training loop. 
if getattr(trainer, "objective", None) is None: metrics = trainer.evaluate() trainer.objective = trainer.compute_objective(metrics) values = [dict(name="objective", value=trainer.objective)] obs = conn.experiments(experiment.id).observations().create(suggestion=suggestion.id, values=values) logger.info(f"[suggestion_id, observation_id]: [{suggestion.id}, {obs.id}]") experiment = conn.experiments(experiment.id).fetch() best = list(conn.experiments(experiment.id).best_assignments().fetch().iterate_pages())[0] best_run = BestRun(best.id, best.value, best.assignments) return best_run else: for i in range(n_trials): trainer.objective = None args_main_rank = list(pickle.dumps(trainer.args)) if trainer.args.parallel_mode != ParallelMode.DISTRIBUTED: raise RuntimeError("only support DDP Sigopt HPO for ParallelMode.DISTRIBUTED currently.") torch.distributed.broadcast_object_list(args_main_rank, src=0) args = pickle.loads(bytes(args_main_rank)) for key, value in asdict(args).items(): if key != "local_rank": setattr(trainer.args, key, value) trainer.train(resume_from_checkpoint=None) # If there hasn't been any evaluation during the training loop. if getattr(trainer, "objective", None) is None: metrics = trainer.evaluate() trainer.objective = trainer.compute_objective(metrics) return None
null
10,549
import functools import importlib.util import json import numbers import os import pickle import shutil import sys import tempfile from dataclasses import asdict from pathlib import Path from typing import TYPE_CHECKING, Dict, Optional import numpy as np from . import __version__ as version from .utils import flatten_dict, is_datasets_available, is_torch_available, logging logger = logging.get_logger(__name__) from .trainer_callback import ProgressCallback, TrainerCallback from .trainer_utils import PREFIX_CHECKPOINT_DIR, BestRun, IntervalStrategy from .training_args import ParallelMode from .utils import ENV_VARS_TRUE_VALUES, is_torch_tpu_available def is_wandb_available(): def rewrite_logs(d): class WandbCallback(TrainerCallback): def __init__(self): def setup(self, args, state, model, **kwargs): def on_train_begin(self, args, state, control, model=None, **kwargs): def on_train_end(self, args, state, control, model=None, tokenizer=None, **kwargs): def on_log(self, args, state, control, model=None, logs=None, **kwargs): class BestRun(NamedTuple): def is_wandb_available(): def run_hp_search_wandb(trainer, n_trials: int, direction: str, **kwargs) -> BestRun: from .integrations import is_wandb_available if not is_wandb_available(): raise ImportError("This function needs wandb installed: `pip install wandb`") import wandb # add WandbCallback if not already added in trainer callbacks reporting_to_wandb = False for callback in trainer.callback_handler.callbacks: if isinstance(callback, WandbCallback): reporting_to_wandb = True break if not reporting_to_wandb: trainer.add_callback(WandbCallback()) trainer.args.report_to = "wandb" best_trial = {"run_id": None, "objective": None, "hyperparameters": None} sweep_id = kwargs.pop("sweep_id", None) project = kwargs.pop("project", None) name = kwargs.pop("name", None) entity = kwargs.pop("entity", None) metric = kwargs.pop("metric", "eval/loss") sweep_config = trainer.hp_space(None) sweep_config["metric"]["goal"] = direction sweep_config["metric"]["name"] = metric if name: sweep_config["name"] = name def _objective(): run = wandb.run if wandb.run else wandb.init() trainer.state.trial_name = run.name run.config.update({"assignments": {}, "metric": metric}) config = wandb.config trainer.objective = None trainer.train(resume_from_checkpoint=None, trial=vars(config)["_items"]) # If there hasn't been any evaluation during the training loop. if getattr(trainer, "objective", None) is None: metrics = trainer.evaluate() trainer.objective = trainer.compute_objective(metrics) format_metrics = rewrite_logs(metrics) if metric not in format_metrics: logger.warning( f"Provided metric {metric} not found. This might result in unexpected sweeps charts. The available" f" metrics are {format_metrics.keys()}" ) best_score = False if best_trial["run_id"] is not None: if direction == "minimize": best_score = trainer.objective < best_trial["objective"] elif direction == "maximize": best_score = trainer.objective > best_trial["objective"] if best_score or best_trial["run_id"] is None: best_trial["run_id"] = run.id best_trial["objective"] = trainer.objective best_trial["hyperparameters"] = dict(config) return trainer.objective sweep_id = wandb.sweep(sweep_config, project=project, entity=entity) if not sweep_id else sweep_id logger.info(f"wandb sweep id - {sweep_id}") wandb.agent(sweep_id, function=_objective, count=n_trials) return BestRun(best_trial["run_id"], best_trial["objective"], best_trial["hyperparameters"])
null
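And for the Weights & Biases sweep path, a hedged sketch showing the kwargs (`project`, `metric`) this function pops; the project name is hypothetical:

```python
best_run = trainer.hyperparameter_search(
    backend="wandb",
    n_trials=5,
    direction="minimize",
    project="my-sweeps",   # hypothetical W&B project name
    metric="eval/loss",    # the metric the sweep optimizes
)
```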
10,550
import functools import importlib.util import json import numbers import os import pickle import shutil import sys import tempfile from dataclasses import asdict from pathlib import Path from typing import TYPE_CHECKING, Dict, Optional import numpy as np from . import __version__ as version from .utils import flatten_dict, is_datasets_available, is_torch_available, logging from .trainer_callback import ProgressCallback, TrainerCallback from .trainer_utils import PREFIX_CHECKPOINT_DIR, BestRun, IntervalStrategy from .training_args import ParallelMode from .utils import ENV_VARS_TRUE_VALUES, is_torch_tpu_available def is_wandb_available(): # any value of WANDB_DISABLED disables wandb if os.getenv("WANDB_DISABLED", "").upper() in ENV_VARS_TRUE_VALUES: logger.warning( "Using the `WANDB_DISABLED` environment variable is deprecated and will be removed in v5. Use the " "--report_to flag to control the integrations used for logging result (for instance --report_to none)." ) return False return importlib.util.find_spec("wandb") is not None def is_comet_available(): return _has_comet def is_tensorboard_available(): return importlib.util.find_spec("tensorboard") is not None or importlib.util.find_spec("tensorboardX") is not None def is_azureml_available(): if importlib.util.find_spec("azureml") is None: return False if importlib.util.find_spec("azureml.core") is None: return False return importlib.util.find_spec("azureml.core.run") is not None def is_mlflow_available(): if os.getenv("DISABLE_MLFLOW_INTEGRATION", "FALSE").upper() == "TRUE": return False return importlib.util.find_spec("mlflow") is not None def is_neptune_available(): return _has_neptune def is_codecarbon_available(): return importlib.util.find_spec("codecarbon") is not None def is_wandb_available(): # any value of WANDB_DISABLED disables wandb if os.getenv("WANDB_DISABLED", "").upper() in ENV_VARS_TRUE_VALUES: logger.warning( "Using the `WANDB_DISABLED` environment variable is deprecated and will be removed in v5. Use the " "--report_to flag to control the integrations used for logging result (for instance --report_to none)." ) return False return importlib.util.find_spec("wandb") is not None def get_available_reporting_integrations(): integrations = [] if is_azureml_available(): integrations.append("azure_ml") if is_comet_available(): integrations.append("comet_ml") if is_mlflow_available(): integrations.append("mlflow") if is_neptune_available(): integrations.append("neptune") if is_tensorboard_available(): integrations.append("tensorboard") if is_wandb_available(): integrations.append("wandb") if is_codecarbon_available(): integrations.append("codecarbon") return integrations
null
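A quick sketch; the result depends entirely on which optional packages are importable in the current environment:

```python
print(get_available_reporting_integrations())
# e.g. ['tensorboard', 'wandb'] if only those integrations are installed
```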
10,551
import functools import importlib.util import json import numbers import os import pickle import shutil import sys import tempfile from dataclasses import asdict from pathlib import Path from typing import TYPE_CHECKING, Dict, Optional import numpy as np from . import __version__ as version from .utils import flatten_dict, is_datasets_available, is_torch_available, logging from .trainer_callback import ProgressCallback, TrainerCallback from .trainer_utils import PREFIX_CHECKPOINT_DIR, BestRun, IntervalStrategy from .training_args import ParallelMode from .utils import ENV_VARS_TRUE_VALUES, is_torch_tpu_available INTEGRATION_TO_CALLBACK = { "azure_ml": AzureMLCallback, "comet_ml": CometCallback, "mlflow": MLflowCallback, "neptune": NeptuneCallback, "tensorboard": TensorBoardCallback, "wandb": WandbCallback, "codecarbon": CodeCarbonCallback, } def get_reporting_integration_callbacks(report_to): for integration in report_to: if integration not in INTEGRATION_TO_CALLBACK: raise ValueError( f"{integration} is not supported, only {', '.join(INTEGRATION_TO_CALLBACK.keys())} are supported." ) return [INTEGRATION_TO_CALLBACK[integration] for integration in report_to]
null
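A short usage sketch mapping a `report_to` list to callback classes; an unknown name raises `ValueError`:

```python
callbacks = get_reporting_integration_callbacks(["tensorboard", "wandb"])
for cb in callbacks:
    print(cb.__name__)  # TensorBoardCallback, WandbCallback
```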
10,552
import inspect from typing import Callable, List, Optional, Set, Tuple, Union import torch from packaging import version from torch import _softmax_backward_data, nn from .utils import logging is_torch_less_than_1_8 = parsed_torch_version_base < version.parse("1.8.0") The provided code snippet includes necessary dependencies for implementing the `torch_int_div` function. Write a Python function `def torch_int_div(tensor1, tensor2)` to solve the following problem: A function that performs integer division across different versions of PyTorch. Here is the function: def torch_int_div(tensor1, tensor2): """ A function that performs integer division across different versions of PyTorch. """ if is_torch_less_than_1_8: return tensor1 // tensor2 else: return torch.div(tensor1, tensor2, rounding_mode="floor")
A function that performs integer division across different versions of PyTorch.
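A tiny illustration of the version-agnostic floor division:

```python
import torch

positions = torch.arange(10)
print(torch_int_div(positions, 3))
# tensor([0, 0, 0, 1, 1, 1, 2, 2, 2, 3])
```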
10,553
import inspect from typing import Callable, List, Optional, Set, Tuple, Union import torch from packaging import version from torch import _softmax_backward_data, nn from .utils import logging is_torch_less_than_1_11 = parsed_torch_version_base < version.parse("1.11") The provided code snippet includes necessary dependencies for implementing the `softmax_backward_data` function. Write a Python function `def softmax_backward_data(parent, grad_output, output, dim, self)` to solve the following problem: A function that calls the internal `_softmax_backward_data` PyTorch method and that adjusts the arguments according to the torch version detected. Here is the function: def softmax_backward_data(parent, grad_output, output, dim, self): """ A function that calls the internal `_softmax_backward_data` PyTorch method and that adjusts the arguments according to the torch version detected. """ if is_torch_less_than_1_11: return _softmax_backward_data(grad_output, output, parent.dim, self) else: return _softmax_backward_data(grad_output, output, parent.dim, self.dtype)
A function that calls the internal `_softmax_backward_data` PyTorch method and that adjusts the arguments according to the torch version detected.
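A sketch of the kind of call site `softmax_backward_data` serves: a custom autograd function whose backward pass delegates the softmax gradient to the internal kernel. The `MaskedSoftmax` class here is illustrative (modeled loosely on the masked-softmax autograd functions that use this helper), not exact library code, and it assumes `softmax_backward_data` is importable from `transformers.pytorch_utils`.

```python
import torch

from transformers.pytorch_utils import softmax_backward_data


class MaskedSoftmax(torch.autograd.Function):
    """Illustrative autograd Function that reuses the version-aware backward helper."""

    @staticmethod
    def forward(ctx, input, dim):
        ctx.dim = dim
        output = torch.softmax(input, dim)
        ctx.save_for_backward(output)
        return output

    @staticmethod
    def backward(ctx, grad_output):
        (output,) = ctx.saved_tensors
        # `ctx` plays the role of `parent` (only its `.dim` attribute is read); the saved
        # softmax output is passed as the final argument so its dtype is forwarded on torch >= 1.11.
        input_grad = softmax_backward_data(ctx, grad_output, output, ctx.dim, output)
        return input_grad, None


x = torch.randn(2, 4, requires_grad=True)
(MaskedSoftmax.apply(x, -1) ** 2).sum().backward()
print(x.grad.shape)  # torch.Size([2, 4])
```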
10,554
import inspect
from typing import Callable, List, Optional, Set, Tuple, Union

import torch
from packaging import version
from torch import _softmax_backward_data, nn

from .utils import logging


def prune_linear_layer(layer: nn.Linear, index: torch.LongTensor, dim: int = 0) -> nn.Linear:
    """
    Prune a linear layer to keep only entries in index.

    Used to remove heads.

    Args:
        layer (`torch.nn.Linear`): The layer to prune.
        index (`torch.LongTensor`): The indices to keep in the layer.
        dim (`int`, *optional*, defaults to 0): The dimension on which to keep the indices.

    Returns:
        `torch.nn.Linear`: The pruned layer as a new layer with `requires_grad=True`.
    """
    index = index.to(layer.weight.device)
    W = layer.weight.index_select(dim, index).clone().detach()
    if layer.bias is not None:
        if dim == 1:
            b = layer.bias.clone().detach()
        else:
            b = layer.bias[index].clone().detach()
    new_size = list(layer.weight.size())
    new_size[dim] = len(index)
    new_layer = nn.Linear(new_size[1], new_size[0], bias=layer.bias is not None).to(layer.weight.device)
    new_layer.weight.requires_grad = False
    new_layer.weight.copy_(W.contiguous())
    new_layer.weight.requires_grad = True
    if layer.bias is not None:
        new_layer.bias.requires_grad = False
        new_layer.bias.copy_(b.contiguous())
        new_layer.bias.requires_grad = True
    return new_layer


class Conv1D(nn.Module):
    """
    1D-convolutional layer as defined by Radford et al. for OpenAI GPT (and also used in GPT-2).

    Basically works like a linear layer but the weights are transposed.

    Args:
        nf (`int`): The number of output features.
        nx (`int`): The number of input features.
    """

    def __init__(self, nf, nx):
        super().__init__()
        self.nf = nf
        w = torch.empty(nx, nf)
        nn.init.normal_(w, std=0.02)
        self.weight = nn.Parameter(w)
        self.bias = nn.Parameter(torch.zeros(nf))

    def forward(self, x):
        size_out = x.size()[:-1] + (self.nf,)
        x = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.weight)
        x = x.view(size_out)
        return x


def prune_conv1d_layer(layer: Conv1D, index: torch.LongTensor, dim: int = 1) -> Conv1D:
    """
    Prune a Conv1D layer to keep only entries in index. A Conv1D works as a Linear layer (see e.g. BERT) but the
    weights are transposed.

    Used to remove heads.

    Args:
        layer ([`~pytorch_utils.Conv1D`]): The layer to prune.
        index (`torch.LongTensor`): The indices to keep in the layer.
        dim (`int`, *optional*, defaults to 1): The dimension on which to keep the indices.

    Returns:
        [`~pytorch_utils.Conv1D`]: The pruned layer as a new layer with `requires_grad=True`.
    """
    index = index.to(layer.weight.device)
    W = layer.weight.index_select(dim, index).clone().detach()
    if dim == 0:
        b = layer.bias.clone().detach()
    else:
        b = layer.bias[index].clone().detach()
    new_size = list(layer.weight.size())
    new_size[dim] = len(index)
    new_layer = Conv1D(new_size[1], new_size[0]).to(layer.weight.device)
    new_layer.weight.requires_grad = False
    new_layer.weight.copy_(W.contiguous())
    new_layer.weight.requires_grad = True
    new_layer.bias.requires_grad = False
    new_layer.bias.copy_(b.contiguous())
    new_layer.bias.requires_grad = True
    return new_layer

The provided code snippet includes necessary dependencies for implementing the `prune_layer` function.

Write a Python function `def prune_layer(layer: Union[nn.Linear, Conv1D], index: torch.LongTensor, dim: Optional[int] = None) -> Union[nn.Linear, Conv1D]` to solve the following problem:
Prune a Conv1D or linear layer to keep only entries in index.

Used to remove heads.

Args:
    layer (`Union[torch.nn.Linear, Conv1D]`): The layer to prune.
    index (`torch.LongTensor`): The indices to keep in the layer.
    dim (`int`, *optional*): The dimension on which to keep the indices.

Returns:
    `torch.nn.Linear` or [`~pytorch_utils.Conv1D`]: The pruned layer as a new layer with `requires_grad=True`.

Here is the function:

def prune_layer(
    layer: Union[nn.Linear, Conv1D], index: torch.LongTensor, dim: Optional[int] = None
) -> Union[nn.Linear, Conv1D]:
    """
    Prune a Conv1D or linear layer to keep only entries in index.

    Used to remove heads.

    Args:
        layer (`Union[torch.nn.Linear, Conv1D]`): The layer to prune.
        index (`torch.LongTensor`): The indices to keep in the layer.
        dim (`int`, *optional*): The dimension on which to keep the indices.

    Returns:
        `torch.nn.Linear` or [`~pytorch_utils.Conv1D`]: The pruned layer as a new layer with `requires_grad=True`.
    """
    if isinstance(layer, nn.Linear):
        return prune_linear_layer(layer, index, dim=0 if dim is None else dim)
    elif isinstance(layer, Conv1D):
        return prune_conv1d_layer(layer, index, dim=1 if dim is None else dim)
    else:
        raise ValueError(f"Can't prune layer of class {layer.__class__}")
Prune a Conv1D or linear layer to keep only entries in index.

Used to remove heads.

Args:
    layer (`Union[torch.nn.Linear, Conv1D]`): The layer to prune.
    index (`torch.LongTensor`): The indices to keep in the layer.
    dim (`int`, *optional*): The dimension on which to keep the indices.

Returns:
    `torch.nn.Linear` or [`~pytorch_utils.Conv1D`]: The pruned layer as a new layer with `requires_grad=True`.
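A small, self-contained example of `prune_layer` on an `nn.Linear`, assuming the helpers above are importable from `transformers.pytorch_utils`; the index keeps three of the four output features, so the pruned weight ends up 3x8.

```python
import torch
from torch import nn

from transformers.pytorch_utils import prune_layer

layer = nn.Linear(8, 4)
index = torch.tensor([0, 2, 3], dtype=torch.long)  # keep 3 of the 4 output features

pruned = prune_layer(layer, index)  # dim defaults to 0 for nn.Linear (the output dimension)
print(pruned.weight.shape)          # torch.Size([3, 8])
print(pruned.bias.shape)            # torch.Size([3])
print(pruned.weight.requires_grad)  # True
```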
10,555
import inspect
from typing import Callable, List, Optional, Set, Tuple, Union

import torch
from packaging import version
from torch import _softmax_backward_data, nn

from .utils import logging

The provided code snippet includes necessary dependencies for implementing the `apply_chunking_to_forward` function.

Write a Python function `def apply_chunking_to_forward(forward_fn: Callable[..., torch.Tensor], chunk_size: int, chunk_dim: int, *input_tensors) -> torch.Tensor` to solve the following problem:
This function chunks the `input_tensors` into smaller input tensor parts of size `chunk_size` over the dimension
`chunk_dim`. It then applies a layer `forward_fn` to each chunk independently to save memory.

If the `forward_fn` is independent across the `chunk_dim` this function will yield the same result as directly
applying `forward_fn` to `input_tensors`.

Args:
    forward_fn (`Callable[..., torch.Tensor]`): The forward function of the model.
    chunk_size (`int`): The chunk size of a chunked tensor: `num_chunks = len(input_tensors[0]) / chunk_size`.
    chunk_dim (`int`): The dimension over which the `input_tensors` should be chunked.
    input_tensors (`Tuple[torch.Tensor]`): The input tensors of `forward_fn` which will be chunked

Returns:
    `torch.Tensor`: A tensor with the same shape as the `forward_fn` would have given if applied.

Examples:

```python
# rename the usual forward() fn to forward_chunk()
def forward_chunk(self, hidden_states):
    hidden_states = self.decoder(hidden_states)
    return hidden_states


# implement a chunked forward function
def forward(self, hidden_states):
    return apply_chunking_to_forward(self.forward_chunk, self.chunk_size_lm_head, self.seq_len_dim, hidden_states)
```

Here is the function:

def apply_chunking_to_forward(
    forward_fn: Callable[..., torch.Tensor], chunk_size: int, chunk_dim: int, *input_tensors
) -> torch.Tensor:
    """
    This function chunks the `input_tensors` into smaller input tensor parts of size `chunk_size` over the dimension
    `chunk_dim`. It then applies a layer `forward_fn` to each chunk independently to save memory.

    If the `forward_fn` is independent across the `chunk_dim` this function will yield the same result as directly
    applying `forward_fn` to `input_tensors`.

    Args:
        forward_fn (`Callable[..., torch.Tensor]`): The forward function of the model.
        chunk_size (`int`): The chunk size of a chunked tensor: `num_chunks = len(input_tensors[0]) / chunk_size`.
        chunk_dim (`int`): The dimension over which the `input_tensors` should be chunked.
        input_tensors (`Tuple[torch.Tensor]`): The input tensors of `forward_fn` which will be chunked

    Returns:
        `torch.Tensor`: A tensor with the same shape as the `forward_fn` would have given if applied.

    Examples:

    ```python
    # rename the usual forward() fn to forward_chunk()
    def forward_chunk(self, hidden_states):
        hidden_states = self.decoder(hidden_states)
        return hidden_states


    # implement a chunked forward function
    def forward(self, hidden_states):
        return apply_chunking_to_forward(self.forward_chunk, self.chunk_size_lm_head, self.seq_len_dim, hidden_states)
    ```"""
    assert len(input_tensors) > 0, f"{input_tensors} has to be a tuple/list of tensors"

    # inspect.signature exists since python 3.5 and is a python method -> no problem with backward compatibility
    num_args_in_forward_chunk_fn = len(inspect.signature(forward_fn).parameters)
    if num_args_in_forward_chunk_fn != len(input_tensors):
        raise ValueError(
            f"forward_chunk_fn expects {num_args_in_forward_chunk_fn} arguments, but only {len(input_tensors)} input "
            "tensors are given"
        )

    if chunk_size > 0:
        tensor_shape = input_tensors[0].shape[chunk_dim]
        for input_tensor in input_tensors:
            if input_tensor.shape[chunk_dim] != tensor_shape:
                raise ValueError(
                    f"All input tensors have to be of the same shape: {tensor_shape}, "
                    f"found shape {input_tensor.shape[chunk_dim]}"
                )

        if input_tensors[0].shape[chunk_dim] % chunk_size != 0:
            raise ValueError(
                f"The dimension to be chunked {input_tensors[0].shape[chunk_dim]} has to be a multiple of the chunk "
                f"size {chunk_size}"
            )

        num_chunks = input_tensors[0].shape[chunk_dim] // chunk_size

        # chunk input tensor into tuples
        input_tensors_chunks = tuple(input_tensor.chunk(num_chunks, dim=chunk_dim) for input_tensor in input_tensors)
        # apply forward fn to every tuple
        output_chunks = tuple(forward_fn(*input_tensors_chunk) for input_tensors_chunk in zip(*input_tensors_chunks))
        # concatenate output at same dimension
        return torch.cat(output_chunks, dim=chunk_dim)

    return forward_fn(*input_tensors)
This function chunks the `input_tensors` into smaller input tensor parts of size `chunk_size` over the dimension
`chunk_dim`. It then applies a layer `forward_fn` to each chunk independently to save memory.

If the `forward_fn` is independent across the `chunk_dim` this function will yield the same result as directly
applying `forward_fn` to `input_tensors`.

Args:
    forward_fn (`Callable[..., torch.Tensor]`): The forward function of the model.
    chunk_size (`int`): The chunk size of a chunked tensor: `num_chunks = len(input_tensors[0]) / chunk_size`.
    chunk_dim (`int`): The dimension over which the `input_tensors` should be chunked.
    input_tensors (`Tuple[torch.Tensor]`): The input tensors of `forward_fn` which will be chunked

Returns:
    `torch.Tensor`: A tensor with the same shape as the `forward_fn` would have given if applied.

Examples:

```python
# rename the usual forward() fn to forward_chunk()
def forward_chunk(self, hidden_states):
    hidden_states = self.decoder(hidden_states)
    return hidden_states


# implement a chunked forward function
def forward(self, hidden_states):
    return apply_chunking_to_forward(self.forward_chunk, self.chunk_size_lm_head, self.seq_len_dim, hidden_states)
```
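A quick numerical check that chunking a position-wise feed-forward along the sequence dimension reproduces the unchunked result, assuming `apply_chunking_to_forward` is importable from `transformers.pytorch_utils`; the layer and shapes are made up for illustration.

```python
import torch
from torch import nn

from transformers.pytorch_utils import apply_chunking_to_forward

dense = nn.Linear(16, 16)


def feed_forward(hidden_states):
    # Applied position-wise, so it is independent across the sequence dimension.
    return dense(hidden_states)


hidden_states = torch.randn(2, 128, 16)  # (batch, seq_len, hidden)

chunked = apply_chunking_to_forward(feed_forward, 32, 1, hidden_states)  # 4 chunks of 32 positions
full = feed_forward(hidden_states)
print(torch.allclose(chunked, full, atol=1e-6))  # True
```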