Dataset schema (column, dtype, and min/max; for `stringlengths` columns the range is the string length):

| column | dtype | min | max |
|---|---|---|---|
| id | int64 | 0 | 328k |
| repository_name | stringlengths | 7 | 58 |
| file_path | stringlengths | 9 | 302 |
| class_name | stringlengths | 5 | 256 |
| human_written_code | stringlengths | 16 | 2.16M |
| class_skeleton | stringlengths | 18 | 1.49M |
| total_program_units | int64 | 1 | 1.76k |
| total_doc_str | int64 | 0 | 771 |
| AvgCountLine | float64 | 0 | 7.89k |
| AvgCountLineBlank | float64 | 0 | 297 |
| AvgCountLineCode | float64 | 0 | 7.89k |
| AvgCountLineComment | float64 | 0 | 7.89k |
| AvgCyclomatic | float64 | 0 | 130 |
| CommentToCodeRatio | float64 | 0 | 168 |
| CountClassBase | float64 | 0 | 40 |
| CountClassCoupled | float64 | 0 | 583 |
| CountClassCoupledModified | float64 | 0 | 575 |
| CountClassDerived | float64 | 0 | 5.35k |
| CountDeclInstanceMethod | float64 | 0 | 529 |
| CountDeclInstanceVariable | float64 | 0 | 296 |
| CountDeclMethod | float64 | 0 | 599 |
| CountDeclMethodAll | float64 | 0 | 1.12k |
| CountLine | float64 | 1 | 40.4k |
| CountLineBlank | float64 | 0 | 8.16k |
| CountLineCode | float64 | 1 | 25.7k |
| CountLineCodeDecl | float64 | 1 | 8.15k |
| CountLineCodeExe | float64 | 0 | 24.2k |
| CountLineComment | float64 | 0 | 16.5k |
| CountStmt | float64 | 1 | 9.71k |
| CountStmtDecl | float64 | 1 | 8.15k |
| CountStmtExe | float64 | 0 | 9.69k |
| MaxCyclomatic | float64 | 0 | 759 |
| MaxInheritanceTree | float64 | 0 | 16 |
| MaxNesting | float64 | 0 | 34 |
| SumCyclomatic | float64 | 0 | 2.9k |
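For orientation, rows like the ones below can be pulled and queried with the 🤗 `datasets` library. This is a minimal sketch; the hub identifier `your-org/python-class-metrics` is a placeholder, since this preview does not name the actual dataset repository.

```python
from datasets import load_dataset

# Placeholder hub ID -- substitute the real dataset repository name.
ds = load_dataset("your-org/python-class-metrics", split="train")

# Each row pairs a class implementation with its skeleton and
# Understand-style static metrics.
row = ds[0]
print(row["repository_name"], row["class_name"])
print(row["CountLineCode"], row["SumCyclomatic"])

# Example query: classes whose comment lines outnumber their code lines.
heavily_commented = ds.filter(lambda r: r["CommentToCodeRatio"] > 1.0)
print(len(heavily_commented))
```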
id: 6,500
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/tokenization_utils_base.py
class_name: transformers.tokenization_utils_base.SpecialTokensMixin

human_written_code:
from collections.abc import Mapping, Sequence, Sized from typing import TYPE_CHECKING, Any, Callable, NamedTuple, Optional, Union class SpecialTokensMixin: """ A mixin derived by [`PreTrainedTokenizer`] and [`PreTrainedTokenizerFast`] to handle specific behaviors related to special tokens. In particular, this class hold the attributes which can be used to directly access these special tokens in a model-independent manner and allow to set and update the special tokens. Args: bos_token (`str` or `tokenizers.AddedToken`, *optional*): A special token representing the beginning of a sentence. eos_token (`str` or `tokenizers.AddedToken`, *optional*): A special token representing the end of a sentence. unk_token (`str` or `tokenizers.AddedToken`, *optional*): A special token representing an out-of-vocabulary token. sep_token (`str` or `tokenizers.AddedToken`, *optional*): A special token separating two different sentences in the same input (used by BERT for instance). pad_token (`str` or `tokenizers.AddedToken`, *optional*): A special token used to make arrays of tokens the same size for batching purpose. Will then be ignored by attention mechanisms or loss computation. cls_token (`str` or `tokenizers.AddedToken`, *optional*): A special token representing the class of the input (used by BERT for instance). mask_token (`str` or `tokenizers.AddedToken`, *optional*): A special token representing a masked token (used by masked-language modeling pretraining objectives, like BERT). additional_special_tokens (tuple or list of `str` or `tokenizers.AddedToken`, *optional*): A tuple or a list of additional tokens, which will be marked as `special`, meaning that they will be skipped when decoding if `skip_special_tokens` is set to `True`. """ SPECIAL_TOKENS_ATTRIBUTES = ['bos_token', 'eos_token', 'unk_token', 'sep_token', 'pad_token', 'cls_token', 'mask_token', 'additional_special_tokens'] def __init__(self, verbose=False, **kwargs): self._pad_token_type_id = 0 self.verbose = verbose self._special_tokens_map = dict.fromkeys(self.SPECIAL_TOKENS_ATTRIBUTES) self._special_tokens_map['additional_special_tokens'] = [] for key, value in kwargs.items(): if value is None: continue if key in self.SPECIAL_TOKENS_ATTRIBUTES: if key == 'additional_special_tokens': assert isinstance(value, (list, tuple)), f'Value {value} is not a list or tuple' assert all((isinstance(t, (str, AddedToken)) for t in value)), 'One of the tokens is not a string or an AddedToken' setattr(self, key, value) elif isinstance(value, (str, AddedToken)): setattr(self, key, value) else: raise TypeError(f'Special token {key} has to be either str or AddedToken but got: {type(value)}') def sanitize_special_tokens(self) -> int: """ The `sanitize_special_tokens` is now deprecated kept for backward compatibility and will be removed in transformers v5. """ logger.warning_once('The `sanitize_special_tokens` will be removed in transformers v5.') return self.add_tokens(self.all_special_tokens_extended, special_tokens=True) def add_special_tokens(self, special_tokens_dict: dict[str, Union[str, AddedToken, Sequence[Union[str, AddedToken]]]], replace_additional_special_tokens=True) -> int: """ Add a dictionary of special tokens (eos, pad, cls, etc.) to the encoder and link them to class attributes. If special tokens are NOT in the vocabulary, they are added to it (indexed starting from the last index of the current vocabulary). 
When adding new tokens to the vocabulary, you should make sure to also resize the token embedding matrix of the model so that its embedding matrix matches the tokenizer. In order to do that, please use the [`~PreTrainedModel.resize_token_embeddings`] method. Using `add_special_tokens` will ensure your special tokens can be used in several ways: - Special tokens can be skipped when decoding using `skip_special_tokens = True`. - Special tokens are carefully handled by the tokenizer (they are never split), similar to `AddedTokens`. - You can easily refer to special tokens using tokenizer class attributes like `tokenizer.cls_token`. This makes it easy to develop model-agnostic training and fine-tuning scripts. When possible, special tokens are already registered for provided pretrained models (for instance [`BertTokenizer`] `cls_token` is already registered to be `'[CLS]'` and XLM's one is also registered to be `'</s>'`). Args: special_tokens_dict (dictionary *str* to *str*, `tokenizers.AddedToken`, or `Sequence[Union[str, AddedToken]]`): Keys should be in the list of predefined special attributes: [`bos_token`, `eos_token`, `unk_token`, `sep_token`, `pad_token`, `cls_token`, `mask_token`, `additional_special_tokens`]. Tokens are only added if they are not already in the vocabulary (tested by checking if the tokenizer assign the index of the `unk_token` to them). replace_additional_special_tokens (`bool`, *optional*,, defaults to `True`): If `True`, the existing list of additional special tokens will be replaced by the list provided in `special_tokens_dict`. Otherwise, `self._special_tokens_map["additional_special_tokens"]` is just extended. In the former case, the tokens will NOT be removed from the tokenizer's full vocabulary - they are only being flagged as non-special tokens. Remember, this only affects which tokens are skipped during decoding, not the `added_tokens_encoder` and `added_tokens_decoder`. This means that the previous `additional_special_tokens` are still added tokens, and will not be split by the model. Returns: `int`: Number of tokens added to the vocabulary. Examples: ```python # Let's see how to add a new classification token to GPT-2 tokenizer = GPT2Tokenizer.from_pretrained("openai-community/gpt2") model = GPT2Model.from_pretrained("openai-community/gpt2") special_tokens_dict = {"cls_token": "<CLS>"} num_added_toks = tokenizer.add_special_tokens(special_tokens_dict) print("We have added", num_added_toks, "tokens") # Notice: resize_token_embeddings expect to receive the full size of the new vocabulary, i.e., the length of the tokenizer. 
model.resize_token_embeddings(len(tokenizer)) assert tokenizer.cls_token == "<CLS>" ```""" if not special_tokens_dict: return 0 added_tokens = [] for key, value in special_tokens_dict.items(): assert key in self.SPECIAL_TOKENS_ATTRIBUTES, f'Key {key} is not a special token' if self.verbose: logger.info(f'Assigning {value} to the {key} key of the tokenizer') if key == 'additional_special_tokens': assert isinstance(value, (list, tuple)) and all((isinstance(t, (str, AddedToken)) for t in value)), f'Tokens {value} for key {key} should all be str or AddedToken instances' to_add = [] for token in value: if isinstance(token, str): token = AddedToken(token, rstrip=False, lstrip=False, normalized=False, special=True) if not replace_additional_special_tokens and str(token) in self.additional_special_tokens: continue to_add.append(token) if replace_additional_special_tokens and len(to_add) > 0: setattr(self, key, list(to_add)) else: self._special_tokens_map['additional_special_tokens'].extend(to_add) added_tokens += to_add else: if not isinstance(value, (str, AddedToken)): raise ValueError(f'Token {value} for key {key} should be a str or an AddedToken instance') if isinstance(value, str): value = AddedToken(value, rstrip=False, lstrip=False, normalized=False, special=True) if isinstance(value, AddedToken): setattr(self, key, value) if value not in added_tokens: added_tokens.append(value) added_tokens = self.add_tokens(added_tokens, special_tokens=True) return added_tokens def add_tokens(self, new_tokens: Union[str, AddedToken, Sequence[Union[str, AddedToken]]], special_tokens: bool=False) -> int: """ Add a list of new tokens to the tokenizer class. If the new tokens are not in the vocabulary, they are added to it with indices starting from length of the current vocabulary and will be isolated before the tokenization algorithm is applied. Added tokens and tokens from the vocabulary of the tokenization algorithm are therefore not treated in the same way. Note, when adding new tokens to the vocabulary, you should make sure to also resize the token embedding matrix of the model so that its embedding matrix matches the tokenizer. In order to do that, please use the [`~PreTrainedModel.resize_token_embeddings`] method. Args: new_tokens (`str`, `tokenizers.AddedToken` or a sequence of *str* or `tokenizers.AddedToken`): Tokens are only added if they are not already in the vocabulary. `tokenizers.AddedToken` wraps a string token to let you personalize its behavior: whether this token should only match against a single word, whether this token should strip all potential whitespaces on the left side, whether this token should strip all potential whitespaces on the right side, etc. special_tokens (`bool`, *optional*, defaults to `False`): Can be used to specify if the token is a special token. This mostly change the normalization behavior (special tokens like CLS or [MASK] are usually not lower-cased for instance). See details for `tokenizers.AddedToken` in HuggingFace tokenizers library. Returns: `int`: Number of tokens added to the vocabulary. Examples: ```python # Let's see how to increase the vocabulary of Bert model and tokenizer tokenizer = BertTokenizerFast.from_pretrained("google-bert/bert-base-uncased") model = BertModel.from_pretrained("google-bert/bert-base-uncased") num_added_toks = tokenizer.add_tokens(["new_tok1", "my_new-tok2"]) print("We have added", num_added_toks, "tokens") # Notice: resize_token_embeddings expect to receive the full size of the new vocabulary, i.e., the length of the tokenizer. 
model.resize_token_embeddings(len(tokenizer)) ```""" if not new_tokens: return 0 if not isinstance(new_tokens, (list, tuple)): new_tokens = [new_tokens] return self._add_tokens(new_tokens, special_tokens=special_tokens) def _add_tokens(self, new_tokens: Union[list[str], list[AddedToken]], special_tokens: bool=False) -> int: raise NotImplementedError @property def pad_token_type_id(self) -> int: """ `int`: Id of the padding token type in the vocabulary. """ return self._pad_token_type_id def __setattr__(self, key, value): key_without_id = key key_is_special_id = key.endswith('_id') or key.endswith('_ids') if key_is_special_id: key_without_id = key[:-3] if not key.endswith('_ids') else key[:-4] if self.__dict__.get('_special_tokens_map', None) is not None and any((name in self.__dict__['_special_tokens_map'] for name in [key, key_without_id])): if key_is_special_id: if value is not None: value = self.convert_ids_to_tokens(value) if key != 'additional_special_tokens' else [self.convert_ids_to_tokens(val) for val in value] key = key_without_id if key != 'additional_special_tokens' and (not isinstance(value, (str, AddedToken))) and (value is not None): raise ValueError(f'Cannot set a non-string value as the {key}') self._special_tokens_map[key] = value else: super().__setattr__(key, value) def __getattr__(self, key): key_without_id = key key_is_special_id = key.endswith('_id') or key.endswith('_ids') if key_is_special_id: key_without_id = key[:-3] if not key.endswith('_ids') else key[:-4] if self.__dict__.get('_special_tokens_map', None) is not None and any((name in self.__dict__['_special_tokens_map'] for name in [key, key_without_id])): _special_tokens_map = self.__dict__['_special_tokens_map'] if not key_is_special_id: if _special_tokens_map[key] is None: if self.verbose: logger.error(f'Using {key}, but it is not set yet.') return None value = _special_tokens_map[key] return str(value) if key != 'additional_special_tokens' else [str(tok) for tok in value] else: attr_as_tokens = getattr(self, key_without_id) return self.convert_tokens_to_ids(attr_as_tokens) if attr_as_tokens is not None else None if key not in self.__dict__: raise AttributeError(f'{self.__class__.__name__} has no attribute {key}') else: return super().__getattr__(key) @property def special_tokens_map(self) -> dict[str, Union[str, list[str]]]: """ `dict[str, Union[str, list[str]]]`: A dictionary mapping special token class attributes (`cls_token`, `unk_token`, etc.) to their values (`'<unk>'`, `'<cls>'`, etc.). Convert potential tokens of `tokenizers.AddedToken` type to string. """ set_attr = {} for attr in self.SPECIAL_TOKENS_ATTRIBUTES: attr_value = getattr(self, attr) if attr_value: set_attr[attr] = attr_value return set_attr @property def special_tokens_map_extended(self) -> dict[str, Union[str, AddedToken, list[Union[str, AddedToken]]]]: """ `dict[str, Union[str, tokenizers.AddedToken, list[Union[str, tokenizers.AddedToken]]]]`: A dictionary mapping special token class attributes (`cls_token`, `unk_token`, etc.) to their values (`'<unk>'`, `'<cls>'`, etc.). Don't convert tokens of `tokenizers.AddedToken` type to string so they can be used to control more finely how special tokens are tokenized. 
""" set_attr = {} for attr in self.SPECIAL_TOKENS_ATTRIBUTES: attr_value = self._special_tokens_map[attr] if attr_value: set_attr[attr] = attr_value return set_attr @property def all_special_tokens_extended(self) -> list[Union[str, AddedToken]]: """ `list[Union[str, tokenizers.AddedToken]]`: All the special tokens (`'<unk>'`, `'<cls>'`, etc.), the order has nothing to do with the index of each tokens. If you want to know the correct indices, check `self.added_tokens_encoder`. We can't create an order anymore as the keys are `AddedTokens` and not `Strings`. Don't convert tokens of `tokenizers.AddedToken` type to string so they can be used to control more finely how special tokens are tokenized. """ all_tokens = [] seen = set() for value in self.special_tokens_map_extended.values(): if isinstance(value, (list, tuple)): tokens_to_add = [token for token in value if str(token) not in seen] else: tokens_to_add = [value] if str(value) not in seen else [] seen.update(map(str, tokens_to_add)) all_tokens.extend(tokens_to_add) return all_tokens @property def all_special_tokens(self) -> list[str]: """ `list[str]`: A list of the unique special tokens (`'<unk>'`, `'<cls>'`, ..., etc.). Convert tokens of `tokenizers.AddedToken` type to string. """ all_toks = [str(s) for s in self.all_special_tokens_extended] return all_toks @property def all_special_ids(self) -> list[int]: """ `list[int]`: List the ids of the special tokens(`'<unk>'`, `'<cls>'`, etc.) mapped to class attributes. """ all_toks = self.all_special_tokens all_ids = self.convert_tokens_to_ids(all_toks) return all_ids def _set_model_specific_special_tokens(self, special_tokens: list[str]): """ Adds new special tokens to the "SPECIAL_TOKENS_ATTRIBUTES" list which will be part of "self.special_tokens" and saved as a special token in tokenizer's config. This allows us to dynamically add new model-type specific tokens after initializing the tokenizer. For example: if the model tokenizers is multimodal, we can support special image or audio tokens. """ self.SPECIAL_TOKENS_ATTRIBUTES = self.SPECIAL_TOKENS_ATTRIBUTES + list(special_tokens.keys()) for key, value in special_tokens.items(): if isinstance(value, (str, AddedToken)): self._special_tokens_map[key] = value else: raise TypeError(f'Special token {key} has to be either str or AddedToken but got: {type(value)}')
class SpecialTokensMixin: ''' A mixin derived by [`PreTrainedTokenizer`] and [`PreTrainedTokenizerFast`] to handle specific behaviors related to special tokens. In particular, this class hold the attributes which can be used to directly access these special tokens in a model-independent manner and allow to set and update the special tokens. Args: bos_token (`str` or `tokenizers.AddedToken`, *optional*): A special token representing the beginning of a sentence. eos_token (`str` or `tokenizers.AddedToken`, *optional*): A special token representing the end of a sentence. unk_token (`str` or `tokenizers.AddedToken`, *optional*): A special token representing an out-of-vocabulary token. sep_token (`str` or `tokenizers.AddedToken`, *optional*): A special token separating two different sentences in the same input (used by BERT for instance). pad_token (`str` or `tokenizers.AddedToken`, *optional*): A special token used to make arrays of tokens the same size for batching purpose. Will then be ignored by attention mechanisms or loss computation. cls_token (`str` or `tokenizers.AddedToken`, *optional*): A special token representing the class of the input (used by BERT for instance). mask_token (`str` or `tokenizers.AddedToken`, *optional*): A special token representing a masked token (used by masked-language modeling pretraining objectives, like BERT). additional_special_tokens (tuple or list of `str` or `tokenizers.AddedToken`, *optional*): A tuple or a list of additional tokens, which will be marked as `special`, meaning that they will be skipped when decoding if `skip_special_tokens` is set to `True`. ''' def __init__(self, verbose=False, **kwargs): pass def sanitize_special_tokens(self) -> int: ''' The `sanitize_special_tokens` is now deprecated kept for backward compatibility and will be removed in transformers v5. ''' pass def add_special_tokens(self, special_tokens_dict: dict[str, Union[str, AddedToken, Sequence[Union[str, AddedToken]]]], replace_additional_special_tokens=True) -> int: ''' Add a dictionary of special tokens (eos, pad, cls, etc.) to the encoder and link them to class attributes. If special tokens are NOT in the vocabulary, they are added to it (indexed starting from the last index of the current vocabulary). When adding new tokens to the vocabulary, you should make sure to also resize the token embedding matrix of the model so that its embedding matrix matches the tokenizer. In order to do that, please use the [`~PreTrainedModel.resize_token_embeddings`] method. Using `add_special_tokens` will ensure your special tokens can be used in several ways: - Special tokens can be skipped when decoding using `skip_special_tokens = True`. - Special tokens are carefully handled by the tokenizer (they are never split), similar to `AddedTokens`. - You can easily refer to special tokens using tokenizer class attributes like `tokenizer.cls_token`. This makes it easy to develop model-agnostic training and fine-tuning scripts. When possible, special tokens are already registered for provided pretrained models (for instance [`BertTokenizer`] `cls_token` is already registered to be `'[CLS]'` and XLM's one is also registered to be `'</s>'`). Args: special_tokens_dict (dictionary *str* to *str*, `tokenizers.AddedToken`, or `Sequence[Union[str, AddedToken]]`): Keys should be in the list of predefined special attributes: [`bos_token`, `eos_token`, `unk_token`, `sep_token`, `pad_token`, `cls_token`, `mask_token`, `additional_special_tokens`]. 
Tokens are only added if they are not already in the vocabulary (tested by checking if the tokenizer assign the index of the `unk_token` to them). replace_additional_special_tokens (`bool`, *optional*,, defaults to `True`): If `True`, the existing list of additional special tokens will be replaced by the list provided in `special_tokens_dict`. Otherwise, `self._special_tokens_map["additional_special_tokens"]` is just extended. In the former case, the tokens will NOT be removed from the tokenizer's full vocabulary - they are only being flagged as non-special tokens. Remember, this only affects which tokens are skipped during decoding, not the `added_tokens_encoder` and `added_tokens_decoder`. This means that the previous `additional_special_tokens` are still added tokens, and will not be split by the model. Returns: `int`: Number of tokens added to the vocabulary. Examples: ```python # Let's see how to add a new classification token to GPT-2 tokenizer = GPT2Tokenizer.from_pretrained("openai-community/gpt2") model = GPT2Model.from_pretrained("openai-community/gpt2") special_tokens_dict = {"cls_token": "<CLS>"} num_added_toks = tokenizer.add_special_tokens(special_tokens_dict) print("We have added", num_added_toks, "tokens") # Notice: resize_token_embeddings expect to receive the full size of the new vocabulary, i.e., the length of the tokenizer. model.resize_token_embeddings(len(tokenizer)) assert tokenizer.cls_token == "<CLS>" ```''' pass def add_tokens(self, new_tokens: Union[str, AddedToken, Sequence[Union[str, AddedToken]]], special_tokens: bool=False) -> int: ''' Add a list of new tokens to the tokenizer class. If the new tokens are not in the vocabulary, they are added to it with indices starting from length of the current vocabulary and will be isolated before the tokenization algorithm is applied. Added tokens and tokens from the vocabulary of the tokenization algorithm are therefore not treated in the same way. Note, when adding new tokens to the vocabulary, you should make sure to also resize the token embedding matrix of the model so that its embedding matrix matches the tokenizer. In order to do that, please use the [`~PreTrainedModel.resize_token_embeddings`] method. Args: new_tokens (`str`, `tokenizers.AddedToken` or a sequence of *str* or `tokenizers.AddedToken`): Tokens are only added if they are not already in the vocabulary. `tokenizers.AddedToken` wraps a string token to let you personalize its behavior: whether this token should only match against a single word, whether this token should strip all potential whitespaces on the left side, whether this token should strip all potential whitespaces on the right side, etc. special_tokens (`bool`, *optional*, defaults to `False`): Can be used to specify if the token is a special token. This mostly change the normalization behavior (special tokens like CLS or [MASK] are usually not lower-cased for instance). See details for `tokenizers.AddedToken` in HuggingFace tokenizers library. Returns: `int`: Number of tokens added to the vocabulary. Examples: ```python # Let's see how to increase the vocabulary of Bert model and tokenizer tokenizer = BertTokenizerFast.from_pretrained("google-bert/bert-base-uncased") model = BertModel.from_pretrained("google-bert/bert-base-uncased") num_added_toks = tokenizer.add_tokens(["new_tok1", "my_new-tok2"]) print("We have added", num_added_toks, "tokens") # Notice: resize_token_embeddings expect to receive the full size of the new vocabulary, i.e., the length of the tokenizer. 
model.resize_token_embeddings(len(tokenizer)) ```''' pass def _add_tokens(self, new_tokens: Union[list[str], list[AddedToken]], special_tokens: bool=False) -> int: pass @property def pad_token_type_id(self) -> int: ''' `int`: Id of the padding token type in the vocabulary. ''' pass def __setattr__(self, key, value): pass def __getattr__(self, key): pass @property def special_tokens_map(self) -> dict[str, Union[str, list[str]]]: ''' `dict[str, Union[str, list[str]]]`: A dictionary mapping special token class attributes (`cls_token`, `unk_token`, etc.) to their values (`'<unk>'`, `'<cls>'`, etc.). Convert potential tokens of `tokenizers.AddedToken` type to string. ''' pass @property def special_tokens_map_extended(self) -> dict[str, Union[str, AddedToken, list[Union[str, AddedToken]]]]: ''' `dict[str, Union[str, tokenizers.AddedToken, list[Union[str, tokenizers.AddedToken]]]]`: A dictionary mapping special token class attributes (`cls_token`, `unk_token`, etc.) to their values (`'<unk>'`, `'<cls>'`, etc.). Don't convert tokens of `tokenizers.AddedToken` type to string so they can be used to control more finely how special tokens are tokenized. ''' pass @property def all_special_tokens_extended(self) -> list[Union[str, AddedToken]]: ''' `list[Union[str, tokenizers.AddedToken]]`: All the special tokens (`'<unk>'`, `'<cls>'`, etc.), the order has nothing to do with the index of each tokens. If you want to know the correct indices, check `self.added_tokens_encoder`. We can't create an order anymore as the keys are `AddedTokens` and not `Strings`. Don't convert tokens of `tokenizers.AddedToken` type to string so they can be used to control more finely how special tokens are tokenized. ''' pass @property def all_special_tokens_extended(self) -> list[Union[str, AddedToken]]: ''' `list[str]`: A list of the unique special tokens (`'<unk>'`, `'<cls>'`, ..., etc.). Convert tokens of `tokenizers.AddedToken` type to string. ''' pass @property def all_special_ids(self) -> list[int]: ''' `list[int]`: List the ids of the special tokens(`'<unk>'`, `'<cls>'`, etc.) mapped to class attributes. ''' pass def _set_model_specific_special_tokens(self, special_tokens: list[str]): ''' Adds new special tokens to the "SPECIAL_TOKENS_ATTRIBUTES" list which will be part of "self.special_tokens" and saved as a special token in tokenizer's config. This allows us to dynamically add new model-type specific tokens after initializing the tokenizer. For example: if the model tokenizers is multimodal, we can support special image or audio tokens. ''' pass
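The `__setattr__`/`__getattr__` pair in the implementation above is what backs the `*_token_id` convenience attributes: reading `cls_token_id` resolves `cls_token` and converts it with `convert_tokens_to_ids`, and assigning to a `*_token_id` attribute round-trips through `convert_ids_to_tokens`. A small sketch of that behavior, assuming `transformers` is installed and the checkpoint is reachable:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")

# Reading the string attribute and its derived id (see __getattr__ above).
print(tok.cls_token)     # '[CLS]'
print(tok.cls_token_id)  # vocabulary id of '[CLS]'

# Assigning an id round-trips through convert_ids_to_tokens
# (see __setattr__ above), updating the underlying string attribute.
tok.pad_token_id = tok.cls_token_id
print(tok.pad_token)     # '[CLS]'
```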
| metric | value |
|---|---|
| total_program_units | 21 |
| total_doc_str | 11 |
| AvgCountLine | 22 |
| AvgCountLineBlank | 3 |
| AvgCountLineCode | 11 |
| AvgCountLineComment | 8 |
| AvgCyclomatic | 4 |
| CommentToCodeRatio | 0.83 |
| CountClassBase | 0 |
| CountClassCoupled | 13 |
| CountClassCoupledModified | 0 |
| CountClassDerived | 1 |
| CountDeclInstanceMethod | 14 |
| CountDeclInstanceVariable | 3 |
| CountDeclMethod | 14 |
| CountDeclMethodAll | 14 |
| CountLine | 367 |
| CountLineBlank | 55 |
| CountLineCode | 171 |
| CountLineCodeDecl | 55 |
| CountLineCodeExe | 146 |
| CountLineComment | 142 |
| CountStmt | 131 |
| CountStmtDecl | 45 |
| CountStmtExe | 116 |
| MaxCyclomatic | 13 |
| MaxInheritanceTree | 0 |
| MaxNesting | 4 |
| SumCyclomatic | 58 |
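One consistency check these rows permit: `CommentToCodeRatio` appears to be derived as `CountLineComment / CountLineCode`. For this row 142 / 171 ≈ 0.83, and the same relation holds for the two rows below (6 / 3 = 2 and 4 / 5 = 0.8). A one-line verification (this is an inference from the listed values, not a documented formula):

```python
# Sanity check of the recorded ratio for this row.
count_line_comment, count_line_code = 142, 171
print(round(count_line_comment / count_line_code, 2))  # 0.83
```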
id: 6,501
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/tokenization_utils_base.py
class_name: transformers.tokenization_utils_base.TokenSpan

human_written_code:
from typing import TYPE_CHECKING, Any, Callable, NamedTuple, Optional, Union

class TokenSpan(NamedTuple):
    """
    Token span in an encoded string (list of tokens).

    Args:
        start (`int`):
            Index of the first token in the span.
        end (`int`):
            Index of the token following the last token in the span.
    """

    start: int
    end: int
class_skeleton:

class TokenSpan(NamedTuple):
    '''
    Token span in an encoded string (list of tokens).

    Args:
        start (`int`):
            Index of the first token in the span.
        end (`int`):
            Index of the token following the last token in the span.
    '''
    pass
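Since `TokenSpan` is a `NamedTuple`, it behaves like a plain `(start, end)` tuple while keeping named fields, so it can index directly into a token list. A self-contained sketch:

```python
from typing import NamedTuple


class TokenSpan(NamedTuple):
    start: int
    end: int


tokens = ["[CLS]", "hello", "world", "[SEP]"]
span = TokenSpan(start=1, end=3)  # `end` is exclusive, per the docstring

print(span.start, span.end)  # 1 3
start, end = span            # tuple unpacking works too
print(tokens[start:end])     # ['hello', 'world']
```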
| metric | value |
|---|---|
| total_program_units | 1 |
| total_doc_str | 1 |
| AvgCountLine | 0 |
| AvgCountLineBlank | 0 |
| AvgCountLineCode | 0 |
| AvgCountLineComment | 0 |
| AvgCyclomatic | 0 |
| CommentToCodeRatio | 2 |
| CountClassBase | 1 |
| CountClassCoupled | 0 |
| CountClassCoupledModified | 0 |
| CountClassDerived | 0 |
| CountDeclInstanceMethod | 0 |
| CountDeclInstanceVariable | 0 |
| CountDeclMethod | 0 |
| CountDeclMethodAll | 0 |
| CountLine | 11 |
| CountLineBlank | 2 |
| CountLineCode | 3 |
| CountLineCodeDecl | 1 |
| CountLineCodeExe | 2 |
| CountLineComment | 6 |
| CountStmt | 3 |
| CountStmtDecl | 1 |
| CountStmtExe | 2 |
| MaxCyclomatic | 0 |
| MaxInheritanceTree | 1 |
| MaxNesting | 0 |
| SumCyclomatic | 0 |
id: 6,502
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/tokenization_utils_base.py
class_name: transformers.tokenization_utils_base.TruncationStrategy

human_written_code:
from .utils import (
    CHAT_TEMPLATE_DIR,
    CHAT_TEMPLATE_FILE,
    ExplicitEnum,
    PaddingStrategy,
    PushToHubMixin,
    TensorType,
    add_end_docstrings,
    cached_file,
    copy_func,
    download_url,
    extract_commit_hash,
    is_mlx_available,
    is_numpy_array,
    is_offline_mode,
    is_protobuf_available,
    is_remote_url,
    is_tokenizers_available,
    is_torch_available,
    is_torch_device,
    is_torch_tensor,
    list_repo_templates,
    logging,
    requires_backends,
    to_py_obj,
)

class TruncationStrategy(ExplicitEnum):
    """
    Possible values for the `truncation` argument in [`PreTrainedTokenizerBase.__call__`]. Useful for tab-completion
    in an IDE.
    """

    ONLY_FIRST = 'only_first'
    ONLY_SECOND = 'only_second'
    LONGEST_FIRST = 'longest_first'
    DO_NOT_TRUNCATE = 'do_not_truncate'
class_skeleton:

class TruncationStrategy(ExplicitEnum):
    '''
    Possible values for the `truncation` argument in [`PreTrainedTokenizerBase.__call__`]. Useful for tab-completion
    in an IDE.
    '''
    pass
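In `transformers`, `ExplicitEnum` is a string-backed enum, which is why both an enum member and its raw string are accepted for the `truncation` argument. A sketch with a stand-in `ExplicitEnum` (the real one additionally customizes the error raised for unknown values):

```python
from enum import Enum


class ExplicitEnum(str, Enum):
    """Stand-in for transformers' ExplicitEnum (a str-backed Enum)."""


class TruncationStrategy(ExplicitEnum):
    ONLY_FIRST = "only_first"
    ONLY_SECOND = "only_second"
    LONGEST_FIRST = "longest_first"
    DO_NOT_TRUNCATE = "do_not_truncate"


# Members compare equal to their raw strings, and lookup by value works, so
# truncation="longest_first" and truncation=TruncationStrategy.LONGEST_FIRST
# are interchangeable in PreTrainedTokenizerBase.__call__.
assert TruncationStrategy.LONGEST_FIRST == "longest_first"
assert TruncationStrategy("only_first") is TruncationStrategy.ONLY_FIRST
```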
| metric | value |
|---|---|
| total_program_units | 1 |
| total_doc_str | 1 |
| AvgCountLine | 0 |
| AvgCountLineBlank | 0 |
| AvgCountLineCode | 0 |
| AvgCountLineComment | 0 |
| AvgCyclomatic | 0 |
| CommentToCodeRatio | 0.8 |
| CountClassBase | 1 |
| CountClassCoupled | 0 |
| CountClassCoupledModified | 0 |
| CountClassDerived | 0 |
| CountDeclInstanceMethod | 0 |
| CountDeclInstanceVariable | 0 |
| CountDeclMethod | 0 |
| CountDeclMethodAll | 0 |
| CountLine | 10 |
| CountLineBlank | 1 |
| CountLineCode | 5 |
| CountLineCodeDecl | 5 |
| CountLineCodeExe | 4 |
| CountLineComment | 4 |
| CountStmt | 5 |
| CountStmtDecl | 5 |
| CountStmtExe | 4 |
| MaxCyclomatic | 0 |
| MaxInheritanceTree | 1 |
| MaxNesting | 0 |
| SumCyclomatic | 0 |
id: 6,503
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/tokenization_utils_fast.py
class_name: transformers.tokenization_utils_fast.PreTrainedTokenizerFast

human_written_code:
from tokenizers import Encoding as EncodingFast from tokenizers import Tokenizer as TokenizerFast import tokenizers.pre_tokenizers as pre_tokenizers_fast import os from .integrations.ggml import convert_gguf_tokenizer from .tokenization_utils import PreTrainedTokenizer from tokenizers.decoders import Decoder as DecoderFast from collections.abc import Iterable import json from .convert_slow_tokenizer import convert_slow_tokenizer from .utils import PaddingStrategy, add_end_docstrings, logging from typing import Any, Optional, Union import copy from .tokenization_utils_base import INIT_TOKENIZER_DOCSTRING, AddedToken, BatchEncoding, PreTokenizedInput, PreTokenizedInputPair, PreTrainedTokenizerBase, SpecialTokensMixin, TextInput, TextInputPair, TruncationStrategy from .modeling_gguf_pytorch_utils import load_gguf_checkpoint from collections import defaultdict @add_end_docstrings(INIT_TOKENIZER_DOCSTRING) class PreTrainedTokenizerFast(PreTrainedTokenizerBase): """ Base class for all fast tokenizers (wrapping HuggingFace tokenizers library). Inherits from [`~tokenization_utils_base.PreTrainedTokenizerBase`]. Handles all the shared methods for tokenization and special tokens, as well as methods for downloading/caching/loading pretrained tokenizers, as well as adding tokens to the vocabulary. This class also contains the added tokens in a unified way on top of all tokenizers so we don't have to handle the specific vocabulary augmentation methods of the various underlying dictionary structures (BPE, sentencepiece...). """ vocab_files_names = VOCAB_FILES_NAMES slow_tokenizer_class: Optional[type[PreTrainedTokenizer]] = None def __init__(self, *args, **kwargs): tokenizer_object = kwargs.pop('tokenizer_object', None) slow_tokenizer = kwargs.pop('__slow_tokenizer', None) gguf_file = kwargs.pop('gguf_file', None) fast_tokenizer_file = kwargs.pop('tokenizer_file', None) from_slow = kwargs.pop('from_slow', False) added_tokens_decoder = kwargs.pop('added_tokens_decoder', {}) self.add_prefix_space = kwargs.get('add_prefix_space', False) if from_slow and slow_tokenizer is None and (self.slow_tokenizer_class is None): raise ValueError("Cannot instantiate this tokenizer from a slow version. 
If it's based on sentencepiece, make sure you have sentencepiece installed.") if tokenizer_object is not None: fast_tokenizer = copy.deepcopy(tokenizer_object) elif fast_tokenizer_file is not None and (not from_slow): fast_tokenizer = TokenizerFast.from_file(fast_tokenizer_file) elif slow_tokenizer: fast_tokenizer = convert_slow_tokenizer(slow_tokenizer) elif gguf_file is not None: gguf_param = load_gguf_checkpoint(kwargs.get('vocab_file')) architecture = gguf_param['config']['model_type'] tokenizer_dict = gguf_param['tokenizer'] tokenizer_config = gguf_param['tokenizer_config'] fast_tokenizer, additional_kwargs = convert_gguf_tokenizer(architecture, tokenizer_dict) kwargs.update(tokenizer_config) if len(additional_kwargs) > 0: kwargs.update(additional_kwargs) elif self.slow_tokenizer_class is not None and slow_tokenizer is not False: slow_tokenizer = self.slow_tokenizer_class(*args, **kwargs) fast_tokenizer = convert_slow_tokenizer(slow_tokenizer) elif not slow_tokenizer: self.vocab_file = kwargs.get('vocab_file') self.additional_special_tokens = kwargs.get('additional_special_tokens', []) fast_tokenizer = convert_slow_tokenizer(self, from_tiktoken=True) slow_tokenizer = None else: raise ValueError("Couldn't instantiate the backend tokenizer from one of: \n(1) a `tokenizers` library serialization file, \n(2) a slow tokenizer instance to convert or \n(3) an equivalent slow tokenizer class to instantiate and convert. \nYou need to have sentencepiece or tiktoken installed to convert a slow tokenizer to a fast one.") self._tokenizer = fast_tokenizer if slow_tokenizer is not None: kwargs.update(slow_tokenizer.init_kwargs) self._decode_use_source_tokenizer = False _truncation = self._tokenizer.truncation if _truncation is not None: self._tokenizer.enable_truncation(**_truncation) kwargs.setdefault('max_length', _truncation['max_length']) kwargs.setdefault('truncation_side', _truncation['direction']) kwargs.setdefault('stride', _truncation['stride']) kwargs.setdefault('truncation_strategy', _truncation['strategy']) else: self._tokenizer.no_truncation() _padding = self._tokenizer.padding if _padding is not None: self._tokenizer.enable_padding(**_padding) kwargs.setdefault('pad_token', _padding['pad_token']) kwargs.setdefault('pad_token_type_id', _padding['pad_type_id']) kwargs.setdefault('padding_side', _padding['direction']) kwargs.setdefault('max_length', _padding['length']) kwargs.setdefault('pad_to_multiple_of', _padding['pad_to_multiple_of']) super().__init__(**kwargs) self._tokenizer.encode_special_tokens = self.split_special_tokens added_tokens_decoder_hash = {hash(repr(token)) for token in self.added_tokens_decoder} tokens_to_add = [token for index, token in sorted(added_tokens_decoder.items(), key=lambda x: x[0]) if hash(repr(token)) not in added_tokens_decoder_hash] encoder = list(self.added_tokens_encoder.keys()) + [str(token) for token in tokens_to_add] tokens_to_add += [token for token in self.all_special_tokens_extended if token not in encoder and token not in tokens_to_add] if len(tokens_to_add) > 0: tokens = [] special_tokens = self.all_special_tokens for token in tokens_to_add: is_special = token.special or str(token) in special_tokens if isinstance(token, AddedToken) else str(token) in special_tokens if isinstance(token, str): token = AddedToken(token, special=is_special) else: token.special = is_special tokens.append(token) if tokens: self.add_tokens(tokens) try: pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__()) if 
pre_tok_state.get('add_prefix_space', self.add_prefix_space) != self.add_prefix_space: pre_tok_class = getattr(pre_tokenizers_fast, pre_tok_state.pop('type')) pre_tok_state['add_prefix_space'] = self.add_prefix_space self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state) except Exception: pass @property def is_fast(self) -> bool: return True @property def can_save_slow_tokenizer(self) -> bool: """ `bool`: Whether or not the slow tokenizer can be saved. For a sentencepiece based slow tokenizer, this can only be `True` if the original `"sentencepiece.model"` was not deleted. """ if 'vocab_file' in self.vocab_files_names and self.vocab_files_names['vocab_file'].endswith('.model'): if hasattr(self, 'vocab_file') and self.vocab_file: return os.path.isfile(self.vocab_file) return False else: return True @property def vocab_size(self) -> int: """ `int`: Size of the base vocabulary (without the added tokens). """ return self._tokenizer.get_vocab_size(with_added_tokens=False) def get_vocab(self) -> dict[str, int]: return self._tokenizer.get_vocab(with_added_tokens=True) @property def vocab(self) -> dict[str, int]: return self.get_vocab() @property def added_tokens_encoder(self) -> dict[str, int]: """ Returns the sorted mapping from string to index. The added tokens encoder is cached for performance optimisation in `self._added_tokens_encoder` for the slow tokenizers. """ return {k.content: v for v, k in sorted(self.added_tokens_decoder.items(), key=lambda item: item[0])} @property def added_tokens_decoder(self) -> dict[int, AddedToken]: """ Returns the added tokens in the vocabulary as a dictionary of index to AddedToken. Returns: `dict[str, int]`: The added tokens. """ return self._tokenizer.get_added_tokens_decoder() def get_added_vocab(self) -> dict[str, int]: """ Returns the added tokens in the vocabulary as a dictionary of token to index. Returns: `dict[str, int]`: The added tokens. """ return {k.content: v for v, k in sorted(self.added_tokens_decoder.items(), key=lambda item: item[0])} def __bool__(self) -> bool: """ Returns True, to avoid expensive `assert tokenizer` gotchas. """ return True def __len__(self) -> int: """ Size of the full vocabulary with the added tokens. """ return self._tokenizer.get_vocab_size(with_added_tokens=True) @property def backend_tokenizer(self) -> TokenizerFast: """ `tokenizers.implementations.BaseTokenizer`: The Rust tokenizer used as a backend. """ return self._tokenizer @property def decoder(self) -> DecoderFast: """ `tokenizers.decoders.Decoder`: The Rust decoder for this tokenizer. """ return self._tokenizer.decoder def _convert_encoding(self, encoding: EncodingFast, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True) -> tuple[dict[str, Any], list[EncodingFast]]: """ Convert the encoding representation (from low-level HuggingFace tokenizer output) to a python Dict and a list of encodings, take care of building a batch from overflowing tokens. Overflowing tokens are converted to additional examples (like batches) so the output values of the dict are lists (overflows) of lists (tokens). 
Output shape: (overflows, sequence length) """ if return_token_type_ids is None: return_token_type_ids = 'token_type_ids' in self.model_input_names if return_attention_mask is None: return_attention_mask = 'attention_mask' in self.model_input_names if return_overflowing_tokens and encoding.overflowing is not None: encodings = [encoding] + encoding.overflowing else: encodings = [encoding] encoding_dict = defaultdict(list) for e in encodings: encoding_dict['input_ids'].append(e.ids) if return_token_type_ids: encoding_dict['token_type_ids'].append(e.type_ids) if return_attention_mask: encoding_dict['attention_mask'].append(e.attention_mask) if return_special_tokens_mask: encoding_dict['special_tokens_mask'].append(e.special_tokens_mask) if return_offsets_mapping: encoding_dict['offset_mapping'].append(e.offsets) if return_length: encoding_dict['length'].append(len(e.ids)) return (encoding_dict, encodings) def convert_tokens_to_ids(self, tokens: Union[str, Iterable[str]]) -> Union[int, list[int]]: """ Converts a token string (or a sequence of tokens) in a single integer id (or a Iterable of ids), using the vocabulary. Args: tokens (`str` or `Iterable[str]`): One or several token(s) to convert to token id(s). Returns: `int` or `list[int]`: The token id or list of token ids. """ if isinstance(tokens, str): return self._convert_token_to_id_with_added_voc(tokens) return [self._convert_token_to_id_with_added_voc(token) for token in tokens] def _convert_token_to_id_with_added_voc(self, token: str) -> int: index = self._tokenizer.token_to_id(token) if index is None: return self.unk_token_id return index def _convert_id_to_token(self, index: int) -> Optional[str]: return self._tokenizer.id_to_token(int(index)) def _add_tokens(self, new_tokens: list[Union[str, AddedToken]], special_tokens=False) -> int: if special_tokens: return self._tokenizer.add_special_tokens(new_tokens) return self._tokenizer.add_tokens(new_tokens) def num_special_tokens_to_add(self, pair: bool=False) -> int: """ Returns the number of added tokens when encoding a sequence with special tokens. <Tip> This encodes a dummy input and checks the number of added tokens, and is therefore not efficient. Do not put this inside your training loop. </Tip> Args: pair (`bool`, *optional*, defaults to `False`): Whether the number of added tokens should be computed in the case of a sequence pair or a single sequence. Returns: `int`: Number of special tokens added to sequences. """ return self._tokenizer.num_special_tokens_to_add(pair) def convert_ids_to_tokens(self, ids: Union[int, list[int]], skip_special_tokens: bool=False) -> Union[str, list[str]]: """ Converts a single index or a sequence of indices in a token or a sequence of tokens, using the vocabulary and added tokens. Args: ids (`int` or `list[int]`): The token id (or token ids) to convert to tokens. skip_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not to remove special tokens in the decoding. Returns: `str` or `list[str]`: The decoded token(s). 
""" if isinstance(ids, int): return self._tokenizer.id_to_token(ids) tokens = [] ids_to_skip = set(self.all_special_ids) if skip_special_tokens else set() for index in ids: index = int(index) if index in ids_to_skip: continue tokens.append(self._tokenizer.id_to_token(index)) return tokens def tokenize(self, text: str, pair: Optional[str]=None, add_special_tokens: bool=False, **kwargs) -> list[str]: return self.encode_plus(text=text, text_pair=pair, add_special_tokens=add_special_tokens, **kwargs).tokens() def set_truncation_and_padding(self, padding_strategy: PaddingStrategy, truncation_strategy: TruncationStrategy, max_length: int, stride: int, pad_to_multiple_of: Optional[int], padding_side: Optional[str]): """ Define the truncation and the padding strategies for fast tokenizers (provided by HuggingFace tokenizers library) and restore the tokenizer settings afterwards. The provided tokenizer has no padding / truncation strategy before the managed section. If your tokenizer set a padding / truncation strategy before, then it will be reset to no padding / truncation when exiting the managed section. Args: padding_strategy ([`~utils.PaddingStrategy`]): The kind of padding that will be applied to the input truncation_strategy ([`~tokenization_utils_base.TruncationStrategy`]): The kind of truncation that will be applied to the input max_length (`int`): The maximum size of a sequence. stride (`int`): The stride to use when handling overflow. pad_to_multiple_of (`int`, *optional*): If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta). padding_side (`str`, *optional*): The side on which the model should have padding applied. Should be selected between ['right', 'left']. Default value is picked from the class attribute of the same name. 
""" _truncation = self._tokenizer.truncation _padding = self._tokenizer.padding if truncation_strategy == TruncationStrategy.DO_NOT_TRUNCATE: if _truncation is not None: self._tokenizer.no_truncation() else: target = {'max_length': max_length, 'stride': stride, 'strategy': truncation_strategy.value, 'direction': self.truncation_side} if _truncation is None: current = None else: current = {k: _truncation.get(k, None) for k in target} if current != target: self._tokenizer.enable_truncation(**target) if padding_strategy == PaddingStrategy.DO_NOT_PAD: if _padding is not None: self._tokenizer.no_padding() else: length = max_length if padding_strategy == PaddingStrategy.MAX_LENGTH else None target = {'length': length, 'direction': padding_side if padding_side is not None else self.padding_side, 'pad_id': self.pad_token_id, 'pad_token': self.pad_token, 'pad_type_id': self.pad_token_type_id, 'pad_to_multiple_of': pad_to_multiple_of} if _padding != target: self._tokenizer.enable_padding(**target) def _batch_encode_plus(self, batch_text_or_text_pairs: Union[list[TextInput], list[TextInputPair], list[PreTokenizedInput], list[PreTokenizedInputPair]], add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, is_split_into_words: bool=False, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[str]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, split_special_tokens: bool=False) -> BatchEncoding: if not isinstance(batch_text_or_text_pairs, (tuple, list)): raise TypeError(f'batch_text_or_text_pairs has to be a list or a tuple (got {type(batch_text_or_text_pairs)})') self.set_truncation_and_padding(padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side) if self._tokenizer.encode_special_tokens != split_special_tokens: self._tokenizer.encode_special_tokens = split_special_tokens encodings = self._tokenizer.encode_batch(batch_text_or_text_pairs, add_special_tokens=add_special_tokens, is_pretokenized=is_split_into_words) tokens_and_encodings = [self._convert_encoding(encoding=encoding, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose) for encoding in encodings] sanitized_tokens = {} for key in tokens_and_encodings[0][0]: stack = [e for item, _ in tokens_and_encodings for e in item[key]] sanitized_tokens[key] = stack sanitized_encodings = [e for _, item in tokens_and_encodings for e in item] if return_overflowing_tokens: overflow_to_sample_mapping = [] for i, (toks, _) in enumerate(tokens_and_encodings): overflow_to_sample_mapping += [i] * len(toks['input_ids']) sanitized_tokens['overflow_to_sample_mapping'] = overflow_to_sample_mapping for input_ids in sanitized_tokens['input_ids']: self._eventual_warn_about_too_long_sequence(input_ids, max_length, verbose) return BatchEncoding(sanitized_tokens, sanitized_encodings, tensor_type=return_tensors) 
def _encode_plus(self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[Union[TextInput, PreTokenizedInput]]=None, add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, is_split_into_words: bool=False, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[bool]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, split_special_tokens: bool=False, **kwargs) -> BatchEncoding: batched_input = [(text, text_pair)] if text_pair else [text] batched_output = self._batch_encode_plus(batched_input, is_split_into_words=is_split_into_words, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, split_special_tokens=split_special_tokens, **kwargs) if return_tensors is None and (not return_overflowing_tokens): batched_output = BatchEncoding({key: value[0] if len(value) > 0 and isinstance(value[0], list) else value for key, value in batched_output.items()}, batched_output.encodings) self._eventual_warn_about_too_long_sequence(batched_output['input_ids'], max_length, verbose) return batched_output def convert_tokens_to_string(self, tokens: list[str]) -> str: return self.backend_tokenizer.decoder.decode(tokens) if self.backend_tokenizer.decoder is not None else ' '.join(tokens) def _decode(self, token_ids: Union[int, list[int]], skip_special_tokens: bool=False, clean_up_tokenization_spaces: Optional[bool]=None, **kwargs) -> str: self._decode_use_source_tokenizer = kwargs.pop('use_source_tokenizer', False) if isinstance(token_ids, int): token_ids = [token_ids] text = self._tokenizer.decode(token_ids, skip_special_tokens=skip_special_tokens) clean_up_tokenization_spaces = clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces if clean_up_tokenization_spaces: clean_text = self.clean_up_tokenization(text) return clean_text else: return text def _save_pretrained(self, save_directory: Union[str, os.PathLike], file_names: tuple[str], legacy_format: Optional[bool]=None, filename_prefix: Optional[str]=None) -> tuple[str]: """ Save a tokenizer using the slow-tokenizer/legacy format: vocabulary + added tokens as well as in a unique JSON file containing {config + vocab + added-tokens}. """ save_directory = str(save_directory) if self.slow_tokenizer_class is None and legacy_format is True: raise ValueError('Your tokenizer does not have a legacy version defined and therefore cannot register this version. 
You might consider leaving the legacy_format at `None` or setting it to `False`.') save_slow = (legacy_format is None or legacy_format is True) and self.slow_tokenizer_class is not None and self.can_save_slow_tokenizer save_fast = legacy_format is None or legacy_format is False if save_slow: added_tokens_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + ADDED_TOKENS_FILE) added_vocab = {tok: index for tok, index in self.added_tokens_encoder.items() if index >= self.vocab_size} if added_vocab: with open(added_tokens_file, 'w', encoding='utf-8') as f: out_str = json.dumps(added_vocab, indent=2, sort_keys=True, ensure_ascii=False) + '\n' f.write(out_str) vocab_files = self.save_vocabulary(save_directory, filename_prefix=filename_prefix) file_names = file_names + vocab_files + (added_tokens_file,) if save_fast: tokenizer_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + TOKENIZER_FILE) self.backend_tokenizer.save(tokenizer_file) file_names = file_names + (tokenizer_file,) return file_names def train_new_from_iterator(self, text_iterator, vocab_size, length=None, new_special_tokens=None, special_tokens_map=None, **kwargs): """ Trains a tokenizer on a new corpus with the same defaults (in terms of special tokens or tokenization pipeline) as the current one. Args: text_iterator (generator of `list[str]`): The training corpus. Should be a generator of batches of texts, for instance a list of lists of texts if you have everything in memory. vocab_size (`int`): The size of the vocabulary you want for your tokenizer. length (`int`, *optional*): The total number of sequences in the iterator. This is used to provide meaningful progress tracking new_special_tokens (list of `str` or `AddedToken`, *optional*): A list of new special tokens to add to the tokenizer you are training. special_tokens_map (`dict[str, str]`, *optional*): If you want to rename some of the special tokens this tokenizer uses, pass along a mapping old special token name to new special token name in this argument. kwargs (`dict[str, Any]`, *optional*): Additional keyword arguments passed along to the trainer from the 🤗 Tokenizers library. Returns: [`PreTrainedTokenizerFast`]: A new tokenizer of the same type as the original one, trained on `text_iterator`. 
""" tokenizer_json = json.loads(self._tokenizer.to_str()) added_tokens = tokenizer_json.pop('added_tokens') post_processor = tokenizer_json.pop('post_processor') unk_token = None if tokenizer_json['model']['type'] == 'BPE': tokenizer_json['model']['vocab'] = {} tokenizer_json['model']['merges'] = [] elif tokenizer_json['model']['type'] == 'Unigram': if tokenizer_json['model']['unk_id'] is not None: unk_id = tokenizer_json['model']['unk_id'] unk_token = tokenizer_json['model']['vocab'][unk_id][0] if special_tokens_map is not None and unk_token in special_tokens_map: unk_token = special_tokens_map[unk_token] tokenizer_json['model']['unk_id'] = 0 tokenizer_json['model']['vocab'] = [[unk_token, 0.0]] elif tokenizer_json['model']['type'] in ['WordLevel', 'WordPiece']: tokenizer_json['model']['vocab'] = {} else: raise ValueError(f"This method does not support this type of tokenizer (found {tokenizer_json['model']['type']}) only BPE, Unigram, WordLevel and WordPiece.") if special_tokens_map is not None and 'unk_token' in tokenizer_json['model'] and (tokenizer_json['model']['unk_token'] in special_tokens_map): tokenizer_json['model']['unk_token'] = special_tokens_map[tokenizer_json['model']['unk_token']] tokenizer = TokenizerFast.from_str(json.dumps(tokenizer_json)) special_tokens = [] for added_token in added_tokens: special = added_token.pop('special', None) _ = added_token.pop('id', None) if tokenizer_json['model']['type'] != 'Unigram' and (not special): continue if special_tokens_map is not None and added_token['content'] in special_tokens_map: added_token['content'] = special_tokens_map[added_token['content']] special_tokens.append(AddedToken(**added_token)) if new_special_tokens is not None: special_tokens.extend(new_special_tokens) if tokenizer_json['model']['type'] == 'BPE' and 'continuing_subword_prefix' not in kwargs and (tokenizer_json['model']['continuing_subword_prefix'] is not None): kwargs['continuing_subword_prefix'] = tokenizer_json['model']['continuing_subword_prefix'] if tokenizer_json['model']['type'] == 'BPE' and 'end_of_word_suffix' not in kwargs and (tokenizer_json['model']['end_of_word_suffix'] is not None): kwargs['end_of_word_suffix'] = tokenizer_json['model']['end_of_word_suffix'] if tokenizer_json['model']['type'] == 'Unigram' and unk_token is not None: kwargs['unk_token'] = unk_token if tokenizer_json['pre_tokenizer'] is not None: if tokenizer_json['pre_tokenizer']['type'] == 'ByteLevel' or (tokenizer_json['pre_tokenizer']['type'] == 'Sequence' and 'pretokenizers' in tokenizer_json['pre_tokenizer'] and any((pretokenizer['type'] == 'ByteLevel' for pretokenizer in tokenizer_json['pre_tokenizer']['pretokenizers']))): kwargs['initial_alphabet'] = pre_tokenizers_fast.ByteLevel.alphabet() trainer_class = MODEL_TO_TRAINER_MAPPING[tokenizer_json['model']['type']] trainer = trainer_class(vocab_size=vocab_size, special_tokens=special_tokens, **kwargs) tokenizer.train_from_iterator(text_iterator, length=length, trainer=trainer) if post_processor is not None: trained_tokenizer_json = json.loads(tokenizer.to_str()) if 'special_tokens' in post_processor: for key in post_processor['special_tokens']: tokens = post_processor['special_tokens'][key]['tokens'] if special_tokens_map is not None: tokens = [special_tokens_map.get(token, token) for token in tokens] post_processor['special_tokens'][key]['tokens'] = tokens for token in tokens: token_id = tokenizer.token_to_id(token) if token_id is None: raise ValueError('Attempted to set a token in the post processor that does not exist in the 
mapping') post_processor['special_tokens'][key]['ids'] = [tokenizer.token_to_id(token) for token in tokens] for special_token in ['cls', 'sep']: if special_token in post_processor: token, _ = post_processor[special_token] if special_tokens_map is not None and token in special_tokens_map: token = special_tokens_map[token] token_id = tokenizer.token_to_id(token) if token_id is None: raise ValueError('Attempted to set a token in the post processor that does not exist in the mapping') post_processor[special_token] = [token, token_id] trained_tokenizer_json['post_processor'] = post_processor tokenizer = TokenizerFast.from_str(json.dumps(trained_tokenizer_json)) kwargs = self.init_kwargs.copy() special_tokens_list = SpecialTokensMixin.SPECIAL_TOKENS_ATTRIBUTES.copy() special_tokens_list.remove('additional_special_tokens') for token in special_tokens_list: if getattr(self, token) is not None: special_token = getattr(self, token) if special_tokens_map is not None and special_token in special_tokens_map: special_token = special_tokens_map[special_token] special_token_full = self._special_tokens_map.get(token, None) if isinstance(special_token_full, AddedToken): kwargs[token] = AddedToken(special_token, single_word=special_token_full.single_word, lstrip=special_token_full.lstrip, rstrip=special_token_full.rstrip, normalized=special_token_full.normalized, special=True) else: kwargs[token] = special_token additional_special_tokens = self.additional_special_tokens if new_special_tokens is not None: additional_special_tokens.extend(new_special_tokens) if len(additional_special_tokens) > 0: kwargs['additional_special_tokens'] = additional_special_tokens return self.__class__(tokenizer_object=tokenizer, **kwargs)
@add_end_docstrings(INIT_TOKENIZER_DOCSTRING) class PreTrainedTokenizerFast(PreTrainedTokenizerBase): ''' Base class for all fast tokenizers (wrapping HuggingFace tokenizers library). Inherits from [`~tokenization_utils_base.PreTrainedTokenizerBase`]. Handles all the shared methods for tokenization and special tokens, as well as methods for downloading/caching/loading pretrained tokenizers, as well as adding tokens to the vocabulary. This class also contains the added tokens in a unified way on top of all tokenizers so we don't have to handle the specific vocabulary augmentation methods of the various underlying dictionary structures (BPE, sentencepiece...). ''' def __init__(self, *args, **kwargs): pass @property def is_fast(self) -> bool: pass @property def can_save_slow_tokenizer(self) -> bool: ''' `bool`: Whether or not the slow tokenizer can be saved. For a sentencepiece based slow tokenizer, this can only be `True` if the original `"sentencepiece.model"` was not deleted. ''' pass @property def vocab_size(self) -> int: ''' `int`: Size of the base vocabulary (without the added tokens). ''' pass def get_vocab(self) -> dict[str, int]: pass @property def vocab_size(self) -> int: pass @property def added_tokens_encoder(self) -> dict[str, int]: ''' Returns the sorted mapping from string to index. The added tokens encoder is cached for performance optimisation in `self._added_tokens_encoder` for the slow tokenizers. ''' pass @property def added_tokens_decoder(self) -> dict[int, AddedToken]: ''' Returns the added tokens in the vocabulary as a dictionary of index to AddedToken. Returns: `dict[str, int]`: The added tokens. ''' pass def get_added_vocab(self) -> dict[str, int]: ''' Returns the added tokens in the vocabulary as a dictionary of token to index. Returns: `dict[str, int]`: The added tokens. ''' pass def __bool__(self) -> bool: ''' Returns True, to avoid expensive `assert tokenizer` gotchas. ''' pass def __len__(self) -> int: ''' Size of the full vocabulary with the added tokens. ''' pass @property def backend_tokenizer(self) -> TokenizerFast: ''' `tokenizers.implementations.BaseTokenizer`: The Rust tokenizer used as a backend. ''' pass @property def decoder(self) -> DecoderFast: ''' `tokenizers.decoders.Decoder`: The Rust decoder for this tokenizer. ''' pass def _convert_encoding(self, encoding: EncodingFast, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True) -> tuple[dict[str, Any], list[EncodingFast]]: ''' Convert the encoding representation (from low-level HuggingFace tokenizer output) to a python Dict and a list of encodings, take care of building a batch from overflowing tokens. Overflowing tokens are converted to additional examples (like batches) so the output values of the dict are lists (overflows) of lists (tokens). Output shape: (overflows, sequence length) ''' pass def convert_tokens_to_ids(self, tokens: Union[str, Iterable[str]]) -> Union[int, list[int]]: ''' Converts a token string (or a sequence of tokens) in a single integer id (or a Iterable of ids), using the vocabulary. Args: tokens (`str` or `Iterable[str]`): One or several token(s) to convert to token id(s). Returns: `int` or `list[int]`: The token id or list of token ids. 
''' pass def _convert_token_to_id_with_added_voc(self, token: str) -> int: pass def _convert_id_to_token(self, index: int) -> Optional[str]: pass def _add_tokens(self, new_tokens: list[Union[str, AddedToken]], special_tokens=False) -> int: pass def num_special_tokens_to_add(self, pair: bool=False) -> int: ''' Returns the number of added tokens when encoding a sequence with special tokens. <Tip> This encodes a dummy input and checks the number of added tokens, and is therefore not efficient. Do not put this inside your training loop. </Tip> Args: pair (`bool`, *optional*, defaults to `False`): Whether the number of added tokens should be computed in the case of a sequence pair or a single sequence. Returns: `int`: Number of special tokens added to sequences. ''' pass def convert_ids_to_tokens(self, ids: Union[int, list[int]], skip_special_tokens: bool=False) -> Union[str, list[str]]: ''' Converts a single index or a sequence of indices into a token or a sequence of tokens, using the vocabulary and added tokens. Args: ids (`int` or `list[int]`): The token id (or token ids) to convert to tokens. skip_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not to remove special tokens in the decoding. Returns: `str` or `list[str]`: The decoded token(s). ''' pass def tokenize(self, text: str, pair: Optional[str]=None, add_special_tokens: bool=False, **kwargs) -> list[str]: pass def set_truncation_and_padding(self, padding_strategy: PaddingStrategy, truncation_strategy: TruncationStrategy, max_length: int, stride: int, pad_to_multiple_of: Optional[int], padding_side: Optional[str]): ''' Define the truncation and the padding strategies for fast tokenizers (provided by HuggingFace tokenizers library) and restore the tokenizer settings afterwards. The provided tokenizer has no padding / truncation strategy before the managed section. If your tokenizer had a padding / truncation strategy set before, it will be reset to no padding / truncation when exiting the managed section. Args: padding_strategy ([`~utils.PaddingStrategy`]): The kind of padding that will be applied to the input. truncation_strategy ([`~tokenization_utils_base.TruncationStrategy`]): The kind of truncation that will be applied to the input. max_length (`int`): The maximum size of a sequence. stride (`int`): The stride to use when handling overflow. pad_to_multiple_of (`int`, *optional*): If set, will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta). padding_side (`str`, *optional*): The side on which the model should have padding applied. Should be selected between ['right', 'left']. Default value is picked from the class attribute of the same name. 
''' pass def _batch_encode_plus(self, batch_text_or_text_pairs: Union[list[TextInput], list[TextInputPair], list[PreTokenizedInput], list[PreTokenizedInputPair]], add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, is_split_into_words: bool=False, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[str]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, split_special_tokens: bool=False) -> BatchEncoding: pass def _encode_plus(self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[Union[TextInput, PreTokenizedInput]]=None, add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, is_split_into_words: bool=False, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[bool]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, split_special_tokens: bool=False, **kwargs) -> BatchEncoding: pass def convert_tokens_to_string(self, tokens: list[str]) -> str: pass def _decode(self, token_ids: Union[int, list[int]], skip_special_tokens: bool=False, clean_up_tokenization_spaces: Optional[bool]=None, **kwargs) -> str: pass def _save_pretrained(self, save_directory: Union[str, os.PathLike], file_names: tuple[str], legacy_format: Optional[bool]=None, filename_prefix: Optional[str]=None) -> tuple[str]: ''' Save a tokenizer using the slow-tokenizer/legacy format (vocabulary + added tokens) as well as in a unique JSON file containing {config + vocab + added-tokens}. ''' pass def train_new_from_iterator(self, text_iterator, vocab_size, length=None, new_special_tokens=None, special_tokens_map=None, **kwargs): ''' Trains a tokenizer on a new corpus with the same defaults (in terms of special tokens or tokenization pipeline) as the current one. Args: text_iterator (generator of `list[str]`): The training corpus. Should be a generator of batches of texts, for instance a list of lists of texts if you have everything in memory. vocab_size (`int`): The size of the vocabulary you want for your tokenizer. length (`int`, *optional*): The total number of sequences in the iterator. This is used to provide meaningful progress tracking. new_special_tokens (list of `str` or `AddedToken`, *optional*): A list of new special tokens to add to the tokenizer you are training. special_tokens_map (`dict[str, str]`, *optional*): If you want to rename some of the special tokens this tokenizer uses, pass along a mapping from old special token name to new special token name in this argument. kwargs (`dict[str, Any]`, *optional*): Additional keyword arguments passed along to the trainer from the 🤗 Tokenizers library. Returns: [`PreTrainedTokenizerFast`]: A new tokenizer of the same type as the original one, trained on `text_iterator`. ''' pass
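A short sketch of the conversion methods outlined in the skeleton above (`tokenize`, `convert_tokens_to_ids`, `convert_ids_to_tokens`, `vocab_size`, `get_added_vocab`), again assuming the `gpt2` checkpoint purely for illustration:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("gpt2")  # returns a PreTrainedTokenizerFast

tokens = tok.tokenize("Hello world")        # text -> tokens
ids = tok.convert_tokens_to_ids(tokens)     # tokens -> ids
back = tok.convert_ids_to_tokens(ids)       # ids -> tokens
text = tok.decode(ids)                      # ids -> text

# len(tok) is the full vocabulary size including added tokens;
# vocab_size is the base vocabulary only.
print(len(tok), tok.vocab_size, len(tok.get_added_vocab()))
```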
38
17
29
3
20
6
4
0.3
1
15
2
58
27
5
27
88
827
103
558
202
438
166
314
108
286
32
2
5
119
6,504
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/trainer.py
transformers.trainer.Trainer
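Before the source of this class, a minimal usage sketch of `Trainer`. The checkpoint name, toy dataset, and hyperparameters below are hypothetical placeholders chosen for illustration, not values taken from this record:

```python
import torch
from torch.utils.data import Dataset
from transformers import (
    AutoModelForSequenceClassification,
    AutoTokenizer,
    Trainer,
    TrainingArguments,
)

class ToyDataset(Dataset):
    """A tiny illustrative dataset of pre-tokenized text-classification examples."""
    def __init__(self, tokenizer):
        texts = ["great movie", "terrible movie"]
        self.enc = tokenizer(texts, padding=True, truncation=True)
        self.labels = [1, 0]
    def __len__(self):
        return len(self.labels)
    def __getitem__(self, i):
        item = {k: torch.tensor(v[i]) for k, v in self.enc.items()}
        item["labels"] = torch.tensor(self.labels[i])
        return item

tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
model = AutoModelForSequenceClassification.from_pretrained(
    "distilbert-base-uncased", num_labels=2
)

args = TrainingArguments(
    output_dir="tmp_trainer_demo",       # checkpoints and logs land here
    num_train_epochs=1,
    per_device_train_batch_size=2,
)
# processing_class supersedes the deprecated `tokenizer` argument (see __init__ below).
trainer = Trainer(
    model=model,
    args=args,
    train_dataset=ToyDataset(tokenizer),
    processing_class=tokenizer,
)
trainer.train()
```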
from .tokenization_utils_base import PreTrainedTokenizerBase from pathlib import Path from .pytorch_utils import is_torch_greater_or_equal_than_2_3 from functools import partial from .processing_utils import ProcessorMixin from torch.utils.data import DataLoader, Dataset, IterableDataset, RandomSampler, SequentialSampler from collections.abc import Iterator, Mapping from .optimization import Adafactor, get_scheduler from torch import nn import warnings from .models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, MODEL_MAPPING_NAMES from packaging import version import time from .trainer_utils import PREFIX_CHECKPOINT_DIR, BestRun, EvalLoopOutput, EvalPrediction, HPSearchBackend, HubStrategy, PredictionOutput, RemoveColumnsCollator, SaveStrategy, TrainerMemoryTracker, TrainOutput, check_target_module_exists, default_compute_objective, denumpify_detensorize, enable_full_determinism, find_executable_batch_size, get_last_checkpoint, has_length, neftune_post_forward_hook, number_of_arguments, seed_worker, set_seed, speed_metrics import tempfile from .modeling_utils import PreTrainedModel, load_sharded_checkpoint, unwrap_model from huggingface_hub import ModelCard, create_repo, upload_folder import sys from .modelcard import TrainingSummary import torch.distributed as dist import shutil from .integrations.tpu import tpu_spmd_dataloader import torch import re import random from .integrations.deepspeed import deepspeed_init, deepspeed_load_checkpoint, is_deepspeed_available from .utils.quantization_config import QuantizationMethod import os from .image_processing_utils import BaseImageProcessor from .utils.import_utils import requires import math from .utils import ADAPTER_CONFIG_NAME, ADAPTER_SAFE_WEIGHTS_NAME, ADAPTER_WEIGHTS_NAME, CONFIG_NAME, GENERATION_CONFIG_NAME, SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, XLA_FSDPV2_MIN_VERSION, PushInProgress, PushToHubMixin, can_return_loss, check_torch_load_is_safe, find_labels, is_accelerate_available, is_apollo_torch_available, is_bitsandbytes_available, is_datasets_available, is_galore_torch_available, is_grokadamw_available, is_in_notebook, is_liger_kernel_available, is_lomo_available, is_peft_available, is_safetensors_available, is_sagemaker_dp_enabled, is_sagemaker_mp_enabled, is_schedulefree_available, is_torch_hpu_available, is_torch_mlu_available, is_torch_mps_available, is_torch_musa_available, is_torch_neuroncore_available, is_torch_npu_available, is_torch_optimi_available, is_torch_xla_available, is_torch_xpu_available, is_torchao_available, logging, strtobool from .utils.deprecation import deprecate_kwarg import json from .hyperparameter_search import ALL_HYPERPARAMETER_SEARCH_BACKENDS, default_hp_search_backend import inspect from .feature_extraction_utils import FeatureExtractionMixin from .training_args import OptimizerNames, ParallelMode, TrainingArguments import importlib.metadata from .feature_extraction_sequence_utils import SequenceFeatureExtractor import numpy as np from .trainer_pt_utils import DistributedTensorGatherer, EvalLoopContainer, IterableDatasetShard, LabelSmoother, LayerWiseDummyOptimizer, LengthGroupedSampler, SequentialDistributedSampler, distributed_broadcast_scalars, distributed_concat, find_batch_size, get_model_param_count, get_module_class_from_name, get_parameter_names, nested_concat, nested_detach, nested_numpify, nested_xla_mesh_reduce, reissue_pt_warnings, remove_dummy_checkpoint, set_rng_state_for_device import glob import huggingface_hub.utils as hf_hub_utils 
from .debug_utils import DebugOption, DebugUnderflowOverflow import functools from .integrations import get_reporting_integration_callbacks import copy from .data.data_collator import DataCollator, DataCollatorWithPadding, default_data_collator import contextlib from .configuration_utils import PretrainedConfig from .trainer_callback import CallbackHandler, DefaultFlowCallback, ExportableState, PrinterCallback, ProgressCallback, TrainerCallback, TrainerControl, TrainerState from . import __version__ from typing import TYPE_CHECKING, Any, Callable, Optional, Union @requires(backends=('torch', 'accelerate')) class Trainer: """ Trainer is a simple but feature-complete training and eval loop for PyTorch, optimized for 🤗 Transformers. Args: model ([`PreTrainedModel`] or `torch.nn.Module`, *optional*): The model to train, evaluate or use for predictions. If not provided, a `model_init` must be passed. <Tip> [`Trainer`] is optimized to work with the [`PreTrainedModel`] provided by the library. You can still use your own models defined as `torch.nn.Module` as long as they work the same way as the 🤗 Transformers models. </Tip> args ([`TrainingArguments`], *optional*): The arguments to tweak for training. Will default to a basic instance of [`TrainingArguments`] with the `output_dir` set to a directory named *tmp_trainer* in the current directory if not provided. data_collator (`DataCollator`, *optional*): The function to use to form a batch from a list of elements of `train_dataset` or `eval_dataset`. Will default to [`default_data_collator`] if no `processing_class` is provided, or to an instance of [`DataCollatorWithPadding`] if the `processing_class` is a feature extractor or tokenizer. train_dataset (Union[`torch.utils.data.Dataset`, `torch.utils.data.IterableDataset`, `datasets.Dataset`], *optional*): The dataset to use for training. If it is a [`~datasets.Dataset`], columns not accepted by the `model.forward()` method are automatically removed. Note that if it's a `torch.utils.data.IterableDataset` with some randomization and you are training in a distributed fashion, your iterable dataset should either use an internal attribute `generator` that is a `torch.Generator` for the randomization that must be identical on all processes (and the Trainer will manually set the seed of this `generator` at each epoch) or have a `set_epoch()` method that internally sets the seed of the RNGs used. eval_dataset (Union[`torch.utils.data.Dataset`, dict[str, `torch.utils.data.Dataset`], `datasets.Dataset`], *optional*): The dataset to use for evaluation. If it is a [`~datasets.Dataset`], columns not accepted by the `model.forward()` method are automatically removed. If it is a dictionary, it will evaluate on each dataset prepending the dictionary key to the metric name. processing_class (`PreTrainedTokenizerBase` or `BaseImageProcessor` or `FeatureExtractionMixin` or `ProcessorMixin`, *optional*): Processing class used to process the data. If provided, will be used to automatically process the inputs for the model, and it will be saved along the model to make it easier to rerun an interrupted training or reuse the fine-tuned model. This supersedes the `tokenizer` argument, which is now deprecated. model_init (`Callable[[], PreTrainedModel]`, *optional*): A function that instantiates the model to be used. If provided, each call to [`~Trainer.train`] will start from a new instance of the model as given by this function. 
The function may have zero arguments, or a single one containing the optuna/Ray Tune/SigOpt trial object, to be able to choose different architectures according to hyperparameters (such as layer count, sizes of inner layers, dropout probabilities, etc.). compute_loss_func (`Callable`, *optional*): A function that accepts the raw model outputs, labels, and the number of items in the entire accumulated batch (batch_size * gradient_accumulation_steps) and returns the loss. For example, see the default [loss function](https://github.com/huggingface/transformers/blob/052e652d6d53c2b26ffde87e039b723949a53493/src/transformers/trainer.py#L3618) used by [`Trainer`]. compute_metrics (`Callable[[EvalPrediction], Dict]`, *optional*): The function that will be used to compute metrics at evaluation. Must take an [`EvalPrediction`] and return a dictionary mapping strings to metric values. *Note* When passing TrainingArgs with `batch_eval_metrics` set to `True`, your compute_metrics function must take a boolean `compute_result` argument. This will be triggered after the last eval batch to signal that the function needs to calculate and return the global summary statistics rather than accumulating the batch-level statistics. callbacks (List of [`TrainerCallback`], *optional*): A list of callbacks to customize the training loop. Will add those to the list of default callbacks detailed in [here](callback). If you want to remove one of the default callbacks used, use the [`Trainer.remove_callback`] method. optimizers (`tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]`, *optional*, defaults to `(None, None)`): A tuple containing the optimizer and the scheduler to use. Will default to an instance of [`AdamW`] on your model and a scheduler given by [`get_linear_schedule_with_warmup`] controlled by `args`. optimizer_cls_and_kwargs (`tuple[Type[torch.optim.Optimizer], dict[str, Any]]`, *optional*): A tuple containing the optimizer class and keyword arguments to use. Overrides `optim` and `optim_args` in `args`. Incompatible with the `optimizers` argument. Unlike `optimizers`, this argument avoids the need to place model parameters on the correct devices before initializing the Trainer. preprocess_logits_for_metrics (`Callable[[torch.Tensor, torch.Tensor], torch.Tensor]`, *optional*): A function that preprocesses the logits right before caching them at each evaluation step. Must take two tensors, the logits and the labels, and return the logits once processed as desired. The modifications made by this function will be reflected in the predictions received by `compute_metrics`. Note that the labels (second parameter) will be `None` if the dataset does not have them. Important attributes: - **model** -- Always points to the core model. If using a transformers model, it will be a [`PreTrainedModel`] subclass. - **model_wrapped** -- Always points to the most external model in case one or more other modules wrap the original model. This is the model that should be used for the forward pass. For example, under `DeepSpeed`, the inner model is wrapped in `DeepSpeed` and then again in `torch.nn.DistributedDataParallel`. If the inner model hasn't been wrapped, then `self.model_wrapped` is the same as `self.model`. - **is_model_parallel** -- Whether or not a model has been switched to a model parallel mode (different from data parallelism, this means some of the model layers are split on different GPUs). 
- **place_model_on_device** -- Whether or not to automatically place the model on the device - it will be set to `False` if model parallel or deepspeed is used, or if the default `TrainingArguments.place_model_on_device` is overridden to return `False` . - **is_in_train** -- Whether or not a model is currently running `train` (e.g. when `evaluate` is called while in `train`) """ from .trainer_pt_utils import _get_learning_rate, log_metrics, metrics_format, save_metrics, save_state @deprecate_kwarg('tokenizer', new_name='processing_class', version='5.0.0', raise_if_both_names=True) def __init__(self, model: Union[PreTrainedModel, nn.Module, None]=None, args: Optional[TrainingArguments]=None, data_collator: Optional[DataCollator]=None, train_dataset: Optional[Union[Dataset, IterableDataset, 'datasets.Dataset']]=None, eval_dataset: Optional[Union[Dataset, dict[str, Dataset], 'datasets.Dataset']]=None, processing_class: Optional[Union[PreTrainedTokenizerBase, BaseImageProcessor, FeatureExtractionMixin, ProcessorMixin]]=None, model_init: Optional[Callable[..., PreTrainedModel]]=None, compute_loss_func: Optional[Callable]=None, compute_metrics: Optional[Callable[[EvalPrediction], dict]]=None, callbacks: Optional[list[TrainerCallback]]=None, optimizers: tuple[Optional[torch.optim.Optimizer], Optional[torch.optim.lr_scheduler.LambdaLR]]=(None, None), optimizer_cls_and_kwargs: Optional[tuple[type[torch.optim.Optimizer], dict[str, Any]]]=None, preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]]=None): if args is None: output_dir = 'tmp_trainer' logger.info(f'No `TrainingArguments` passed, using `output_dir={output_dir}`.') args = TrainingArguments(output_dir=output_dir) if args.batch_eval_metrics and compute_metrics is not None: if 'compute_result' not in inspect.signature(compute_metrics).parameters: raise ValueError('When using `batch_eval_metrics`, your `compute_metrics` function must take a `compute_result` boolean argument which will be triggered after the last batch of the eval set to signal that the summary statistics should be returned by the function.') if args.eval_strategy is not None and args.eval_strategy != 'no' and (eval_dataset is None): raise ValueError(f"You have set `args.eval_strategy` to {args.eval_strategy} but you didn't pass an `eval_dataset` to `Trainer`. Either set `args.eval_strategy` to `no` or pass an `eval_dataset`. ") if args.save_strategy == SaveStrategy.BEST or args.load_best_model_at_end: if args.metric_for_best_model is None: raise ValueError("`args.metric_for_best_model` must be provided when using 'best' save_strategy or if `args.load_best_model_at_end` is set to `True`.") self.args = args self.compute_loss_func = compute_loss_func enable_full_determinism(self.args.seed) if self.args.full_determinism else set_seed(self.args.seed) self.hp_name = None self.deepspeed = None self.is_in_train = False self.model = model self.create_accelerator_and_postprocess() self._memory_tracker = TrainerMemoryTracker(self.args.skip_memory_metrics) self._memory_tracker.start() log_level = args.get_process_log_level() logging.set_verbosity(log_level) args._setup_devices if model is None: if model_init is not None: self.model_init = model_init model = self.call_model_init() else: raise RuntimeError('`Trainer` requires either a `model` or `model_init` argument') else: if model_init is not None: warnings.warn('`Trainer` requires either a `model` or `model_init` argument, but not both. 
`model_init` will overwrite your model when calling the `train` method. This will become a fatal error in the next release.', FutureWarning) self.model_init = model_init if model.__class__.__name__ in MODEL_MAPPING_NAMES: raise ValueError(f'The model you have picked ({model.__class__.__name__}) cannot be used as is for training: it only computes hidden states and does not accept any labels. You should choose a model with a head suitable for your task like any of the `AutoModelForXxx` listed at https://huggingface.co/docs/transformers/model_doc/auto') if getattr(model, 'is_parallelizable', False) and getattr(model, 'model_parallel', False): self.is_model_parallel = True else: self.is_model_parallel = False if getattr(model, 'hf_device_map', None) is not None: devices = [device for device in set(model.hf_device_map.values()) if device not in ['cpu', 'disk']] if len(devices) > 1: self.is_model_parallel = True elif len(devices) == 1: self.is_model_parallel = self.args.device != torch.device(devices[0]) else: self.is_model_parallel = False if self.is_model_parallel: logger.info('You have loaded a model on multiple GPUs. `is_model_parallel` attribute will be force-set to `True` to avoid any unexpected behavior such as device placement mismatching.') if self.args.use_liger_kernel: if is_liger_kernel_available(): from liger_kernel.transformers import _apply_liger_kernel_to_instance kernel_config = self.args.liger_kernel_config if self.args.liger_kernel_config is not None else {} if isinstance(model, PreTrainedModel): _apply_liger_kernel_to_instance(model=model, **kernel_config) elif hasattr(model, 'get_base_model') and isinstance(model.get_base_model(), PreTrainedModel): _apply_liger_kernel_to_instance(model=model.get_base_model(), **kernel_config) else: logger.warning('The model is not an instance of PreTrainedModel. No liger kernels will be applied.') else: raise ImportError('You have set `use_liger_kernel` to `True` but liger-kernel >= 0.3.0 is not available. Please install it with `pip install liger-kernel`') _is_quantized_and_base_model = getattr(model, 'is_quantized', False) and (not getattr(model, '_hf_peft_config_loaded', False)) _quantization_method_supports_training = getattr(model, 'hf_quantizer', None) is not None and model.hf_quantizer.is_trainable _is_model_quantized_and_qat_trainable = getattr(model, 'hf_quantizer', None) is not None and getattr(model.hf_quantizer, 'is_qat_trainable', False) if _is_quantized_and_base_model and hasattr(model, '_orig_mod'): raise ValueError('You cannot fine-tune a quantized model with `torch.compile()`; make sure to pass a non-compiled model when fine-tuning a quantized model with PEFT') if _is_quantized_and_base_model and (not _is_peft_model(model)) and (not _is_model_quantized_and_qat_trainable): raise ValueError('You cannot perform fine-tuning on purely quantized models. Please attach trainable adapters on top of the quantized model to correctly perform fine-tuning. Please see: https://huggingface.co/docs/transformers/peft for more details') elif _is_quantized_and_base_model and (not _quantization_method_supports_training): raise ValueError(f'The model you are trying to fine-tune is quantized with {model.hf_quantizer.quantization_config.quant_method} but that quantization method does not support training. 
Please open an issue on GitHub: https://github.com/huggingface/transformers to request training support for {model.hf_quantizer.quantization_config.quant_method}') self.is_fsdp_xla_enabled = args.fsdp_config['xla'] if len(args.fsdp) > 0: if self.is_deepspeed_enabled: raise ValueError('Using --fsdp xxx together with --deepspeed is not possible, deactivate one of those flags.') if not args.fsdp_config['xla'] and args.parallel_mode != ParallelMode.DISTRIBUTED: raise ValueError('Using fsdp only works in distributed training.') self.place_model_on_device = args.place_model_on_device if self.is_model_parallel or self.is_deepspeed_enabled or ((args.fp16_full_eval or args.bf16_full_eval) and (not args.do_train)) or self.is_fsdp_xla_enabled or self.is_fsdp_enabled: self.place_model_on_device = False default_collator = DataCollatorWithPadding(processing_class) if processing_class is not None and isinstance(processing_class, (PreTrainedTokenizerBase, SequenceFeatureExtractor)) else default_data_collator self.data_collator = data_collator if data_collator is not None else default_collator self.train_dataset = train_dataset self.eval_dataset = eval_dataset self.processing_class = processing_class if self.place_model_on_device and getattr(model, 'quantization_method', None) != QuantizationMethod.BITS_AND_BYTES: self._move_model_to_device(model, args.device) if self.is_model_parallel: self.args._n_gpu = 1 self.model_wrapped = model self.model = model unwrapped_model = self.accelerator.unwrap_model(model) if _is_peft_model(unwrapped_model): if hasattr(unwrapped_model, 'get_base_model'): unwrapped_model = unwrapped_model.get_base_model() elif hasattr(unwrapped_model, 'base_model') and hasattr(unwrapped_model.base_model, 'model'): unwrapped_model = unwrapped_model.base_model.model else: raise AttributeError('Cannot extract base model safely from this PEFT wrapper.') if hasattr(unwrapped_model, 'accepts_loss_kwargs'): self.model_accepts_loss_kwargs = unwrapped_model.accepts_loss_kwargs else: forward_params = inspect.signature(unwrapped_model.forward).parameters self.model_accepts_loss_kwargs = any((k.kind == inspect.Parameter.VAR_KEYWORD for k in forward_params.values())) self.neftune_noise_alpha = args.neftune_noise_alpha self.compute_metrics = compute_metrics self.preprocess_logits_for_metrics = preprocess_logits_for_metrics self.optimizer, self.lr_scheduler = optimizers self.optimizer_cls_and_kwargs = optimizer_cls_and_kwargs if self.optimizer_cls_and_kwargs is not None and self.optimizer is not None: raise RuntimeError('Passing both `optimizers` and `optimizer_cls_and_kwargs` arguments is incompatible.') if model_init is not None and (self.optimizer is not None or self.lr_scheduler is not None): raise RuntimeError('Passing a `model_init` is incompatible with providing the `optimizers` argument. You should subclass `Trainer` and override the `create_optimizer_and_scheduler` method.') if is_torch_xla_available() and self.optimizer is not None: for param in self.model.parameters(): model_device = param.device break for param_group in self.optimizer.param_groups: if len(param_group['params']) > 0: optimizer_device = param_group['params'][0].device break if model_device != optimizer_device: raise ValueError('The model and the optimizer parameters are not on the same device, which probably means you created an optimizer around your model **before** putting it on the device and passing it to the `Trainer`. 
Make sure the lines `import torch_xla.core.xla_model as xm` and `model.to(xm.xla_device())` are performed before the optimizer creation in your script.') if (self.is_fsdp_xla_enabled or self.is_fsdp_enabled) and (self.optimizer is not None or self.lr_scheduler is not None): raise RuntimeError('Passing `optimizers` is not allowed if PyTorch FSDP is enabled. You should subclass `Trainer` and override the `create_optimizer_and_scheduler` method.') default_callbacks = DEFAULT_CALLBACKS + get_reporting_integration_callbacks(self.args.report_to) callbacks = default_callbacks if callbacks is None else default_callbacks + callbacks self.callback_handler = CallbackHandler(callbacks, self.model, self.processing_class, self.optimizer, self.lr_scheduler) self.add_callback(PrinterCallback if self.args.disable_tqdm else DEFAULT_PROGRESS_CALLBACK) self._loggers_initialized = False self.hub_model_id = None if self.args.push_to_hub: self.init_hf_repo() if self.args.should_save: os.makedirs(self.args.output_dir, exist_ok=True) if not callable(self.data_collator) and callable(getattr(self.data_collator, 'collate_batch', None)): raise TypeError('The `data_collator` should be a simple callable (function, class with `__call__`).') if args.max_steps > 0 and args.num_train_epochs > 0: logger.info('max_steps is given, it will override any value given in num_train_epochs') if train_dataset is not None and (not has_length(train_dataset)) and (args.max_steps <= 0): raise ValueError('The train_dataset does not implement __len__, max_steps has to be specified. The number of steps needs to be known in advance for the learning rate scheduler.') if train_dataset is not None and isinstance(train_dataset, torch.utils.data.IterableDataset) and args.group_by_length: raise ValueError('The `--group_by_length` option is only available for `Dataset`, not `IterableDataset`') self._signature_columns = None self.use_apex = False self.use_cpu_amp = False if is_sagemaker_mp_enabled(): if args.bf16: raise ValueError('SageMaker Model Parallelism does not support BF16 yet. Please use FP16 instead.') if IS_SAGEMAKER_MP_POST_1_10: if args.fp16 != smp.state.cfg.fp16: logger.warning(f'FP16 provided in SM_HP_MP_PARAMETERS is {smp.state.cfg.fp16}, but FP16 provided in trainer argument is {args.fp16}, setting to {smp.state.cfg.fp16}') args.fp16 = smp.state.cfg.fp16 elif hasattr(smp.state.cfg, 'fp16'): logger.warning(f'FP16 provided in SM_HP_MP_PARAMETERS is {smp.state.cfg.fp16}, but SageMaker Model Parallelism < 1.10 does not support FP16 in trainer.') if (args.fp16 or args.bf16) and args.half_precision_backend == 'auto': if args.device == torch.device('cpu'): if args.fp16: if not is_torch_greater_or_equal_than_2_3: raise ValueError('Tried to use `fp16` but it is not supported on CPU') else: args.half_precision_backend = 'cpu_amp' logger.info(f'Using {args.half_precision_backend} half precision backend') if (args.fp16 or args.bf16) and (not (self.is_deepspeed_enabled or is_sagemaker_mp_enabled())): if args.half_precision_backend == 'cpu_amp': self.use_cpu_amp = True self.amp_dtype = torch.bfloat16 elif args.half_precision_backend == 'apex': self.use_apex = True if self.args.label_smoothing_factor != 0: self.label_smoother = LabelSmoother(epsilon=self.args.label_smoothing_factor) else: self.label_smoother = None if self.args.label_smoothing_factor > 0: if getattr(self.model.config, 'problem_type', None) == 'multi_label_classification': warnings.warn('Label smoothing is not compatible with multi-label classification. 
Disabling label smoothing for this training run.', UserWarning) self.label_smoother = None self.control = TrainerControl() self.state = TrainerState(is_local_process_zero=self.is_local_process_zero(), is_world_process_zero=self.is_world_process_zero(), stateful_callbacks=[cb for cb in self.callback_handler.callbacks + [self.control] if isinstance(cb, ExportableState)]) self.current_flos = 0 self.hp_search_backend = None model_to_inspect = self.model if _is_peft_model(self.model): if hasattr(self.model, 'get_base_model'): model_to_inspect = self.model.get_base_model() else: model_to_inspect = self.model.base_model.model default_label_names = find_labels(model_to_inspect.__class__) self.label_names = default_label_names if self.args.label_names is None else self.args.label_names self.can_return_loss = can_return_loss(model_to_inspect.__class__) self.control = self.callback_handler.on_init_end(self.args, self.state, self.control) self._train_batch_size = args.train_batch_size self._created_lr_scheduler = False self._memory_tracker.stop_and_update_metrics() self.is_fsdp_xla_v2_enabled = args.fsdp_config.get('xla_fsdp_v2', False) if self.is_fsdp_xla_v2_enabled: if not IS_XLA_FSDPV2_POST_2_2: raise ValueError('FSDPv2 requires `torch_xla` 2.2 or higher.') num_devices = xr.global_runtime_device_count() xs.set_global_mesh(xs.Mesh(np.array(range(num_devices)), (num_devices, 1), axis_names=('fsdp', 'tensor'))) self.is_fsdp_xla_v1_enabled = self.is_fsdp_xla_enabled and (not self.is_fsdp_xla_v2_enabled) @property def tokenizer(self) -> Optional[PreTrainedTokenizerBase]: logger.warning('Trainer.tokenizer is now deprecated. You should use Trainer.processing_class instead.') return self.processing_class @tokenizer.setter def tokenizer(self, processing_class) -> None: logger.warning('Trainer.tokenizer is now deprecated. You should use `Trainer.processing_class = processing_class` instead.') self.processing_class = processing_class def _activate_neftune(self, model): """ Activates the NEFTune method as presented in this code: https://github.com/neelsjain/NEFTune and paper: https://huggingface.co/papers/2310.05914 """ unwrapped_model = self.accelerator.unwrap_model(model) if _is_peft_model(unwrapped_model): embeddings = unwrapped_model.base_model.model.get_input_embeddings() else: embeddings = unwrapped_model.get_input_embeddings() del unwrapped_model embeddings.neftune_noise_alpha = self.neftune_noise_alpha hook_handle = embeddings.register_forward_hook(neftune_post_forward_hook) self.neftune_hook_handle = hook_handle return model def _deactivate_neftune(self, model): """ Deactivates the NEFTune method. Make sure to call `_activate_neftune` first. """ if not hasattr(self, 'neftune_hook_handle'): raise ValueError('Neftune is not activated; make sure to call `trainer._activate_neftune()` first') unwrapped_model = self.accelerator.unwrap_model(model) if _is_peft_model(unwrapped_model): embeddings = unwrapped_model.base_model.model.get_input_embeddings() else: embeddings = unwrapped_model.get_input_embeddings() self.neftune_hook_handle.remove() del embeddings.neftune_noise_alpha, unwrapped_model def add_callback(self, callback): """ Add a callback to the current list of [`~transformers.TrainerCallback`]. Args: callback (`type` or [`~transformers.TrainerCallback`]): A [`~transformers.TrainerCallback`] class or an instance of a [`~transformers.TrainerCallback`]. In the first case, will instantiate a member of that class. 
""" self.callback_handler.add_callback(callback) def pop_callback(self, callback): """ Remove a callback from the current list of [`~transformers.TrainerCallback`] and returns it. If the callback is not found, returns `None` (and no error is raised). Args: callback (`type` or [`~transformers.TrainerCallback]`): A [`~transformers.TrainerCallback`] class or an instance of a [`~transformers.TrainerCallback`]. In the first case, will pop the first member of that class found in the list of callbacks. Returns: [`~transformers.TrainerCallback`]: The callback removed, if found. """ return self.callback_handler.pop_callback(callback) def remove_callback(self, callback): """ Remove a callback from the current list of [`~transformers.TrainerCallback`]. Args: callback (`type` or [`~transformers.TrainerCallback]`): A [`~transformers.TrainerCallback`] class or an instance of a [`~transformers.TrainerCallback`]. In the first case, will remove the first member of that class found in the list of callbacks. """ self.callback_handler.remove_callback(callback) def _move_model_to_device(self, model, device): model = model.to(device) if self.args.parallel_mode == ParallelMode.TPU and hasattr(model, 'tie_weights'): model.tie_weights() def _align_special_tokens(self): """ Aligns the special tokens of the tokenizer with the model configs. A new tokens may be defined in the tokenizer for fine-tuning purposes, e.g. an "end of turn" token may be added on chat models. In that case, we want the model configs to be aligned with the tokenizer, so that all downstream uses work as expected. This alignment should happen before training, to ensure the prediction step uses the new tokens as well. """ if isinstance(self.processing_class, ProcessorMixin): tokenizer = self.processing_class.tokenizer else: tokenizer = self.processing_class model_has_generation_config = hasattr(self.model, 'generation_config') and self.model.generation_config is not None updated_tokens = {} tokenizer_has_new_eos = tokenizer.eos_token_id != self.model.config.eos_token_id if model_has_generation_config: if self.model.generation_config.eos_token_id is None: tokenizer_has_new_eos |= tokenizer.eos_token_id != self.model.generation_config.eos_token_id else: if isinstance(self.model.generation_config.eos_token_id, int): self.model.generation_config.eos_token_id = [self.model.generation_config.eos_token_id] tokenizer_has_new_eos |= tokenizer.eos_token_id not in self.model.generation_config.eos_token_id if tokenizer_has_new_eos: updated_tokens['eos_token_id'] = tokenizer.eos_token_id self.model.config.eos_token_id = tokenizer.eos_token_id if model_has_generation_config: all_eos_tokens = [tokenizer.eos_token_id] if self.model.generation_config.eos_token_id is not None: all_eos_tokens += list(self.model.generation_config.eos_token_id) self.model.generation_config.eos_token_id = [token for token in all_eos_tokens if token is not None] tokenizer_has_new_bos = tokenizer.bos_token_id != self.model.config.bos_token_id if model_has_generation_config: tokenizer_has_new_bos |= tokenizer.bos_token_id != self.model.generation_config.bos_token_id if tokenizer_has_new_bos: updated_tokens['bos_token_id'] = tokenizer.bos_token_id self.model.config.bos_token_id = tokenizer.bos_token_id if model_has_generation_config: self.model.generation_config.bos_token_id = tokenizer.bos_token_id tokenizer_has_new_pad = tokenizer.pad_token_id != self.model.config.pad_token_id if model_has_generation_config: tokenizer_has_new_pad |= tokenizer.pad_token_id != 
self.model.generation_config.pad_token_id if tokenizer_has_new_pad: updated_tokens['pad_token_id'] = tokenizer.pad_token_id self.model.config.pad_token_id = tokenizer.pad_token_id if model_has_generation_config: self.model.generation_config.pad_token_id = tokenizer.pad_token_id if len(updated_tokens) > 0: logger.warning(f"The tokenizer has new PAD/BOS/EOS tokens that differ from the model config and generation config. The model config and generation config were aligned accordingly, being updated with the tokenizer's values. Updated tokens: {updated_tokens}.") def _set_signature_columns_if_needed(self): if self._signature_columns is None: model_to_inspect = self.model if _is_peft_model(self.model): if hasattr(self.model, 'get_base_model'): model_to_inspect = self.model.get_base_model() else: model_to_inspect = self.model.base_model.model signature = inspect.signature(model_to_inspect.forward) self._signature_columns = list(signature.parameters.keys()) self._signature_columns += list(set(['label', 'label_ids'] + self.label_names)) def _remove_unused_columns(self, dataset: 'datasets.Dataset', description: Optional[str]=None): if not self.args.remove_unused_columns: return dataset self._set_signature_columns_if_needed() signature_columns = self._signature_columns ignored_columns = list(set(dataset.column_names) - set(signature_columns)) if len(ignored_columns) > 0: dset_description = '' if description is None else f'in the {description} set' logger.info(f"The following columns {dset_description} don't have a corresponding argument in `{self.model.__class__.__name__}.forward` and have been ignored: {', '.join(ignored_columns)}. If {', '.join(ignored_columns)} are not expected by `{self.model.__class__.__name__}.forward`, you can safely ignore this message.") columns = [k for k in signature_columns if k in dataset.column_names] if len(columns) == 0: raise ValueError(f"No columns in the dataset match the model's forward method signature: ({', '.join(signature_columns)}). The following columns have been ignored: [{', '.join(ignored_columns)}]. Please check the dataset and model. 
You may need to set `remove_unused_columns=False` in `TrainingArguments`.") if version.parse(datasets.__version__) < version.parse('1.4.0'): dataset.set_format(type=dataset.format['type'], columns=columns, format_kwargs=dataset.format['format_kwargs']) return dataset else: return dataset.remove_columns(ignored_columns) def _get_collator_with_removed_columns(self, data_collator: Callable, description: Optional[str]=None) -> Callable: """Wrap the data collator in a callable removing unused columns.""" if not self.args.remove_unused_columns: return data_collator self._set_signature_columns_if_needed() signature_columns = self._signature_columns remove_columns_collator = RemoveColumnsCollator(data_collator=data_collator, signature_columns=signature_columns, logger=logger, description=description, model_name=self.model.__class__.__name__) return remove_columns_collator def _get_train_sampler(self, train_dataset: Optional[Dataset]=None) -> Optional[torch.utils.data.Sampler]: if train_dataset is None: train_dataset = self.train_dataset if train_dataset is None or not has_length(train_dataset): return None if self.args.group_by_length: if is_datasets_available() and isinstance(train_dataset, datasets.Dataset): lengths = train_dataset[self.args.length_column_name] if self.args.length_column_name in train_dataset.column_names else None else: lengths = None model_input_name = self.processing_class.model_input_names[0] if self.processing_class is not None else None return LengthGroupedSampler(self.args.train_batch_size * self.args.gradient_accumulation_steps, dataset=train_dataset, lengths=lengths, model_input_name=model_input_name) else: return RandomSampler(train_dataset) def _get_dataloader(self, dataset: Dataset, description: str, batch_size: int, sampler_fn: Optional[Callable[[Dataset], torch.utils.data.Sampler]]=None, is_training: bool=False, dataloader_key: Optional[str]=None) -> DataLoader: """Create a [`~torch.utils.data.DataLoader`] from the given dataset.""" data_collator = self.data_collator if is_datasets_available() and isinstance(dataset, datasets.Dataset): dataset = self._remove_unused_columns(dataset, description=description) else: data_collator = self._get_collator_with_removed_columns(self.data_collator, description=description) dataloader_params = {'batch_size': batch_size, 'collate_fn': data_collator, 'num_workers': self.args.dataloader_num_workers, 'pin_memory': self.args.dataloader_pin_memory, 'persistent_workers': self.args.dataloader_persistent_workers} if not isinstance(dataset, torch.utils.data.IterableDataset): if sampler_fn is not None: dataloader_params['sampler'] = sampler_fn(dataset) dataloader_params['drop_last'] = self.args.dataloader_drop_last dataloader_params['prefetch_factor'] = self.args.dataloader_prefetch_factor if is_training: dataloader_params['worker_init_fn'] = partial(seed_worker, num_workers=self.args.dataloader_num_workers, rank=self.args.process_index) dataloader = self.accelerator.prepare(DataLoader(dataset, **dataloader_params)) if dataloader_key is not None and self.args.dataloader_persistent_workers: if hasattr(self, '_eval_dataloaders'): self._eval_dataloaders[dataloader_key] = dataloader else: self._eval_dataloaders = {dataloader_key: dataloader} return dataloader def get_train_dataloader(self) -> DataLoader: """ Returns the training [`~torch.utils.data.DataLoader`]. Will use no sampler if `train_dataset` does not implement `__len__`, a random sampler (adapted to distributed training if necessary) otherwise. 
Subclass and override this method if you want to inject some custom behavior. """ if self.train_dataset is None: raise ValueError('Trainer: training requires a train_dataset.') return self._get_dataloader(dataset=self.train_dataset, description='Training', batch_size=self._train_batch_size, sampler_fn=self._get_train_sampler, is_training=True) def _get_eval_sampler(self, eval_dataset: Dataset) -> Optional[torch.utils.data.Sampler]: if eval_dataset is None or not has_length(eval_dataset): return None if self.args.use_legacy_prediction_loop: if is_torch_xla_available(): return SequentialDistributedSampler(eval_dataset, num_replicas=xr.world_size(), rank=xr.global_ordinal()) elif is_sagemaker_mp_enabled(): return SequentialDistributedSampler(eval_dataset, num_replicas=smp.dp_size(), rank=smp.dp_rank(), batch_size=self.args.per_device_eval_batch_size) else: return SequentialSampler(eval_dataset) if self.args.group_by_length: if is_datasets_available() and isinstance(eval_dataset, datasets.Dataset): lengths = eval_dataset[self.args.length_column_name] if self.args.length_column_name in eval_dataset.column_names else None else: lengths = None model_input_name = self.processing_class.model_input_names[0] if self.processing_class is not None else None return LengthGroupedSampler(self.args.eval_batch_size, dataset=eval_dataset, lengths=lengths, model_input_name=model_input_name) if self.args.world_size <= 1: return SequentialSampler(eval_dataset) else: return None def get_eval_dataloader(self, eval_dataset: Optional[Union[str, Dataset]]=None) -> DataLoader: """ Returns the evaluation [`~torch.utils.data.DataLoader`]. Subclass and override this method if you want to inject some custom behavior. Args: eval_dataset (`str` or `torch.utils.data.Dataset`, *optional*): If a `str`, will use `self.eval_dataset[eval_dataset]` as the evaluation dataset. If a `Dataset`, will override `self.eval_dataset` and must implement `__len__`. If it is a [`~datasets.Dataset`], columns not accepted by the `model.forward()` method are automatically removed. """ if eval_dataset is None and self.eval_dataset is None: raise ValueError('Trainer: evaluation requires an eval_dataset.') dataloader_key = eval_dataset if isinstance(eval_dataset, str) else 'eval' if hasattr(self, '_eval_dataloaders') and dataloader_key in self._eval_dataloaders and self.args.dataloader_persistent_workers: return self._eval_dataloaders[dataloader_key] eval_dataset = self.eval_dataset[eval_dataset] if isinstance(eval_dataset, str) else eval_dataset if eval_dataset is not None else self.eval_dataset return self._get_dataloader(dataset=eval_dataset, description='Evaluation', batch_size=self.args.eval_batch_size, sampler_fn=self._get_eval_sampler, dataloader_key=dataloader_key) def get_test_dataloader(self, test_dataset: Dataset) -> DataLoader: """ Returns the test [`~torch.utils.data.DataLoader`]. Subclass and override this method if you want to inject some custom behavior. Args: test_dataset (`torch.utils.data.Dataset`, *optional*): The test dataset to use. If it is a [`~datasets.Dataset`], columns not accepted by the `model.forward()` method are automatically removed. It must implement `__len__`. """ return self._get_dataloader(dataset=test_dataset, description='test', batch_size=self.args.eval_batch_size, sampler_fn=self._get_eval_sampler) def create_optimizer_and_scheduler(self, num_training_steps: int): """ Setup the optimizer and the learning rate scheduler. We provide a reasonable default that works well. 
If you want to use something else, you can pass a tuple in the Trainer's init through `optimizers`, or subclass and override this method (or `create_optimizer` and/or `create_scheduler`) in a subclass. """ self.create_optimizer() if IS_SAGEMAKER_MP_POST_1_10 and smp.state.cfg.fp16: optimizer = self.optimizer.optimizer else: optimizer = self.optimizer self.create_scheduler(num_training_steps=num_training_steps, optimizer=optimizer) def get_decay_parameter_names(self, model) -> list[str]: """ Get all parameter names that weight decay will be applied to. This function filters out parameters in two ways: 1. By layer type (instances of layers specified in ALL_LAYERNORM_LAYERS) 2. By parameter name patterns (containing 'bias', or a variation of 'norm') """ forbidden_name_patterns = ['bias', 'layernorm', 'rmsnorm', '(?:^|\\.)norm(?:$|\\.)', '_norm(?:$|\\.)'] decay_parameters = get_parameter_names(model, [nn.LayerNorm], forbidden_name_patterns) return decay_parameters def create_optimizer(self): """ Set up the optimizer. We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the Trainer's init through `optimizers`, or subclass and override this method in a subclass. """ opt_model = self.model_wrapped if is_sagemaker_mp_enabled() else self.model if self.optimizer is None: decay_parameters = self.get_decay_parameter_names(opt_model) optimizer_grouped_parameters = [{'params': [p for n, p in opt_model.named_parameters() if n in decay_parameters and p.requires_grad], 'weight_decay': self.args.weight_decay}, {'params': [p for n, p in opt_model.named_parameters() if n not in decay_parameters and p.requires_grad], 'weight_decay': 0.0}] if self.optimizer_cls_and_kwargs is not None: optimizer_cls, optimizer_kwargs = self.optimizer_cls_and_kwargs else: optimizer_cls, optimizer_kwargs = self.get_optimizer_cls_and_kwargs(self.args, opt_model) if 'params' in optimizer_kwargs: optimizer_grouped_parameters = optimizer_kwargs.pop('params') if 'model' in optimizer_kwargs: optimizer_grouped_parameters = optimizer_kwargs.pop('model') if 'optimizer_dict' in optimizer_kwargs: optimizer_grouped_parameters = optimizer_kwargs.pop('optimizer_dict') self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs) if 'bitsandbytes' in str(optimizer_cls) and optimizer_kwargs.get('optim_bits', None) == 8: import bitsandbytes manager = bitsandbytes.optim.GlobalOptimManager.get_instance() skipped = 0 for module in opt_model.modules(): if isinstance(module, nn.Embedding): skipped += sum({p.data_ptr(): p.numel() for p in module.parameters()}.values()) logger.info(f'skipped {module}: {skipped / 2 ** 20}M params') manager.register_module_override(module, 'weight', {'optim_bits': 32}) logger.debug(f'bitsandbytes: will optimize {module} in fp32') logger.info(f'skipped: {skipped / 2 ** 20}M params') if is_sagemaker_mp_enabled(): self.optimizer = smp.DistributedOptimizer(self.optimizer) return self.optimizer def get_num_trainable_parameters(self): """ Get the number of trainable parameters. """ return sum((p.numel() for p in self.model.parameters() if p.requires_grad)) def get_learning_rates(self): """ Returns the learning rate of each parameter group from self.optimizer. 
""" if self.optimizer is None: raise ValueError('Trainer optimizer is None, please make sure you have setup the optimizer before.') return [group['lr'] for group in self.optimizer.param_groups] def get_optimizer_group(self, param: Optional[Union[str, torch.nn.parameter.Parameter]]=None): """ Returns optimizer group for a parameter if given, else returns all optimizer groups for params. Args: param (`str` or `torch.nn.parameter.Parameter`, *optional*): The parameter for which optimizer group needs to be returned. """ if self.optimizer is None: raise ValueError('Trainer optimizer is None, please make sure you have setup the optimizer before.') if param is not None: for group in self.optimizer.param_groups: if param in group['params']: return group return [group['params'] for group in self.optimizer.param_groups] @staticmethod def get_optimizer_cls_and_kwargs(args: TrainingArguments, model: Optional[PreTrainedModel]=None) -> tuple[Any, Any]: """ Returns the optimizer class and optimizer parameters based on the training arguments. Args: args (`transformers.training_args.TrainingArguments`): The training arguments for the training session. """ optim_args = {} if args.optim_args: for mapping in args.optim_args.replace(' ', '').split(','): key, value = mapping.split('=') optim_args[key] = value optimizer_kwargs = {'lr': args.learning_rate} adam_kwargs = {'betas': (args.adam_beta1, args.adam_beta2), 'eps': args.adam_epsilon} def setup_low_rank_optimizer(optimizer_name: str, optimizer_mapping: dict[str, Any], optim_kwargs: dict[str, Any], is_layerwise_supported: bool=True) -> tuple[Any, Any]: """ Helper function to set up low-rank optimizers like GaLore and Apollo. Args: optimizer_name (str): Name of the optimizer. optimizer_mapping (dict): Mapping of optimizer names to their classes. optim_kwargs (dict): Keyword arguments for the optimizer. is_layerwise_supported (bool): Whether layerwise optimization is supported. Returns: tuple[Any, Any]: Optimizer class and updated optimizer kwargs. """ is_layerwise = optimizer_name.lower().endswith('layerwise') if is_layerwise and args.parallel_mode == ParallelMode.DISTRIBUTED and is_layerwise_supported: raise NotImplementedError(f'Layer-wise {optimizer_name} does not support DDP at this time') optimizer_cls = optimizer_mapping[optimizer_name] if args.optim_target_modules is None: raise ValueError(f'You need to define `optim_target_modules` to use {optimizer_name} optimizers') if not isinstance(args.optim_target_modules, (list, str)): raise TypeError(f"`optim_target_modules` must be a list of strings, a regex string, or 'all-linear'. Got: {args.optim_target_modules}") if model is None: raise ValueError(f'You need to pass a model to initialize {optimizer_name} optimizer.') all_linear = isinstance(args.optim_target_modules, str) and args.optim_target_modules.replace('_', '-') == 'all-linear' target_params_names = [] for module_name, module in model.named_modules(): target_module_exists, is_regex = check_target_module_exists(args.optim_target_modules, module_name, return_is_regex=True) if not isinstance(module, nn.Linear): if target_module_exists and (not is_regex): logger.warning(f'{module_name} matched but ignored. 
{optimizer_name} only supports linear layers.') continue if not target_module_exists and (not all_linear): continue target_params_names.append(module_name + '.weight') if len(target_params_names) == 0: raise ValueError(f'No target modules found for {optimizer_name} ({args.optim_target_modules}).') target_params = [p for n, p in model.named_parameters() if n in target_params_names] non_target_params = [p for n, p in model.named_parameters() if n not in target_params_names] optim_kwargs.update(optim_args) param_groups = [{'params': non_target_params}, {'params': target_params, **optim_kwargs}] if is_layerwise: if args.gradient_accumulation_steps != 1: raise ValueError(f'Layerwise {optimizer_name} does not support gradient accumulation!') optimizer_dict = {} for param in non_target_params: optimizer_dict[param] = optimizer_cls([{'params': [param]}], **optimizer_kwargs) for param in target_params: optimizer_dict[param] = optimizer_cls([{'params': [param], **optim_kwargs}], **optimizer_kwargs) def optimizer_hook(param): if param.grad is not None: optimizer_dict[param].step() optimizer_dict[param].zero_grad() for param in model.parameters(): if param.requires_grad: param.register_post_accumulate_grad_hook(optimizer_hook) optimizer_cls = LayerWiseDummyOptimizer optimizer_kwargs.update({'optimizer_dict': optimizer_dict}) optimizer_kwargs.update({'params': param_groups}) return (optimizer_cls, optimizer_kwargs) if args.optim == OptimizerNames.ADAFACTOR: optimizer_cls = Adafactor optimizer_kwargs.update({'scale_parameter': False, 'relative_step': False}) elif args.optim in [OptimizerNames.ADAMW_TORCH, OptimizerNames.ADAMW_TORCH_FUSED]: from torch.optim import AdamW optimizer_cls = AdamW optimizer_kwargs.update(adam_kwargs) if args.optim == OptimizerNames.ADAMW_TORCH_FUSED: optimizer_kwargs.update({'fused': True}) elif args.optim == OptimizerNames.ADAMW_TORCH_XLA: try: from torch_xla.amp.syncfree import AdamW optimizer_cls = AdamW optimizer_kwargs.update(adam_kwargs) except ImportError: raise ValueError('Trainer failed to import syncfree AdamW from torch_xla.') elif args.optim == OptimizerNames.ADAMW_TORCH_NPU_FUSED: try: from torch_npu.optim import NpuFusedAdamW optimizer_cls = NpuFusedAdamW optimizer_kwargs.update(adam_kwargs) except ImportError: raise ValueError('Trainer failed to import FusedAdamW from torch_npu.') elif args.optim == OptimizerNames.ADAMW_APEX_FUSED: try: from apex.optimizers import FusedAdam optimizer_cls = FusedAdam optimizer_kwargs.update(adam_kwargs) except ImportError: raise ValueError('Trainer tried to instantiate apex FusedAdam but apex is not installed!') elif args.optim in [OptimizerNames.ADAMW_BNB, OptimizerNames.ADAMW_8BIT, OptimizerNames.PAGED_ADAMW, OptimizerNames.PAGED_ADAMW_8BIT, OptimizerNames.ADEMAMIX, OptimizerNames.ADEMAMIX_8BIT, OptimizerNames.PAGED_ADEMAMIX, OptimizerNames.PAGED_ADEMAMIX_8BIT, OptimizerNames.LION, OptimizerNames.LION_8BIT, OptimizerNames.PAGED_LION, OptimizerNames.PAGED_LION_8BIT, OptimizerNames.RMSPROP_BNB, OptimizerNames.RMSPROP_8BIT, OptimizerNames.RMSPROP_32BIT]: try: from bitsandbytes.optim import AdamW, Lion, RMSprop is_paged = False optim_bits = 32 optimizer_cls = None additional_optim_kwargs = adam_kwargs if 'paged' in args.optim: is_paged = True if '8bit' in args.optim: optim_bits = 8 if 'adam' in args.optim: optimizer_cls = AdamW elif 'lion' in args.optim: optimizer_cls = Lion additional_optim_kwargs = {'betas': (args.adam_beta1, args.adam_beta2)} elif 'rmsprop' in args.optim: optimizer_cls = RMSprop additional_optim_kwargs = 
optim_args elif 'ademamix' in args.optim: if is_bitsandbytes_available() and version.parse(importlib.metadata.version('bitsandbytes')) < version.parse('0.44.0'): raise ValueError('The AdEMAMix optimizer is not supported by your current version of `bitsandbytes`. Please install `bitsandbytes` >= 0.44.0.') from bitsandbytes.optim import AdEMAMix optimizer_cls = AdEMAMix additional_optim_kwargs = {'betas': (float(optim_args.get('beta1', args.adam_beta1)), float(optim_args.get('beta2', args.adam_beta2)), float(optim_args.get('beta3', 0.9999))), 'alpha': float(optim_args.get('alpha', 5.0)), 'eps': float(optim_args.get('eps', args.adam_epsilon))} if 't_alpha' in optim_args: additional_optim_kwargs['t_alpha'] = int(optim_args['t_alpha']) if 't_beta3' in optim_args: additional_optim_kwargs['t_beta3'] = int(optim_args['t_beta3']) bnb_kwargs = {'optim_bits': optim_bits} if 'rmsprop' not in args.optim: bnb_kwargs['is_paged'] = is_paged optimizer_kwargs.update(additional_optim_kwargs) optimizer_kwargs.update(bnb_kwargs) except ImportError: raise ValueError('Trainer tried to instantiate bnb optimizer but `bitsandbytes` is not installed!') if is_bitsandbytes_available() and version.parse(importlib.metadata.version('bitsandbytes')) < version.parse('0.41.1'): logger.warning('You are using 8-bit optimizers with a version of `bitsandbytes` < 0.41.1. It is recommended to update your version as a major bug has been fixed in 8-bit optimizers.') elif args.optim == OptimizerNames.ADAMW_ANYPRECISION: try: from torchdistx.optimizers import AnyPrecisionAdamW optimizer_cls = AnyPrecisionAdamW optimizer_kwargs.update(adam_kwargs) optimizer_kwargs.update({'use_kahan_summation': strtobool(optim_args.get('use_kahan_summation', 'False')), 'momentum_dtype': getattr(torch, optim_args.get('momentum_dtype', 'float32')), 'variance_dtype': getattr(torch, optim_args.get('variance_dtype', 'float32')), 'compensation_buffer_dtype': getattr(torch, optim_args.get('compensation_buffer_dtype', 'bfloat16'))}) except ImportError: raise ValueError('Please install https://github.com/pytorch/torchdistx') elif args.optim == OptimizerNames.SGD: optimizer_cls = torch.optim.SGD elif args.optim == OptimizerNames.ADAGRAD: optimizer_cls = torch.optim.Adagrad elif args.optim == OptimizerNames.RMSPROP: optimizer_cls = torch.optim.RMSprop elif args.optim in [OptimizerNames.GALORE_ADAMW, OptimizerNames.GALORE_ADAMW_8BIT, OptimizerNames.GALORE_ADAFACTOR, OptimizerNames.GALORE_ADAMW_LAYERWISE, OptimizerNames.GALORE_ADAMW_8BIT_LAYERWISE, OptimizerNames.GALORE_ADAFACTOR_LAYERWISE]: if not is_galore_torch_available(): raise ImportError('You need to install `galore_torch` in order to use GaLore optimizers. Install it with `pip install git+https://github.com/jiaweizzhao/GaLore`') from galore_torch import GaLoreAdafactor, GaLoreAdamW, GaLoreAdamW8bit optimizer_mapping = {OptimizerNames.GALORE_ADAMW: GaLoreAdamW, OptimizerNames.GALORE_ADAMW_8BIT: GaLoreAdamW8bit, OptimizerNames.GALORE_ADAFACTOR: GaLoreAdafactor, OptimizerNames.GALORE_ADAMW_LAYERWISE: GaLoreAdamW, OptimizerNames.GALORE_ADAMW_8BIT_LAYERWISE: GaLoreAdamW8bit, OptimizerNames.GALORE_ADAFACTOR_LAYERWISE: GaLoreAdafactor} galore_optim_kwargs = {'rank': int(optim_args.pop('rank', 128)), 'update_proj_gap': int(optim_args.pop('update_proj_gap', 200)), 'scale': float(optim_args.pop('scale', 0.25)), 'proj_type': optim_args.pop('proj_type', 'std')} optimizer_cls, optimizer_kwargs = setup_low_rank_optimizer(args.optim, optimizer_mapping, galore_optim_kwargs) if args.optim == OptimizerNames.GALORE_ADAFACTOR: 
                optimizer_kwargs.update({'scale_parameter': False, 'relative_step': False})
        elif args.optim in [OptimizerNames.APOLLO_ADAMW, OptimizerNames.APOLLO_ADAMW_LAYERWISE]:
            if not is_apollo_torch_available():
                raise ImportError('You need to install `apollo_torch` in order to use APOLLO optimizers. Install it with `pip install git+https://github.com/zhuhanqing/APOLLO`')
            from apollo_torch import APOLLOAdamW
            optimizer_mapping = {OptimizerNames.APOLLO_ADAMW: APOLLOAdamW, OptimizerNames.APOLLO_ADAMW_LAYERWISE: APOLLOAdamW}
            apollo_optim_kwargs = {'rank': int(optim_args.pop('rank', 128)), 'proj': optim_args.pop('proj', 'random'), 'scale_type': optim_args.pop('scale_type', 'channel'), 'update_proj_gap': int(optim_args.pop('update_proj_gap', 200)), 'scale': float(optim_args.pop('scale', 1.0)), 'proj_type': optim_args.pop('proj_type', 'std')}
            apollo_optim_kwargs.update(adam_kwargs)
            optimizer_cls, optimizer_kwargs = setup_low_rank_optimizer(args.optim, optimizer_mapping, apollo_optim_kwargs)
        elif args.optim in [OptimizerNames.LOMO, OptimizerNames.ADALOMO]:
            if not is_lomo_available():
                raise ImportError('You need to install `lomo_optim` in order to use LOMO optimizers. Install it with `pip install lomo-optim`')
            if not is_accelerate_available('0.30.0'):
                raise ImportError('You need to have `accelerate>=0.30.0` to be able to use LOMO optimizers')
            if model is None:
                raise ValueError('You need to pass a `model` in order to correctly initialize a LOMO optimizer.')
            from lomo_optim import AdaLomo, Lomo
            if 'ada' in args.optim:
                optimizer_cls = AdaLomo
            else:
                optimizer_cls = Lomo
            optimizer_kwargs.update({'model': model})
        elif args.optim == OptimizerNames.GROKADAMW:
            if not is_grokadamw_available():
                raise ValueError('Please install grokadamw with `pip install grokadamw`')
            from grokadamw import GrokAdamW
            optimizer_cls = GrokAdamW
            optimizer_kwargs.update({'alpha_init': float(optim_args.get('alpha_init', 0.98)), 'lamb': float(optim_args.get('lamb', 2.0)), 'gamma': float(optim_args.get('gamma', 0.1)), 'grokking_signal_decay_rate': float(optim_args.get('grokking_signal_decay_rate', 0.1)), 'gradient_clipping': float(optim_args.get('gradient_clipping', 1.0))})
        elif args.optim in [OptimizerNames.ADAMW_TORCH_4BIT, OptimizerNames.ADAMW_TORCH_8BIT]:
            if not is_torchao_available() or version.parse(importlib.metadata.version('torchao')) < version.parse('0.4.0'):
                raise ImportError('You need to have `torchao>=0.4.0` in order to use torch 4-bit optimizers. Install it with `pip install torchao` or follow the instructions here: https://github.com/pytorch/ao')
            if version.parse(importlib.metadata.version('torch')) <= version.parse('2.4'):
                raise ImportError('You need to have `torch>2.4` in order to use torch 4-bit optimizers. Install it with `pip install --upgrade torch`; it is available on PyPI. Otherwise, you need to install torch nightly.')
            if version.parse(importlib.metadata.version('torchao')) >= version.parse('0.11.0'):
                from torchao.optim import AdamW4bit, AdamW8bit
            else:
                from torchao.prototype.low_bit_optim import AdamW4bit, AdamW8bit
            if args.optim == OptimizerNames.ADAMW_TORCH_4BIT:
                optimizer_cls = AdamW4bit
            elif args.optim == OptimizerNames.ADAMW_TORCH_8BIT:
                optimizer_cls = AdamW8bit
            else:
                raise ValueError('Invalid optimizer')
            optimizer_kwargs.update(adam_kwargs)
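        # The torchao low-bit AdamW variants quantize only the two Adam state tensors
        # (exp_avg and exp_avg_sq): roughly 8 bytes/param of fp32 optimizer state shrinks
        # to about 2 bytes/param (8-bit) or 1 byte/param (4-bit); model weights and
        # gradients are unaffected. These figures are approximate, ignoring block-wise
        # quantization metadata.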
        elif args.optim in [OptimizerNames.SCHEDULE_FREE_RADAM, OptimizerNames.SCHEDULE_FREE_ADAMW, OptimizerNames.SCHEDULE_FREE_SGD]:
            if not is_schedulefree_available():
                raise ImportError('You need to install `schedulefree` in order to use schedulefree optimizers. Install it with `pip install schedulefree`.')
            if not is_accelerate_available('0.30.0'):
                raise ImportError('You need to have `accelerate>=0.30.0` to be able to use schedulefree optimizers')
            from schedulefree import AdamWScheduleFree, SGDScheduleFree
            additional_optim_kwargs = {}
            require_warmup = True
            if args.optim == OptimizerNames.SCHEDULE_FREE_RADAM:
                if not is_schedulefree_available('1.4.0'):
                    raise ImportError('You need to install `schedulefree>=1.4.0` in order to use RAdamScheduleFree optimizer. Install it with `pip install schedulefree`.')
                from schedulefree import RAdamScheduleFree
                optimizer_cls = RAdamScheduleFree
                additional_optim_kwargs = adam_kwargs
                require_warmup = False
            elif args.optim == OptimizerNames.SCHEDULE_FREE_ADAMW:
                optimizer_cls = AdamWScheduleFree
                additional_optim_kwargs = adam_kwargs
            elif args.optim == OptimizerNames.SCHEDULE_FREE_SGD:
                optimizer_cls = SGDScheduleFree
            else:
                raise ValueError('Invalid schedulefree optimizer')
            additional_optim_kwargs['weight_decay'] = args.weight_decay
            if require_warmup:
                additional_optim_kwargs['warmup_steps'] = args.warmup_steps
            additional_optim_kwargs.update({'weight_lr_power': float(optim_args.get('weight_lr_power', 2.0)), 'r': float(optim_args.get('r', 0.0))})
            optimizer_kwargs.update(additional_optim_kwargs)
        elif args.optim == OptimizerNames.STABLE_ADAMW:
            if not is_torch_optimi_available():
                raise ImportError('You need to install `torch-optimi` in order to use stable_adamw optimizers. Install it with `pip install torch-optimi`.')
            from optimi import StableAdamW
            max_lr = optim_args.pop('max_lr', None)
            if max_lr is not None:
                max_lr = float(max_lr)
            kahan_sum = optim_args.pop('kahan_sum', None)
            if kahan_sum is not None:
                kahan_sum = bool(kahan_sum)
            adam_kwargs['weight_decay'] = args.weight_decay
            stable_adamw_kwargs = {'decouple_lr': bool(optim_args.pop('decouple_lr', False)), 'max_lr': max_lr, 'kahan_sum': kahan_sum}
            optimizer_cls = StableAdamW
            optimizer_kwargs.update(adam_kwargs)
            optimizer_kwargs.update(stable_adamw_kwargs)
        else:
            raise ValueError(f'Trainer cannot instantiate unsupported optimizer: {args.optim}')
        return (optimizer_cls, optimizer_kwargs)

    def create_scheduler(self, num_training_steps: int, optimizer: torch.optim.Optimizer = None):
        """
        Setup the scheduler. The optimizer of the trainer must have been set up either before this method is called
        or passed as an argument.

        Args:
            num_training_steps (int): The number of training steps to do.
        """
        if self.lr_scheduler is None:
            self.lr_scheduler = get_scheduler(self.args.lr_scheduler_type, optimizer=self.optimizer if optimizer is None else optimizer, num_warmup_steps=self.args.get_warmup_steps(num_training_steps), num_training_steps=num_training_steps, scheduler_specific_kwargs=self.args.lr_scheduler_kwargs)
            self._created_lr_scheduler = True
        return self.lr_scheduler

    def num_examples(self, dataloader: DataLoader) -> int:
        """
        Helper to get number of samples in a [`~torch.utils.data.DataLoader`] by accessing its dataset. When
        dataloader.dataset does not exist or has no length, estimates as best it can.
        """
        try:
            dataset = dataloader.dataset
            if isinstance(dataset, IterableDatasetShard):
                return len(dataloader.dataset.dataset)
            return len(dataloader.dataset)
        except (NameError, AttributeError, TypeError):
            return len(dataloader) * self.args.per_device_train_batch_size

    @staticmethod
    def num_tokens(train_dl: DataLoader, max_steps: Optional[int] = None) -> int:
        """
        Helper to get number of tokens in a [`~torch.utils.data.DataLoader`] by enumerating dataloader.
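
        Note that when `max_steps` is provided, the count is estimated from the first batch only, as
        `batch['input_ids'].numel() * max_steps`; otherwise the token counts of all batches are summed.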
""" train_tokens = 0 try: for batch in train_dl: tokens = batch['input_ids'].numel() if max_steps is not None: return tokens * max_steps train_tokens += tokens except KeyError: logger.warning('Cannot get num_tokens from dataloader') return train_tokens def _hp_search_setup(self, trial: Union['optuna.Trial', dict[str, Any]]): """HP search setup code""" self._trial = trial if self.hp_search_backend is None or trial is None: return if self.hp_search_backend == HPSearchBackend.OPTUNA: params = self.hp_space(trial) elif self.hp_search_backend == HPSearchBackend.RAY: params = trial params.pop('wandb', None) elif self.hp_search_backend == HPSearchBackend.SIGOPT: params = {k: int(v) if isinstance(v, str) else v for k, v in trial.assignments.items()} elif self.hp_search_backend == HPSearchBackend.WANDB: params = trial for key, value in params.items(): if not hasattr(self.args, key): logger.warning(f'Trying to set {key} in the hyperparameter search but there is no corresponding field in `TrainingArguments`.') continue old_attr = getattr(self.args, key, None) if old_attr is not None: value = type(old_attr)(value) setattr(self.args, key, value) if self.hp_search_backend == HPSearchBackend.OPTUNA: logger.info(f'Trial: {trial.params}') if self.hp_search_backend == HPSearchBackend.SIGOPT: logger.info(f'SigOpt Assignments: {trial.assignments}') if self.hp_search_backend == HPSearchBackend.WANDB: logger.info(f'W&B Sweep parameters: {trial}') if self.is_deepspeed_enabled: if self.args.deepspeed is None: raise ValueError('For sweeps with deepspeed, `args.deepspeed` must be set') self.accelerator.free_memory() from accelerate.utils import DeepSpeedPlugin from transformers.integrations.deepspeed import HfTrainerDeepSpeedConfig self.args.hf_deepspeed_config = HfTrainerDeepSpeedConfig(self.args.deepspeed) self.args.hf_deepspeed_config.trainer_config_process(self.args) self.args.deepspeed_plugin = DeepSpeedPlugin(hf_ds_config=self.args.hf_deepspeed_config) AcceleratorState()._reset_state() self.create_accelerator_and_postprocess() def _report_to_hp_search(self, trial: Union['optuna.Trial', dict[str, Any]], step: int, metrics: dict[str, float]): if self.hp_search_backend is None or trial is None: return metrics = metrics.copy() self.objective = self.compute_objective(metrics) if self.hp_search_backend == HPSearchBackend.OPTUNA: import optuna if hasattr(trial, 'study') and (not trial.study._is_multi_objective()): trial.report(self.objective, step) if trial.should_prune(): self.callback_handler.on_train_end(self.args, self.state, self.control) raise optuna.TrialPruned() elif self.hp_search_backend == HPSearchBackend.RAY: import ray.train with tempfile.TemporaryDirectory() as temp_checkpoint_dir: checkpoint = None if self.control.should_save: self._tune_save_checkpoint(checkpoint_dir=temp_checkpoint_dir) checkpoint = ray.train.Checkpoint.from_directory(temp_checkpoint_dir) metrics['objective'] = self.objective ray.train.report(metrics, checkpoint=checkpoint) def _tune_save_checkpoint(self, checkpoint_dir: str): output_dir = os.path.join(checkpoint_dir, f'{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}') self.save_model(output_dir, _internal_call=True) if self.args.should_save: self.state.stateful_callbacks['TrainerControl'] = self.control.state() self.state.save_to_json(os.path.join(output_dir, TRAINER_STATE_NAME)) torch.save(self.optimizer.state_dict(), os.path.join(output_dir, OPTIMIZER_NAME)) torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, SCHEDULER_NAME)) def call_model_init(self, 
trial=None): model_init_argcount = number_of_arguments(self.model_init) if model_init_argcount == 0: model = self.model_init() elif model_init_argcount == 1: model = self.model_init(trial) else: raise RuntimeError('model_init should have 0 or 1 argument.') if model is None: raise RuntimeError('model_init should not return None.') return model def torch_jit_model_eval(self, model, dataloader, training=False): if not training: if dataloader is None: logger.warning('failed to use PyTorch jit mode due to current dataloader is none.') return model example_batch = next(iter(dataloader)) example_batch = self._prepare_inputs(example_batch) try: jit_model = copy.copy(model) jit_model.eval() original_forward = jit_model.__dict__.pop('_original_forward', None) if original_forward: jit_model.forward = original_forward autocast_handler = AutocastKwargs(cache_enabled=False) with self.accelerator.autocast(autocast_handler=autocast_handler), torch.no_grad(): if isinstance(example_batch, dict): jit_model = torch.jit.trace(jit_model, example_kwarg_inputs=example_batch, strict=False) else: jit_model = torch.jit.trace(jit_model, example_kwarg_inputs={key: example_batch[key] for key in example_batch}, strict=False) jit_model = torch.jit.freeze(jit_model) with torch.no_grad(): jit_model(**example_batch) jit_model(**example_batch) model = jit_model self.use_cpu_amp = False except (RuntimeError, TypeError, ValueError, NameError, IndexError) as e: logger.warning(f'failed to use PyTorch jit mode due to: {e}.') return model def compare_trainer_and_checkpoint_args(self, training_args, trainer_state): attributes_map = {'logging_steps': 'logging_steps', 'eval_steps': 'eval_steps', 'save_steps': 'save_steps'} has_warning = False warning_str = 'Warning: The following arguments do not match the ones in the `trainer_state.json` within the checkpoint directory: ' for arg_attr, state_attr in attributes_map.items(): arg_value = getattr(training_args, arg_attr, None) state_value = getattr(trainer_state, state_attr, None) if arg_value is not None and state_value is not None and (arg_value != state_value): warning_str += f'\n\t{arg_attr}: {arg_value} (from args) != {state_value} (from trainer_state.json)' has_warning = True train_bs_args = training_args.per_device_train_batch_size train_bs_state = trainer_state.train_batch_size // max(1, training_args.n_gpu) if train_bs_args != train_bs_state: warning_str += f'\n\tper_device_train_batch_size: {train_bs_args} (from args) != {train_bs_state} (from trainer_state.json)' has_warning = True if has_warning: logger.warning_once(warning_str) def _wrap_model(self, model, training=True, dataloader=None): if is_sagemaker_mp_enabled(): if isinstance(self.model_wrapped, smp.model.DistributedModel): return self.model_wrapped return smp.DistributedModel(model, backward_passes_per_step=self.args.gradient_accumulation_steps) if self.accelerator.unwrap_model(model, keep_torch_compile=False) is not model: return model if self.use_apex and training: from apex import amp model, self.optimizer = amp.initialize(model, self.optimizer, opt_level=self.args.fp16_opt_level) if self.args.n_gpu > 1 and (not getattr(model, 'is_loaded_in_8bit', False)): model = nn.DataParallel(model) if self.args.jit_mode_eval: start_time = time.time() model = self.torch_jit_model_eval(model, dataloader, training) self.jit_compilation_time = round(time.time() - start_time, 4) if not training: return model if self.is_fsdp_xla_enabled: try: from torch_xla.distributed.fsdp import XlaFullyShardedDataParallel as FSDP from 
torch_xla.distributed.fsdp import checkpoint_module from torch_xla.distributed.fsdp.wrap import size_based_auto_wrap_policy, transformer_auto_wrap_policy if self.is_fsdp_xla_v2_enabled: from torch_xla.experimental.spmd_fully_sharded_data_parallel import SpmdFullyShardedDataParallel as FSDPv2 except ImportError: raise ImportError('Missing XLA FSDP related module; please make sure to use torch-xla >= 2.0.') auto_wrap_policy = None auto_wrapper_callable = None default_transformer_cls_names_to_wrap = getattr(model, '_no_split_modules', None) fsdp_transformer_layer_cls_to_wrap = self.args.fsdp_config.get('transformer_layer_cls_to_wrap', default_transformer_cls_names_to_wrap) if self.args.fsdp_config['min_num_params'] > 0: auto_wrap_policy = functools.partial(size_based_auto_wrap_policy, min_num_params=self.args.fsdp_config['min_num_params']) elif fsdp_transformer_layer_cls_to_wrap is not None: transformer_cls_to_wrap = set() for layer_class in fsdp_transformer_layer_cls_to_wrap: transformer_cls = get_module_class_from_name(model, layer_class) if transformer_cls is None: raise Exception('Could not find the transformer layer class to wrap in the model.') else: transformer_cls_to_wrap.add(transformer_cls) auto_wrap_policy = functools.partial(transformer_auto_wrap_policy, transformer_layer_cls=transformer_cls_to_wrap) fsdp_kwargs = self.args.xla_fsdp_config if self.args.fsdp_config['xla_fsdp_grad_ckpt']: if model.config.use_cache: logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`.') model.config.use_cache = False def auto_wrapper_callable(m, *args, **kwargs): target_cls = FSDP if not self.is_fsdp_xla_v2_enabled else FSDPv2 return target_cls(checkpoint_module(m), *args, **kwargs) if self.is_fsdp_xla_v2_enabled: def shard_output(output, mesh): from .modeling_outputs import CausalLMOutputWithPast real_output = None if isinstance(output, torch.Tensor): real_output = output elif isinstance(output, tuple): real_output = output[0] elif isinstance(output, CausalLMOutputWithPast): real_output = output.logits if real_output is None: raise ValueError("Something went wrong, the output of the model shouldn't be `None`") xs.mark_sharding(real_output, mesh, ('fsdp', None, None)) self.model = model = FSDPv2(model, shard_output=shard_output, auto_wrap_policy=auto_wrap_policy, auto_wrapper_callable=auto_wrapper_callable) else: self.model = model = FSDP(model, auto_wrap_policy=auto_wrap_policy, auto_wrapper_callable=auto_wrapper_callable, **fsdp_kwargs) def patched_optimizer_step(optimizer, barrier=False, optimizer_args={}): loss = optimizer.step(**optimizer_args) if barrier: xm.mark_step() return loss xm.optimizer_step = patched_optimizer_step elif is_sagemaker_dp_enabled(): model = nn.parallel.DistributedDataParallel(model, device_ids=[int(os.getenv('SMDATAPARALLEL_LOCAL_RANK'))]) elif self.args.parallel_mode == ParallelMode.DISTRIBUTED: if is_torch_neuroncore_available(): return model kwargs = {} if self.args.ddp_find_unused_parameters is not None: kwargs['find_unused_parameters'] = self.args.ddp_find_unused_parameters elif isinstance(model, PreTrainedModel): kwargs['find_unused_parameters'] = not model.is_gradient_checkpointing else: kwargs['find_unused_parameters'] = True if self.args.ddp_bucket_cap_mb is not None: kwargs['bucket_cap_mb'] = self.args.ddp_bucket_cap_mb if self.args.ddp_broadcast_buffers is not None: kwargs['broadcast_buffers'] = self.args.ddp_broadcast_buffers self.accelerator.ddp_handler = DistributedDataParallelKwargs(**kwargs) return 
        return model

    def train(
        self,
        resume_from_checkpoint: Optional[Union[str, bool]] = None,
        trial: Union['optuna.Trial', dict[str, Any], None] = None,
        ignore_keys_for_eval: Optional[list[str]] = None,
        **kwargs,
    ):
        """
        Main training entry point.

        Args:
            resume_from_checkpoint (`str` or `bool`, *optional*):
                If a `str`, local path to a saved checkpoint as saved by a previous instance of [`Trainer`]. If a
                `bool` and equals `True`, load the last checkpoint in *args.output_dir* as saved by a previous
                instance of [`Trainer`]. If present, training will resume from the model/optimizer/scheduler states
                loaded here.
            trial (`optuna.Trial` or `dict[str, Any]`, *optional*):
                The trial run or the hyperparameter dictionary for hyperparameter search.
            ignore_keys_for_eval (`list[str]`, *optional*):
                A list of keys in the output of your model (if it is a dictionary) that should be ignored when
                gathering predictions for evaluation during the training.
            kwargs (`dict[str, Any]`, *optional*):
                Additional keyword arguments used to hide deprecated arguments.
        """
        if resume_from_checkpoint is False:
            resume_from_checkpoint = None
        self._memory_tracker.start()
        args = self.args
        self.is_in_train = True
        if isinstance(self.processing_class, (PreTrainedTokenizerBase, ProcessorMixin)) and hasattr(self.model, 'config'):
            self._align_special_tokens()
        if self.neftune_noise_alpha is not None:
            self.model = self._activate_neftune(self.model)
        if (args.fp16_full_eval or args.bf16_full_eval) and (not args.do_train) and (not self.is_model_parallel) and (self.model_init is None):
            self._move_model_to_device(self.model, args.device)
        if 'model_path' in kwargs:
            resume_from_checkpoint = kwargs.pop('model_path')
            warnings.warn('`model_path` is deprecated and will be removed in a future version. Use `resume_from_checkpoint` instead.', FutureWarning)
        if len(kwargs) > 0:
            raise TypeError(f"train() got unexpected keyword arguments: {', '.join(list(kwargs.keys()))}.")
        self._hp_search_setup(trial)
        self._train_batch_size = self.args.train_batch_size
        model_reloaded = False
        if self.model_init is not None:
            enable_full_determinism(self.args.seed) if self.args.full_determinism else set_seed(self.args.seed)
            self.model = self.call_model_init(trial)
            model_reloaded = True
            self.optimizer, self.lr_scheduler = (None, None)
        if isinstance(resume_from_checkpoint, bool) and resume_from_checkpoint:
            resume_from_checkpoint = get_last_checkpoint(args.output_dir)
            if resume_from_checkpoint is None:
                raise ValueError(f'No valid checkpoint found in output directory ({args.output_dir})')
        if resume_from_checkpoint is not None:
            if not is_sagemaker_mp_enabled() and (not self.is_deepspeed_enabled) and (not self.is_fsdp_enabled):
                self._load_from_checkpoint(resume_from_checkpoint)
            state = TrainerState.load_from_json(os.path.join(resume_from_checkpoint, TRAINER_STATE_NAME))
            if state.train_batch_size is not None:
                self._train_batch_size = state.train_batch_size
        if model_reloaded:
            if self.place_model_on_device:
                self._move_model_to_device(self.model, args.device)
            self.model_wrapped = self.model
        inner_training_loop = find_executable_batch_size(self._inner_training_loop, self._train_batch_size, args.auto_find_batch_size)
        if args.push_to_hub:
            try:
                hf_hub_utils.disable_progress_bars()
                return inner_training_loop(args=args, resume_from_checkpoint=resume_from_checkpoint, trial=trial, ignore_keys_for_eval=ignore_keys_for_eval)
            finally:
                hf_hub_utils.enable_progress_bars()
        else:
            return inner_training_loop(args=args, resume_from_checkpoint=resume_from_checkpoint, trial=trial, ignore_keys_for_eval=ignore_keys_for_eval)
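    # A minimal usage sketch (hypothetical model/args/dataset names), resuming training from
    # the most recent checkpoint in `args.output_dir`:
    #
    #     trainer = Trainer(model=model, args=training_args, train_dataset=train_dataset)
    #     trainer.train(resume_from_checkpoint=True)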
    def get_tp_size(self) -> int:
        """Get the tensor parallel size from either the model or DeepSpeed config."""
        if (model_tp := getattr(self.model, '_tp_size', None)) is not None:
            return model_tp
        if self.is_deepspeed_enabled and (deepspeed_config := getattr(self.args, 'hf_deepspeed_config', None)):
            return deepspeed_config.config.get('tensor_parallel', {}).get('autotp_size', 1)
        return 1

    def get_total_train_batch_size(self, args) -> int:
        """Calculates total batch size (micro_batch * grad_accum * dp_world_size).

        Note: Only considers DP and TP (dp_world_size = world_size // tp_size). For example,
        world_size=16 with tp_size=2 gives dp_world_size=8, so a per-device batch of 8 with
        gradient_accumulation_steps=4 yields a total batch size of 8 * 4 * 8 = 256."""
        dp_world_size = args.world_size // self.get_tp_size()
        return self._train_batch_size * args.gradient_accumulation_steps * dp_world_size

    def _inner_training_loop(self, batch_size=None, args=None, resume_from_checkpoint=None, trial=None, ignore_keys_for_eval=None):
        self.accelerator.free_memory()
        self._train_batch_size = batch_size
        if self.args.auto_find_batch_size:
            if self.state.train_batch_size != self._train_batch_size:
                from accelerate.utils import release_memory
                self.model_wrapped, = release_memory(self.model_wrapped)
                self.model_wrapped = self.model
                if self.is_deepspeed_enabled:
                    original_bs = self.args.per_device_train_batch_size
                    self.args.per_device_train_batch_size = self._train_batch_size // max(1, self.args.n_gpu)
                    self.propagate_args_to_deepspeed(True)
                    self.args.per_device_train_batch_size = original_bs
            self.state.train_batch_size = self._train_batch_size
        logger.debug(f'Currently training with a batch size of: {self._train_batch_size}')
        train_dataloader = self.get_train_dataloader()
        if self.is_fsdp_xla_v2_enabled:
            train_dataloader = tpu_spmd_dataloader(train_dataloader)
        total_train_batch_size = self.get_total_train_batch_size(args)
        num_train_epochs, num_update_steps_per_epoch, num_examples, num_train_samples, epoch_based, len_dataloader, max_steps = self.set_initial_training_values(args, train_dataloader, total_train_batch_size)
        num_train_tokens = None
        if self.args.include_tokens_per_second:
            num_train_tokens = self.num_tokens(train_dataloader, None if epoch_based else max_steps)
            if len_dataloader is not None and epoch_based:
                num_train_tokens *= args.num_train_epochs
            else:
                num_train_tokens *= args.gradient_accumulation_steps
        if DebugOption.UNDERFLOW_OVERFLOW in self.args.debug:
            if self.args.n_gpu > 1:
                raise ValueError('Currently --debug underflow_overflow is not supported under DP.
Please use DDP (torchrun or torch.distributed.launch (deprecated)).') else: debug_overflow = DebugUnderflowOverflow(self.model) delay_optimizer_creation = is_sagemaker_mp_enabled() or self.is_fsdp_xla_enabled or self.is_fsdp_enabled is_fsdp2 = self.is_fsdp_enabled and getattr(self.accelerator.state.fsdp_plugin, 'fsdp_version', 1) == 2 if is_fsdp2: delay_optimizer_creation = False if self._created_lr_scheduler: self.lr_scheduler = None self._created_lr_scheduler = False if self.is_deepspeed_enabled: self.optimizer, self.lr_scheduler = deepspeed_init(self, num_training_steps=max_steps) if not delay_optimizer_creation: self.create_optimizer_and_scheduler(num_training_steps=max_steps) self.state = TrainerState(stateful_callbacks=[cb for cb in self.callback_handler.callbacks + [self.control] if isinstance(cb, ExportableState)]) self.state.is_hyper_param_search = trial is not None self.state.train_batch_size = self._train_batch_size self.state.compute_steps(args, max_steps) if args.gradient_checkpointing: self.model.gradient_checkpointing_enable(gradient_checkpointing_kwargs=args.gradient_checkpointing_kwargs) model = self._wrap_model(self.model_wrapped) use_accelerator_prepare = model is self.model if use_accelerator_prepare and self.is_fsdp_enabled: self.model = unwrap_model(self.model, recursive=True) if delay_optimizer_creation: if use_accelerator_prepare: self._fsdp_qlora_plugin_updates() if self.accelerator.mixed_precision != 'fp8': self.model = self.accelerator.prepare(self.model) self.create_optimizer_and_scheduler(num_training_steps=max_steps) if use_accelerator_prepare: self.model.train() if hasattr(self.lr_scheduler, 'step'): if self.use_apex: model = self.accelerator.prepare(self.model) elif self.is_tp_enabled: self.optimizer = self.accelerator.prepare(self.optimizer) else: model, self.optimizer = self.accelerator.prepare(self.model, self.optimizer) else: model, self.optimizer, self.lr_scheduler = self.accelerator.prepare(self.model, self.optimizer, self.lr_scheduler) else: self.optimizer = self.accelerator.prepare(self.optimizer) if self.is_fsdp_enabled: self.model = self.model_wrapped = model if model is not self.model: self.model_wrapped = model if self.is_deepspeed_enabled: self.deepspeed = self.model_wrapped if resume_from_checkpoint is not None: if self.is_deepspeed_enabled: deepspeed_load_checkpoint(self.model_wrapped, resume_from_checkpoint, load_module_strict=not _is_peft_model(self.model)) elif is_sagemaker_mp_enabled() or self.is_fsdp_enabled: self._load_from_checkpoint(resume_from_checkpoint, self.model_wrapped) self._load_optimizer_and_scheduler(resume_from_checkpoint) self._load_scaler(resume_from_checkpoint) logger.info('***** Running training *****') logger.info(f' Num examples = {num_examples:,}') logger.info(f' Num Epochs = {num_train_epochs:,}') logger.info(f' Instantaneous batch size per device = {self.args.per_device_train_batch_size:,}') if self.args.per_device_train_batch_size != self._train_batch_size: logger.info(f' Training with DataParallel so batch size has been adjusted to: {self._train_batch_size:,}') logger.info(f' Total train batch size (w. 
parallel, distributed & accumulation) = {total_train_batch_size:,}') logger.info(f' Gradient Accumulation steps = {args.gradient_accumulation_steps}') logger.info(f' Total optimization steps = {max_steps:,}') logger.info(f' Number of trainable parameters = {get_model_param_count(model, trainable_only=True):,}') self.state.epoch = 0 start_time = time.time() epochs_trained = 0 steps_trained_in_current_epoch = 0 if resume_from_checkpoint is not None and os.path.isfile(os.path.join(resume_from_checkpoint, TRAINER_STATE_NAME)): self.state = TrainerState.load_from_json(os.path.join(resume_from_checkpoint, TRAINER_STATE_NAME)) self.compare_trainer_and_checkpoint_args(self.args, self.state) self._load_callback_state() epochs_trained = int(self.state.global_step // num_update_steps_per_epoch) if not args.ignore_data_skip: steps_trained_in_current_epoch = self.state.global_step % num_update_steps_per_epoch steps_trained_in_current_epoch *= args.gradient_accumulation_steps else: steps_trained_in_current_epoch = 0 logger.info(' Continuing training from checkpoint, will skip to saved global_step') logger.info(f' Continuing training from epoch {epochs_trained}') logger.info(f' Continuing training from global step {self.state.global_step}') if not args.ignore_data_skip: logger.info(f' Will skip the first {epochs_trained} epochs then the first {steps_trained_in_current_epoch} batches in the first epoch.') for attr in ('model', 'optimizer', 'lr_scheduler'): setattr(self.callback_handler, attr, getattr(self, attr)) self.callback_handler.train_dataloader = train_dataloader self.state.init_training_references(self, max_steps, num_train_epochs, trial) tr_loss = torch.tensor(0.0, device=args.device) self._total_loss_scalar = 0.0 self._globalstep_last_logged = self.state.global_step model.zero_grad() grad_norm: Optional[float] = None learning_rate = None self.control = self.callback_handler.on_train_begin(args, self.state, self.control) if args.eval_on_start: self._evaluate(trial, ignore_keys_for_eval, skip_scheduler=True) for epoch in range(epochs_trained, num_train_epochs): epoch_dataloader = train_dataloader if hasattr(epoch_dataloader, 'set_epoch'): epoch_dataloader.set_epoch(epoch) if args.past_index >= 0: self._past = None steps_in_epoch = len(epoch_dataloader) if len_dataloader is not None else args.max_steps * args.gradient_accumulation_steps self.control = self.callback_handler.on_epoch_begin(args, self.state, self.control) step = -1 rng_to_sync = False if epoch == epochs_trained and resume_from_checkpoint is not None: if steps_trained_in_current_epoch > 0 and (not args.ignore_data_skip): epoch_dataloader = skip_first_batches(epoch_dataloader, steps_trained_in_current_epoch) step = steps_trained_in_current_epoch - 1 rng_to_sync = True elif steps_trained_in_current_epoch == 0: self._load_rng_state(resume_from_checkpoint) epoch_iterator = iter(epoch_dataloader) remainder = steps_in_epoch % args.gradient_accumulation_steps if remainder == 0: remainder = args.gradient_accumulation_steps update_step = -1 total_updates = steps_in_epoch // args.gradient_accumulation_steps + int(remainder < args.gradient_accumulation_steps) for _ in range(total_updates): update_step += 1 num_batches = args.gradient_accumulation_steps if update_step != total_updates - 1 else remainder batch_samples, num_items_in_batch = self.get_batch_samples(epoch_iterator, num_batches, args.device) self.current_gradient_accumulation_steps = len(batch_samples) for i, inputs in enumerate(batch_samples): step += 1 do_sync_step = (step + 1) % 
args.gradient_accumulation_steps == 0 or step + 1 == steps_in_epoch self.accelerator.gradient_state._set_sync_gradients(do_sync_step) if self.args.include_num_input_tokens_seen not in ['no', False]: main_input_name = getattr(self.model, 'main_input_name', 'input_ids') if main_input_name not in inputs: logger.warning('Tried to track the number of tokens seen, however the current model is not configured properly to know what item is the input. To fix this, add a `main_input_name` attribute to the model class you are using.') else: if self.args.include_num_input_tokens_seen == 'non_padding': if 'attention_mask' in inputs: input_tokens = inputs['attention_mask'].sum() elif self.processing_class is not None and hasattr(self.processing_class, 'pad_token_id') and (self.processing_class.pad_token_id is not None): input_tokens = (inputs[main_input_name] != self.processing_class.pad_token_id).sum() else: logger.warning('Could not determine method to count non-padding tokens, falling back to counting all tokens.') input_tokens = inputs[main_input_name].numel() else: input_tokens = inputs[main_input_name].numel() input_tokens = torch.tensor(input_tokens, device=self.args.device, dtype=torch.int64) self.state.num_input_tokens_seen += self.accelerator.gather(input_tokens).sum().item() if rng_to_sync: self._load_rng_state(resume_from_checkpoint) rng_to_sync = False if step % args.gradient_accumulation_steps == 0: self.control = self.callback_handler.on_step_begin(args, self.state, self.control) context = functools.partial(self.accelerator.no_sync, model=model) if i != len(batch_samples) - 1 and self.accelerator.distributed_type != DistributedType.DEEPSPEED else contextlib.nullcontext with context(): tr_loss_step = self.training_step(model, inputs, num_items_in_batch) if args.logging_nan_inf_filter and (not is_torch_xla_available()) and (torch.isnan(tr_loss_step) or torch.isinf(tr_loss_step)): tr_loss = tr_loss + tr_loss / (1 + self.state.global_step - self._globalstep_last_logged) else: if tr_loss.device != tr_loss_step.device: raise ValueError(f'Calculated loss must be on the original device: {tr_loss.device} but device in use is {tr_loss_step.device}') tr_loss = tr_loss + tr_loss_step self.current_flos += float(self.floating_point_ops(inputs)) if do_sync_step: self.accelerator.gradient_state._set_sync_gradients(True) if args.max_grad_norm is not None and args.max_grad_norm > 0: if is_sagemaker_mp_enabled() and args.fp16: _grad_norm = self.optimizer.clip_master_grads(args.max_grad_norm) elif self.use_apex: from apex import amp _grad_norm = nn.utils.clip_grad_norm_(amp.master_params(self.optimizer), args.max_grad_norm) else: grad_norm_context = contextlib.nullcontext if self.is_tp_enabled: from torch.distributed._tensor.experimental import implicit_replication grad_norm_context = implicit_replication with grad_norm_context(): _grad_norm = self.accelerator.clip_grad_norm_(model.parameters(), args.max_grad_norm) if is_accelerate_available() and self.accelerator.distributed_type == DistributedType.DEEPSPEED: grad_norm = model.get_global_grad_norm() if hasattr(grad_norm, 'item'): grad_norm = grad_norm.item() else: grad_norm = _grad_norm self.control = self.callback_handler.on_pre_optimizer_step(args, self.state, self.control) context = contextlib.nullcontext if self.is_tp_enabled: from torch.distributed._tensor.experimental import implicit_replication context = implicit_replication with context(): self.optimizer.step() self.control = self.callback_handler.on_optimizer_step(args, self.state, self.control) 
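                        # Worked example of the accumulation bookkeeping above: with
                        # gradient_accumulation_steps=4 and steps_in_epoch=10, remainder=2 and
                        # total_updates=3, so batches are grouped 4+4+2 and do_sync_step fires
                        # at (0-indexed) steps 3, 7 and 9, i.e. the optimizer steps three times
                        # per epoch.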
learning_rate = self._get_learning_rate() if not self.accelerator.optimizer_step_was_skipped: if not isinstance(self.lr_scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau): self.lr_scheduler.step() model.zero_grad() self.state.global_step += 1 self.state.epoch = epoch + (step + 1) / steps_in_epoch self.control = self.callback_handler.on_step_end(args, self.state, self.control) self._maybe_log_save_evaluate(tr_loss, grad_norm, model, trial, epoch, ignore_keys_for_eval, start_time, learning_rate=learning_rate) else: self.control = self.callback_handler.on_substep_end(args, self.state, self.control) if self.control.should_epoch_stop or self.control.should_training_stop: if is_torch_xla_available(): xm.mark_step() break if self.control.should_epoch_stop or self.control.should_training_stop: if is_torch_xla_available(): xm.mark_step() break if step < 0: logger.warning(f"There seems not to be a single sample in your epoch_iterator, stopping training at step {self.state.global_step}! This is expected if you're using an IterableDataset and set num_steps ({max_steps}) higher than the number of available samples.") self.control.should_training_stop = True self.control = self.callback_handler.on_epoch_end(args, self.state, self.control) self._maybe_log_save_evaluate(tr_loss, grad_norm, model, trial, epoch, ignore_keys_for_eval, start_time, learning_rate=learning_rate) if DebugOption.TPU_METRICS_DEBUG in self.args.debug: if is_torch_xla_available(): xm.master_print(met.metrics_report()) else: logger.warning("You enabled PyTorch/XLA debug metrics but you don't have a TPU configured. Check your training configuration if this is unexpected.") if self.control.should_training_stop: break if args.past_index and hasattr(self, '_past'): delattr(self, '_past') logger.info('\n\nTraining completed. 
Do not forget to share your model on huggingface.co/models =)\n\n') if args.load_best_model_at_end and self.state.best_model_checkpoint is not None: if is_torch_xla_available(): xm.rendezvous('load_best_model_at_end') elif args.parallel_mode == ParallelMode.DISTRIBUTED: dist.barrier() elif is_sagemaker_mp_enabled(): smp.barrier() self._load_best_model() self._total_loss_scalar += tr_loss.item() effective_global_step = max(self.state.global_step, 0.001) train_loss = self._total_loss_scalar / effective_global_step metrics = speed_metrics('train', start_time, num_samples=num_train_samples, num_steps=self.state.max_steps, num_tokens=num_train_tokens) self.store_flos() metrics['total_flos'] = self.state.total_flos metrics['train_loss'] = train_loss self.is_in_train = False self._memory_tracker.stop_and_update_metrics(metrics) self.log(metrics) run_dir = self._get_output_dir(trial) checkpoints_sorted = self._sorted_checkpoints(use_mtime=False, output_dir=run_dir) if self.args.should_save and self.state.best_model_checkpoint is not None and (self.args.save_total_limit == 1): for checkpoint in checkpoints_sorted: if not os.path.samefile(checkpoint, self.state.best_model_checkpoint): logger.info(f'Deleting older checkpoint [{checkpoint}] due to args.save_total_limit') shutil.rmtree(checkpoint, ignore_errors=True) self.control = self.callback_handler.on_train_end(args, self.state, self.control) self._finish_current_push() if self.neftune_noise_alpha is not None: self._deactivate_neftune(self.model) return TrainOutput(self.state.global_step, train_loss, metrics) def _get_output_dir(self, trial): if self.hp_search_backend is not None and trial is not None: if self.hp_search_backend == HPSearchBackend.OPTUNA: run_id = trial.number elif self.hp_search_backend == HPSearchBackend.RAY: import ray.train run_id = ray.train.get_context().get_trial_id() elif self.hp_search_backend == HPSearchBackend.SIGOPT: run_id = trial.id elif self.hp_search_backend == HPSearchBackend.WANDB: import wandb run_id = wandb.run.id run_name = self.hp_name(trial) if self.hp_name is not None else f'run-{run_id}' run_dir = os.path.join(self.args.output_dir, run_name) else: run_dir = self.args.output_dir return run_dir def _load_from_checkpoint(self, resume_from_checkpoint, model=None): if model is None: model = self.model config_file = os.path.join(resume_from_checkpoint, CONFIG_NAME) adapter_weights_file = os.path.join(resume_from_checkpoint, ADAPTER_WEIGHTS_NAME) adapter_safe_weights_file = os.path.join(resume_from_checkpoint, ADAPTER_SAFE_WEIGHTS_NAME) weights_file = os.path.join(resume_from_checkpoint, WEIGHTS_NAME) weights_index_file = os.path.join(resume_from_checkpoint, WEIGHTS_INDEX_NAME) safe_weights_file = os.path.join(resume_from_checkpoint, SAFE_WEIGHTS_NAME) safe_weights_index_file = os.path.join(resume_from_checkpoint, SAFE_WEIGHTS_INDEX_NAME) is_fsdp_ckpt = os.path.isdir(resume_from_checkpoint) and (any((FSDP_MODEL_NAME in folder_name for folder_name in os.listdir(resume_from_checkpoint) if os.path.isdir(os.path.join(resume_from_checkpoint, folder_name)))) or os.path.isfile(os.path.join(resume_from_checkpoint, f'{FSDP_MODEL_NAME}.bin'))) adapter_subdirs = [folder_name for folder_name in os.listdir(resume_from_checkpoint) if os.path.isdir(os.path.join(resume_from_checkpoint, folder_name)) and (os.path.isfile(os.path.join(resume_from_checkpoint, folder_name, ADAPTER_WEIGHTS_NAME)) or os.path.isfile(os.path.join(resume_from_checkpoint, folder_name, ADAPTER_SAFE_WEIGHTS_NAME)))] if os.path.isdir(resume_from_checkpoint) 
else [] if is_fsdp_ckpt and (not self.is_fsdp_enabled): raise ValueError(f'Checkpoint found at {resume_from_checkpoint} is only supported when using PyTorch FSDP') if not (any((os.path.isfile(f) for f in [weights_file, safe_weights_file, weights_index_file, safe_weights_index_file, adapter_weights_file, adapter_safe_weights_file])) or is_fsdp_ckpt or adapter_subdirs): raise ValueError(f"Can't find a valid checkpoint at {resume_from_checkpoint}") logger.info(f'Loading model from {resume_from_checkpoint}.') if os.path.isfile(config_file): config = PretrainedConfig.from_json_file(config_file) checkpoint_version = config.transformers_version if checkpoint_version is not None and checkpoint_version != __version__: logger.warning(f'You are resuming training from a checkpoint trained with {checkpoint_version} of Transformers but your current version is {__version__}. This is not recommended and could yield to errors or unwanted behaviors.') if os.path.isfile(weights_file) or os.path.isfile(safe_weights_file) or is_fsdp_ckpt: if is_sagemaker_mp_enabled(): if os.path.isfile(os.path.join(resume_from_checkpoint, 'user_content.pt')): smp.resume_from_checkpoint(path=resume_from_checkpoint, tag=WEIGHTS_NAME, partial=False, load_optimizer=False) else: if hasattr(self.args, 'fp16') and self.args.fp16 is True: logger.warning('Enabling FP16 and loading from smp < 1.10 checkpoint together is not supported.') check_torch_load_is_safe() state_dict = torch.load(weights_file, map_location='cpu', weights_only=True) state_dict['_smp_is_partial'] = False load_result = model.load_state_dict(state_dict, strict=True) del state_dict elif self.is_fsdp_enabled: load_fsdp_model(self.accelerator.state.fsdp_plugin, self.accelerator, model, resume_from_checkpoint, **_get_fsdp_ckpt_kwargs()) else: if self.args.save_safetensors and os.path.isfile(safe_weights_file): state_dict = safetensors.torch.load_file(safe_weights_file, device='cpu') else: check_torch_load_is_safe() state_dict = torch.load(weights_file, map_location='cpu', weights_only=True) load_result = model.load_state_dict(state_dict, False) del state_dict self._issue_warnings_after_load(load_result) elif _is_peft_model(model): if (hasattr(model, 'active_adapter') or hasattr(model, 'active_adapters')) and hasattr(model, 'load_adapter'): if os.path.exists(resume_from_checkpoint): if hasattr(model, 'active_adapters'): active_adapters = model.active_adapters if len(active_adapters) > 1: logger.warning('Multiple active adapters detected will only consider the first adapter') active_adapter = active_adapters[0] else: active_adapter = model.active_adapter if adapter_subdirs: for subdir_name in adapter_subdirs: peft_id = os.path.join(resume_from_checkpoint, subdir_name) model.load_adapter(peft_id, subdir_name, is_trainable=subdir_name == active_adapter) model.set_adapter(active_adapter) else: model.load_adapter(resume_from_checkpoint, active_adapter, is_trainable=True) else: logger.warning(f'The intermediate checkpoints of PEFT may not be saved correctly, consider using a custom callback to save {ADAPTER_WEIGHTS_NAME} in corresponding saving folders. 
Check some examples here: https://github.com/huggingface/peft/issues/96') else: logger.warning('Could not load adapter model, make sure to have `peft>=0.3.0` installed') else: load_result = load_sharded_checkpoint(model, resume_from_checkpoint, strict=is_sagemaker_mp_enabled(), prefer_safe=self.args.save_safetensors) if not is_sagemaker_mp_enabled(): self._issue_warnings_after_load(load_result) def _load_best_model(self): logger.info(f'Loading best model from {self.state.best_model_checkpoint} (score: {self.state.best_metric}).') best_model_path = os.path.join(self.state.best_model_checkpoint, WEIGHTS_NAME) best_safe_model_path = os.path.join(self.state.best_model_checkpoint, SAFE_WEIGHTS_NAME) best_adapter_model_path = os.path.join(self.state.best_model_checkpoint, ADAPTER_WEIGHTS_NAME) best_safe_adapter_model_path = os.path.join(self.state.best_model_checkpoint, ADAPTER_SAFE_WEIGHTS_NAME) model = self.model_wrapped if is_sagemaker_mp_enabled() else self.model if self.is_deepspeed_enabled: deepspeed_load_checkpoint(self.model_wrapped, self.state.best_model_checkpoint, load_module_strict=not _is_peft_model(self.model)) elif self.is_fsdp_enabled: load_result = load_fsdp_model(self.accelerator.state.fsdp_plugin, self.accelerator, model, self.state.best_model_checkpoint, **_get_fsdp_ckpt_kwargs()) elif os.path.exists(best_model_path) or os.path.exists(best_safe_model_path) or os.path.exists(best_adapter_model_path) or os.path.exists(best_safe_adapter_model_path): has_been_loaded = True if is_sagemaker_mp_enabled(): if os.path.isfile(os.path.join(self.state.best_model_checkpoint, 'user_content.pt')): smp.resume_from_checkpoint(path=self.state.best_model_checkpoint, tag=WEIGHTS_NAME, partial=False, load_optimizer=False) else: if self.args.save_safetensors and os.path.isfile(best_safe_model_path): state_dict = safetensors.torch.load_file(best_safe_model_path, device='cpu') else: check_torch_load_is_safe() state_dict = torch.load(best_model_path, map_location='cpu', weights_only=True) state_dict['_smp_is_partial'] = False load_result = model.load_state_dict(state_dict, strict=True) else: if _is_peft_model(model): if (hasattr(model, 'active_adapter') or hasattr(model, 'active_adapters')) and hasattr(model, 'load_adapter'): if hasattr(model, 'active_adapters'): active_adapter = model.active_adapters[0] if len(model.active_adapters) > 1: logger.warning('Detected multiple active adapters, will only consider the first one') else: active_adapter = model.active_adapter if os.path.exists(best_adapter_model_path) or os.path.exists(best_safe_adapter_model_path): try: model.load_adapter(self.state.best_model_checkpoint, active_adapter) except RuntimeError as exc: if model.peft_config[active_adapter].is_prompt_learning: msg = f'When using prompt learning PEFT methods such as {model.peft_config[active_adapter].peft_type.value}, setting load_best_model_at_end=True can lead to errors, it is recommended to set this to False and to load the model manually from the checkpoint directory using PeftModel.from_pretrained(base_model, <path>) after training has finished.' raise RuntimeError(msg) from exc else: raise from torch.nn.modules.module import _IncompatibleKeys load_result = _IncompatibleKeys([], []) else: logger.warning(f'The intermediate checkpoints of PEFT may not be saved correctly, consider using a custom callback to save {ADAPTER_WEIGHTS_NAME} in corresponding saving folders. 
Check some examples here: https://github.com/huggingface/peft/issues/96') has_been_loaded = False else: logger.warning('Could not load adapter model, make sure to have `peft>=0.3.0` installed') has_been_loaded = False else: if self.args.save_safetensors and os.path.isfile(best_safe_model_path): state_dict = safetensors.torch.load_file(best_safe_model_path, device='cpu') else: check_torch_load_is_safe() state_dict = torch.load(best_model_path, map_location='cpu', weights_only=True) load_result = model.load_state_dict(state_dict, False) if not is_sagemaker_mp_enabled() and has_been_loaded: self._issue_warnings_after_load(load_result) elif os.path.exists(os.path.join(self.state.best_model_checkpoint, SAFE_WEIGHTS_INDEX_NAME)) or os.path.exists(os.path.join(self.state.best_model_checkpoint, WEIGHTS_INDEX_NAME)): load_result = load_sharded_checkpoint(model, self.state.best_model_checkpoint, strict=is_sagemaker_mp_enabled()) if not is_sagemaker_mp_enabled(): self._issue_warnings_after_load(load_result) else: logger.warning(f'Could not locate the best model at {best_model_path}, if you are running a distributed training on multiple nodes, you should activate `--save_on_each_node`.') def _issue_warnings_after_load(self, load_result): if len(load_result.missing_keys) != 0: if self.model._keys_to_ignore_on_save is not None and set(load_result.missing_keys) == set(self.model._keys_to_ignore_on_save): self.model.tie_weights() else: logger.warning(f'There were missing keys in the checkpoint model loaded: {load_result.missing_keys}.') if len(load_result.unexpected_keys) != 0: logger.warning(f'There were unexpected keys in the checkpoint model loaded: {load_result.unexpected_keys}.') def _evaluate(self, trial, ignore_keys_for_eval, skip_scheduler=False): metrics = self.evaluate(ignore_keys=ignore_keys_for_eval) self._report_to_hp_search(trial, self.state.global_step, metrics) if isinstance(self.lr_scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau) and (not skip_scheduler): metric_to_check = self.args.metric_for_best_model if not metric_to_check.startswith('eval_'): metric_to_check = f'eval_{metric_to_check}' try: self.lr_scheduler.step(metrics[metric_to_check]) except KeyError as exc: raise KeyError(f"The `metric_for_best_model` training argument is set to '{metric_to_check}', which is not found in the evaluation metrics. The available evaluation metrics are: {list(metrics.keys())}. 
Please ensure that the `compute_metrics` function returns a dictionary that includes '{metric_to_check}' or consider changing the `metric_for_best_model` via the TrainingArguments.") from exc return metrics def _maybe_log_save_evaluate(self, tr_loss, grad_norm, model, trial, epoch, ignore_keys_for_eval, start_time, learning_rate=None): if self.control.should_log and self.state.global_step > self._globalstep_last_logged: if is_torch_xla_available(): xm.mark_step() logs: dict[str, float] = {} tr_loss_scalar = self._nested_gather(tr_loss).mean().item() tr_loss -= tr_loss logs['loss'] = round(tr_loss_scalar / (self.state.global_step - self._globalstep_last_logged), 4) if grad_norm is not None: logs['grad_norm'] = grad_norm.item() if isinstance(grad_norm, torch.Tensor) else grad_norm if learning_rate is not None: logs['learning_rate'] = learning_rate else: logs['learning_rate'] = self._get_learning_rate() self._total_loss_scalar += tr_loss_scalar self._globalstep_last_logged = self.state.global_step self.store_flos() self.log(logs, start_time) metrics = None if self.control.should_evaluate: metrics = self._evaluate(trial, ignore_keys_for_eval) is_new_best_metric = self._determine_best_metric(metrics=metrics, trial=trial) if self.args.save_strategy == SaveStrategy.BEST: self.control.should_save = is_new_best_metric if self.control.should_save: self._save_checkpoint(model, trial) self.control = self.callback_handler.on_save(self.args, self.state, self.control) def _load_rng_state(self, checkpoint): if checkpoint is None: return if self.args.world_size > 1: process_index = self.args.process_index rng_file = os.path.join(checkpoint, f'rng_state_{process_index}.pth') if not os.path.isfile(rng_file): logger.info(f"Didn't find an RNG file for process {process_index}, if you are resuming a training that wasn't launched in a distributed fashion, reproducibility is not guaranteed.") return else: rng_file = os.path.join(checkpoint, 'rng_state.pth') if not os.path.isfile(rng_file): logger.info("Didn't find an RNG file, if you are resuming a training that was launched in a distributed fashion, reproducibility is not guaranteed.") return with safe_globals(): checkpoint_rng_state = torch.load(rng_file) random.setstate(checkpoint_rng_state['python']) np.random.set_state(checkpoint_rng_state['numpy']) torch.random.set_rng_state(checkpoint_rng_state['cpu']) if is_torch_xla_available(): xm.set_rng_state(checkpoint_rng_state['xla']) is_distributed = self.args.parallel_mode == ParallelMode.DISTRIBUTED if torch.cuda.is_available(): set_rng_state_for_device('CUDA', torch.cuda, checkpoint_rng_state, is_distributed) if is_torch_npu_available(): set_rng_state_for_device('NPU', torch.npu, checkpoint_rng_state, is_distributed) if is_torch_hpu_available(): set_rng_state_for_device('HPU', torch.hpu, checkpoint_rng_state, is_distributed) if is_torch_mlu_available(): set_rng_state_for_device('MLU', torch.mlu, checkpoint_rng_state, is_distributed) if is_torch_musa_available(): set_rng_state_for_device('MUSA', torch.musa, checkpoint_rng_state, is_distributed) def _determine_best_metric(self, metrics, trial): """ Determine if the model should be saved based on the evaluation metrics. 
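
        On the first evaluation, `best_metric` is initialized to `-inf` (or `inf` when
        `greater_is_better` is `False`), so the first observed metric value always becomes the
        initial best.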
Returns: bool: True if a new best metric was found, else False """ is_new_best_metric = False if self.args.metric_for_best_model is not None: metric_to_check = self.args.metric_for_best_model if not metric_to_check.startswith('eval_'): metric_to_check = f'eval_{metric_to_check}' try: metric_value = metrics[metric_to_check] except KeyError as exc: raise KeyError(f"The `metric_for_best_model` training argument is set to '{metric_to_check}', which is not found in the evaluation metrics. The available evaluation metrics are: {list(metrics.keys())}. Consider changing the `metric_for_best_model` via the TrainingArguments.") from exc operator = np.greater if self.args.greater_is_better else np.less if self.state.best_metric is None: self.state.best_metric = float('-inf') if self.args.greater_is_better else float('inf') if operator(metric_value, self.state.best_metric): self.state.best_metric = metric_value if self.args.save_strategy in [SaveStrategy.STEPS, SaveStrategy.EPOCH]: self.state.best_global_step = self.state.global_step is_new_best_metric = True return is_new_best_metric def _save_checkpoint(self, model, trial): checkpoint_folder = f'{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}' if self.hp_search_backend is None and trial is None: self.store_flos() run_dir = self._get_output_dir(trial=trial) output_dir = os.path.join(run_dir, checkpoint_folder) self.save_model(output_dir, _internal_call=True) if self.args.save_strategy in [SaveStrategy.STEPS, SaveStrategy.EPOCH] and self.state.best_global_step: best_checkpoint_folder = f'{PREFIX_CHECKPOINT_DIR}-{self.state.best_global_step}' best_checkpoint_dir = os.path.join(run_dir, best_checkpoint_folder) if os.path.exists(best_checkpoint_dir): self.state.best_model_checkpoint = best_checkpoint_dir if not self.args.save_only_model: self._save_optimizer_and_scheduler(output_dir) self._save_scaler(output_dir) self._save_rng_state(output_dir) if self.args.should_save: for cb in [cb for cb in self.callback_handler.callbacks + [self.control] if isinstance(cb, ExportableState)]: cb_name = cb.__class__.__name__ cb_state = cb.state() if isinstance(self.state.stateful_callbacks[cb_name], list): self.state.stateful_callbacks[cb_name].append(cb_state) else: self.state.stateful_callbacks[cb_name] = cb_state self.state.save_to_json(os.path.join(output_dir, TRAINER_STATE_NAME)) if self.args.push_to_hub: self._push_from_checkpoint(output_dir) if self.args.should_save: self._rotate_checkpoints(use_mtime=True, output_dir=run_dir) def _save_rng_state(self, output_dir): rng_states = {'python': random.getstate(), 'numpy': np.random.get_state(), 'cpu': torch.random.get_rng_state()} if torch.cuda.is_available(): if self.args.parallel_mode == ParallelMode.DISTRIBUTED: rng_states['cuda'] = torch.cuda.random.get_rng_state_all() else: rng_states['cuda'] = torch.cuda.random.get_rng_state() if is_torch_xla_available(): rng_states['xla'] = xm.get_rng_state() if is_torch_npu_available(): if self.args.parallel_mode == ParallelMode.DISTRIBUTED: rng_states['npu'] = torch.npu.random.get_rng_state_all() else: rng_states['npu'] = torch.npu.random.get_rng_state() if is_torch_hpu_available(): if self.args.parallel_mode == ParallelMode.DISTRIBUTED: rng_states['hpu'] = torch.hpu.random.get_rng_state_all() else: rng_states['hpu'] = torch.hpu.random.get_rng_state() if is_torch_mlu_available(): if self.args.parallel_mode == ParallelMode.DISTRIBUTED: rng_states['mlu'] = torch.mlu.random.get_rng_state_all() else: rng_states['mlu'] = torch.mlu.random.get_rng_state() if 
is_torch_musa_available(): if self.args.parallel_mode == ParallelMode.DISTRIBUTED: rng_states['musa'] = torch.musa.get_rng_state_all() else: rng_states['musa'] = torch.musa.get_rng_state() os.makedirs(output_dir, exist_ok=True) if self.args.world_size <= 1: torch.save(rng_states, os.path.join(output_dir, 'rng_state.pth')) else: torch.save(rng_states, os.path.join(output_dir, f'rng_state_{self.args.process_index}.pth')) def _save_optimizer_and_scheduler(self, output_dir): if is_torch_xla_available(): xm.rendezvous('saving_optimizer_states') if self.is_fsdp_xla_v1_enabled: optm = {'optimizer': self.optimizer.state_dict(), 'shard_metadata': self.model.get_shard_metadata()} xm.save(optm, os.path.join(output_dir, f'rank{self.args.process_index}-of-{self.args.world_size}-{OPTIMIZER_NAME}'), master_only=False) else: xm.save(self.optimizer.state_dict(), os.path.join(output_dir, OPTIMIZER_NAME)) with warnings.catch_warnings(record=True) as caught_warnings: xm.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, SCHEDULER_NAME)) reissue_pt_warnings(caught_warnings) elif is_sagemaker_mp_enabled(): opt_state_dict = self.optimizer.local_state_dict(gather_if_shard=False) smp.barrier() if smp.rdp_rank() == 0 or smp.state.cfg.shard_optimizer_state: smp.save(opt_state_dict, os.path.join(output_dir, OPTIMIZER_NAME), partial=True, v3=smp.state.cfg.shard_optimizer_state) elif self.is_deepspeed_enabled: accept_exclude_frozen_parameters = 'exclude_frozen_parameters' in set(inspect.signature(self.model_wrapped.save_checkpoint).parameters.keys()) if accept_exclude_frozen_parameters and _is_peft_model(self.model): self.model_wrapped.save_checkpoint(output_dir, exclude_frozen_parameters=True) else: self.model_wrapped.save_checkpoint(output_dir) elif self.is_fsdp_enabled: save_fsdp_model(self.accelerator.state.fsdp_plugin, self.accelerator, self.model, output_dir, **_get_fsdp_ckpt_kwargs()) save_fsdp_optimizer(self.accelerator.state.fsdp_plugin, self.accelerator, self.optimizer, self.model, output_dir) elif self.args.should_save: torch.save(self.optimizer.state_dict(), os.path.join(output_dir, OPTIMIZER_NAME)) is_deepspeed_custom_scheduler = self.is_deepspeed_enabled and (not isinstance(self.lr_scheduler, DeepSpeedSchedulerWrapper)) if self.args.should_save and (not self.is_deepspeed_enabled or is_deepspeed_custom_scheduler) and (not is_torch_xla_available()): with warnings.catch_warnings(record=True) as caught_warnings: torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, SCHEDULER_NAME)) reissue_pt_warnings(caught_warnings) def _load_optimizer_and_scheduler(self, checkpoint): """If optimizer and scheduler states exist, load them.""" if checkpoint is None: return if self.is_deepspeed_enabled: if not isinstance(self.lr_scheduler, DeepSpeedSchedulerWrapper): with warnings.catch_warnings(record=True) as caught_warnings: check_torch_load_is_safe() self.lr_scheduler.load_state_dict(torch.load(os.path.join(checkpoint, SCHEDULER_NAME), weights_only=True)) reissue_pt_warnings(caught_warnings) return checkpoint_file_exists = glob.glob(os.path.join(checkpoint, OPTIMIZER_NAME) + '_*') if is_sagemaker_mp_enabled() else os.path.isfile(os.path.join(checkpoint, OPTIMIZER_NAME)) or os.path.isfile(os.path.join(checkpoint, OPTIMIZER_NAME_BIN)) or (os.path.isdir(checkpoint) and any((OPTIMIZER_NAME_BIN.split('.')[0] in folder_name for folder_name in os.listdir(checkpoint) if os.path.isdir(os.path.join(checkpoint, folder_name))))) checkpoint_file_exists = glob.glob(os.path.join(checkpoint, 
f'rank*-of-{self.args.world_size}-{OPTIMIZER_NAME}')) if self.is_fsdp_xla_v1_enabled else checkpoint_file_exists if checkpoint_file_exists and os.path.isfile(os.path.join(checkpoint, SCHEDULER_NAME)): if is_torch_xla_available(): if self.is_fsdp_xla_v1_enabled: check_torch_load_is_safe() optimizer_state = torch.load(os.path.join(checkpoint, f'rank{self.args.process_index}-of-{self.args.world_size}-{OPTIMIZER_NAME}'), map_location='cpu', weights_only=True) optimizer_state = optimizer_state['optimizer'] else: check_torch_load_is_safe() optimizer_state = torch.load(os.path.join(checkpoint, OPTIMIZER_NAME), map_location='cpu', weights_only=True) with warnings.catch_warnings(record=True) as caught_warnings: check_torch_load_is_safe() lr_scheduler_state = torch.load(os.path.join(checkpoint, SCHEDULER_NAME), map_location='cpu', weights_only=True) reissue_pt_warnings(caught_warnings) xm.send_cpu_data_to_device(optimizer_state, self.args.device) xm.send_cpu_data_to_device(lr_scheduler_state, self.args.device) self.optimizer.load_state_dict(optimizer_state) self.lr_scheduler.load_state_dict(lr_scheduler_state) else: if is_sagemaker_mp_enabled(): if os.path.isfile(os.path.join(checkpoint, 'user_content.pt')): def opt_load_hook(mod, opt): opt.load_state_dict(smp.load(os.path.join(checkpoint, OPTIMIZER_NAME), partial=True)) else: def opt_load_hook(mod, opt): if IS_SAGEMAKER_MP_POST_1_10: opt.load_state_dict(smp.load(os.path.join(checkpoint, OPTIMIZER_NAME), partial=True, back_compat=True)) else: opt.load_state_dict(smp.load(os.path.join(checkpoint, OPTIMIZER_NAME), partial=True)) self.model_wrapped.register_post_step_hook(opt_load_hook) else: map_location = self.args.device if self.args.world_size > 1 else 'cpu' if self.is_fsdp_enabled: load_fsdp_optimizer(self.accelerator.state.fsdp_plugin, self.accelerator, self.optimizer, self.model, checkpoint, **_get_fsdp_ckpt_kwargs()) else: check_torch_load_is_safe() self.optimizer.load_state_dict(torch.load(os.path.join(checkpoint, OPTIMIZER_NAME), map_location=map_location, weights_only=True)) with warnings.catch_warnings(record=True) as caught_warnings: check_torch_load_is_safe() self.lr_scheduler.load_state_dict(torch.load(os.path.join(checkpoint, SCHEDULER_NAME), weights_only=True)) reissue_pt_warnings(caught_warnings) def _save_scaler(self, output_dir): try: scaler = self.accelerator.scaler except AttributeError: return if scaler is None: return if is_torch_xla_available(): xm.rendezvous('saving_scaler_state') with warnings.catch_warnings(record=True) as caught_warnings: xm.save(self.accelerator.scaler.state_dict(), os.path.join(output_dir, SCALER_NAME)) reissue_pt_warnings(caught_warnings) if self.args.should_save and (not is_torch_xla_available()): with warnings.catch_warnings(record=True) as caught_warnings: torch.save(self.accelerator.scaler.state_dict(), os.path.join(output_dir, SCALER_NAME)) reissue_pt_warnings(caught_warnings) def _load_scaler(self, checkpoint): """If scaler state exists, load it.""" if checkpoint is None: return checkpoint_file_exists = os.path.isfile(os.path.join(checkpoint, SCALER_NAME)) if checkpoint_file_exists: if is_torch_xla_available(): with warnings.catch_warnings(record=True) as caught_warnings: check_torch_load_is_safe() scaler_state = torch.load(os.path.join(checkpoint, SCALER_NAME), map_location='cpu', weights_only=True) reissue_pt_warnings(caught_warnings) xm.send_cpu_data_to_device(scaler_state, self.args.device) self.accelerator.scaler.load_state_dict(scaler_state) else: with warnings.catch_warnings(record=True) as 
caught_warnings: check_torch_load_is_safe() self.accelerator.scaler.load_state_dict(torch.load(os.path.join(checkpoint, SCALER_NAME), weights_only=True)) reissue_pt_warnings(caught_warnings) def _load_callback_state(self): """If callback states exist and were passed in, restore their states if enabled""" if not self.args.restore_callback_states_from_checkpoint: return not_found = [] new_callbacks = [] original_callbacks = self.callback_handler.callbacks + [self.control] for stored_callback, data in self.state.stateful_callbacks.items(): if not isinstance(data, list): data = [data] if any((callback.__class__.__name__ == stored_callback for callback in original_callbacks)): duplicates = [callback for callback in original_callbacks if callback.__class__.__name__ == stored_callback] for callback, callback_data in zip(duplicates, data): args = callback_data.get('args', {}) attributes = callback_data.get('attributes', {}) new_callback = type(callback)(**args) for attribute, value in attributes.items(): setattr(new_callback, attribute, value) if isinstance(callback, TrainerControl): self.control = new_callback else: new_callbacks.append(new_callback) self.callback_handler.remove_callback(type(new_callback)) logger.info('Continuing training from checkpoint, restoring any callbacks that were passed in') else: not_found.append(stored_callback) if len(not_found) > 0: logger.warning(f"Checkpoint included callbacks not included in current configuration. Ignoring. ({', '.join(not_found)})") for callback in new_callbacks: self.callback_handler.add_callback(callback) def hyperparameter_search(self, hp_space: Optional[Callable[['optuna.Trial'], dict[str, float]]]=None, compute_objective: Optional[Callable[[dict[str, float]], float]]=None, n_trials: int=20, direction: Union[str, list[str]]='minimize', backend: Optional[Union['str', HPSearchBackend]]=None, hp_name: Optional[Callable[['optuna.Trial'], str]]=None, **kwargs) -> Union[BestRun, list[BestRun]]: """ Launch a hyperparameter search using `optuna` or `Ray Tune` or `SigOpt`. The optimized quantity is determined by `compute_objective`, which defaults to a function returning the evaluation loss when no metric is provided, and the sum of all metrics otherwise. <Tip warning={true}> To use this method, you need to have provided a `model_init` when initializing your [`Trainer`]: we need to reinitialize the model at each new run. This is incompatible with the `optimizers` argument, so you need to subclass [`Trainer`] and override the method [`~Trainer.create_optimizer_and_scheduler`] for custom optimizer/scheduler. </Tip> Args: hp_space (`Callable[["optuna.Trial"], dict[str, float]]`, *optional*): A function that defines the hyperparameter search space. Will default to [`~trainer_utils.default_hp_space_optuna`] or [`~trainer_utils.default_hp_space_ray`] or [`~trainer_utils.default_hp_space_sigopt`] depending on your backend. compute_objective (`Callable[[dict[str, float]], float]`, *optional*): A function computing the objective to minimize or maximize from the metrics returned by the `evaluate` method. Will default to [`~trainer_utils.default_compute_objective`]. n_trials (`int`, *optional*, defaults to 20): The number of trial runs to test. direction (`str` or `list[str]`, *optional*, defaults to `"minimize"`): If it's single objective optimization, direction is `str`, can be `"minimize"` or `"maximize"`, you should pick `"minimize"` when optimizing the validation loss, `"maximize"` when optimizing one or several metrics. 
If it's multi objectives optimization, direction is `list[str]`, can be List of `"minimize"` and `"maximize"`, you should pick `"minimize"` when optimizing the validation loss, `"maximize"` when optimizing one or several metrics. backend (`str` or [`~training_utils.HPSearchBackend`], *optional*): The backend to use for hyperparameter search. Will default to optuna or Ray Tune or SigOpt, depending on which one is installed. If all are installed, will default to optuna. hp_name (`Callable[["optuna.Trial"], str]]`, *optional*): A function that defines the trial/run name. Will default to None. kwargs (`dict[str, Any]`, *optional*): Additional keyword arguments for each backend: - `optuna`: parameters from [optuna.study.create_study](https://optuna.readthedocs.io/en/stable/reference/generated/optuna.study.create_study.html) and also the parameters `timeout`, `n_jobs` and `gc_after_trial` from [optuna.study.Study.optimize](https://optuna.readthedocs.io/en/stable/reference/generated/optuna.study.Study.html#optuna.study.Study.optimize) - `ray`: parameters from [tune.run](https://docs.ray.io/en/latest/tune/api_docs/execution.html#tune-run). If `resources_per_trial` is not set in the `kwargs`, it defaults to 1 CPU core and 1 GPU (if available). If `progress_reporter` is not set in the `kwargs`, [ray.tune.CLIReporter](https://docs.ray.io/en/latest/tune/api/doc/ray.tune.CLIReporter.html) is used. - `sigopt`: the parameter `proxies` from [sigopt.Connection.set_proxies](https://docs.sigopt.com/support/faq#how-do-i-use-sigopt-with-a-proxy). Returns: [`trainer_utils.BestRun` or `list[trainer_utils.BestRun]`]: All the information about the best run or best runs for multi-objective optimization. Experiment summary can be found in `run_summary` attribute for Ray backend. """ if backend is None: backend = default_hp_search_backend() backend = HPSearchBackend(backend) backend_obj = ALL_HYPERPARAMETER_SEARCH_BACKENDS[backend]() backend_obj.ensure_available() self.hp_search_backend = backend if self.model_init is None: raise RuntimeError('To use hyperparameter search, you need to pass your model through a model_init function.') self.hp_space = backend_obj.default_hp_space if hp_space is None else hp_space self.hp_name = hp_name self.compute_objective = default_compute_objective if compute_objective is None else compute_objective best_run = backend_obj.run(self, n_trials, direction, **kwargs) self.hp_search_backend = None return best_run def log(self, logs: dict[str, float], start_time: Optional[float]=None) -> None: """ Log `logs` on the various objects watching training. Subclass and override this method to inject custom behavior. Args: logs (`dict[str, float]`): The values to log. start_time (`Optional[float]`): The start of training. """ if self.state.epoch is not None: logs['epoch'] = self.state.epoch if self.args.include_num_input_tokens_seen != 'no': logs['num_input_tokens_seen'] = self.state.num_input_tokens_seen if start_time is not None: logs.update(speed_metrics('train', start_time, num_tokens=self.state.num_input_tokens_seen)) output = {**logs, **{'step': self.state.global_step}} self.state.log_history.append(output) self.control = self.callback_handler.on_log(self.args, self.state, self.control, logs) def _prepare_input(self, data: Union[torch.Tensor, Any]) -> Union[torch.Tensor, Any]: """ Prepares one `data` before feeding it to the model, be it a tensor or a nested list/dictionary of tensors. 
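Example (an illustrative sketch of the recursive device placement described above; `move_to_device` is a stand-alone helper written for this docstring, not a Trainer method, and the target device is a placeholder):

```python
import torch

def move_to_device(data, device):
    # Recurse through mappings and sequences, moving only the tensors.
    if isinstance(data, dict):
        return {k: move_to_device(v, device) for k, v in data.items()}
    if isinstance(data, (tuple, list)):
        return type(data)(move_to_device(v, device) for v in data)
    if isinstance(data, torch.Tensor):
        return data.to(device=device)
    return data

batch = {"input_ids": torch.ones(2, 8, dtype=torch.long), "extras": [torch.zeros(2)]}
batch = move_to_device(batch, "cpu")  # every tensor, however deeply nested, lands on the target device
```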
""" if isinstance(data, Mapping): return type(data)({k: self._prepare_input(v) for k, v in data.items()}) elif isinstance(data, (tuple, list)): return type(data)((self._prepare_input(v) for v in data)) elif isinstance(data, torch.Tensor): kwargs = {'device': self.args.device} if self.is_deepspeed_enabled and (torch.is_floating_point(data) or torch.is_complex(data)): kwargs.update({'dtype': self.accelerator.state.deepspeed_plugin.hf_ds_config.dtype()}) return data.to(**kwargs) return data def _prepare_inputs(self, inputs: dict[str, Union[torch.Tensor, Any]]) -> dict[str, Union[torch.Tensor, Any]]: """ Prepare `inputs` before feeding them to the model, converting them to tensors if they are not already and handling potential state. """ inputs = self._prepare_input(inputs) if len(inputs) == 0: raise ValueError(f"The batch received was empty, your model won't be able to train on it. Double-check that your training dataset contains keys expected by the model: {','.join(self._signature_columns)}.") if self.args.past_index >= 0 and self._past is not None: inputs['mems'] = self._past return inputs def _is_attention_mask_causal(self, attention_mask): """ Check if an attention mask is causal (compatible with causal attention). Context parallelism only supports causal attention patterns. This function checks if the provided attention mask is compatible. Args: attention_mask (torch.Tensor): The attention mask to check Returns: bool: True if the mask is causal or compatible with causal attention """ if attention_mask is None: return True if attention_mask.dim() == 2: return True elif attention_mask.dim() in [3, 4]: seq_len = attention_mask.shape[-1] if seq_len <= 1: return True if attention_mask.dim() == 4: mask = attention_mask[0, 0] else: mask = attention_mask[0] upper_triangular = torch.triu(mask, diagonal=1) is_causal = torch.all(upper_triangular <= 1e-06) or torch.all(upper_triangular < -10000.0) return is_causal.item() if isinstance(is_causal, torch.Tensor) else is_causal return False def _prepare_context_parallel_inputs(self, model, inputs: dict[str, Union[torch.Tensor, Any]]): """ Prepare inputs for context parallelism by setting up buffers and validation. 
Args: model: The model being trained inputs: Input tensors to prepare Returns: tuple: (context_manager, prepared_inputs) where context_manager is either the context parallelism wrapper or a no-op context """ if getattr(self.accelerator, 'parallelism_config', None) is not None and self.accelerator.parallelism_config.cp_enabled: if hasattr(model, 'config'): if model.config._attn_implementation != 'sdpa': raise ValueError(f'Context parallelism is supported only with SDPA attention, you are using {model.config._attn_implementation}.') if 'position_ids' not in inputs: logger.warning_once('Position IDs not found in the inputs, generating manually') inputs['position_ids'] = torch.arange(inputs['input_ids'].size(1), device=inputs['input_ids'].device).expand(inputs['input_ids'].size(0), -1) if 'shift_labels' not in inputs: logger.warning_once('Shift labels not found in the inputs, shifting manually') if 'labels' in inputs: _ignore_index = -100 labels = nn.functional.pad(inputs['labels'], (0, 1), value=_ignore_index) inputs['shift_labels'] = labels[:, 1:].contiguous() buffers = [] buffer_seq_dims = [] if 'input_ids' in inputs: buffers.append(inputs['input_ids']) buffer_seq_dims.append(1) if 'labels' in inputs: buffers.append(inputs['labels']) buffer_seq_dims.append(1) if 'shift_labels' in inputs: buffers.append(inputs['shift_labels']) buffer_seq_dims.append(1) if 'attention_mask' in inputs: if not getattr(self, '_attn_mask_causal_checked', False): attention_mask = inputs['attention_mask'] if not self._is_attention_mask_causal(attention_mask): raise ValueError("Context parallelism only supports causal attention masks. The provided attention_mask is not causal. Please ensure your data uses causal masking (lower triangular) or remove the attention_mask to use the model's default causal masking.") self._attn_mask_causal_checked = True if self._attn_mask_causal_checked: attention_mask = inputs['attention_mask'] if attention_mask.dim() == 2: buffers.append(attention_mask) buffer_seq_dims.append(1) else: pass if 'position_ids' in inputs and inputs['position_ids'] is not None: buffers.append(inputs['position_ids']) buffer_seq_dims.append(1) return (partial(self.accelerator.maybe_context_parallel, buffers=buffers, buffer_seq_dims=buffer_seq_dims, no_restore_buffers=set(buffers)), inputs) return (contextlib.nullcontext, inputs) def compute_loss_context_manager(self): """ A helper wrapper to group together context managers. """ ctx_stack = contextlib.ExitStack() autocast_ctx = self.autocast_smart_context_manager() if not isinstance(autocast_ctx, contextlib.nullcontext): ctx_stack.enter_context(autocast_ctx) return ctx_stack def autocast_smart_context_manager(self, cache_enabled: Optional[bool]=True): """ A helper wrapper that creates an appropriate context manager for `autocast` while feeding it the desired arguments, depending on the situation. """ if self.use_cpu_amp: ctx_manager = torch.cpu.amp.autocast(cache_enabled=cache_enabled, dtype=self.amp_dtype) else: ctx_manager = contextlib.nullcontext() return ctx_manager def training_step(self, model: nn.Module, inputs: dict[str, Union[torch.Tensor, Any]], num_items_in_batch: Optional[torch.Tensor]=None) -> torch.Tensor: """ Perform a training step on a batch of inputs. Subclass and override to inject custom behavior. Args: model (`nn.Module`): The model to train. inputs (`dict[str, Union[torch.Tensor, Any]]`): The inputs and targets of the model. The dictionary will be unpacked before being fed to the model. 
Most models expect the targets under the argument `labels`. Check your model's documentation for all accepted arguments. Return: `torch.Tensor`: The tensor with training loss on this batch. """ cp_context, inputs = self._prepare_context_parallel_inputs(model, inputs) with cp_context(): model.train() if hasattr(self.optimizer, 'train') and callable(self.optimizer.train): self.optimizer.train() inputs = self._prepare_inputs(inputs) if is_sagemaker_mp_enabled(): loss_mb = smp_forward_backward(model, inputs, self.args.gradient_accumulation_steps) return loss_mb.reduce_mean().detach().to(self.args.device) with self.compute_loss_context_manager(): loss = self.compute_loss(model, inputs, num_items_in_batch=num_items_in_batch) del inputs if self.args.torch_empty_cache_steps is not None and self.state.global_step % self.args.torch_empty_cache_steps == 0: if is_torch_xpu_available(): torch.xpu.empty_cache() elif is_torch_mlu_available(): torch.mlu.empty_cache() elif is_torch_musa_available(): torch.musa.empty_cache() elif is_torch_npu_available(): torch.npu.empty_cache() elif is_torch_mps_available(): torch.mps.empty_cache() elif is_torch_hpu_available(): logger.warning('`torch_empty_cache_steps` is set but HPU device/backend does not support empty_cache().') else: torch.cuda.empty_cache() kwargs = {} if self.args.optim in [OptimizerNames.LOMO, OptimizerNames.ADALOMO]: kwargs['learning_rate'] = self._get_learning_rate() if self.args.n_gpu > 1: loss = loss.mean() if self.use_apex: from apex import amp with amp.scale_loss(loss, self.optimizer) as scaled_loss: scaled_loss.backward() else: if (not self.model_accepts_loss_kwargs or num_items_in_batch is None) and self.compute_loss_func is None: loss = loss / self.current_gradient_accumulation_steps if self.accelerator.distributed_type == DistributedType.DEEPSPEED: kwargs['scale_wrt_gas'] = False self.accelerator.backward(loss, **kwargs) return loss.detach() def compute_loss(self, model: nn.Module, inputs: dict[str, Union[torch.Tensor, Any]], return_outputs: bool=False, num_items_in_batch: Optional[torch.Tensor]=None): """ How the loss is computed by Trainer. By default, all models return the loss in the first element. Args: model (`nn.Module`): The model to compute the loss for. inputs (`dict[str, Union[torch.Tensor, Any]]`): The input data for the model. return_outputs (`bool`, *optional*, defaults to `False`): Whether to return the model outputs along with the loss. num_items_in_batch (Optional[torch.Tensor], *optional*): The number of items in the batch. If num_items_in_batch is not passed, Returns: The loss of the model along with its output if return_outputs was set to True Subclass and override for custom behavior. If you are not using `num_items_in_batch` when computing your loss, make sure to overwrite `self.model_accepts_loss_kwargs` to `False`. Otherwise, the loss calculating might be slightly inaccurate when performing gradient accumulation. 
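Example (an illustrative sketch of a subclass override, not the method's actual implementation; `WeightedLossTrainer` and the class weights are placeholders introduced for this example):

```python
import torch
from torch import nn
from transformers import Trainer

class WeightedLossTrainer(Trainer):
    def compute_loss(self, model, inputs, return_outputs=False, num_items_in_batch=None):
        labels = inputs.pop("labels")
        outputs = model(**inputs)
        logits = outputs.logits
        # Arbitrary example weights for an assumed 3-class classification head.
        weight = torch.tensor([1.0, 2.0, 0.5], device=logits.device)
        loss = nn.functional.cross_entropy(logits.view(-1, logits.size(-1)), labels.view(-1), weight=weight)
        return (loss, outputs) if return_outputs else loss
```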
""" if (self.label_smoother is not None or self.compute_loss_func is not None) and 'labels' in inputs: labels = inputs.pop('labels') else: labels = None if self.model_accepts_loss_kwargs: kwargs = {} if num_items_in_batch is not None: kwargs['num_items_in_batch'] = num_items_in_batch inputs = {**inputs, **kwargs} outputs = model(**inputs) if self.args.past_index >= 0: self._past = outputs[self.args.past_index] if labels is not None: unwrapped_model = self.accelerator.unwrap_model(model) if _is_peft_model(unwrapped_model): model_name = unwrapped_model.base_model.model._get_name() else: model_name = unwrapped_model._get_name() if self.compute_loss_func is not None: loss = self.compute_loss_func(outputs, labels, num_items_in_batch=num_items_in_batch) elif model_name in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES.values(): loss = self.label_smoother(outputs, labels, shift_labels=True) else: loss = self.label_smoother(outputs, labels) else: if isinstance(outputs, dict) and 'loss' not in outputs: raise ValueError(f"The model did not return a loss from the inputs, only the following keys: {','.join(outputs.keys())}. For reference, the inputs it received are {','.join(inputs.keys())}.") loss = outputs['loss'] if isinstance(outputs, dict) else outputs[0] if self.args.average_tokens_across_devices and (self.model_accepts_loss_kwargs or self.compute_loss_func) and (num_items_in_batch is not None): loss *= self.accelerator.num_processes if self.args.n_gpu <= 1 else self.args.n_gpu return (loss, outputs) if return_outputs else loss def is_local_process_zero(self) -> bool: """ Whether or not this process is the local (e.g., on one machine if training in a distributed fashion on several machines) main process. """ return self.args.local_process_index == 0 def is_world_process_zero(self) -> bool: """ Whether or not this process is the global main process (when training in a distributed fashion on several machines, this is only going to be `True` for one process). """ if is_sagemaker_mp_enabled(): return smp.rank() == 0 else: return self.args.process_index == 0 def save_model(self, output_dir: Optional[str]=None, _internal_call: bool=False): """ Will save the model, so you can reload it using `from_pretrained()`. Will only save from the main process. """ if output_dir is None: output_dir = self.args.output_dir if is_torch_xla_available(): self._save_tpu(output_dir) elif is_sagemaker_mp_enabled(): os.makedirs(output_dir, exist_ok=True) state_dict = self.model_wrapped.state_dict() if self.args.should_save: self._save(output_dir, state_dict=state_dict) if IS_SAGEMAKER_MP_POST_1_10: Path(os.path.join(output_dir, 'user_content.pt')).touch() elif getattr(self.accelerator, 'parallelism_config', None) is not None: if self.accelerator.should_save_model: self._save(output_dir) elif (tp_size := getattr(self.model, '_tp_size', 0)) is not None and tp_size > 1: self._save(output_dir) elif self.is_fsdp_enabled: if 'FULL_STATE_DICT' in str(self.accelerator.state.fsdp_plugin.state_dict_type): state_dict = self.accelerator.get_state_dict(self.model) if self.args.should_save: self._save(output_dir, state_dict=state_dict) elif self.is_deepspeed_enabled: try: state_dict = self.accelerator.get_state_dict(self.deepspeed) if self.args.should_save: self._save(output_dir, state_dict=state_dict) except ValueError: logger.warning(' stage3_gather_16bit_weights_on_model_save=false. 
Saving the full checkpoint instead, use zero_to_fp32.py to recover weights') if self.args.should_save: self._save(output_dir, state_dict={}) remove_dummy_checkpoint(self.args.should_save, output_dir, [WEIGHTS_NAME, SAFE_WEIGHTS_NAME]) self.model_wrapped.save_checkpoint(output_dir) elif self.args.should_save: self._save(output_dir) if self.args.push_to_hub and (not _internal_call): self.push_to_hub(commit_message='Model save', revision=self.args.hub_revision) def _save_tpu(self, output_dir: Optional[str]=None): output_dir = output_dir if output_dir is not None else self.args.output_dir logger.info(f'Saving model checkpoint to {output_dir}') model = self.model xm.mark_step() if xm.is_master_ordinal(local=False): os.makedirs(output_dir, exist_ok=True) torch.save(self.args, os.path.join(output_dir, TRAINING_ARGS_NAME)) supported_classes = (PushToHubMixin,) xm.rendezvous('saving_checkpoint') if self.is_fsdp_xla_v1_enabled: ckpt = {'model': model.state_dict(), 'shard_metadata': model.get_shard_metadata()} ckpt_path = os.path.join(output_dir, f'rank{self.args.process_index}-of-{self.args.world_size}-{WEIGHTS_NAME}') xm.save(ckpt, ckpt_path, master_only=False) xm.rendezvous('save_full_checkpoints') if self.args.should_save: from torch_xla.distributed.fsdp import consolidate_sharded_model_checkpoints full_state_dict, _ = consolidate_sharded_model_checkpoints(ckpt_prefix=os.path.join(output_dir, ''), ckpt_suffix=f'rank*-of-*-{WEIGHTS_NAME}', save_model=False) model = model.module.module unwrapped_model = self.accelerator.unwrap_model(model) if isinstance(unwrapped_model, supported_classes): unwrapped_model.save_pretrained(output_dir, state_dict=full_state_dict, save_function=xm.save, safe_serialization=self.args.save_safetensors) else: logger.info('Trainer.model is not a `PreTrainedModel`, only saving its state dict.') xm.save(full_state_dict, os.path.join(output_dir, WEIGHTS_NAME)) elif not isinstance(model, supported_classes): if isinstance(self.accelerator.unwrap_model(model), supported_classes): self.accelerator.unwrap_model(model).save_pretrained(output_dir, is_main_process=self.args.should_save, state_dict=xm._maybe_convert_to_cpu(model.state_dict()), save_function=xm.save, safe_serialization=self.args.save_safetensors) else: logger.info('Trainer.model is not a `PreTrainedModel`, only saving its state dict.') state_dict = xm._maybe_convert_to_cpu(model.state_dict()) xm.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME)) else: model.save_pretrained(output_dir, is_main_process=self.args.should_save, save_function=xm.save, safe_serialization=self.args.save_safetensors, state_dict=xm._maybe_convert_to_cpu(model.state_dict())) if self.processing_class is not None and self.args.should_save: self.processing_class.save_pretrained(output_dir) def _save(self, output_dir: Optional[str]=None, state_dict=None): output_dir = output_dir if output_dir is not None else self.args.output_dir os.makedirs(output_dir, exist_ok=True) logger.info(f'Saving model checkpoint to {output_dir}') supported_classes = (PreTrainedModel,) if not is_peft_available() else (PreTrainedModel, PeftModel) if not isinstance(self.model, supported_classes): if state_dict is None: state_dict = self.model.state_dict() if isinstance(self.accelerator.unwrap_model(self.model, keep_torch_compile=False), supported_classes): self.accelerator.unwrap_model(self.model, keep_torch_compile=False).save_pretrained(output_dir, state_dict=state_dict, safe_serialization=self.args.save_safetensors) else: logger.info('Trainer.model is not a 
`PreTrainedModel`, only saving its state dict.') if self.args.save_safetensors: safetensors.torch.save_file(state_dict, os.path.join(output_dir, SAFE_WEIGHTS_NAME), metadata={'format': 'pt'}) else: torch.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME)) else: self.model.save_pretrained(output_dir, state_dict=state_dict, safe_serialization=self.args.save_safetensors) if self.processing_class is not None: self.processing_class.save_pretrained(output_dir) elif self.data_collator is not None and hasattr(self.data_collator, 'tokenizer') and (self.data_collator.tokenizer is not None): logger.info('Saving Trainer.data_collator.tokenizer by default as Trainer.processing_class is `None`') self.data_collator.tokenizer.save_pretrained(output_dir) torch.save(self.args, os.path.join(output_dir, TRAINING_ARGS_NAME)) def store_flos(self): if self.args.parallel_mode == ParallelMode.DISTRIBUTED: self.state.total_flos += distributed_broadcast_scalars([self.current_flos], device=self.args.device).sum().item() self.current_flos = 0 else: self.state.total_flos += self.current_flos self.current_flos = 0 def _sorted_checkpoints(self, output_dir=None, checkpoint_prefix=PREFIX_CHECKPOINT_DIR, use_mtime=False) -> list[str]: ordering_and_checkpoint_path = [] glob_checkpoints = [str(x) for x in Path(output_dir).glob(f'{checkpoint_prefix}-*') if os.path.isdir(x)] for path in glob_checkpoints: if use_mtime: ordering_and_checkpoint_path.append((os.path.getmtime(path), path)) else: regex_match = re.match(f'.*{checkpoint_prefix}-([0-9]+)', path) if regex_match is not None and regex_match.groups() is not None: ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path)) checkpoints_sorted = sorted(ordering_and_checkpoint_path) if use_mtime and len(ordering_and_checkpoint_path) > 1: mtime_diff = checkpoints_sorted[-1][0] - checkpoints_sorted[0][0] if mtime_diff < 1.0: warnings.warn('mtime may not be reliable on this filesystem, falling back to numerical ordering') return self._sorted_checkpoints(use_mtime=False, output_dir=output_dir, checkpoint_prefix=checkpoint_prefix) checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted] if self.state.best_model_checkpoint is not None and str(Path(self.state.best_model_checkpoint)) in checkpoints_sorted: best_model_index = checkpoints_sorted.index(str(Path(self.state.best_model_checkpoint))) for i in range(best_model_index, len(checkpoints_sorted) - 2): checkpoints_sorted[i], checkpoints_sorted[i + 1] = (checkpoints_sorted[i + 1], checkpoints_sorted[i]) return checkpoints_sorted def _rotate_checkpoints(self, use_mtime=False, output_dir=None) -> None: if self.args.save_total_limit is None or self.args.save_total_limit <= 0: return checkpoints_sorted = self._sorted_checkpoints(use_mtime=use_mtime, output_dir=output_dir) if len(checkpoints_sorted) <= self.args.save_total_limit: return save_total_limit = self.args.save_total_limit if self.state.best_model_checkpoint is not None and self.args.save_total_limit == 1 and (checkpoints_sorted[-1] != self.state.best_model_checkpoint): save_total_limit = 2 number_of_checkpoints_to_delete = max(0, len(checkpoints_sorted) - save_total_limit) checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete] for checkpoint in checkpoints_to_be_deleted: logger.info(f'Deleting older checkpoint [{checkpoint}] due to args.save_total_limit') shutil.rmtree(checkpoint, ignore_errors=True) def evaluate(self, eval_dataset: Optional[Union[Dataset, dict[str, Dataset]]]=None, ignore_keys: 
Optional[list[str]]=None, metric_key_prefix: str='eval') -> dict[str, float]: """ Run evaluation and return metrics. The calling script will be responsible for providing a method to compute metrics, as they are task-dependent (pass it to the init `compute_metrics` argument). You can also subclass and override this method to inject custom behavior. Args: eval_dataset (Union[`Dataset`, dict[str, `Dataset`]], *optional*): Pass a dataset if you wish to override `self.eval_dataset`. If it is a [`~datasets.Dataset`], columns not accepted by the `model.forward()` method are automatically removed. If it is a dictionary, it will evaluate on each dataset, prepending the dictionary key to the metric name. Datasets must implement the `__len__` method. <Tip> If you pass a dictionary with names of datasets as keys and datasets as values, evaluate will run separate evaluations on each dataset. This can be useful to monitor how training affects other datasets or simply to get a more fine-grained evaluation. When used with `load_best_model_at_end`, make sure `metric_for_best_model` references exactly one of the datasets. If you, for example, pass in `{"data1": data1, "data2": data2}` for two datasets `data1` and `data2`, you could specify `metric_for_best_model="eval_data1_loss"` for using the loss on `data1` and `metric_for_best_model="eval_data2_loss"` for the loss on `data2`. </Tip> ignore_keys (`list[str]`, *optional*): A list of keys in the output of your model (if it is a dictionary) that should be ignored when gathering predictions. metric_key_prefix (`str`, *optional*, defaults to `"eval"`): An optional prefix to be used as the metrics key prefix. For example, the metric "bleu" will be named "eval_bleu" if the prefix is "eval" (the default). Returns: A dictionary containing the evaluation loss and the potential metrics computed from the predictions. The dictionary also contains the epoch number, which comes from the training state. 
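Example (an illustrative sketch; `trainer`, `data1` and `data2` are placeholders):

```python
# Single dataset: metric keys come back as "eval_loss", "eval_runtime", ...
metrics = trainer.evaluate()

# Dictionary of datasets: the dictionary key is appended to the prefix,
# e.g. "eval_data1_loss" and "eval_data2_loss".
metrics = trainer.evaluate(eval_dataset={"data1": data1, "data2": data2})
```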
""" override = eval_dataset is not None eval_dataset = eval_dataset if override else self.eval_dataset if isinstance(eval_dataset, dict): metrics = {} for eval_dataset_name, _eval_dataset in eval_dataset.items(): dataset_metrics = self.evaluate(eval_dataset=_eval_dataset if override else eval_dataset_name, ignore_keys=ignore_keys, metric_key_prefix=f'{metric_key_prefix}_{eval_dataset_name}') metrics.update(dataset_metrics) return metrics self._memory_tracker.start() eval_dataloader = self.get_eval_dataloader(eval_dataset) if self.is_fsdp_xla_v2_enabled: eval_dataloader = tpu_spmd_dataloader(eval_dataloader) start_time = time.time() eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop output = eval_loop(eval_dataloader, description='Evaluation', prediction_loss_only=True if self.compute_metrics is None else None, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix) total_batch_size = self.args.eval_batch_size * self.args.world_size if f'{metric_key_prefix}_jit_compilation_time' in output.metrics: start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time'] if f'{metric_key_prefix}_model_preparation_time' in output.metrics: start_time += output.metrics[f'{metric_key_prefix}_model_preparation_time'] output.metrics.update(speed_metrics(metric_key_prefix, start_time, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size))) self.log(output.metrics) if DebugOption.TPU_METRICS_DEBUG in self.args.debug: xm.master_print(met.metrics_report()) self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, output.metrics) self._memory_tracker.stop_and_update_metrics(output.metrics) return output.metrics def predict(self, test_dataset: Dataset, ignore_keys: Optional[list[str]]=None, metric_key_prefix: str='test') -> PredictionOutput: """ Run prediction and returns predictions and potential metrics. Depending on the dataset and your use case, your test dataset may contain labels. In that case, this method will also return metrics, like in `evaluate()`. Args: test_dataset (`Dataset`): Dataset to run the predictions on. If it is an `datasets.Dataset`, columns not accepted by the `model.forward()` method are automatically removed. Has to implement the method `__len__` ignore_keys (`list[str]`, *optional*): A list of keys in the output of your model (if it is a dictionary) that should be ignored when gathering predictions. metric_key_prefix (`str`, *optional*, defaults to `"test"`): An optional prefix to be used as the metrics key prefix. For example the metrics "bleu" will be named "test_bleu" if the prefix is "test" (default) <Tip> If your predictions or labels have different sequence length (for instance because you're doing dynamic padding in a token classification task) the predictions will be padded (on the right) to allow for concatenation into one array. The padding index is -100. </Tip> Returns: *NamedTuple* A namedtuple with the following keys: - predictions (`np.ndarray`): The predictions on `test_dataset`. - label_ids (`np.ndarray`, *optional*): The labels (if the dataset contained some). - metrics (`dict[str, float]`, *optional*): The potential dictionary of metrics (if the dataset contained labels). 
""" self._memory_tracker.start() test_dataloader = self.get_test_dataloader(test_dataset) start_time = time.time() eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop output = eval_loop(test_dataloader, description='Prediction', ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix) total_batch_size = self.args.eval_batch_size * self.args.world_size if f'{metric_key_prefix}_jit_compilation_time' in output.metrics: start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time'] if f'{metric_key_prefix}_model_preparation_time' in output.metrics: start_time += output.metrics[f'{metric_key_prefix}_model_preparation_time'] output.metrics.update(speed_metrics(metric_key_prefix, start_time, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size))) self.control = self.callback_handler.on_predict(self.args, self.state, self.control, output.metrics) self._memory_tracker.stop_and_update_metrics(output.metrics) return PredictionOutput(predictions=output.predictions, label_ids=output.label_ids, metrics=output.metrics) def evaluation_loop(self, dataloader: DataLoader, description: str, prediction_loss_only: Optional[bool]=None, ignore_keys: Optional[list[str]]=None, metric_key_prefix: str='eval') -> EvalLoopOutput: """ Prediction/evaluation loop, shared by `Trainer.evaluate()` and `Trainer.predict()`. Works both with or without labels. """ args = self.args prediction_loss_only = prediction_loss_only if prediction_loss_only is not None else args.prediction_loss_only if self.is_deepspeed_enabled and self.deepspeed is None: _, _ = deepspeed_init(self, num_training_steps=0, inference=True) model = self._wrap_model(self.model, training=False, dataloader=dataloader) if len(self.accelerator._models) == 0 and model is self.model: start_time = time.time() model = self.accelerator.prepare(model) if self.is_deepspeed_enabled or (self.is_fsdp_enabled and self.accelerator.mixed_precision != 'fp8' and (not self.args.torch_compile)) else self.accelerator.prepare_model(model, evaluation_mode=True) self.model_preparation_time = round(time.time() - start_time, 4) if self.is_fsdp_enabled: self.model = model if model is not self.model: self.model_wrapped = model if self.is_deepspeed_enabled: self.deepspeed = self.model_wrapped if not self.is_in_train: if args.fp16_full_eval: model = model.to(dtype=torch.float16, device=args.device) elif args.bf16_full_eval: model = model.to(dtype=torch.bfloat16, device=args.device) batch_size = self.args.eval_batch_size logger.info(f'\n***** Running {description} *****') if has_length(dataloader): logger.info(f' Num examples = {self.num_examples(dataloader)}') else: logger.info(' Num examples: Unknown') logger.info(f' Batch size = {batch_size}') if hasattr(model, 'eval') and callable(model.eval): model.eval() if hasattr(self.optimizer, 'eval') and callable(self.optimizer.eval): self.optimizer.eval() self.callback_handler.eval_dataloader = dataloader eval_dataset = getattr(dataloader, 'dataset', None) if args.past_index >= 0: self._past = None all_losses = EvalLoopContainer(self.args.eval_do_concat_batches, padding_index=-100) all_preds = EvalLoopContainer(self.args.eval_do_concat_batches, padding_index=-100) all_labels = EvalLoopContainer(self.args.eval_do_concat_batches, padding_index=-100) all_inputs = EvalLoopContainer(self.args.eval_do_concat_batches, padding_index=-100) metrics = None eval_set_kwargs = {} observed_num_examples = 0 for step, inputs in enumerate(dataloader): 
observed_batch_size = find_batch_size(inputs) if observed_batch_size is not None: observed_num_examples += observed_batch_size if batch_size is None: batch_size = observed_batch_size losses, logits, labels = self.prediction_step(model, inputs, prediction_loss_only, ignore_keys=ignore_keys) main_input_name = getattr(self.model, 'main_input_name', 'input_ids') inputs_decode = self._prepare_input(inputs[main_input_name]) if 'inputs' in args.include_for_metrics else None if is_torch_xla_available(): xm.mark_step() if losses is not None: losses = self.gather_function(losses.repeat(batch_size)) all_losses.add(losses) if inputs_decode is not None: inputs_decode = self.accelerator.pad_across_processes(inputs_decode, dim=1, pad_index=-100) inputs_decode = self.gather_function(inputs_decode) if not self.args.batch_eval_metrics or description == 'Prediction': all_inputs.add(inputs_decode) if labels is not None: labels = self.accelerator.pad_across_processes(labels, dim=1, pad_index=-100) if logits is not None: logits = self.accelerator.pad_across_processes(logits, dim=1, pad_index=-100) if self.preprocess_logits_for_metrics is not None: logits = self.preprocess_logits_for_metrics(logits, labels) logits = self.gather_function(logits) if not self.args.batch_eval_metrics or description == 'Prediction': all_preds.add(logits) if labels is not None: labels = self.gather_function(labels) if not self.args.batch_eval_metrics or description == 'Prediction': all_labels.add(labels) self.control = self.callback_handler.on_prediction_step(args, self.state, self.control) if self.args.batch_eval_metrics: if self.compute_metrics is not None and logits is not None and (labels is not None): is_last_step = self.accelerator.gradient_state.end_of_dataloader batch_kwargs = {} batch_kwargs['losses'] = losses if 'loss' in args.include_for_metrics else None batch_kwargs['inputs'] = inputs if 'inputs' in args.include_for_metrics else None metrics = self.compute_metrics(EvalPrediction(predictions=logits, label_ids=labels, **batch_kwargs), compute_result=is_last_step) del losses, logits, labels, inputs torch.cuda.empty_cache() elif args.eval_accumulation_steps is not None and (step + 1) % args.eval_accumulation_steps == 0: all_losses.to_cpu_and_numpy() all_preds.to_cpu_and_numpy() all_labels.to_cpu_and_numpy() all_inputs.to_cpu_and_numpy() del losses, logits, labels, inputs torch.cuda.empty_cache() self.gather_function = self.accelerator.gather_for_metrics if args.past_index and hasattr(self, '_past'): delattr(self, '_past') all_losses = all_losses.get_arrays() all_preds = all_preds.get_arrays() all_labels = all_labels.get_arrays() all_inputs = all_inputs.get_arrays() if has_length(eval_dataset): num_samples = len(eval_dataset) elif isinstance(eval_dataset, IterableDatasetShard) and getattr(eval_dataset, 'num_examples', 0) > 0: num_samples = eval_dataset.num_examples elif has_length(dataloader): num_samples = self.num_examples(dataloader) else: num_samples = observed_num_examples if num_samples == 0 and observed_num_examples > 0: num_samples = observed_num_examples if self.compute_metrics is not None and all_preds is not None and (all_labels is not None) and (not self.args.batch_eval_metrics): eval_set_kwargs['losses'] = all_losses if 'loss' in args.include_for_metrics else None eval_set_kwargs['inputs'] = all_inputs if 'inputs' in args.include_for_metrics else None metrics = self.compute_metrics(EvalPrediction(predictions=all_preds, label_ids=all_labels, **eval_set_kwargs)) elif metrics is None: metrics = {} metrics = 
denumpify_detensorize(metrics) if isinstance(all_losses, list) and all_losses: metrics[f'{metric_key_prefix}_loss'] = np.concatenate(all_losses).mean().item() elif isinstance(all_losses, np.ndarray): metrics[f'{metric_key_prefix}_loss'] = all_losses.mean().item() if hasattr(self, 'jit_compilation_time'): metrics[f'{metric_key_prefix}_jit_compilation_time'] = self.jit_compilation_time if hasattr(self, 'model_preparation_time'): metrics[f'{metric_key_prefix}_model_preparation_time'] = self.model_preparation_time for key in list(metrics.keys()): if not key.startswith(f'{metric_key_prefix}_'): metrics[f'{metric_key_prefix}_{key}'] = metrics.pop(key) return EvalLoopOutput(predictions=all_preds, label_ids=all_labels, metrics=metrics, num_samples=num_samples) def _nested_gather(self, tensors, name=None): """ Gather value of `tensors` (tensor or list/tuple of nested tensors) and convert them to numpy before concatenating them to `gathered` """ if tensors is None: return if is_torch_xla_available(): if name is None: name = 'nested_gather' tensors = nested_xla_mesh_reduce(tensors, name) elif is_sagemaker_mp_enabled(): tensors = smp_gather(tensors) elif self.args.distributed_state is not None and self.args.distributed_state.distributed_type != 'NO' or (self.args.distributed_state is None and self.args.local_rank != -1): tensors = distributed_concat(tensors) return tensors def prediction_step(self, model: nn.Module, inputs: dict[str, Union[torch.Tensor, Any]], prediction_loss_only: bool, ignore_keys: Optional[list[str]]=None) -> tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]: """ Perform an evaluation step on `model` using `inputs`. Subclass and override to inject custom behavior. Args: model (`nn.Module`): The model to evaluate. inputs (`dict[str, Union[torch.Tensor, Any]]`): The inputs and targets of the model. The dictionary will be unpacked before being fed to the model. Most models expect the targets under the argument `labels`. Check your model's documentation for all accepted arguments. prediction_loss_only (`bool`): Whether or not to return the loss only. ignore_keys (`list[str]`, *optional*): A list of keys in the output of your model (if it is a dictionary) that should be ignored when gathering predictions. Return: tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]: A tuple with the loss, logits and labels (each being optional). 
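Example (an illustrative sketch of a subclass override; `LossOnlyTrainer` is a placeholder name, not part of the library):

```python
from transformers import Trainer

class LossOnlyTrainer(Trainer):
    def prediction_step(self, model, inputs, prediction_loss_only, ignore_keys=None):
        loss, logits, labels = super().prediction_step(
            model, inputs, prediction_loss_only, ignore_keys=ignore_keys
        )
        # Keep only the loss to reduce evaluation memory; logits/labels are dropped.
        return loss, None, None
```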
""" has_labels = False if len(self.label_names) == 0 else all((inputs.get(k) is not None for k in self.label_names)) return_loss = inputs.get('return_loss') if return_loss is None: return_loss = self.can_return_loss loss_without_labels = len(self.label_names) == 0 and return_loss inputs = self._prepare_inputs(inputs) if ignore_keys is None: if hasattr(self.model, 'config'): ignore_keys = getattr(self.model.config, 'keys_to_ignore_at_inference', ['past_key_values']) else: ignore_keys = [] if has_labels or loss_without_labels: labels = nested_detach(tuple((inputs.get(name) for name in self.label_names))) if len(labels) == 1: labels = labels[0] else: labels = None with torch.no_grad(): if is_sagemaker_mp_enabled(): raw_outputs = smp_forward_only(model, inputs) if has_labels or loss_without_labels: if isinstance(raw_outputs, dict): loss_mb = raw_outputs['loss'] logits_mb = tuple((v for k, v in raw_outputs.items() if k not in ignore_keys + ['loss'])) else: loss_mb = raw_outputs[0] logits_mb = raw_outputs[1:] loss = loss_mb.reduce_mean().detach().cpu() logits = smp_nested_concat(logits_mb) else: loss = None if isinstance(raw_outputs, dict): logits_mb = tuple((v for k, v in raw_outputs.items() if k not in ignore_keys)) else: logits_mb = raw_outputs logits = smp_nested_concat(logits_mb) elif has_labels or loss_without_labels: with self.compute_loss_context_manager(): loss, outputs = self.compute_loss(model, inputs, return_outputs=True) loss = loss.detach().mean() if isinstance(outputs, dict): logits = tuple((v for k, v in outputs.items() if k not in ignore_keys + ['loss'])) else: logits = outputs[1:] else: loss = None with self.compute_loss_context_manager(): outputs = model(**inputs) if isinstance(outputs, dict): logits = tuple((v for k, v in outputs.items() if k not in ignore_keys)) else: logits = outputs if self.args.past_index >= 0: self._past = outputs[self.args.past_index - 1] if prediction_loss_only: return (loss, None, None) logits = nested_detach(logits) if len(logits) == 1: logits = logits[0] return (loss, logits, labels) def floating_point_ops(self, inputs: dict[str, Union[torch.Tensor, Any]]): """ For models that inherit from [`PreTrainedModel`], uses that method to compute the number of floating point operations for every backward + forward pass. If using another model, either implement such a method in the model or subclass and override this method. Args: inputs (`dict[str, Union[torch.Tensor, Any]]`): The inputs and targets of the model. Returns: `int`: The number of floating-point operations. """ if hasattr(self.model, 'floating_point_ops'): return self.model.floating_point_ops(inputs) else: return 0 def init_hf_repo(self, token: Optional[str]=None): """ Initializes a git repo in `self.args.hub_model_id`. 
""" if not self.is_world_process_zero(): return if self.args.hub_model_id is None: repo_name = Path(self.args.output_dir).absolute().name else: repo_name = self.args.hub_model_id token = token if token is not None else self.args.hub_token repo_url = create_repo(repo_name, token=token, private=self.args.hub_private_repo, exist_ok=True) self.hub_model_id = repo_url.repo_id self.push_in_progress = None def create_model_card(self, language: Optional[str]=None, license: Optional[str]=None, tags: Union[str, list[str], None]=None, model_name: Optional[str]=None, finetuned_from: Optional[str]=None, tasks: Union[str, list[str], None]=None, dataset_tags: Union[str, list[str], None]=None, dataset: Union[str, list[str], None]=None, dataset_args: Union[str, list[str], None]=None): """ Creates a draft of a model card using the information available to the `Trainer`. Args: language (`str`, *optional*): The language of the model (if applicable) license (`str`, *optional*): The license of the model. Will default to the license of the pretrained model used, if the original model given to the `Trainer` comes from a repo on the Hub. tags (`str` or `list[str]`, *optional*): Some tags to be included in the metadata of the model card. model_name (`str`, *optional*): The name of the model. finetuned_from (`str`, *optional*): The name of the model used to fine-tune this one (if applicable). Will default to the name of the repo of the original model given to the `Trainer` (if it comes from the Hub). tasks (`str` or `list[str]`, *optional*): One or several task identifiers, to be included in the metadata of the model card. dataset_tags (`str` or `list[str]`, *optional*): One or several dataset tags, to be included in the metadata of the model card. dataset (`str` or `list[str]`, *optional*): One or several dataset identifiers, to be included in the metadata of the model card. dataset_args (`str` or `list[str]`, *optional*): One or several dataset arguments, to be included in the metadata of the model card. 
""" if not self.is_world_process_zero(): return model_card_filepath = os.path.join(self.args.output_dir, 'README.md') is_peft_library = False if os.path.exists(model_card_filepath): library_name = ModelCard.load(model_card_filepath).data.get('library_name') is_peft_library = library_name == 'peft' existing_tags = ModelCard.load(model_card_filepath).data.tags if tags is not None and existing_tags is not None: if isinstance(tags, str): tags = [tags] for tag in existing_tags: if tag not in tags: tags.append(tag) training_summary = TrainingSummary.from_trainer(self, language=language, license=license, tags=tags, model_name=model_name, finetuned_from=finetuned_from, tasks=tasks, dataset_tags=dataset_tags, dataset=dataset, dataset_args=dataset_args) model_card = training_summary.to_model_card() with open(model_card_filepath, 'w') as f: f.write(model_card) if is_peft_library: self.accelerator.unwrap_model(self.model).create_or_update_model_card(self.args.output_dir) def _push_from_checkpoint(self, checkpoint_folder): if not self.is_world_process_zero() or self.args.hub_strategy == HubStrategy.END: return if not self.args.hub_always_push and self.push_in_progress is not None and (not self.push_in_progress.is_done()): return output_dir = self.args.output_dir modeling_files = [CONFIG_NAME, GENERATION_CONFIG_NAME, WEIGHTS_NAME, SAFE_WEIGHTS_NAME] for index_file in [WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_INDEX_NAME]: index_path = os.path.join(checkpoint_folder, index_file) if os.path.isfile(index_path): modeling_files.append(index_file) with open(index_path) as f: index = json.loads(f.read()) shard_files = list(set(index['weight_map'].values())) modeling_files.extend(shard_files) if is_peft_available(): modeling_files.extend([ADAPTER_CONFIG_NAME, ADAPTER_WEIGHTS_NAME, ADAPTER_SAFE_WEIGHTS_NAME]) for modeling_file in modeling_files: if os.path.isfile(os.path.join(checkpoint_folder, modeling_file)): shutil.copy(os.path.join(checkpoint_folder, modeling_file), os.path.join(output_dir, modeling_file)) if self.processing_class is not None: self.processing_class.save_pretrained(output_dir) torch.save(self.args, os.path.join(output_dir, TRAINING_ARGS_NAME)) if self.args.save_strategy == SaveStrategy.STEPS: commit_message = f'Training in progress, step {self.state.global_step}' else: commit_message = f'Training in progress, epoch {int(self.state.epoch)}' model_push_job = upload_folder(repo_id=self.hub_model_id, folder_path=output_dir, commit_message=commit_message, token=self.args.hub_token, run_as_future=True, ignore_patterns=['_*', f'{PREFIX_CHECKPOINT_DIR}-*'], revision=self.args.hub_revision) push_jobs = [model_push_job] if self.args.hub_strategy in [HubStrategy.CHECKPOINT, HubStrategy.ALL_CHECKPOINTS]: path_in_repo = 'last-checkpoint' if self.args.hub_strategy == HubStrategy.CHECKPOINT else Path(checkpoint_folder).name checkpoint_push = upload_folder(repo_id=self.hub_model_id, folder_path=checkpoint_folder, path_in_repo=path_in_repo, commit_message=commit_message + ', checkpoint', token=self.args.hub_token, run_as_future=True, revision=self.args.hub_revision) push_jobs.append(checkpoint_push) if self.push_in_progress is None or self.push_in_progress.is_done(): self.push_in_progress = PushInProgress(push_jobs) else: self.push_in_progress.jobs.extend(push_jobs) def _finish_current_push(self): if not hasattr(self, 'push_in_progress'): return if self.push_in_progress is not None and (not self.push_in_progress.is_done()): logger.info('Waiting for the current checkpoint push to be finished, this might take a couple 
of minutes.') self.push_in_progress.wait_until_done() def push_to_hub(self, commit_message: Optional[str]='End of training', blocking: bool=True, token: Optional[str]=None, revision: Optional[str]=None, **kwargs) -> str: """ Upload `self.model` and `self.processing_class` to the 🤗 model hub on the repo `self.args.hub_model_id`. Parameters: commit_message (`str`, *optional*, defaults to `"End of training"`): Message to commit while pushing. blocking (`bool`, *optional*, defaults to `True`): Whether the function should return only when the `git push` has finished. token (`str`, *optional*, defaults to `None`): Token with write permission that overrides the `hub_token` set in the Trainer's original `TrainingArguments`. revision (`str`, *optional*): The git revision to commit from. Defaults to the head of the "main" branch. kwargs (`dict[str, Any]`, *optional*): Additional keyword arguments passed along to [`~Trainer.create_model_card`]. Returns: The URL of the repository where the model was pushed if `blocking=True`, or a `Future` object tracking the progress of the commit if `blocking=False`. """ model_name = kwargs.pop('model_name', None) if model_name is None and self.args.should_save: if self.args.hub_model_id is None: model_name = Path(self.args.output_dir).name else: model_name = self.args.hub_model_id.split('/')[-1] token = token if token is not None else self.args.hub_token if self.hub_model_id is None: self.init_hf_repo(token=token) self.save_model(_internal_call=True) if not self.is_world_process_zero(): return if getattr(self.model, 'model_tags', None) is not None: if 'tags' not in kwargs: kwargs['tags'] = [] if isinstance(kwargs['tags'], str): kwargs['tags'] = [kwargs['tags']] for model_tag in self.model.model_tags: if model_tag not in kwargs['tags']: kwargs['tags'].append(model_tag) self.create_model_card(model_name=model_name, **kwargs) if revision is None: revision = self.args.hub_revision self._finish_current_push() return upload_folder(repo_id=self.hub_model_id, folder_path=self.args.output_dir, commit_message=commit_message, token=token, run_as_future=not blocking, ignore_patterns=['_*', f'{PREFIX_CHECKPOINT_DIR}-*'], revision=revision) def prediction_loop(self, dataloader: DataLoader, description: str, prediction_loss_only: Optional[bool]=None, ignore_keys: Optional[list[str]]=None, metric_key_prefix: str='eval') -> EvalLoopOutput: """ Prediction/evaluation loop, shared by `Trainer.evaluate()` and `Trainer.predict()`. Works both with or without labels. 
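Example (an illustrative sketch; this legacy loop is selected through `TrainingArguments.use_legacy_prediction_loop` rather than called directly, and `output_dir` is a placeholder):

```python
from transformers import TrainingArguments

args = TrainingArguments(output_dir="out", use_legacy_prediction_loop=True)
# With this flag set, `Trainer.evaluate()` and `Trainer.predict()` route through `prediction_loop`.
```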
""" args = self.args if not has_length(dataloader): raise ValueError('dataloader must implement a working __len__') prediction_loss_only = prediction_loss_only if prediction_loss_only is not None else args.prediction_loss_only if self.is_deepspeed_enabled and self.deepspeed is None: _, _ = deepspeed_init(self, num_training_steps=0, inference=True) model = self._wrap_model(self.model, training=False, dataloader=dataloader) if len(self.accelerator._models) == 0 and model is self.model: model = self.accelerator.prepare(model) if self.is_deepspeed_enabled or self.is_fsdp_enabled else self.accelerator.prepare_model(model, evaluation_mode=True) if self.is_fsdp_enabled: self.model = model if model is not self.model: self.model_wrapped = model if self.is_deepspeed_enabled: self.deepspeed = self.model_wrapped if not self.is_in_train: if args.fp16_full_eval: model = model.to(dtype=torch.float16, device=args.device) elif args.bf16_full_eval: model = model.to(dtype=torch.bfloat16, device=args.device) batch_size = dataloader.total_batch_size if getattr(dataloader, '_is_accelerate_prepared', False) else dataloader.batch_size if batch_size is None: raise ValueError('Batch size cannot be None. Ensure the dataloader has a valid batch_size or total_batch_size.') num_examples = self.num_examples(dataloader) logger.info(f'\n***** Running {description} *****') logger.info(f' Num examples = {num_examples}') logger.info(f' Batch size = {batch_size}') losses_host: Optional[torch.Tensor] = None preds_host: Union[torch.Tensor, list[torch.Tensor], None] = None labels_host: Union[torch.Tensor, list[torch.Tensor], None] = None inputs_host: Union[torch.Tensor, list[torch.Tensor], None] = None metrics: Optional[dict] = None eval_set_kwargs: dict = {} world_size = max(1, args.world_size) eval_losses_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=batch_size) if not prediction_loss_only: make_multiple_of = None if hasattr(dataloader, 'sampler') and isinstance(dataloader.sampler, SequentialDistributedSampler): make_multiple_of = dataloader.sampler.batch_size preds_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=make_multiple_of) labels_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=make_multiple_of) inputs_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=make_multiple_of) model.eval() if hasattr(self.optimizer, 'eval') and callable(self.optimizer.eval): self.optimizer.eval() if args.past_index >= 0: self._past = None self.callback_handler.eval_dataloader = dataloader for step, inputs in enumerate(dataloader): loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only, ignore_keys=ignore_keys) main_input_name = getattr(self.model, 'main_input_name', 'input_ids') inputs_decode = self._prepare_input(inputs[main_input_name]) if 'inputs' in args.include_for_metrics else None if loss is not None: losses = loss.repeat(batch_size) losses_host = losses if losses_host is None else torch.cat((losses_host, losses), dim=0) if logits is not None: preds_host = logits if preds_host is None else nested_concat(preds_host, logits, padding_index=-100) if labels is not None: labels_host = labels if labels_host is None else nested_concat(labels_host, labels, padding_index=-100) if inputs_decode is not None: inputs_host = inputs_decode if inputs_host is None else nested_concat(inputs_host, inputs_decode, padding_index=-100) self.control = self.callback_handler.on_prediction_step(args, self.state, 
self.control) if self.args.batch_eval_metrics: if self.compute_metrics is not None and preds_host is not None and (labels_host is not None): is_last_step = self.accelerator.gradient_state.end_of_dataloader batch_kwargs = {} batch_kwargs['losses'] = losses_host if 'loss' in args.include_for_metrics else None batch_kwargs['inputs'] = inputs_host if 'inputs' in args.include_for_metrics else None metrics = self.compute_metrics(EvalPrediction(predictions=preds_host, label_ids=labels_host, **batch_kwargs), compute_result=is_last_step) if self.args.batch_eval_metrics or (args.eval_accumulation_steps is not None and (step + 1) % args.eval_accumulation_steps == 0): eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, 'eval_losses')) if not prediction_loss_only: preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, 'eval_preds')) labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, 'eval_label_ids')) inputs_gatherer.add_arrays(self._gather_and_numpify(inputs_host, 'eval_inputs_ids')) del losses_host, preds_host, labels_host, inputs_host torch.cuda.empty_cache() losses_host, preds_host, labels_host, inputs_host = (None, None, None, None) if args.past_index and hasattr(self, '_past'): delattr(self, '_past') eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, 'eval_losses')) if not prediction_loss_only: preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, 'eval_preds')) labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, 'eval_label_ids')) inputs_gatherer.add_arrays(self._gather_and_numpify(inputs_host, 'eval_inputs_ids')) eval_loss = eval_losses_gatherer.finalize() preds = preds_gatherer.finalize() if not prediction_loss_only else None label_ids = labels_gatherer.finalize() if not prediction_loss_only else None inputs_ids = inputs_gatherer.finalize() if not prediction_loss_only else None if self.compute_metrics is not None and preds is not None and (label_ids is not None) and (not self.args.batch_eval_metrics): eval_set_kwargs['losses'] = eval_loss if 'loss' in args.include_for_metrics else None eval_set_kwargs['inputs'] = inputs_ids if 'inputs' in args.include_for_metrics else None metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids, **eval_set_kwargs)) elif metrics is None: metrics = {} metrics = denumpify_detensorize(metrics) if eval_loss is not None: metrics[f'{metric_key_prefix}_loss'] = eval_loss.mean().item() for key in list(metrics.keys()): if not key.startswith(f'{metric_key_prefix}_'): metrics[f'{metric_key_prefix}_{key}'] = metrics.pop(key) return EvalLoopOutput(predictions=preds, label_ids=label_ids, metrics=metrics, num_samples=num_examples) def _gather_and_numpify(self, tensors, name): """ Gather value of `tensors` (tensor or list/tuple of nested tensors) and convert them to numpy before concatenating them to `gathered` """ if tensors is None: return if is_torch_xla_available(): tensors = nested_xla_mesh_reduce(tensors, name) elif is_sagemaker_mp_enabled(): tensors = smp_gather(tensors) elif self.args.parallel_mode == ParallelMode.DISTRIBUTED: tensors = distributed_concat(tensors) return nested_numpify(tensors) def _add_sm_patterns_to_gitignore(self) -> None: """Add SageMaker Checkpointing patterns to .gitignore file.""" if not self.is_world_process_zero(): return patterns = ['*.sagemaker-uploading', '*.sagemaker-uploaded'] if os.path.exists(os.path.join(self.repo.local_dir, '.gitignore')): with open(os.path.join(self.repo.local_dir, '.gitignore')) as f: current_content = 
f.read() else: current_content = '' content = current_content for pattern in patterns: if pattern not in content: if content.endswith('\n'): content += pattern else: content += f'\n{pattern}' if content != current_content: with open(os.path.join(self.repo.local_dir, '.gitignore'), 'w') as f: logger.debug(f'Writing .gitignore file. Content: {content}') f.write(content) self.repo.git_add('.gitignore') time.sleep(0.5) if not self.repo.is_repo_clean(): self.repo.git_commit('Add *.sagemaker patterns to .gitignore.') self.repo.git_push() def create_accelerator_and_postprocess(self): grad_acc_kwargs = {} if is_accelerate_available('0.28.0') and self.args.accelerator_config.gradient_accumulation_kwargs is not None: grad_acc_kwargs = self.args.accelerator_config.gradient_accumulation_kwargs if 'num_steps' in grad_acc_kwargs: if self.args.gradient_accumulation_steps > 1: raise ValueError("The `AcceleratorConfig`'s `num_steps` is set but `gradient_accumulation_steps` is greater than 1 in the passed `TrainingArguments`If using the passed `AcceleratorConfig` is desired, do not set the `TrainingArguments` `gradient_accumulation_steps`.") else: self.args.gradient_accumulation_steps = grad_acc_kwargs['num_steps'] accelerator_config = self.args.accelerator_config.to_dict() if is_accelerate_available('0.28.0'): dataloader_params = ['split_batches', 'dispatch_batches', 'even_batches', 'use_seedable_sampler'] dataloader_config = DataLoaderConfiguration(**{param: accelerator_config.pop(param) for param in dataloader_params}) if is_accelerate_available('1.1.0'): dataloader_config.data_seed = self.args.data_seed non_blocking = accelerator_config.pop('non_blocking') if not is_accelerate_available('0.30.0'): if non_blocking: raise ImportError('`non_blocking` is only supported in accelerate v0.30.0 and above. Please upgrade accelerate to use this feature.') else: if non_blocking and (not self.args.dataloader_pin_memory): logger.warning("`non_blocking` is enabled but `dataloader_pin_memory` is not. For the best performance, it's recommended to enable both.") dataloader_config.non_blocking = non_blocking accelerator_config.pop('gradient_accumulation_kwargs') args = {'deepspeed_plugin': self.args.deepspeed_plugin} if self.args.parallelism_config is not None: if not is_accelerate_available('1.10.1'): raise ImportError('ParallelismConfig requires accelerate v1.10.1 and above. 
Please upgrade accelerate to use this feature.') args['parallelism_config'] = self.args.parallelism_config if is_accelerate_available('0.28.0'): args['dataloader_config'] = dataloader_config else: args.update(accelerator_config) if hasattr(self.model, 'tp_size') and self.model.tp_size is not None and (self.model.tp_size > 1): self.is_tp_enabled = True if version.parse(accelerate_version) > version.parse('1.3.0'): args['torch_tp_plugin'] = TorchTensorParallelPlugin(tp_size=self.model.tp_size) else: raise ValueError('Requires accelerate>1.3.0 to use Tensor Parallelism.') self.accelerator = Accelerator(**args) self.gather_function = self.accelerator.gather_for_metrics if 'use_gather_object' in inspect.signature(self.gather_function).parameters: self.gather_function = functools.partial(self.gather_function, use_gather_object=self.args.eval_use_gather_object) self.is_deepspeed_enabled = getattr(self.accelerator.state, 'deepspeed_plugin', None) is not None self.is_fsdp_enabled = getattr(self.accelerator.state, 'fsdp_plugin', None) is not None self.is_tp_enabled = getattr(self.accelerator.state, 'torch_tp_plugin', None) is not None if self.is_fsdp_enabled: fsdp_plugin = self.accelerator.state.fsdp_plugin for param in ['limit_all_gathers', 'activation_checkpointing']: setattr(fsdp_plugin, param, self.args.fsdp_config.get(param, getattr(fsdp_plugin, param))) if fsdp_plugin.activation_checkpointing and self.args.gradient_checkpointing: raise ValueError("The activation_checkpointing in FSDP config and the gradient_checkpointing in training arg can't be set to True simultaneously. Please use FSDP's activation_checkpointing logic when using FSDP.") if self.is_deepspeed_enabled and getattr(self.args, 'hf_deepspeed_config', None) is None: self.propagate_args_to_deepspeed() if self.args.save_only_model and (self.is_deepspeed_enabled or self.is_fsdp_enabled) and self.args.load_best_model_at_end: wrapper = 'DeepSpeed' if self.is_deepspeed_enabled else 'FSDP' raise ValueError(f"{wrapper} can't be used with `save_only_model` along with `load_best_model_at_end`.") if self.is_deepspeed_enabled and self.accelerator.state.deepspeed_plugin.zero_stage == 3 and self.args.auto_find_batch_size: raise ValueError("`auto_find_batch_size` isn't supported yet with DeepSpeed Zero-3. 
Please consider using Zero-2, Zero-1, or FSDP") if self.args.save_only_model and self.is_fsdp_enabled and ('SHARDED_STATE_DICT' in str(self.accelerator.state.fsdp_plugin.state_dict_type)): raise ValueError("save_only_model option is not compatible with FSDP state dict type 'SHARDED_STATE_DICT'") def propagate_args_to_deepspeed(self, auto_find_batch_size=False): """ Sets values in the deepspeed plugin based on the Trainer args """ from transformers.integrations.deepspeed import HfTrainerDeepSpeedConfig ds_plugin = self.accelerator.state.deepspeed_plugin ds_plugin.hf_ds_config = HfTrainerDeepSpeedConfig(ds_plugin.hf_ds_config.config) ds_plugin.deepspeed_config = ds_plugin.hf_ds_config.config ds_plugin.hf_ds_config.trainer_config_process(self.args, auto_find_batch_size) def _fsdp_qlora_plugin_updates(self): if self.is_fsdp_enabled and _is_peft_model(self.model): from peft import PeftConfig from peft.utils.other import fsdp_auto_wrap_policy if isinstance(self.model.active_peft_config, PeftConfig): self.accelerator.state.fsdp_plugin.auto_wrap_policy = fsdp_auto_wrap_policy(self.model) if getattr(self.model, 'quantization_method', None) == QuantizationMethod.BITS_AND_BYTES and self.model.hf_quantizer.quantization_config.bnb_4bit_quant_storage.is_floating_point and (version.parse(accelerate_version) > version.parse('0.27.0')): self.accelerator.state.fsdp_plugin.set_mixed_precision(self.model.hf_quantizer.quantization_config.bnb_4bit_quant_storage, override=True) def get_batch_samples(self, epoch_iterator: Iterator, num_batches: int, device: torch.device) -> tuple[list, Optional[Union[torch.Tensor, int]]]: """ Collects a specified number of batches from the epoch iterator and optionally counts the number of items in the batches to properly scale the loss. """ batch_samples = [] num_items_in_batch = None for _ in range(num_batches): try: batch_samples.append(next(epoch_iterator)) except StopIteration: break count_num_items_in_batch = len(batch_samples) > 0 and 'labels' in batch_samples[0] and (self.model_accepts_loss_kwargs or self.compute_loss_func is not None) if count_num_items_in_batch: try: num_items_in_batch = sum([batch['labels'].ne(-100).sum() for batch in batch_samples]) except (TypeError, AttributeError): pass if num_items_in_batch is not None: if self.args.average_tokens_across_devices and self.args.world_size >= 1: num_items_in_batch = self.accelerator.gather(num_items_in_batch.to(device)).sum() elif self.args.n_gpu >= 1: num_items_in_batch = num_items_in_batch // self.args.n_gpu if torch.is_tensor(num_items_in_batch): num_items_in_batch = num_items_in_batch.to(device) if self.args.n_gpu > 1 and num_items_in_batch.dim() == 0: num_items_in_batch = num_items_in_batch.unsqueeze(0).expand(self.args.n_gpu, -1) if (pc := getattr(self.accelerator, 'parallelism_config', None)): num_items_in_batch = num_items_in_batch // pc.non_data_parallel_size return (batch_samples, num_items_in_batch) def set_initial_training_values(self, args: TrainingArguments, dataloader: DataLoader, total_train_batch_size: int): """ Calculates and returns the following values: - `num_train_epochs` - `num_update_steps_per_epoch` - `num_examples` - `num_train_samples` - `epoch_based` - `len_dataloader` - `max_steps` """ max_steps = args.max_steps epoch_based = max_steps < 0 len_dataloader = len(dataloader) if has_length(dataloader) else None if len_dataloader is not None: num_update_steps_per_epoch = max(len_dataloader // args.gradient_accumulation_steps + int(len_dataloader % args.gradient_accumulation_steps > 0), 1) if 
epoch_based: max_steps = math.ceil(args.num_train_epochs * num_update_steps_per_epoch) if len_dataloader: num_examples = self.num_examples(dataloader) if args.max_steps > 0: num_train_epochs = max_steps // num_update_steps_per_epoch + int(max_steps % num_update_steps_per_epoch > 0) num_train_samples = max_steps * total_train_batch_size else: num_train_epochs = math.ceil(args.num_train_epochs) num_train_samples = self.num_examples(dataloader) * args.num_train_epochs elif args.max_steps > 0: num_train_epochs = sys.maxsize num_update_steps_per_epoch = max_steps num_examples = total_train_batch_size * args.max_steps num_train_samples = args.max_steps * total_train_batch_size else: raise ValueError(f'args.max_steps must be set to a positive value if dataloader does not have a length, was {args.max_steps}') return (num_train_epochs, num_update_steps_per_epoch, num_examples, num_train_samples, epoch_based, len_dataloader, max_steps)
null
106
59
52
6
37
8
9
0.24
0
94
41
2
83
57
85
85
4,852
649
3,384
690
3,159
826
2,235
571
2,109
81
0
7
862
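The `set_initial_training_values` helper in the Trainer code above derives the number of optimizer update steps per epoch and the total `max_steps` from the dataloader length, `gradient_accumulation_steps`, and `num_train_epochs`. Below is a minimal standalone sketch of that arithmetic for the epoch-based case (a dataloader with a known length and `max_steps < 0`); the function name and the example numbers are illustrative, not part of the Trainer API.

```python
import math

def estimate_training_steps(num_batches_per_epoch: int,
                            gradient_accumulation_steps: int,
                            num_train_epochs: float) -> tuple[int, int]:
    """Mirror of the epoch-based branch: one update step consumes
    `gradient_accumulation_steps` batches, and a leftover partial group
    at the end of an epoch still counts as one step."""
    num_update_steps_per_epoch = max(
        num_batches_per_epoch // gradient_accumulation_steps
        + int(num_batches_per_epoch % gradient_accumulation_steps > 0),
        1,
    )
    max_steps = math.ceil(num_train_epochs * num_update_steps_per_epoch)
    return num_update_steps_per_epoch, max_steps

# 1000 batches per epoch, accumulation of 8, 3 epochs -> 125 steps/epoch, 375 total
print(estimate_training_steps(1000, 8, 3))
```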
6,505
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/trainer_callback.py
transformers.trainer_callback.CallbackHandler
from .training_args import TrainingArguments class CallbackHandler(TrainerCallback): """Internal class that just calls the list of callbacks in order.""" def __init__(self, callbacks, model, processing_class, optimizer, lr_scheduler): self.callbacks = [] for cb in callbacks: self.add_callback(cb) self.model = model self.processing_class = processing_class self.optimizer = optimizer self.lr_scheduler = lr_scheduler self.train_dataloader = None self.eval_dataloader = None if not any((isinstance(cb, DefaultFlowCallback) for cb in self.callbacks)): logger.warning("The Trainer will not work properly if you don't have a `DefaultFlowCallback` in its callbacks. You\n" + 'should add one before training with `trainer.add_callback(DefaultFlowCallback). The current list of' + 'callbacks is\n:' + self.callback_list) def add_callback(self, callback): cb = callback() if isinstance(callback, type) else callback cb_class = callback if isinstance(callback, type) else callback.__class__ if cb_class in [c.__class__ for c in self.callbacks]: logger.warning(f'You are adding a {cb_class} to the callbacks of this Trainer, but there is already one. The current' + 'list of callbacks is\n:' + self.callback_list) self.callbacks.append(cb) def pop_callback(self, callback): if isinstance(callback, type): for cb in self.callbacks: if isinstance(cb, callback): self.callbacks.remove(cb) return cb else: for cb in self.callbacks: if cb == callback: self.callbacks.remove(cb) return cb def remove_callback(self, callback): if isinstance(callback, type): for cb in self.callbacks: if isinstance(cb, callback): self.callbacks.remove(cb) return else: self.callbacks.remove(callback) @property def callback_list(self): return '\n'.join((cb.__class__.__name__ for cb in self.callbacks)) def on_init_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl): return self.call_event('on_init_end', args, state, control) def on_train_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl): control.should_training_stop = False return self.call_event('on_train_begin', args, state, control) def on_train_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl): return self.call_event('on_train_end', args, state, control) def on_epoch_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl): control.should_epoch_stop = False return self.call_event('on_epoch_begin', args, state, control) def on_epoch_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl): return self.call_event('on_epoch_end', args, state, control) def on_step_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl): control.should_log = False control.should_evaluate = False control.should_save = False return self.call_event('on_step_begin', args, state, control) def on_pre_optimizer_step(self, args: TrainingArguments, state: TrainerState, control: TrainerControl): return self.call_event('on_pre_optimizer_step', args, state, control) def on_optimizer_step(self, args: TrainingArguments, state: TrainerState, control: TrainerControl): return self.call_event('on_optimizer_step', args, state, control) def on_substep_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl): return self.call_event('on_substep_end', args, state, control) def on_step_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl): return self.call_event('on_step_end', args, state, control) def on_evaluate(self, args: 
TrainingArguments, state: TrainerState, control: TrainerControl, metrics): control.should_evaluate = False return self.call_event('on_evaluate', args, state, control, metrics=metrics) def on_predict(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, metrics): return self.call_event('on_predict', args, state, control, metrics=metrics) def on_save(self, args: TrainingArguments, state: TrainerState, control: TrainerControl): control.should_save = False return self.call_event('on_save', args, state, control) def on_log(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, logs): control.should_log = False return self.call_event('on_log', args, state, control, logs=logs) def on_prediction_step(self, args: TrainingArguments, state: TrainerState, control: TrainerControl): return self.call_event('on_prediction_step', args, state, control) def call_event(self, event, args, state, control, **kwargs): for callback in self.callbacks: result = getattr(callback, event)(args, state, control, model=self.model, processing_class=self.processing_class, optimizer=self.optimizer, lr_scheduler=self.lr_scheduler, train_dataloader=self.train_dataloader, eval_dataloader=self.eval_dataloader, **kwargs) if result is not None: control = result return control
class CallbackHandler(TrainerCallback): '''Internal class that just calls the list of callbacks in order.''' def __init__(self, callbacks, model, processing_class, optimizer, lr_scheduler): pass def add_callback(self, callback): pass def pop_callback(self, callback): pass def remove_callback(self, callback): pass @property def callback_list(self): pass def on_init_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl): pass def on_train_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl): pass def on_train_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl): pass def on_epoch_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl): pass def on_epoch_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl): pass def on_step_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl): pass def on_pre_optimizer_step(self, args: TrainingArguments, state: TrainerState, control: TrainerControl): pass def on_optimizer_step(self, args: TrainingArguments, state: TrainerState, control: TrainerControl): pass def on_substep_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl): pass def on_step_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl): pass def on_evaluate(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, metrics): pass def on_predict(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, metrics): pass def on_save(self, args: TrainingArguments, state: TrainerState, control: TrainerControl): pass def on_log(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, logs): pass def on_prediction_step(self, args: TrainingArguments, state: TrainerState, control: TrainerControl): pass def call_event(self, event, args, state, control, **kwargs): pass
23
1
5
0
5
0
2
0.02
1
5
4
0
21
7
21
36
129
22
105
37
82
2
82
36
60
6
1
3
36
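The `CallbackHandler.call_event` loop in the record above fires each event on every registered callback in order and lets any callback replace the `control` object by returning a non-None value. The following is a small standalone sketch of that dispatch contract; the class names here are illustrative stand-ins, not the transformers API.

```python
class Control:
    should_log = False

class LoggingCallback:
    def on_step_end(self, control, **kwargs):
        control.should_log = True   # mutate in place, return nothing
        return None

class OverridingCallback:
    def on_step_end(self, control, **kwargs):
        return Control()            # returning a value replaces `control`

def call_event(callbacks, event, control, **kwargs):
    for cb in callbacks:
        result = getattr(cb, event)(control=control, **kwargs)
        if result is not None:
            control = result        # same rule as CallbackHandler.call_event
    return control

control = call_event([LoggingCallback(), OverridingCallback()], "on_step_end", Control())
print(control.should_log)  # False: the second callback swapped in a fresh control object
```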
6,506
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/trainer_callback.py
transformers.trainer_callback.DefaultFlowCallback
from .training_args import TrainingArguments from .trainer_utils import HPSearchBackend, IntervalStrategy, SaveStrategy, has_length class DefaultFlowCallback(TrainerCallback): """ A [`TrainerCallback`] that handles the default flow of the training loop for logs, evaluation and checkpoints. """ def on_step_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs): if state.global_step == 1 and args.logging_first_step: control.should_log = True if args.logging_strategy == IntervalStrategy.STEPS and state.global_step % state.logging_steps == 0: control.should_log = True if args.eval_strategy == IntervalStrategy.STEPS and state.global_step % state.eval_steps == 0 and (args.eval_delay <= state.global_step): control.should_evaluate = True if args.save_strategy == SaveStrategy.STEPS and state.save_steps > 0 and (state.global_step % state.save_steps == 0): control.should_save = True if state.global_step >= state.max_steps: control.should_training_stop = True if args.save_strategy == SaveStrategy.STEPS: control.should_save = True return control def on_epoch_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs): if args.logging_strategy == IntervalStrategy.EPOCH: control.should_log = True if args.eval_strategy == IntervalStrategy.EPOCH and args.eval_delay <= state.epoch: control.should_evaluate = True if args.save_strategy == SaveStrategy.EPOCH: control.should_save = True return control
class DefaultFlowCallback(TrainerCallback): ''' A [`TrainerCallback`] that handles the default flow of the training loop for logs, evaluation and checkpoints. ''' def on_step_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs): pass def on_epoch_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs): pass
3
1
23
4
15
4
6
0.35
1
5
5
0
2
0
2
17
51
9
31
3
28
11
23
3
20
7
1
2
11
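`DefaultFlowCallback.on_step_end` in the record above turns the global step counter into `should_log` / `should_evaluate` / `should_save` flags with simple modulo checks against the configured step intervals. A minimal sketch of that step-based trigger rule, ignoring the extra `logging_first_step`, `eval_delay`, and end-of-training cases (function and argument names are hypothetical):

```python
def step_triggers(global_step: int, logging_steps: int, eval_steps: int, save_steps: int) -> dict:
    """Which actions fire at this update step under the STEPS strategies."""
    return {
        "log": logging_steps > 0 and global_step % logging_steps == 0,
        "evaluate": eval_steps > 0 and global_step % eval_steps == 0,
        "save": save_steps > 0 and global_step % save_steps == 0,
    }

print(step_triggers(global_step=500, logging_steps=100, eval_steps=250, save_steps=500))
# {'log': True, 'evaluate': True, 'save': True}
```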
6,507
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/trainer_callback.py
transformers.trainer_callback.EarlyStoppingCallback
from .trainer_utils import HPSearchBackend, IntervalStrategy, SaveStrategy, has_length import numpy as np from typing import Optional, Union class EarlyStoppingCallback(TrainerCallback, ExportableState): """ A [`TrainerCallback`] that handles early stopping. Args: early_stopping_patience (`int`): Use with `metric_for_best_model` to stop training when the specified metric worsens for `early_stopping_patience` evaluation calls. early_stopping_threshold(`float`, *optional*): Use with TrainingArguments `metric_for_best_model` and `early_stopping_patience` to denote how much the specified metric must improve to satisfy early stopping conditions. ` This callback depends on [`TrainingArguments`] argument *load_best_model_at_end* functionality to set best_metric in [`TrainerState`]. Note that if the [`TrainingArguments`] argument *save_steps* differs from *eval_steps*, the early stopping will not occur until the next save step. """ def __init__(self, early_stopping_patience: int=1, early_stopping_threshold: Optional[float]=0.0): self.early_stopping_patience = early_stopping_patience self.early_stopping_threshold = early_stopping_threshold self.early_stopping_patience_counter = 0 def check_metric_value(self, args, state, control, metric_value): operator = np.greater if args.greater_is_better else np.less if state.best_metric is None or (operator(metric_value, state.best_metric) and abs(metric_value - state.best_metric) > self.early_stopping_threshold): self.early_stopping_patience_counter = 0 else: self.early_stopping_patience_counter += 1 def on_train_begin(self, args, state, control, **kwargs): if not args.load_best_model_at_end: logger.warning('Using EarlyStoppingCallback without load_best_model_at_end=True. Once training is finished, the best model will not be loaded automatically.') assert args.metric_for_best_model is not None, 'EarlyStoppingCallback requires metric_for_best_model to be defined' assert args.eval_strategy != IntervalStrategy.NO, 'EarlyStoppingCallback requires IntervalStrategy of steps or epoch' def on_evaluate(self, args, state, control, metrics, **kwargs): metric_to_check = args.metric_for_best_model if not metric_to_check.startswith('eval_'): metric_to_check = f'eval_{metric_to_check}' metric_value = metrics.get(metric_to_check) if metric_value is None: logger.warning(f'early stopping required metric_for_best_model, but did not find {metric_to_check} so early stopping is disabled') return self.check_metric_value(args, state, control, metric_value) if self.early_stopping_patience_counter >= self.early_stopping_patience: control.should_training_stop = True def state(self) -> dict: return {'args': {'early_stopping_patience': self.early_stopping_patience, 'early_stopping_threshold': self.early_stopping_threshold}, 'attributes': {'early_stopping_patience_counter': self.early_stopping_patience_counter}}
class EarlyStoppingCallback(TrainerCallback, ExportableState): ''' A [`TrainerCallback`] that handles early stopping. Args: early_stopping_patience (`int`): Use with `metric_for_best_model` to stop training when the specified metric worsens for `early_stopping_patience` evaluation calls. early_stopping_threshold(`float`, *optional*): Use with TrainingArguments `metric_for_best_model` and `early_stopping_patience` to denote how much the specified metric must improve to satisfy early stopping conditions. ` This callback depends on [`TrainingArguments`] argument *load_best_model_at_end* functionality to set best_metric in [`TrainerState`]. Note that if the [`TrainingArguments`] argument *save_steps* differs from *eval_steps*, the early stopping will not occur until the next save step. ''' def __init__(self, early_stopping_patience: int=1, early_stopping_threshold: Optional[float]=0.0): pass def check_metric_value(self, args, state, control, metric_value): pass def on_train_begin(self, args, state, control, **kwargs): pass def on_evaluate(self, args, state, control, metrics, **kwargs): pass def state(self) -> dict: pass
6
1
11
0
10
0
2
0.3
2
4
1
0
5
3
5
22
74
9
50
12
44
15
28
12
22
4
1
1
11
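`EarlyStoppingCallback.check_metric_value` above resets a patience counter whenever the monitored metric improves on the best value by more than `early_stopping_threshold`, and increments it otherwise; `on_evaluate` stops training once the counter reaches `early_stopping_patience`. A standalone sketch of that rule (the function keeps its own running best rather than reading `TrainerState.best_metric`, and all names are illustrative):

```python
import numpy as np

def run_early_stopping(metric_history, greater_is_better=True, patience=2, threshold=0.0):
    """Return the 1-based evaluation index at which training would stop, or None."""
    operator = np.greater if greater_is_better else np.less
    best, counter = None, 0
    for i, value in enumerate(metric_history, start=1):
        if best is None or (operator(value, best) and abs(value - best) > threshold):
            best, counter = value, 0
        else:
            counter += 1
        if counter >= patience:
            return i
    return None

# accuracy stalls after the second evaluation; with patience=2, training stops at eval 4
print(run_early_stopping([0.71, 0.74, 0.74, 0.73], patience=2))
```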
6,508
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/trainer_callback.py
transformers.trainer_callback.ExportableState
class ExportableState: """ A class for objects that include the ability to have its state be saved during `Trainer._save_checkpoint` and loaded back in during `Trainer._load_from_checkpoint`. These must implement a `state` function that gets called during the respective Trainer function call. It should only include parameters and attributes needed to recreate the state at a particular time, to avoid utilizing pickle/maintain standard file IO writing. Example: ```python class EarlyStoppingCallback(TrainerCallback, ExportableState): def __init__(self, early_stopping_patience: int = 1, early_stopping_threshold: Optional[float] = 0.0): self.early_stopping_patience = early_stopping_patience self.early_stopping_threshold = early_stopping_threshold # early_stopping_patience_counter denotes the number of times validation metrics failed to improve. self.early_stopping_patience_counter = 0 def state(self) -> dict: return { "args": { "early_stopping_patience": self.early_stopping_patience, "early_stopping_threshold": self.early_stopping_threshold, }, "attributes": { "early_stopping_patience_counter": self.early_stopping_patience_counter, } } ```""" def state(self) -> dict: raise NotImplementedError('You must implement a `state` function to utilize this class.') @classmethod def from_state(cls, state): instance = cls(**state['args']) for k, v in state['attributes'].items(): setattr(instance, k, v) return instance
class ExportableState: ''' A class for objects that include the ability to have its state be saved during `Trainer._save_checkpoint` and loaded back in during `Trainer._load_from_checkpoint`. These must implement a `state` function that gets called during the respective Trainer function call. It should only include parameters and attributes needed to recreate the state at a particular time, to avoid utilizing pickle/maintain standard file IO writing. Example: ```python class EarlyStoppingCallback(TrainerCallback, ExportableState): def __init__(self, early_stopping_patience: int = 1, early_stopping_threshold: Optional[float] = 0.0): self.early_stopping_patience = early_stopping_patience self.early_stopping_threshold = early_stopping_threshold # early_stopping_patience_counter denotes the number of times validation metrics failed to improve. self.early_stopping_patience_counter = 0 def state(self) -> dict: return { "args": { "early_stopping_patience": self.early_stopping_patience, "early_stopping_threshold": self.early_stopping_threshold, }, "attributes": { "early_stopping_patience_counter": self.early_stopping_patience_counter, } } ```''' def state(self) -> dict: pass @classmethod def from_state(cls, state): pass
4
1
4
0
4
0
2
3
0
2
0
2
1
0
2
2
42
6
9
6
5
27
8
5
5
2
0
1
3
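`ExportableState.from_state` above rebuilds an instance from the dict produced by `state()`: constructor keyword arguments come from the `"args"` entry and the remaining runtime attributes are set afterwards. A small round-trip sketch using a hypothetical `Counter` class (the base class body is copied from the record so the snippet runs on its own):

```python
class ExportableState:
    def state(self) -> dict:
        raise NotImplementedError("You must implement a `state` function to utilize this class.")

    @classmethod
    def from_state(cls, state):
        instance = cls(**state["args"])
        for k, v in state["attributes"].items():
            setattr(instance, k, v)
        return instance

class Counter(ExportableState):            # hypothetical example class
    def __init__(self, limit: int = 3):
        self.limit = limit                 # constructor argument -> "args"
        self.count = 0                     # runtime attribute -> "attributes"

    def state(self) -> dict:
        return {"args": {"limit": self.limit}, "attributes": {"count": self.count}}

c = Counter(limit=5)
c.count = 2
restored = Counter.from_state(c.state())   # round-trip through the serializable dict
print(restored.limit, restored.count)      # 5 2
```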
6,509
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/trainer_callback.py
transformers.trainer_callback.PrinterCallback
class PrinterCallback(TrainerCallback): """ A bare [`TrainerCallback`] that just prints the logs. """ def on_log(self, args, state, control, logs=None, **kwargs): _ = logs.pop('total_flos', None) if state.is_local_process_zero: print(logs)
class PrinterCallback(TrainerCallback): ''' A bare [`TrainerCallback`] that just prints the logs. ''' def on_log(self, args, state, control, logs=None, **kwargs): pass
2
1
4
0
4
0
2
0.6
1
0
0
0
1
0
1
16
9
1
5
3
3
3
5
3
3
2
1
1
2
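`PrinterCallback.on_log` above simply drops the noisy `total_flos` entry and prints the remaining log dict on the main local process. A tiny simulation of that behaviour, with the class re-declared against a stub base class and a stand-in state object so the snippet runs without installing transformers:

```python
class TrainerCallback:                      # minimal stub base class for this sketch
    pass

class PrinterCallback(TrainerCallback):
    def on_log(self, args, state, control, logs=None, **kwargs):
        _ = logs.pop("total_flos", None)    # drop the cumulative FLOPs counter
        if state.is_local_process_zero:
            print(logs)

class _State:                               # hypothetical stand-in for TrainerState
    is_local_process_zero = True

PrinterCallback().on_log(None, _State(), None, logs={"loss": 0.42, "total_flos": 1e12})
# prints: {'loss': 0.42}
```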
6,510
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/trainer_callback.py
transformers.trainer_callback.ProgressCallback
from .trainer_utils import HPSearchBackend, IntervalStrategy, SaveStrategy, has_length from tqdm.auto import tqdm class ProgressCallback(TrainerCallback): """ A [`TrainerCallback`] that displays the progress of training or evaluation. You can modify `max_str_len` to control how long strings are truncated when logging. """ def __init__(self, max_str_len: int=100): """ Initialize the callback with optional max_str_len parameter to control string truncation length. Args: max_str_len (`int`): Maximum length of strings to display in logs. Longer strings will be truncated with a message. """ self.training_bar = None self.prediction_bar = None self.max_str_len = max_str_len def on_train_begin(self, args, state, control, **kwargs): if state.is_world_process_zero: self.training_bar = tqdm(total=state.max_steps, dynamic_ncols=True) self.current_step = 0 def on_step_end(self, args, state, control, **kwargs): if state.is_world_process_zero: self.training_bar.update(state.global_step - self.current_step) self.current_step = state.global_step def on_prediction_step(self, args, state, control, eval_dataloader=None, **kwargs): if state.is_world_process_zero and has_length(eval_dataloader): if self.prediction_bar is None: self.prediction_bar = tqdm(total=len(eval_dataloader), leave=self.training_bar is None, dynamic_ncols=True) self.prediction_bar.update(1) def on_evaluate(self, args, state, control, **kwargs): if state.is_world_process_zero: if self.prediction_bar is not None: self.prediction_bar.close() self.prediction_bar = None def on_predict(self, args, state, control, **kwargs): if state.is_world_process_zero: if self.prediction_bar is not None: self.prediction_bar.close() self.prediction_bar = None def on_log(self, args, state, control, logs=None, **kwargs): if state.is_world_process_zero and self.training_bar is not None: shallow_logs = {} for k, v in logs.items(): if isinstance(v, str) and len(v) > self.max_str_len: shallow_logs[k] = f'[String too long to display, length: {len(v)} > {self.max_str_len}. Consider increasing `max_str_len` if needed.]' else: shallow_logs[k] = v _ = shallow_logs.pop('total_flos', None) if 'epoch' in shallow_logs: shallow_logs['epoch'] = round(shallow_logs['epoch'], 2) self.training_bar.write(str(shallow_logs)) def on_train_end(self, args, state, control, **kwargs): if state.is_world_process_zero: self.training_bar.close() self.training_bar = None
class ProgressCallback(TrainerCallback): ''' A [`TrainerCallback`] that displays the progress of training or evaluation. You can modify `max_str_len` to control how long strings are truncated when logging. ''' def __init__(self, max_str_len: int=100): ''' Initialize the callback with optional max_str_len parameter to control string truncation length. Args: max_str_len (`int`): Maximum length of strings to display in logs. Longer strings will be truncated with a message. ''' pass def on_train_begin(self, args, state, control, **kwargs): pass def on_step_end(self, args, state, control, **kwargs): pass def on_prediction_step(self, args, state, control, eval_dataloader=None, **kwargs): pass def on_evaluate(self, args, state, control, **kwargs): pass def on_predict(self, args, state, control, **kwargs): pass def on_log(self, args, state, control, logs=None, **kwargs): pass def on_train_end(self, args, state, control, **kwargs): pass
9
2
7
0
6
1
3
0.29
1
2
0
0
8
4
8
23
72
9
49
16
40
14
43
16
34
5
1
3
21
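`ProgressCallback.on_log` above shortens over-long string values before writing the log dict to the tqdm bar, so a huge generated text cannot wreck the progress display. A standalone sketch of just that filtering step (the helper name is illustrative; the message format mirrors the record):

```python
def truncate_long_strings(logs: dict, max_str_len: int = 100) -> dict:
    """Mirror of the shallow-copy filtering done in ProgressCallback.on_log."""
    shallow = {}
    for k, v in logs.items():
        if isinstance(v, str) and len(v) > max_str_len:
            shallow[k] = (f"[String too long to display, length: {len(v)} > {max_str_len}. "
                          "Consider increasing `max_str_len` if needed.]")
        else:
            shallow[k] = v
    shallow.pop("total_flos", None)
    if "epoch" in shallow:
        shallow["epoch"] = round(shallow["epoch"], 2)
    return shallow

print(truncate_long_strings({"loss": 0.1, "epoch": 1.3333, "sample": "x" * 500}))
```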
6,511
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/trainer_callback.py
transformers.trainer_callback.TrainerCallback
from .training_args import TrainingArguments class TrainerCallback: """ A class for objects that will inspect the state of the training loop at some events and take some decisions. At each of those events the following arguments are available: Args: args ([`TrainingArguments`]): The training arguments used to instantiate the [`Trainer`]. state ([`TrainerState`]): The current state of the [`Trainer`]. control ([`TrainerControl`]): The object that is returned to the [`Trainer`] and can be used to make some decisions. model ([`PreTrainedModel`] or `torch.nn.Module`): The model being trained. tokenizer ([`PreTrainedTokenizer`]): The tokenizer used for encoding the data. This is deprecated in favour of `processing_class`. processing_class ([`PreTrainedTokenizer` or `BaseImageProcessor` or `ProcessorMixin` or `FeatureExtractionMixin`]): The processing class used for encoding the data. Can be a tokenizer, a processor, an image processor or a feature extractor. optimizer (`torch.optim.Optimizer`): The optimizer used for the training steps. lr_scheduler (`torch.optim.lr_scheduler.LambdaLR`): The scheduler used for setting the learning rate. train_dataloader (`torch.utils.data.DataLoader`, *optional*): The current dataloader used for training. eval_dataloader (`torch.utils.data.DataLoader`, *optional*): The current dataloader used for evaluation. metrics (`dict[str, float]`): The metrics computed by the last evaluation phase. Those are only accessible in the event `on_evaluate`. logs (`dict[str, float]`): The values to log. Those are only accessible in the event `on_log`. The `control` object is the only one that can be changed by the callback, in which case the event that changes it should return the modified version. The argument `args`, `state` and `control` are positionals for all events, all the others are grouped in `kwargs`. You can unpack the ones you need in the signature of the event using them. As an example, see the code of the simple [`~transformers.PrinterCallback`]. Example: ```python class PrinterCallback(TrainerCallback): def on_log(self, args, state, control, logs=None, **kwargs): _ = logs.pop("total_flos", None) if state.is_local_process_zero: print(logs) ```""" def on_init_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs): """ Event called at the end of the initialization of the [`Trainer`]. """ pass def on_train_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs): """ Event called at the beginning of training. """ pass def on_train_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs): """ Event called at the end of training. """ pass def on_epoch_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs): """ Event called at the beginning of an epoch. """ pass def on_epoch_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs): """ Event called at the end of an epoch. """ pass def on_step_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs): """ Event called at the beginning of a training step. If using gradient accumulation, one training step might take several inputs. """ pass def on_pre_optimizer_step(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs): """ Event called before the optimizer step but after gradient clipping. Useful for monitoring gradients. 
""" pass def on_optimizer_step(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs): """ Event called after the optimizer step but before gradients are zeroed out. Useful for monitoring gradients. """ pass def on_substep_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs): """ Event called at the end of an substep during gradient accumulation. """ pass def on_step_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs): """ Event called at the end of a training step. If using gradient accumulation, one training step might take several inputs. """ pass def on_evaluate(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs): """ Event called after an evaluation phase. """ pass def on_predict(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, metrics, **kwargs): """ Event called after a successful prediction. """ pass def on_save(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs): """ Event called after a checkpoint save. """ pass def on_log(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs): """ Event called after logging the last logs. """ pass def on_prediction_step(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs): """ Event called after a prediction step. """ pass
class TrainerCallback: ''' A class for objects that will inspect the state of the training loop at some events and take some decisions. At each of those events the following arguments are available: Args: args ([`TrainingArguments`]): The training arguments used to instantiate the [`Trainer`]. state ([`TrainerState`]): The current state of the [`Trainer`]. control ([`TrainerControl`]): The object that is returned to the [`Trainer`] and can be used to make some decisions. model ([`PreTrainedModel`] or `torch.nn.Module`): The model being trained. tokenizer ([`PreTrainedTokenizer`]): The tokenizer used for encoding the data. This is deprecated in favour of `processing_class`. processing_class ([`PreTrainedTokenizer` or `BaseImageProcessor` or `ProcessorMixin` or `FeatureExtractionMixin`]): The processing class used for encoding the data. Can be a tokenizer, a processor, an image processor or a feature extractor. optimizer (`torch.optim.Optimizer`): The optimizer used for the training steps. lr_scheduler (`torch.optim.lr_scheduler.LambdaLR`): The scheduler used for setting the learning rate. train_dataloader (`torch.utils.data.DataLoader`, *optional*): The current dataloader used for training. eval_dataloader (`torch.utils.data.DataLoader`, *optional*): The current dataloader used for evaluation. metrics (`dict[str, float]`): The metrics computed by the last evaluation phase. Those are only accessible in the event `on_evaluate`. logs (`dict[str, float]`): The values to log. Those are only accessible in the event `on_log`. The `control` object is the only one that can be changed by the callback, in which case the event that changes it should return the modified version. The argument `args`, `state` and `control` are positionals for all events, all the others are grouped in `kwargs`. You can unpack the ones you need in the signature of the event using them. As an example, see the code of the simple [`~transformers.PrinterCallback`]. Example: ```python class PrinterCallback(TrainerCallback): def on_log(self, args, state, control, logs=None, **kwargs): _ = logs.pop("total_flos", None) if state.is_local_process_zero: print(logs) ```''' def on_init_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs): ''' Event called at the end of the initialization of the [`Trainer`]. ''' pass def on_train_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs): ''' Event called at the beginning of training. ''' pass def on_train_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs): ''' Event called at the end of training. ''' pass def on_epoch_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs): ''' Event called at the beginning of an epoch. ''' pass def on_epoch_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs): ''' Event called at the end of an epoch. ''' pass def on_step_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs): ''' Event called at the beginning of a training step. If using gradient accumulation, one training step might take several inputs. ''' pass def on_pre_optimizer_step(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs): ''' Event called before the optimizer step but after gradient clipping. Useful for monitoring gradients. 
''' pass def on_optimizer_step(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs): ''' Event called after the optimizer step but before gradients are zeroed out. Useful for monitoring gradients. ''' pass def on_substep_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs): ''' Event called at the end of a substep during gradient accumulation. ''' pass def on_step_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs): ''' Event called at the end of a training step. If using gradient accumulation, one training step might take several inputs. ''' pass def on_evaluate(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs): ''' Event called after an evaluation phase. ''' pass def on_predict(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, metrics, **kwargs): ''' Event called after a successful prediction. ''' pass def on_save(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs): ''' Event called after a checkpoint save. ''' pass def on_log(self, args, state, control, logs=None, **kwargs): ''' Event called after logging the last logs. ''' pass def on_prediction_step(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs): ''' Event called after a prediction step. ''' pass
16
16
5
0
2
3
1
2.94
0
3
3
16
15
0
15
15
144
22
31
16
15
91
31
16
15
1
0
0
15
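As the `TrainerCallback` docstring above explains, a callback receives `args`, `state`, and `control` at every event and influences the loop only by flipping flags on `control`. A hedged example of a custom callback that stops training once the logged loss falls below a threshold; the class name and threshold are illustrative, and the import assumes the transformers package is installed:

```python
from transformers import TrainerCallback

class StopOnLowLossCallback(TrainerCallback):
    def __init__(self, loss_threshold: float = 0.05):
        self.loss_threshold = loss_threshold

    def on_log(self, args, state, control, logs=None, **kwargs):
        # `logs` is only provided to on_log; `control` is the one object we may change
        if logs is not None and logs.get("loss", float("inf")) < self.loss_threshold:
            control.should_training_stop = True
        return control

# Attach it with: trainer.add_callback(StopOnLowLossCallback(loss_threshold=0.05))
```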
6,512
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/trainer_callback.py
transformers.trainer_callback.TrainerControl
from dataclasses import dataclass @dataclass class TrainerControl(ExportableState): """ A class that handles the [`Trainer`] control flow. This class is used by the [`TrainerCallback`] to activate some switches in the training loop. Args: should_training_stop (`bool`, *optional*, defaults to `False`): Whether or not the training should be interrupted. If `True`, this variable will not be set back to `False`. The training will just stop. should_epoch_stop (`bool`, *optional*, defaults to `False`): Whether or not the current epoch should be interrupted. If `True`, this variable will be set back to `False` at the beginning of the next epoch. should_save (`bool`, *optional*, defaults to `False`): Whether or not the model should be saved at this step. If `True`, this variable will be set back to `False` at the beginning of the next step. should_evaluate (`bool`, *optional*, defaults to `False`): Whether or not the model should be evaluated at this step. If `True`, this variable will be set back to `False` at the beginning of the next step. should_log (`bool`, *optional*, defaults to `False`): Whether or not the logs should be reported at this step. If `True`, this variable will be set back to `False` at the beginning of the next step. """ should_training_stop: bool = False should_epoch_stop: bool = False should_save: bool = False should_evaluate: bool = False should_log: bool = False def _new_training(self): """Internal method that resets the variable for a new training.""" self.should_training_stop = False def _new_epoch(self): """Internal method that resets the variable for a new epoch.""" self.should_epoch_stop = False def _new_step(self): """Internal method that resets the variable for a new step.""" self.should_save = False self.should_evaluate = False self.should_log = False def state(self) -> dict: return {'args': {'should_training_stop': self.should_training_stop, 'should_epoch_stop': self.should_epoch_stop, 'should_save': self.should_save, 'should_evaluate': self.should_evaluate, 'should_log': self.should_log}, 'attributes': {}}
@dataclass class TrainerControl(ExportableState): ''' A class that handles the [`Trainer`] control flow. This class is used by the [`TrainerCallback`] to activate some switches in the training loop. Args: should_training_stop (`bool`, *optional*, defaults to `False`): Whether or not the training should be interrupted. If `True`, this variable will not be set back to `False`. The training will just stop. should_epoch_stop (`bool`, *optional*, defaults to `False`): Whether or not the current epoch should be interrupted. If `True`, this variable will be set back to `False` at the beginning of the next epoch. should_save (`bool`, *optional*, defaults to `False`): Whether or not the model should be saved at this step. If `True`, this variable will be set back to `False` at the beginning of the next step. should_evaluate (`bool`, *optional*, defaults to `False`): Whether or not the model should be evaluated at this step. If `True`, this variable will be set back to `False` at the beginning of the next step. should_log (`bool`, *optional*, defaults to `False`): Whether or not the logs should be reported at this step. If `True`, this variable will be set back to `False` at the beginning of the next step. ''' def _new_training(self): '''Internal method that resets the variable for a new training.''' pass def _new_epoch(self): '''Internal method that resets the variable for a new epoch.''' pass def _new_step(self): '''Internal method that resets the variable for a new step.''' pass def state(self) -> dict: pass
6
4
6
0
5
1
1
0.92
1
1
0
0
4
0
4
6
59
11
25
10
20
23
16
10
11
1
1
0
4
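The `TrainerControl` docstring above distinguishes one-shot flags (`should_save`, `should_evaluate`, `should_log`), which are cleared at the start of every step, from `should_training_stop`, which persists once set. A standalone sketch of that reset behaviour using a look-alike dataclass (not the transformers class itself):

```python
from dataclasses import dataclass

@dataclass
class TrainerControlSketch:
    should_training_stop: bool = False
    should_epoch_stop: bool = False
    should_save: bool = False
    should_evaluate: bool = False
    should_log: bool = False

    def _new_step(self):
        # save/evaluate/log are one-shot flags, reset at the beginning of each step
        self.should_save = False
        self.should_evaluate = False
        self.should_log = False

control = TrainerControlSketch(should_save=True, should_training_stop=True)
control._new_step()
print(control.should_save, control.should_training_stop)  # False True: the stop flag persists
```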
6,513
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/trainer_callback.py
transformers.trainer_callback.TrainerState
import math from .trainer_utils import HPSearchBackend, IntervalStrategy, SaveStrategy, has_length import dataclasses import json from dataclasses import dataclass from typing import Optional, Union @dataclass class TrainerState: """ A class containing the [`Trainer`] inner state that will be saved along the model and optimizer when checkpointing and passed to the [`TrainerCallback`]. <Tip> In all this class, one step is to be understood as one update step. When using gradient accumulation, one update step may require several forward and backward passes: if you use `gradient_accumulation_steps=n`, then one update step requires going through *n* batches. </Tip> Args: epoch (`float`, *optional*): Only set during training, will represent the epoch the training is at (the decimal part being the percentage of the current epoch completed). global_step (`int`, *optional*, defaults to 0): During training, represents the number of update steps completed. max_steps (`int`, *optional*, defaults to 0): The number of update steps to do during the current training. logging_steps (`int`, *optional*, defaults to 500): Log every X updates steps eval_steps (`int`, *optional*): Run an evaluation every X steps. save_steps (`int`, *optional*, defaults to 500): Save checkpoint every X updates steps. train_batch_size (`int`, *optional*): The batch size for the training dataloader. Only needed when `auto_find_batch_size` has been used. num_input_tokens_seen (`int`, *optional*, defaults to 0): When tracking the inputs tokens, the number of tokens seen during training (number of input tokens, not the number of prediction tokens). total_flos (`float`, *optional*, defaults to 0): The total number of floating operations done by the model since the beginning of training (stored as floats to avoid overflow). log_history (`list[dict[str, float]]`, *optional*): The list of logs done since the beginning of training. best_metric (`float`, *optional*): When tracking the best model, the value of the best metric encountered so far. best_global_step (`int`, *optional*): When tracking the best model, the step at which the best metric was encountered. Used for setting `best_model_checkpoint`. best_model_checkpoint (`str`, *optional*): When tracking the best model, the value of the name of the checkpoint for the best model encountered so far. is_local_process_zero (`bool`, *optional*, defaults to `True`): Whether or not this process is the local (e.g., on one machine if training in a distributed fashion on several machines) main process. is_world_process_zero (`bool`, *optional*, defaults to `True`): Whether or not this process is the global main process (when training in a distributed fashion on several machines, this is only going to be `True` for one process). is_hyper_param_search (`bool`, *optional*, defaults to `False`): Whether we are in the process of a hyper parameter search using Trainer.hyperparameter_search. This will impact the way data will be logged in TensorBoard. stateful_callbacks (`list[StatefulTrainerCallback]`, *optional*): Callbacks attached to the `Trainer` that should have their states be saved or restored. Relevant callbacks should implement a `state` and `from_state` function. 
""" epoch: Optional[float] = None global_step: int = 0 max_steps: int = 0 logging_steps: int = 500 eval_steps: int = 500 save_steps: int = 500 train_batch_size: Optional[int] = None num_train_epochs: int = 0 num_input_tokens_seen: int = 0 total_flos: float = 0 log_history: list[dict[str, float]] = None best_metric: Optional[float] = None best_global_step: Optional[int] = None best_model_checkpoint: Optional[str] = None is_local_process_zero: bool = True is_world_process_zero: bool = True is_hyper_param_search: bool = False trial_name: Optional[str] = None trial_params: Optional[dict[str, Union[str, float, int, bool]]] = None stateful_callbacks: Optional[list['TrainerCallback']] = None def __post_init__(self): if self.log_history is None: self.log_history = [] if self.stateful_callbacks is None: self.stateful_callbacks = {} elif isinstance(self.stateful_callbacks, dict): pass else: stateful_callbacks = {} for callback in self.stateful_callbacks: if not isinstance(callback, ExportableState): raise TypeError(f'All callbacks passed to be saved must inherit `ExportableState`, but received {type(callback)}') name = callback.__class__.__name__ if name in stateful_callbacks: if not isinstance(stateful_callbacks[name], list): stateful_callbacks[name] = [stateful_callbacks[name]] stateful_callbacks[name].append(callback.state()) else: stateful_callbacks[name] = callback.state() self.stateful_callbacks = stateful_callbacks def save_to_json(self, json_path: str): """Save the content of this instance in JSON format inside `json_path`.""" json_string = json.dumps(dataclasses.asdict(self), indent=2, sort_keys=True) + '\n' with open(json_path, 'w', encoding='utf-8') as f: f.write(json_string) @classmethod def load_from_json(cls, json_path: str): """Create an instance from the content of `json_path`.""" with open(json_path, encoding='utf-8') as f: text = f.read() return cls(**json.loads(text)) def compute_steps(self, args, max_steps): """ Calculates and stores the absolute value for logging, eval, and save steps based on if it was a proportion or not. """ for step_kind in ('logging', 'eval', 'save'): num_steps = getattr(args, f'{step_kind}_steps') if num_steps is not None: if num_steps < 1: num_steps = math.ceil(max_steps * num_steps) setattr(self, f'{step_kind}_steps', num_steps) def init_training_references(self, trainer, max_steps, num_train_epochs, trial): """ Stores the initial training references needed in `self` """ if trainer.hp_name is not None and trainer._trial is not None: self.trial_name = trainer.hp_name(trainer._trial) self.trial_params = None if trial is not None: from transformers.integrations import hp_params assignments = trial.assignments if trainer.hp_search_backend == HPSearchBackend.SIGOPT else trial self.trial_params = hp_params(assignments) self.max_steps = max_steps self.num_train_epochs = num_train_epochs self.is_local_process_zero = trainer.is_local_process_zero() self.is_world_process_zero = trainer.is_world_process_zero()
@dataclass class TrainerState: ''' A class containing the [`Trainer`] inner state that will be saved along the model and optimizer when checkpointing and passed to the [`TrainerCallback`]. <Tip> In all this class, one step is to be understood as one update step. When using gradient accumulation, one update step may require several forward and backward passes: if you use `gradient_accumulation_steps=n`, then one update step requires going through *n* batches. </Tip> Args: epoch (`float`, *optional*): Only set during training, will represent the epoch the training is at (the decimal part being the percentage of the current epoch completed). global_step (`int`, *optional*, defaults to 0): During training, represents the number of update steps completed. max_steps (`int`, *optional*, defaults to 0): The number of update steps to do during the current training. logging_steps (`int`, *optional*, defaults to 500): Log every X updates steps eval_steps (`int`, *optional*): Run an evaluation every X steps. save_steps (`int`, *optional*, defaults to 500): Save checkpoint every X updates steps. train_batch_size (`int`, *optional*): The batch size for the training dataloader. Only needed when `auto_find_batch_size` has been used. num_input_tokens_seen (`int`, *optional*, defaults to 0): When tracking the inputs tokens, the number of tokens seen during training (number of input tokens, not the number of prediction tokens). total_flos (`float`, *optional*, defaults to 0): The total number of floating operations done by the model since the beginning of training (stored as floats to avoid overflow). log_history (`list[dict[str, float]]`, *optional*): The list of logs done since the beginning of training. best_metric (`float`, *optional*): When tracking the best model, the value of the best metric encountered so far. best_global_step (`int`, *optional*): When tracking the best model, the step at which the best metric was encountered. Used for setting `best_model_checkpoint`. best_model_checkpoint (`str`, *optional*): When tracking the best model, the value of the name of the checkpoint for the best model encountered so far. is_local_process_zero (`bool`, *optional*, defaults to `True`): Whether or not this process is the local (e.g., on one machine if training in a distributed fashion on several machines) main process. is_world_process_zero (`bool`, *optional*, defaults to `True`): Whether or not this process is the global main process (when training in a distributed fashion on several machines, this is only going to be `True` for one process). is_hyper_param_search (`bool`, *optional*, defaults to `False`): Whether we are in the process of a hyper parameter search using Trainer.hyperparameter_search. This will impact the way data will be logged in TensorBoard. stateful_callbacks (`list[StatefulTrainerCallback]`, *optional*): Callbacks attached to the `Trainer` that should have their states be saved or restored. Relevant callbacks should implement a `state` and `from_state` function. ''' def __post_init__(self): pass def save_to_json(self, json_path: str): '''Save the content of this instance in JSON format inside `json_path`.''' pass @classmethod def load_from_json(cls, json_path: str): '''Create an instance from the content of `json_path`.''' pass def compute_steps(self, args, max_steps): ''' Calculates and stores the absolute value for logging, eval, and save steps based on if it was a proportion or not. 
''' pass def init_training_references(self, trainer, max_steps, num_train_epochs, trial): ''' Stores the initial training references needed in `self` ''' pass
8
5
14
1
10
3
4
0.92
0
7
2
0
4
1
5
5
153
13
73
39
65
67
67
36
60
8
0
4
19
6,514
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/trainer_pt_utils.py
transformers.trainer_pt_utils.AcceleratorConfig
from typing import Any, Optional, Union import json import os from dataclasses import dataclass, field import copy import io @dataclass class AcceleratorConfig: """ A subset of arguments relating to the underlying [`accelerate.Accelerator`] implementation utilized in the `Trainer` that can be customized. Mostly relating to data. Parameters: split_batches (`bool`, *optional*, defaults to `False`): Whether or not the accelerator should split the batches yielded by the dataloaders across the devices. If `True` the actual batch size used will be the same on any kind of distributed processes, but it must be a round multiple of the `num_processes` you are using. If `False`, actual batch size used will be the one set in your script multiplied by the number of processes. dispatch_batches (`bool`, *optional*): If set to `True`, the dataloader prepared by the Accelerator is only iterated through on the main process and then the batches are split and broadcast to each process. Will default to `True` for `DataLoader` whose underlying dataset is an `IterableDataset`, `False` otherwise. even_batches (`bool`, *optional*, defaults to `True`): If set to `True`, in cases where the total batch size across all processes does not exactly divide the dataset, samples at the start of the dataset will be duplicated so the batch can be divided equally among all workers. use_seedable_sampler (`bool`, *optional*, defaults to `True`): Whether or not to use a fully seedable random sampler ([`accelerate.data_loader.SeedableRandomSampler`]). Ensures training results are fully reproducible using a different sampling technique. While seed-to-seed results may differ, on average the differences are negligible when using multiple different seeds to compare. Should also be run with [`~utils.set_seed`] for the best results. gradient_accumulation_kwargs (`dict`, *optional*): Additional kwargs to configure gradient accumulation, see [`accelerate.utils.GradientAccumulationPlugin`]. Any of the following (optional) keys are acceptable: num_steps (`int`): Will take precedence over [`~.TrainingArguments.gradient_accumulation_steps`] if the latter is set to 1, otherwise an exception will be raised. adjust_scheduler (`bool`): Whether to adjust the scheduler steps to account for [`~.TrainingArguments.gradient_accumulation_steps`]. The [`accelerate.utils.GradientAccumulationPlugin`] default is `True`. sync_each_batch (`bool`): Whether to synchronize the gradients at each data batch. The [`accelerate.utils.GradientAccumulationPlugin`] default is `False`. non_blocking (`bool`, *optional*, defaults to `False`): Whether to use non-blocking CUDA calls to help minimize synchronization during distributed training with prepared `DataLoader` inputs being moved to device. Best if used with `pin_memory=True` in the `TrainingArguments`. use_configured_state (`bool`, *optional*, defaults to `False`): Whether or not to use a pre-configured `AcceleratorState` or `PartialState` defined before calling `TrainingArguments`. If `True`, an `Accelerator` or `PartialState` must be initialized. May lead to issues using sweeps or hyperparameter tuning. """ split_batches: bool = field(default=False, metadata={'help': 'Whether or not the accelerator should split the batches yielded by the dataloaders across the devices. If `True` the actual batch size used will be the same on any kind of distributed processes, but it must be a round multiple of the `num_processes` you are using. 
If `False`, actual batch size used will be the one set in your script multiplied by the number of processes.'}) dispatch_batches: Optional[bool] = field(default=None, metadata={'help': 'If set to `True`, the dataloader prepared by the Accelerator is only iterated through on the main process and then the batches are split and broadcast to each process. Will default to `True` for `DataLoader` whose underlying dataset is an `IterableDataset`, `False` otherwise.'}) even_batches: bool = field(default=True, metadata={'help': 'If set to `True`, in cases where the total batch size across all processes does not exactly divide the dataset, samples at the start of the dataset will be duplicated so the batch can be divided equally among all workers.'}) use_seedable_sampler: bool = field(default=True, metadata={'help': 'Whether or not to use a fully seedable random sampler ([`accelerate.data_loader.SeedableRandomSampler`]). Ensures training results are fully reproducible using a different sampling technique. While seed-to-seed results may differ, on average the differences are negligible when using multiple different seeds to compare. Should also be run with [`~utils.set_seed`] for the best results.'}) non_blocking: Optional[bool] = field(default=False, metadata={'help': 'Whether to use non-blocking CUDA calls to help minimize synchronization during distributed training with prepared `DataLoader` inputs being moved to device. Best if used with `pin_memory=True` in the `TrainingArguments`. Requires accelerate v0.30.0.'}) gradient_accumulation_kwargs: Optional[dict] = field(default=None, metadata={'help': 'Additional kwargs to configure gradient accumulation, see [`accelerate.utils.GradientAccumulationPlugin`]. Any of the following (optional) keys are acceptable: num_steps (`int`): Will take precedence over [`~.TrainingArguments.gradient_accumulation_steps`] if the latter is set to 1, otherwise an exception will be raised. adjust_scheduler (`bool`): Whether to adjust the scheduler steps to account for [`~.TrainingArguments.gradient_accumulation_steps`]. The [`accelerate.utils.GradientAccumulationPlugin`] default is `True`. sync_each_batch (`bool`): Whether to synchronize the gradients at each data batch. The [`accelerate.utils.GradientAccumulationPlugin`] default is `False`.'}) use_configured_state: bool = field(default=False, metadata={'help': 'Whether or not to use a pre-configured `AcceleratorState` or `PartialState` defined before calling `TrainingArguments`. If `True`, an `Accelerator` or `PartialState` must be initialized. May lead to issues using sweeps or hyperparameter tuning.'}) @classmethod def from_json_file(cls, json_file): open_file = io.open if os.path.exists(json_file) else open with open_file(json_file, 'r', encoding='utf-8') as f: config_dict = json.load(f) extra_keys = sorted((key for key in config_dict if key not in cls.__dataclass_fields__)) if len(extra_keys) > 0: raise ValueError(f'The config file at {json_file} had unknown keys ({extra_keys}), please try upgrading your `transformers` version or fix (and potentially remove these keys) from your config file.') return cls(**config_dict) def to_dict(self): return copy.deepcopy(self.__dict__) def pop(self, key, default=None): return self.__dict__.pop(key, default)
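A minimal round trip through `from_json_file` above, assuming the class is in scope; the temporary file and its contents are illustrative only (unknown keys in the JSON raise a `ValueError`, as the classmethod shows):

import json
import tempfile

# Write a config containing only known AcceleratorConfig fields.
with tempfile.NamedTemporaryFile('w', suffix='.json', delete=False) as f:
    json.dump({'split_batches': True, 'even_batches': False}, f)
    path = f.name

config = AcceleratorConfig.from_json_file(path)  # class defined above
print(config.split_batches, config.dispatch_batches)  # True None
print(config.pop('even_batches'))  # False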
@dataclass class AcceleratorConfig: ''' A subset of arguments relating to the underlying [`accelerate.Accelerator`] implementation utilized in the `Trainer` that can be customized. Mostly relating to data. Parameters: split_batches (`bool`, *optional*, defaults to `False`): Whether or not the accelerator should split the batches yielded by the dataloaders across the devices. If `True` the actual batch size used will be the same on any kind of distributed processes, but it must be a round multiple of the `num_processes` you are using. If `False`, actual batch size used will be the one set in your script multiplied by the number of processes. dispatch_batches (`bool`, *optional*): If set to `True`, the dataloader prepared by the Accelerator is only iterated through on the main process and then the batches are split and broadcast to each process. Will default to `True` for `DataLoader` whose underlying dataset is an `IterableDataset`, `False` otherwise. even_batches (`bool`, *optional*, defaults to `True`): If set to `True`, in cases where the total batch size across all processes does not exactly divide the dataset, samples at the start of the dataset will be duplicated so the batch can be divided equally among all workers. use_seedable_sampler (`bool`, *optional*, defaults to `True`): Whether or not to use a fully seedable random sampler ([`accelerate.data_loader.SeedableRandomSampler`]). Ensures training results are fully reproducible using a different sampling technique. While seed-to-seed results may differ, on average the differences are negligible when using multiple different seeds to compare. Should also be run with [`~utils.set_seed`] for the best results. gradient_accumulation_kwargs (`dict`, *optional*): Additional kwargs to configure gradient accumulation, see [`accelerate.utils.GradientAccumulationPlugin`]. Any of the following (optional) keys are acceptable: num_steps (`int`): Will take precedence over [`~.TrainingArguments.gradient_accumulation_steps`] if the latter is set to 1, otherwise an exception will be raised. adjust_scheduler (`bool`): Whether to adjust the scheduler steps to account for [`~.TrainingArguments.gradient_accumulation_steps`]. The [`accelerate.utils.GradientAccumulationPlugin`] default is `True`. sync_each_batch (`bool`): Whether to synchronize the gradients at each data batch. The [`accelerate.utils.GradientAccumulationPlugin`] default is `False`. non_blocking (`bool`, *optional*, defaults to `False`): Whether to use non-blocking CUDA calls to help minimize synchronization during distributed training with prepared `DataLoader` inputs being moved to device. Best if used with `pin_memory=True` in the `TrainingArguments`. use_configured_state (`bool`, *optional*, defaults to `False`): Whether or not to use a pre-configured `AcceleratorState` or `PartialState` defined before calling `TrainingArguments`. If `True`, an `Accelerator` or `PartialState` must be initialized. May lead to issues using sweeps or hyperparameter tuning. ''' @classmethod def from_json_file(cls, json_file): pass def to_dict(self): pass def pop(self, key, default=None): pass
6
1
6
0
5
1
2
0.55
0
1
0
0
2
0
3
3
132
8
80
16
75
44
20
14
16
3
0
1
5
6,515
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/trainer_pt_utils.py
transformers.trainer_pt_utils.DistributedLengthGroupedSampler
from typing import Any, Optional, Union from .tokenization_utils_base import BatchEncoding import torch from torch.utils.data import Dataset, IterableDataset, RandomSampler, Sampler from torch.utils.data.distributed import DistributedSampler import math import torch.distributed as dist from collections.abc import Iterator, Mapping class DistributedLengthGroupedSampler(DistributedSampler): """ Distributed Sampler that samples indices in a way that groups together features of the dataset of roughly the same length while keeping a bit of randomness. """ def __init__(self, batch_size: int, dataset: Optional[Dataset]=None, num_replicas: Optional[int]=None, rank: Optional[int]=None, seed: int=0, drop_last: bool=False, lengths: Optional[list[int]]=None, model_input_name: Optional[str]=None): if dataset is None and lengths is None: raise ValueError('One of dataset and lengths must be provided.') if num_replicas is None: if not dist.is_available(): raise RuntimeError('Requires distributed package to be available') num_replicas = dist.get_world_size() if rank is None: if not dist.is_available(): raise RuntimeError('Requires distributed package to be available') rank = dist.get_rank() self.batch_size = batch_size self.num_replicas = num_replicas self.rank = rank self.epoch = 0 self.drop_last = drop_last if lengths is None: model_input_name = model_input_name if model_input_name is not None else 'input_ids' if not isinstance(dataset[0], (dict, BatchEncoding)) or model_input_name not in dataset[0]: raise ValueError(f"Can only automatically infer lengths for datasets whose items are dictionaries with an '{model_input_name}' key.") lengths = [len(feature[model_input_name]) for feature in dataset] elif isinstance(lengths, torch.Tensor): logger.info('If lengths is a torch.Tensor, DistributedLengthGroupedSampler will be slow. Converting lengths to list[int]...') lengths = lengths.tolist() self.lengths = lengths if self.drop_last and len(self.lengths) % self.num_replicas != 0: self.num_samples = math.ceil((len(self.lengths) - self.num_replicas) / self.num_replicas) else: self.num_samples = math.ceil(len(self.lengths) / self.num_replicas) self.total_size = self.num_samples * self.num_replicas self.seed = seed def __iter__(self) -> Iterator: g = torch.Generator() g.manual_seed(self.seed + self.epoch) indices = get_length_grouped_indices(self.lengths, self.batch_size, generator=g) if not self.drop_last: indices += indices[:self.total_size - len(indices)] else: indices = indices[:self.total_size] assert len(indices) == self.total_size indices = indices[self.rank:self.total_size:self.num_replicas] assert len(indices) == self.num_samples return iter(indices)
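The automatic length inference above only works when dataset items are dict-like (or `BatchEncoding`) and expose `model_input_name`; a minimal sketch of the expected item shape:

# Each item must be a mapping with an 'input_ids' key for automatic length
# inference; otherwise pass `lengths` explicitly to the sampler.
dataset = [
    {'input_ids': [101, 7592, 102]},
    {'input_ids': [101, 7592, 2088, 2000, 102]},
]
lengths = [len(feature['input_ids']) for feature in dataset]
print(lengths)  # [3, 5]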
class DistributedLengthGroupedSampler(DistributedSampler): ''' Distributed Sampler that samples indices in a way that groups together features of the dataset of roughly the same length while keeping a bit of randomness. ''' def __init__(self, batch_size: int, dataset: Optional[Dataset]=None, num_replicas: Optional[int]=None, rank: Optional[int]=None, seed: int=0, drop_last: bool=False, lengths: Optional[list[int]]=None, model_input_name: Optional[str]=None): pass def __iter__(self) -> Iterator: pass
3
1
39
4
31
5
7
0.22
1
8
1
0
2
9
2
10
86
9
63
24
50
14
41
14
38
11
3
2
13
6,516
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/trainer_pt_utils.py
transformers.trainer_pt_utils.DistributedSamplerWithLoop
from torch.utils.data.distributed import DistributedSampler class DistributedSamplerWithLoop(DistributedSampler): """ Like a `torch.utils.data.distributed.DistributedSampler` but loops at the end back to the beginning of the shuffled samples to make each process have a round multiple of batch_size samples. Args: dataset (`torch.utils.data.Dataset`): Dataset used for sampling. batch_size (`int`): The batch size used with this sampler. kwargs (`dict[str, Any]`, *optional*): All other keyword arguments passed to `DistributedSampler`. """ def __init__(self, dataset, batch_size, **kwargs): super().__init__(dataset, **kwargs) self.batch_size = batch_size def __iter__(self): indices = list(super().__iter__()) remainder = 0 if len(indices) % self.batch_size == 0 else self.batch_size - len(indices) % self.batch_size start_remainder = 1 if self.rank < len(self.dataset) % self.num_replicas else 0 indices += indices[start_remainder:start_remainder + remainder] return iter(indices)
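A standalone sketch of the looping arithmetic in `__iter__` above, showing how a 7-index shard is padded to a round multiple of the batch size (the index values are illustrative):

# Suppose a process received 7 indices with batch_size=4: it needs 1 more.
indices = [0, 2, 4, 6, 8, 10, 12]
batch_size = 4
remainder = 0 if len(indices) % batch_size == 0 else batch_size - len(indices) % batch_size
indices += indices[0:0 + remainder]  # start_remainder is 0 or 1 depending on rank
print(indices)  # [0, 2, 4, 6, 8, 10, 12, 0] -> length 8, a multiple of 4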
class DistributedSamplerWithLoop(DistributedSampler): ''' Like a `torch.utils.data.distributed.DistributedSampler` but loops at the end back to the beginning of the shuffled samples to make each process have a round multiple of batch_size samples. Args: dataset (`torch.utils.data.Dataset`): Dataset used for sampling. batch_size (`int`): The batch size used with this sampler. kwargs (`dict[str, Any]`, *optional*): All other keyword arguments passed to `DistributedSampler`. ''' def __init__(self, dataset, batch_size, **kwargs): pass def __iter__(self): pass
3
1
6
0
5
1
2
1.3
1
2
0
0
2
1
2
10
26
3
10
7
7
13
10
7
7
3
3
0
4
6,517
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/trainer_pt_utils.py
transformers.trainer_pt_utils.DistributedTensorGatherer
import warnings import numpy as np class DistributedTensorGatherer: """ A class responsible for properly gathering tensors (or nested list/tuple of tensors) on the CPU by chunks. If our dataset has 16 samples with a batch size of 2 on 3 processes and we gather then transfer on CPU at every step, our sampler will generate the following indices: `[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1]` to get something of size a multiple of 3 (so that each process gets the same dataset length). Then process 0, 1 and 2 will be responsible for making predictions for the following samples: - P0: `[0, 1, 2, 3, 4, 5]` - P1: `[6, 7, 8, 9, 10, 11]` - P2: `[12, 13, 14, 15, 0, 1]` The first batch treated on each process will be: - P0: `[0, 1]` - P1: `[6, 7]` - P2: `[12, 13]` So if we gather at the end of the first batch, we will get a tensor (nested list/tuple of tensor) corresponding to the following indices: `[0, 1, 6, 7, 12, 13]` If we directly concatenate our results without taking any precautions, the user will then get the predictions for the indices in this order at the end of the prediction loop: `[0, 1, 6, 7, 12, 13, 2, 3, 8, 9, 14, 15, 4, 5, 10, 11, 0, 1]` For some reason, that's not going to roll their boat. This class is there to solve that problem. Args: world_size (`int`): The number of processes used in the distributed training. num_samples (`int`): The number of samples in our dataset. make_multiple_of (`int`, *optional*): If passed, the class assumes the datasets passed to each process are made to be a multiple of this argument (by adding samples). padding_index (`int`, *optional*, defaults to -100): The padding index to use if the arrays don't all have the same sequence length. """ def __init__(self, world_size, num_samples, make_multiple_of=None, padding_index=-100): warnings.warn('DistributedTensorGatherer is deprecated and will be removed in v5 of Transformers.', FutureWarning) self.world_size = world_size self.num_samples = num_samples total_size = world_size if make_multiple_of is None else world_size * make_multiple_of self.total_samples = int(np.ceil(num_samples / total_size)) * total_size self.process_length = self.total_samples // world_size self._storage = None self._offsets = None self.padding_index = padding_index def add_arrays(self, arrays): """ Add `arrays` to the internal storage. Will initialize the storage to the full size at the first arrays passed so that if we're bound to get an OOM, it happens at the beginning. """ if arrays is None: return if self._storage is None: self._storage = nested_new_like(arrays, self.total_samples, padding_index=self.padding_index) self._offsets = list(range(0, self.total_samples, self.process_length)) slice_len, self._storage = self._nested_set_tensors(self._storage, arrays) for i in range(self.world_size): self._offsets[i] += slice_len def _nested_set_tensors(self, storage, arrays): if isinstance(arrays, (list, tuple)): result = [self._nested_set_tensors(x, y) for x, y in zip(storage, arrays)] return (result[0][0], type(arrays)((r[1] for r in result))) assert arrays.shape[0] % self.world_size == 0, f'Arrays passed should all have a first dimension multiple of {self.world_size}, found {arrays.shape[0]}.' 
slice_len = arrays.shape[0] // self.world_size for i in range(self.world_size): if len(arrays.shape) == 1: storage[self._offsets[i]:self._offsets[i] + slice_len] = arrays[i * slice_len:(i + 1) * slice_len] else: if len(storage.shape) > 1 and storage.shape[1] < arrays.shape[1]: storage = expand_like(storage, arrays.shape[1], padding_index=self.padding_index) storage[self._offsets[i]:self._offsets[i] + slice_len, :arrays.shape[1]] = arrays[i * slice_len:(i + 1) * slice_len] return (slice_len, storage) def finalize(self): """ Return the properly gathered arrays and truncate to the number of samples (since the sampler added some extras to get each process a dataset of the same length). """ if self._storage is None: return if self._offsets[0] != self.process_length: logger.warning('Not all data has been set. Are you sure you passed all values?') return nested_truncate(self._storage, self.num_samples)
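A single-process sketch of the add/finalize cycle, assuming the module-level `nested_new_like` and `nested_truncate` helpers are in scope (the class is deprecated, but the mechanics still hold):

import numpy as np

# Gather 5 predictions in chunks of 3 and 2, then truncate back to num_samples.
gatherer = DistributedTensorGatherer(world_size=1, num_samples=5)
gatherer.add_arrays(np.arange(3, dtype=np.float32).reshape(3, 1))
gatherer.add_arrays(np.arange(3, 5, dtype=np.float32).reshape(2, 1))
preds = gatherer.finalize()  # truncates the padded storage to num_samples
print(preds.shape)  # (5, 1)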
class DistributedTensorGatherer: ''' A class responsible for properly gathering tensors (or nested list/tuple of tensors) on the CPU by chunks. If our dataset has 16 samples with a batch size of 2 on 3 processes and we gather then transfer on CPU at every step, our sampler will generate the following indices: `[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1]` to get something of size a multiple of 3 (so that each process gets the same dataset length). Then process 0, 1 and 2 will be responsible for making predictions for the following samples: - P0: `[0, 1, 2, 3, 4, 5]` - P1: `[6, 7, 8, 9, 10, 11]` - P2: `[12, 13, 14, 15, 0, 1]` The first batch treated on each process will be: - P0: `[0, 1]` - P1: `[6, 7]` - P2: `[12, 13]` So if we gather at the end of the first batch, we will get a tensor (nested list/tuple of tensor) corresponding to the following indices: `[0, 1, 6, 7, 12, 13]` If we directly concatenate our results without taking any precautions, the user will then get the predictions for the indices in this order at the end of the prediction loop: `[0, 1, 6, 7, 12, 13, 2, 3, 8, 9, 14, 15, 4, 5, 10, 11, 0, 1]` For some reason, that's not going to roll their boat. This class is there to solve that problem. Args: world_size (`int`): The number of processes used in the distributed training. num_samples (`int`): The number of samples in our dataset. make_multiple_of (`int`, *optional*): If passed, the class assumes the datasets passed to each process are made to be a multiple of this argument (by adding samples). padding_index (`int`, *optional*, defaults to -100): The padding index to use if the arrays don't all have the same sequence length. ''' def __init__(self, world_size, num_samples, make_multiple_of=None, padding_index=-100): pass def add_arrays(self, arrays): ''' Add `arrays` to the internal storage. Will initialize the storage to the full size at the first arrays passed so that if we're bound to get an OOM, it happens at the beginning. ''' pass def _nested_set_tensors(self, storage, arrays): pass def finalize(self): ''' Return the properly gathered arrays and truncate to the number of samples (since the sampler added some extras to get each process a dataset of the same length). ''' pass
5
3
14
1
12
2
4
0.87
0
7
0
0
4
7
4
4
106
18
47
18
42
41
39
18
34
5
0
3
14
6,518
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/trainer_pt_utils.py
transformers.trainer_pt_utils.EvalLoopContainer
class EvalLoopContainer: """ Container to store intermediate results of the evaluation loop. Args: do_nested_concat (`bool`, *optional*, defaults to `True`): If set to `True`, each iteration will recursively concatenate a new object containing tensors to the existing stored tensors, provided that the structure of the existing object and the new one are identical. If set to `False`, all newly added tensors will be stored in a list. padding_index (`int`, *optional*, defaults to -100): Value used to pad tensors of different shapes when `do_nested_concat=True`. """ def __init__(self, do_nested_concat: bool=True, padding_index: int=-100): self.do_nested_concat = do_nested_concat self.padding_index = padding_index self.tensors = None self.arrays = None def add(self, tensors) -> None: """Add tensors to the stored objects. If `do_nested_concat=True`, the tensors will be concatenated recursively.""" if self.tensors is None: self.tensors = tensors if self.do_nested_concat else [tensors] elif self.do_nested_concat: self.tensors = nested_concat(self.tensors, tensors, padding_index=self.padding_index) else: self.tensors.append(tensors) def to_cpu_and_numpy(self) -> None: """Move tensors in stored objects to CPU and convert them to numpy arrays.""" if self.tensors is None: return new_arrays = nested_numpify(self.tensors) if self.arrays is None: self.arrays = new_arrays elif self.do_nested_concat: self.arrays = nested_concat(self.arrays, new_arrays, padding_index=self.padding_index) else: self.arrays.extend(new_arrays) self.tensors = None def get_arrays(self): """Returns the stored objects, numpified and moved to the CPU.""" self.to_cpu_and_numpy() return self.arrays
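A minimal usage sketch, assuming the module-level `nested_concat` and `nested_numpify` helpers are in scope:

import torch

# Accumulate two batches of logits, then fetch them as one numpy array.
container = EvalLoopContainer(do_nested_concat=True, padding_index=-100)
container.add(torch.ones(2, 3))
container.add(torch.zeros(2, 3))
arrays = container.get_arrays()  # numpifies and moves to CPU internally
print(arrays.shape)  # (4, 3)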
class EvalLoopContainer: ''' Container to store intermediate results of the evaluation loop. Args: do_nested_concat (`bool`, *optional*, defaults to `True`): If set to `True`, each iteration will recursively concatenate a new object containing tensors to the existing stored tensors, provided that the structure of the existing object and the new one are identical. If set to `False`, all newly added tensors will be stored in a list. padding_index (`int`, *optional*, defaults to -100): Value used to pad tensors of different shapes when `do_nested_concat=True`. ''' def __init__(self, do_nested_concat: bool=True, padding_index: int=-100): pass def add(self, tensors) -> None: '''Add tensors to the stored objects. If `do_nested_concat=True`, the tensors will be concatenated recursively.''' pass def to_cpu_and_numpy(self) -> None: '''Move tensors in stored objects to CPU and convert them to numpy arrays.''' pass def get_arrays(self): '''Returns the stored objects, numpified and moved to the CPU.''' pass
5
4
9
1
7
1
3
0.56
0
2
0
0
4
4
4
4
50
8
27
10
22
15
23
10
18
4
0
1
10
6,519
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/trainer_pt_utils.py
transformers.trainer_pt_utils.IterableDatasetShard
from torch.utils.data import Dataset, IterableDataset, RandomSampler, Sampler import math import torch import torch.distributed as dist class IterableDatasetShard(IterableDataset): """ Wraps a PyTorch `IterableDataset` to generate samples for one of the processes only. Instances of this class will always yield a number of samples that is a round multiple of the actual batch size (which is `batch_size x num_processes`). Depending on the value of the `drop_last` attribute, it will either stop the iteration at the first batch that would be too small or loop with indices from the beginning. On two processes with an iterable dataset yielding `[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]` with a batch size of 2: - the shard on process 0 will yield `[0, 1, 4, 5, 8, 9]` so will see batches `[0, 1]`, `[4, 5]`, `[8, 9]` - the shard on process 1 will yield `[2, 3, 6, 7, 10, 11]` so will see batches `[2, 3]`, `[6, 7]`, `[10, 11]` <Tip warning={true}> If your IterableDataset implements some randomization that needs to be applied the same way on all processes (for instance, a shuffling), you should use a `torch.Generator` in a `generator` attribute of the `dataset` to generate your random numbers and call the [`~trainer_pt_utils.IterableDatasetShard.set_epoch`] method of this object. It will set the seed of this `generator` to `seed + epoch` on all processes before starting the iteration. Alternatively, you can also implement a `set_epoch()` method in your iterable dataset to deal with this. </Tip> Args: dataset (`torch.utils.data.IterableDataset`): The dataset to split in several shards. batch_size (`int`, *optional*, defaults to 1): The size of the batches per shard. drop_last (`bool`, *optional*, defaults to `False`): Whether or not to drop the last incomplete batch or complete the last batches by using the samples from the beginning. num_processes (`int`, *optional*, defaults to 1): The number of processes running concurrently. process_index (`int`, *optional*, defaults to 0): The index of the current process. seed (`int`, *optional*, defaults to 0): A random seed that will be used for the random number generation in [`~trainer_pt_utils.IterableDatasetShard.set_epoch`]. 
""" def __init__(self, dataset: IterableDataset, batch_size: int=1, drop_last: bool=False, num_processes: int=1, process_index: int=0, seed: int=0): self.dataset = dataset self.batch_size = batch_size self.drop_last = drop_last self.num_processes = num_processes self.process_index = process_index self.seed = seed self.epoch = 0 self.num_examples = 0 def set_epoch(self, epoch): self.epoch = epoch if hasattr(self.dataset, 'set_epoch'): self.dataset.set_epoch(epoch) def __iter__(self): self.num_examples = 0 if not hasattr(self.dataset, 'set_epoch') and hasattr(self.dataset, 'generator') and isinstance(self.dataset.generator, torch.Generator): self.dataset.generator.manual_seed(self.seed + self.epoch) real_batch_size = self.batch_size * self.num_processes process_slice = range(self.process_index * self.batch_size, (self.process_index + 1) * self.batch_size) first_batch = None current_batch = [] for element in self.dataset: self.num_examples += 1 current_batch.append(element) if len(current_batch) == real_batch_size: for i in process_slice: yield current_batch[i] if first_batch is None: first_batch = current_batch.copy() current_batch = [] if not self.drop_last and len(current_batch) > 0: if first_batch is None: first_batch = current_batch.copy() while len(current_batch) < real_batch_size: current_batch += first_batch for i in process_slice: yield current_batch[i] def __len__(self): if self.drop_last: return len(self.dataset) // (self.batch_size * self.num_processes) * self.batch_size else: return math.ceil(len(self.dataset) / (self.batch_size * self.num_processes)) * self.batch_size
class IterableDatasetShard(IterableDataset): ''' Wraps a PyTorch `IterableDataset` to generate samples for one of the processes only. Instances of this class will always yield a number of samples that is a round multiple of the actual batch size (which is `batch_size x num_processes`). Depending on the value of the `drop_last` attribute, it will either stop the iteration at the first batch that would be too small or loop with indices from the beginning. On two processes with an iterable dataset yielding `[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]` with a batch size of 2: - the shard on process 0 will yield `[0, 1, 4, 5, 8, 9]` so will see batches `[0, 1]`, `[4, 5]`, `[8, 9]` - the shard on process 1 will yield `[2, 3, 6, 7, 10, 11]` so will see batches `[2, 3]`, `[6, 7]`, `[10, 11]` <Tip warning={true}> If your IterableDataset implements some randomization that needs to be applied the same way on all processes (for instance, a shuffling), you should use a `torch.Generator` in a `generator` attribute of the `dataset` to generate your random numbers and call the [`~trainer_pt_utils.IterableDatasetShard.set_epoch`] method of this object. It will set the seed of this `generator` to `seed + epoch` on all processes before starting the iteration. Alternatively, you can also implement a `set_epoch()` method in your iterable dataset to deal with this. </Tip> Args: dataset (`torch.utils.data.IterableDataset`): The dataset to split in several shards. batch_size (`int`, *optional*, defaults to 1): The size of the batches per shard. drop_last (`bool`, *optional*, defaults to `False`): Whether or not to drop the last incomplete batch or complete the last batches by using the samples from the beginning. num_processes (`int`, *optional*, defaults to 1): The number of processes running concurrently. process_index (`int`, *optional*, defaults to 0): The index of the current process. seed (`int`, *optional*, defaults to 0): A random seed that will be used for the random number generation in [`~trainer_pt_utils.IterableDatasetShard.set_epoch`]. ''' def __init__(self, dataset: IterableDataset, batch_size: int=1, drop_last: bool=False, num_processes: int=1, process_index: int=0, seed: int=0): pass def set_epoch(self, epoch): pass def __iter__(self): pass def __len__(self): pass
5
1
15
1
14
1
4
0.65
1
3
0
0
4
8
4
9
103
12
55
27
42
36
42
19
37
10
3
3
15
6,520
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/trainer_pt_utils.py
transformers.trainer_pt_utils.LabelSmoother
import torch.distributed as dist from dataclasses import dataclass, field import torch from torch import nn @dataclass class LabelSmoother: """ Adds label-smoothing on a pre-computed output from a Transformers model. Args: epsilon (`float`, *optional*, defaults to 0.1): The label smoothing factor. ignore_index (`int`, *optional*, defaults to -100): The index in the labels to ignore when computing the loss. """ epsilon: float = 0.1 ignore_index: int = -100 def __call__(self, model_output, labels, shift_labels=False): logits = model_output['logits'] if isinstance(model_output, dict) else model_output[0] if shift_labels: logits = logits[..., :-1, :].contiguous() labels = labels[..., 1:].contiguous() log_probs = -nn.functional.log_softmax(logits, dim=-1) if labels.dim() == log_probs.dim() - 1: labels = labels.unsqueeze(-1) padding_mask = labels.eq(self.ignore_index) labels = torch.clamp(labels, min=0) nll_loss = log_probs.gather(dim=-1, index=labels) smoothed_loss = log_probs.sum(dim=-1, keepdim=True, dtype=torch.float32) nll_loss.masked_fill_(padding_mask, 0.0) smoothed_loss.masked_fill_(padding_mask, 0.0) num_active_elements = padding_mask.numel() - padding_mask.long().sum() nll_loss = nll_loss.sum() / num_active_elements smoothed_loss = smoothed_loss.sum() / (num_active_elements * log_probs.shape[-1]) return (1 - self.epsilon) * nll_loss + self.epsilon * smoothed_loss
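A minimal usage sketch: with `epsilon=0.1` the returned loss blends `0.9 * nll_loss` with `0.1 * smoothed_loss`, and `-100` labels are masked out of both terms (the logits and labels below are arbitrary):

import torch

smoother = LabelSmoother(epsilon=0.1)
logits = torch.randn(2, 4, 10)  # (batch, seq_len, vocab_size)
labels = torch.tensor([[1, 2, 3, -100],   # -100 positions are ignored
                       [4, 5, -100, -100]])
loss = smoother({'logits': logits}, labels)
print(loss.shape)  # torch.Size([]) -- a scalar loss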
@dataclass class LabelSmoother: ''' Adds label-smoothing on a pre-computed output from a Transformers model. Args: epsilon (`float`, *optional*, defaults to 0.1): The label smoothing factor. ignore_index (`int`, *optional*, defaults to -100): The index in the labels to ignore when computing the loss. ''' def __call__(self, model_output, labels, shift_labels=False): pass
3
1
26
4
18
4
4
0.57
0
1
0
0
1
0
1
1
40
7
21
10
19
12
21
10
19
4
0
1
4
6,521
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/trainer_pt_utils.py
transformers.trainer_pt_utils.LayerWiseDummyOptimizer
import torch import torch.distributed as dist from typing import Any, Optional, Union class LayerWiseDummyOptimizer(torch.optim.Optimizer): """ For Layer-wise optimizers such as GaLoRE optimizer, the optimization step is already done through the post gradient hooks. Therefore the trick is to create a dummy optimizer that can take arbitrary args and kwargs and return a no-op during training. Initial idea from @hiyouga in LLaMA-Factory: https://github.com/hiyouga/LLaMA-Factory/commit/8664262cde3919e10eaecbd66e8c5d356856362e#diff-ebe08ab14496dfb9e06075f0fdd36799ef6d1535cc4dd4715b74c4e3e06fe3ba """ def __init__(self, optimizer_dict=None, *args, **kwargs): dummy_tensor = torch.randn(1, 1) self.optimizer_dict = optimizer_dict super().__init__([dummy_tensor], {'lr': kwargs.get('lr', 0.001)}) def zero_grad(self, set_to_none: bool=True) -> None: pass def step(self, closure=None) -> Optional[float]: pass
class LayerWiseDummyOptimizer(torch.optim.Optimizer): ''' For Layer-wise optimizers such as GaLoRE optimizer, the optimization step is already done through the post gradient hooks. Therefore the trick is to create a dummy optimizer that can take arbitrary args and kwargs and return a no-op during training. Initial idea from @hiyouga in LLaMA-Factory: https://github.com/hiyouga/LLaMA-Factory/commit/8664262cde3919e10eaecbd66e8c5d356856362e#diff-ebe08ab14496dfb9e06075f0fdd36799ef6d1535cc4dd4715b74c4e3e06fe3ba ''' def __init__(self, optimizer_dict=None, *args, **kwargs): pass def zero_grad(self, set_to_none: bool=True) -> None: pass def step(self, closure=None) -> Optional[float]: pass
4
1
3
0
3
0
1
0.89
0
3
0
0
3
1
3
3
21
4
9
6
5
8
9
6
5
1
0
0
3
6,522
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/trainer_pt_utils.py
transformers.trainer_pt_utils.LayerWiseDummyScheduler
from itertools import chain class LayerWiseDummyScheduler(LRScheduler): """ For Layer-wise optimizers such as GaLoRE optimizer, the optimization and scheduling step are already done through the post gradient hooks. Therefore the trick is to create a dummy scheduler that can take arbitrary args and kwargs and return a no-op during training. """ def __init__(self, *args, **kwargs): self.default_lr = kwargs['lr'] optimizer = LayerWiseDummyOptimizer(**kwargs) last_epoch = -1 super().__init__(optimizer, last_epoch) def get_lr(self): lrs = [self.default_lr] if self.optimizer is not None: param_wise_lrs = [[group['lr'] for group in optim.param_groups] for optim in self.optimizer.optimizer_dict.values()] lrs = list(chain(*param_wise_lrs)) return lrs def _get_closed_form_lr(self): return self.base_lrs
class LayerWiseDummyScheduler(LRScheduler): ''' For Layer-wise optimizers such as GaLoRE optimizer, the optimization and scheduling step are already done through the post gradient hooks. Therefore the trick is to create a dummy scheduler that can take arbitrary args and kwargs and return a no-op during training. ''' def __init__(self, *args, **kwargs): pass def get_lr(self): pass def _get_closed_form_lr(self): pass
4
1
7
1
5
1
1
0.47
1
4
1
0
3
1
3
11
30
5
17
10
13
8
15
10
11
2
1
1
4
6,523
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/trainer_pt_utils.py
transformers.trainer_pt_utils.LengthGroupedSampler
from .tokenization_utils_base import BatchEncoding import torch from torch.utils.data import Dataset, IterableDataset, RandomSampler, Sampler import torch.distributed as dist from typing import Any, Optional, Union class LengthGroupedSampler(Sampler): """ Sampler that samples indices in a way that groups together features of the dataset of roughly the same length while keeping a bit of randomness. """ def __init__(self, batch_size: int, dataset: Optional[Dataset]=None, lengths: Optional[list[int]]=None, model_input_name: Optional[str]=None, generator=None): if dataset is None and lengths is None: raise ValueError('One of dataset and lengths must be provided.') self.batch_size = batch_size if lengths is None: model_input_name = model_input_name if model_input_name is not None else 'input_ids' if not isinstance(dataset[0], (dict, BatchEncoding)) or model_input_name not in dataset[0]: raise ValueError(f"Can only automatically infer lengths for datasets whose items are dictionaries with an '{model_input_name}' key.") lengths = [len(feature[model_input_name]) for feature in dataset] elif isinstance(lengths, torch.Tensor): logger.info('If lengths is a torch.Tensor, LengthGroupedSampler will be slow. Converting lengths to list[int]...') lengths = lengths.tolist() self.lengths = lengths self.generator = generator def __len__(self): return len(self.lengths) def __iter__(self): indices = get_length_grouped_indices(self.lengths, self.batch_size, generator=self.generator) return iter(indices)
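A minimal sketch with explicit `lengths`, assuming the module-level `get_length_grouped_indices` helper is in scope; passing lengths directly skips iterating the dataset to infer them:

# Lengths of 8 hypothetical examples; similar lengths end up in the same batch.
lengths = [12, 3, 7, 7, 12, 3, 9, 5]
sampler = LengthGroupedSampler(batch_size=2, lengths=lengths)
order = list(iter(sampler))
print(sorted(order) == list(range(8)))  # True: a permutation of all indices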
class LengthGroupedSampler(Sampler): ''' Sampler that samples indices in a way that groups together features of the dataset of roughly the same length while keeping a bit of randomness. ''' def __init__(self, batch_size: int, dataset: Optional[Dataset]=None, lengths: Optional[list[int]]=None, model_input_name: Optional[str]=None, generator=None): pass def __len__(self): pass def __iter__(self): pass
4
1
12
1
11
0
3
0.11
1
6
1
0
3
3
3
7
44
5
35
15
24
4
19
8
15
6
2
2
8
6,524
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/trainer_pt_utils.py
transformers.trainer_pt_utils.SequentialDistributedSampler
import warnings import math from torch.utils.data import Dataset, IterableDataset, RandomSampler, Sampler import torch.distributed as dist class SequentialDistributedSampler(Sampler): """ Distributed Sampler that subsamples indices sequentially, making it easier to collate all results at the end. Even though we only use this sampler for eval and predict (no training), which means that the model params won't have to be synced (i.e. will not hang for synchronization even if varied number of forward passes), we still add extra samples to the sampler to make it evenly divisible (like in `DistributedSampler`) to make it easy to `gather` or `reduce` resulting tensors at the end of the loop. """ def __init__(self, dataset, num_replicas=None, rank=None, batch_size=None): warnings.warn('SequentialDistributedSampler is deprecated and will be removed in v5 of Transformers.', FutureWarning) if num_replicas is None: if not dist.is_available(): raise RuntimeError('Requires distributed package to be available') num_replicas = dist.get_world_size() if rank is None: if not dist.is_available(): raise RuntimeError('Requires distributed package to be available') rank = dist.get_rank() self.dataset = dataset self.num_replicas = num_replicas self.rank = rank num_samples = len(self.dataset) if batch_size is not None: self.num_samples = int(math.ceil(num_samples / (batch_size * num_replicas))) * batch_size else: self.num_samples = int(math.ceil(num_samples / num_replicas)) self.total_size = self.num_samples * self.num_replicas self.batch_size = batch_size def __iter__(self): indices = list(range(len(self.dataset))) indices += indices[:self.total_size - len(indices)] assert len(indices) == self.total_size, f'Indices length {len(indices)} and total size {self.total_size} mismatched' indices = indices[self.rank * self.num_samples:(self.rank + 1) * self.num_samples] assert len(indices) == self.num_samples, f'Indices length {len(indices)} and sample number {self.num_samples} mismatched' return iter(indices) def __len__(self): return self.num_samples
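Passing `num_replicas` and `rank` explicitly sidesteps the need for an initialized process group; a minimal single-process sketch (the class emits its deprecation warning on construction):

# Rank 0 of 2 replicas gets the first contiguous shard of a 10-sample dataset.
sampler = SequentialDistributedSampler(list(range(10)), num_replicas=2, rank=0)
print(list(sampler))  # [0, 1, 2, 3, 4] -- rank 1 would yield [5, 6, 7, 8, 9]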
class SequentialDistributedSampler(Sampler): ''' Distributed Sampler that subsamples indices sequentially, making it easier to collate all results at the end. Even though we only use this sampler for eval and predict (no training), which means that the model params won't have to be synced (i.e. will not hang for synchronization even if varied number of forward passes), we still add extra samples to the sampler to make it evenly divisible (like in `DistributedSampler`) to make it easy to `gather` or `reduce` resulting tensors at the end of the loop. ''' def __init__(self, dataset, num_replicas=None, rank=None, batch_size=None): pass def __iter__(self): pass def __len__(self): pass
4
1
14
1
12
1
3
0.27
1
5
0
0
3
6
3
7
54
7
37
12
33
10
29
12
25
6
2
2
8
6,525
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/trainer_pt_utils.py
transformers.trainer_pt_utils.ShardSampler
from torch.utils.data import Dataset, IterableDataset, RandomSampler, Sampler import math class ShardSampler(Sampler): """ Sampler that shards batches between several processes. Dispatches indices batch by batch: on 2 processes with batch size 4, the first two batches are `[0, 1, 2, 3, 4, 5, 6, 7]` and `[8, 9, 10, 11, 12, 13, 14, 15]`, which shard into `[0, 1, 2, 3]` and `[8, 9, 10, 11]` for GPU-0 and `[4, 5, 6, 7]` and `[12, 13, 14, 15]` for GPU-1. The sampler thus yields `[0, 1, 2, 3, 8, 9, 10, 11]` on GPU-0 and `[4, 5, 6, 7, 12, 13, 14, 15]` on GPU-1. """ def __init__(self, dataset: Dataset, batch_size: int=1, drop_last: bool=False, num_processes: int=1, process_index: int=0): self.dataset = dataset self.batch_size = batch_size self.drop_last = drop_last self.num_processes = num_processes self.process_index = process_index self.total_batch_size = total_batch_size = batch_size * num_processes num_batches = len(dataset) // total_batch_size if drop_last else math.ceil(len(dataset) / total_batch_size) self.total_num_samples = num_batches * total_batch_size def __iter__(self): indices = list(range(len(self.dataset))) while len(indices) < self.total_num_samples: indices += indices[:self.total_num_samples - len(indices)] result = [] for batch_start in range(self.batch_size * self.process_index, self.total_num_samples, self.total_batch_size): result += indices[batch_start:batch_start + self.batch_size] return iter(result) def __len__(self): return self.total_num_samples // self.num_processes
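A self-contained sketch reproducing the two-process example from the docstring above:

# 2 processes, batch size 4, 16 samples, as in the docstring.
sampler0 = ShardSampler(list(range(16)), batch_size=4, num_processes=2, process_index=0)
sampler1 = ShardSampler(list(range(16)), batch_size=4, num_processes=2, process_index=1)
print(list(sampler0))  # [0, 1, 2, 3, 8, 9, 10, 11]
print(list(sampler1))  # [4, 5, 6, 7, 12, 13, 14, 15]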
class ShardSampler(Sampler): ''' Sampler that shards batches between several processes. Dispatches indices batch by batch: on 2 processes with batch size 4, the first two batches are `[0, 1, 2, 3, 4, 5, 6, 7]` and `[8, 9, 10, 11, 12, 13, 14, 15]`, which shard into `[0, 1, 2, 3]` and `[8, 9, 10, 11]` for GPU-0 and `[4, 5, 6, 7]` and `[12, 13, 14, 15]` for GPU-1. The sampler thus yields `[0, 1, 2, 3, 8, 9, 10, 11]` on GPU-0 and `[4, 5, 6, 7, 12, 13, 14, 15]` on GPU-1. ''' def __init__(self, dataset: Dataset, batch_size: int=1, drop_last: bool=False, num_processes: int=1, process_index: int=0): pass def __iter__(self): pass def __len__(self): pass
4
1
11
2
9
1
2
0.33
1
4
0
0
3
7
3
7
45
9
27
22
16
9
20
15
16
3
2
1
6
6,526
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/trainer_seq2seq.py
transformers.trainer_seq2seq.Seq2SeqTrainer
from torch.utils.data import Dataset from .trainer import Trainer from pathlib import Path import contextlib from typing import TYPE_CHECKING, Any, Callable, Optional, Union from .integrations.deepspeed import is_deepspeed_zero3_enabled from .integrations.fsdp import is_fsdp_managed_module from copy import deepcopy from .generation.configuration_utils import GenerationConfig from torch import nn import torch from torch.distributed.fsdp import FullyShardedDataParallel from .utils.deprecation import deprecate_kwarg class Seq2SeqTrainer(Trainer): @deprecate_kwarg('tokenizer', new_name='processing_class', version='5.0.0', raise_if_both_names=True) def __init__(self, model: Optional[Union['PreTrainedModel', nn.Module]]=None, args: Optional['TrainingArguments']=None, data_collator: Optional['DataCollator']=None, train_dataset: Optional[Union[Dataset, 'IterableDataset', 'datasets.Dataset']]=None, eval_dataset: Optional[Union[Dataset, dict[str, Dataset]]]=None, processing_class: Optional[Union['PreTrainedTokenizerBase', 'BaseImageProcessor', 'FeatureExtractionMixin', 'ProcessorMixin']]=None, model_init: Optional[Callable[[], 'PreTrainedModel']]=None, compute_loss_func: Optional[Callable]=None, compute_metrics: Optional[Callable[['EvalPrediction'], dict]]=None, callbacks: Optional[list['TrainerCallback']]=None, optimizers: tuple[Optional[torch.optim.Optimizer], Optional[torch.optim.lr_scheduler.LambdaLR]]=(None, None), preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]]=None): super().__init__(model=model, args=args, data_collator=data_collator, train_dataset=train_dataset, eval_dataset=eval_dataset, processing_class=processing_class, model_init=model_init, compute_loss_func=compute_loss_func, compute_metrics=compute_metrics, callbacks=callbacks, optimizers=optimizers, preprocess_logits_for_metrics=preprocess_logits_for_metrics) if self.args.generation_config is not None: gen_config = self.load_generation_config(self.args.generation_config) self.model.generation_config = gen_config @staticmethod def load_generation_config(gen_config_arg: Union[str, GenerationConfig]) -> GenerationConfig: """ Loads a `~generation.GenerationConfig` from the `Seq2SeqTrainingArguments.generation_config` arguments. Args: gen_config_arg (`str` or [`~generation.GenerationConfig]`): `Seq2SeqTrainingArguments.generation_config` argument. Returns: A `~generation.GenerationConfig`. """ if isinstance(gen_config_arg, GenerationConfig): gen_config = deepcopy(gen_config_arg) else: pretrained_model_name = Path(gen_config_arg) if isinstance(gen_config_arg, str) else gen_config_arg config_file_name = None if pretrained_model_name.is_file(): config_file_name = pretrained_model_name.name pretrained_model_name = pretrained_model_name.parent elif pretrained_model_name.is_dir(): pass else: pretrained_model_name = gen_config_arg gen_config = GenerationConfig.from_pretrained(pretrained_model_name, config_file_name) try: gen_config.validate(strict=True) except ValueError as exc: raise ValueError(str(exc) + '\n\nFix these issues to train your model.') return gen_config def evaluate(self, eval_dataset: Optional[Dataset]=None, ignore_keys: Optional[list[str]]=None, metric_key_prefix: str='eval', **gen_kwargs) -> dict[str, float]: """ Run evaluation and returns metrics. The calling script will be responsible for providing a method to compute metrics, as they are task-dependent (pass it to the init `compute_metrics` argument). You can also subclass and override this method to inject custom behavior. 
Args: eval_dataset (`Dataset`, *optional*): Pass a dataset if you wish to override `self.eval_dataset`. If it is an [`~datasets.Dataset`], columns not accepted by the `model.forward()` method are automatically removed. It must implement the `__len__` method. ignore_keys (`list[str]`, *optional*): A list of keys in the output of your model (if it is a dictionary) that should be ignored when gathering predictions. metric_key_prefix (`str`, *optional*, defaults to `"eval"`): An optional prefix to be used as the metrics key prefix. For example the metrics "bleu" will be named "eval_bleu" if the prefix is `"eval"` (default) max_length (`int`, *optional*): The maximum target length to use when predicting with the generate method. num_beams (`int`, *optional*): Number of beams for beam search that will be used when predicting with the generate method. 1 means no beam search. gen_kwargs: Additional `generate` specific kwargs. Returns: A dictionary containing the evaluation loss and the potential metrics computed from the predictions. The dictionary also contains the epoch number which comes from the training state. """ gen_kwargs = gen_kwargs.copy() if gen_kwargs.get('max_length') is None and gen_kwargs.get('max_new_tokens') is None and (self.args.generation_max_length is not None): gen_kwargs['max_length'] = self.args.generation_max_length if gen_kwargs.get('num_beams') is None and self.args.generation_num_beams is not None: gen_kwargs['num_beams'] = self.args.generation_num_beams self.gather_function = self.accelerator.gather self._gen_kwargs = gen_kwargs return super().evaluate(eval_dataset, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix) def predict(self, test_dataset: Dataset, ignore_keys: Optional[list[str]]=None, metric_key_prefix: str='test', **gen_kwargs) -> 'PredictionOutput': """ Run prediction and returns predictions and potential metrics. Depending on the dataset and your use case, your test dataset may contain labels. In that case, this method will also return metrics, like in `evaluate()`. Args: test_dataset (`Dataset`): Dataset to run the predictions on. If it is a [`~datasets.Dataset`], columns not accepted by the `model.forward()` method are automatically removed. Has to implement the method `__len__` ignore_keys (`list[str]`, *optional*): A list of keys in the output of your model (if it is a dictionary) that should be ignored when gathering predictions. metric_key_prefix (`str`, *optional*, defaults to `"eval"`): An optional prefix to be used as the metrics key prefix. For example the metrics "bleu" will be named "eval_bleu" if the prefix is `"eval"` (default) max_length (`int`, *optional*): The maximum target length to use when predicting with the generate method. num_beams (`int`, *optional*): Number of beams for beam search that will be used when predicting with the generate method. 1 means no beam search. gen_kwargs: Additional `generate` specific kwargs. <Tip> If your predictions or labels have different sequence lengths (for instance because you're doing dynamic padding in a token classification task) the predictions will be padded (on the right) to allow for concatenation into one array. The padding index is -100. </Tip> Returns: *NamedTuple* A namedtuple with the following keys: - predictions (`np.ndarray`): The predictions on `test_dataset`. - label_ids (`np.ndarray`, *optional*): The labels (if the dataset contained some). - metrics (`dict[str, float]`, *optional*): The potential dictionary of metrics (if the dataset contained labels). 
""" gen_kwargs = gen_kwargs.copy() if gen_kwargs.get('max_length') is None and gen_kwargs.get('max_new_tokens') is None and (self.args.generation_max_length is not None): gen_kwargs['max_length'] = self.args.generation_max_length if gen_kwargs.get('num_beams') is None and self.args.generation_num_beams is not None: gen_kwargs['num_beams'] = self.args.generation_num_beams self.gather_function = self.accelerator.gather self._gen_kwargs = gen_kwargs return super().predict(test_dataset, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix) def prediction_step(self, model: nn.Module, inputs: dict[str, Union[torch.Tensor, Any]], prediction_loss_only: bool, ignore_keys: Optional[list[str]]=None, **gen_kwargs) -> tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: """ Perform an evaluation step on `model` using `inputs`. Subclass and override to inject custom behavior. Args: model (`nn.Module`): The model to evaluate. inputs (`dict[str, Union[torch.Tensor, Any]]`): The inputs and targets of the model. The dictionary will be unpacked before being fed to the model. Most models expect the targets under the argument `labels`. Check your model's documentation for all accepted arguments. prediction_loss_only (`bool`): Whether or not to return the loss only. gen_kwargs: Additional `generate` specific kwargs. Return: tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: A tuple with the loss, logits and labels (each being optional). """ if not self.args.predict_with_generate or prediction_loss_only: return super().prediction_step(model, inputs, prediction_loss_only=prediction_loss_only, ignore_keys=ignore_keys) has_labels = 'labels' in inputs inputs = self._prepare_inputs(inputs) if len(gen_kwargs) == 0 and hasattr(self, '_gen_kwargs'): gen_kwargs = self._gen_kwargs.copy() if 'num_beams' in gen_kwargs and gen_kwargs['num_beams'] is None: gen_kwargs.pop('num_beams') if 'max_length' in gen_kwargs and gen_kwargs['max_length'] is None: gen_kwargs.pop('max_length') default_synced_gpus = is_deepspeed_zero3_enabled() or is_fsdp_managed_module(self.model) gen_kwargs['synced_gpus'] = gen_kwargs.get('synced_gpus', default_synced_gpus) generation_inputs = inputs.copy() if 'labels' in generation_inputs and 'decoder_input_ids' in generation_inputs and (generation_inputs['labels'].shape == generation_inputs['decoder_input_ids'].shape): generation_inputs = {k: v for k, v in inputs.items() if k not in ('decoder_input_ids', 'decoder_attention_mask')} summon_full_params_context = FullyShardedDataParallel.summon_full_params(self.model) if isinstance(self.model, FullyShardedDataParallel) else contextlib.nullcontext() with summon_full_params_context: generated_tokens = self.model.generate(**generation_inputs, **gen_kwargs) if self.model.generation_config._from_model_config: self.model.generation_config._from_model_config = False gen_config = self.model.generation_config if generated_tokens.shape[-1] < gen_config.max_length: generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_config.max_length) elif gen_config.max_new_tokens is not None and generated_tokens.shape[-1] < gen_config.max_new_tokens + 1: generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_config.max_new_tokens + 1) with torch.no_grad(): if has_labels: with self.compute_loss_context_manager(): outputs = model(**inputs) if self.label_smoother is not None: loss = self.label_smoother(outputs, inputs['labels']).detach().mean() else: loss = (outputs['loss'] if isinstance(outputs, dict) else 
outputs[0]).detach().mean() else: loss = None if self.args.prediction_loss_only: return (loss, None, None) if has_labels: labels = inputs['labels'] if labels.shape[-1] < gen_config.max_length: labels = self._pad_tensors_to_max_len(labels, gen_config.max_length) elif gen_config.max_new_tokens is not None and labels.shape[-1] < gen_config.max_new_tokens + 1: labels = self._pad_tensors_to_max_len(labels, gen_config.max_new_tokens + 1) else: labels = None return (loss, generated_tokens, labels) def _pad_tensors_to_max_len(self, tensor, max_length): if self.processing_class is not None and hasattr(self.processing_class, 'pad_token_id'): pad_token_id = self.processing_class.pad_token_id if self.processing_class.pad_token_id is not None else self.processing_class.eos_token_id elif self.model.config.pad_token_id is not None: pad_token_id = self.model.config.pad_token_id else: raise ValueError('Pad_token_id must be set in the configuration of the model, in order to pad tensors') padded_tensor = pad_token_id * torch.ones((tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device) padded_tensor[:, :tensor.shape[-1]] = tensor return padded_tensor
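A standalone sketch of the right-padding done by `_pad_tensors_to_max_len` above (the pad token id and max length are hypothetical):

import torch

# Right-pad generated ids from length 3 to max_length=6 with pad_token_id=0.
pad_token_id, max_length = 0, 6
tensor = torch.tensor([[5, 6, 7]])
padded = pad_token_id * torch.ones((tensor.shape[0], max_length), dtype=tensor.dtype)
padded[:, :tensor.shape[-1]] = tensor
print(padded.tolist())  # [[5, 6, 7, 0, 0, 0]]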
class Seq2SeqTrainer(Trainer): @deprecate_kwarg('tokenizer', new_name='processing_class', version='5.0.0', raise_if_both_names=True) def __init__(self, model: Optional[Union['PreTrainedModel', nn.Module]]=None, args: Optional['TrainingArguments']=None, data_collator: Optional['DataCollator']=None, train_dataset: Optional[Union[Dataset, 'IterableDataset', 'datasets.Dataset']]=None, eval_dataset: Optional[Union[Dataset, dict[str, Dataset]]]=None, processing_class: Optional[Union['PreTrainedTokenizerBase', 'BaseImageProcessor', 'FeatureExtractionMixin', 'ProcessorMixin']]=None, model_init: Optional[Callable[[], 'PreTrainedModel']]=None, compute_loss_func: Optional[Callable]=None, compute_metrics: Optional[Callable[['EvalPrediction'], dict]]=None, callbacks: Optional[list['TrainerCallback']]=None, optimizers: tuple[Optional[torch.optim.Optimizer], Optional[torch.optim.lr_scheduler.LambdaLR]]=(None, None), preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]]=None): pass @staticmethod def load_generation_config(gen_config_arg: Union[str, GenerationConfig]) -> GenerationConfig: ''' Loads a `~generation.GenerationConfig` from the `Seq2SeqTrainingArguments.generation_config` arguments. Args: gen_config_arg (`str` or [`~generation.GenerationConfig]`): `Seq2SeqTrainingArguments.generation_config` argument. Returns: A `~generation.GenerationConfig`. ''' pass def evaluate(self, eval_dataset: Optional[Dataset]=None, ignore_keys: Optional[list[str]]=None, metric_key_prefix: str='eval', **gen_kwargs) -> dict[str, float]: ''' Run evaluation and returns metrics. The calling script will be responsible for providing a method to compute metrics, as they are task-dependent (pass it to the init `compute_metrics` argument). You can also subclass and override this method to inject custom behavior. Args: eval_dataset (`Dataset`, *optional*): Pass a dataset if you wish to override `self.eval_dataset`. If it is an [`~datasets.Dataset`], columns not accepted by the `model.forward()` method are automatically removed. It must implement the `__len__` method. ignore_keys (`list[str]`, *optional*): A list of keys in the output of your model (if it is a dictionary) that should be ignored when gathering predictions. metric_key_prefix (`str`, *optional*, defaults to `"eval"`): An optional prefix to be used as the metrics key prefix. For example the metrics "bleu" will be named "eval_bleu" if the prefix is `"eval"` (default) max_length (`int`, *optional*): The maximum target length to use when predicting with the generate method. num_beams (`int`, *optional*): Number of beams for beam search that will be used when predicting with the generate method. 1 means no beam search. gen_kwargs: Additional `generate` specific kwargs. Returns: A dictionary containing the evaluation loss and the potential metrics computed from the predictions. The dictionary also contains the epoch number which comes from the training state. ''' pass def predict(self, test_dataset: Dataset, ignore_keys: Optional[list[str]]=None, metric_key_prefix: str='test', **gen_kwargs) -> 'PredictionOutput': ''' Run prediction and returns predictions and potential metrics. Depending on the dataset and your use case, your test dataset may contain labels. In that case, this method will also return metrics, like in `evaluate()`. Args: test_dataset (`Dataset`): Dataset to run the predictions on. If it is a [`~datasets.Dataset`], columns not accepted by the `model.forward()` method are automatically removed. 
Has to implement the method `__len__` ignore_keys (`list[str]`, *optional*): A list of keys in the output of your model (if it is a dictionary) that should be ignored when gathering predictions. metric_key_prefix (`str`, *optional*, defaults to `"eval"`): An optional prefix to be used as the metrics key prefix. For example the metrics "bleu" will be named "eval_bleu" if the prefix is `"eval"` (default) max_length (`int`, *optional*): The maximum target length to use when predicting with the generate method. num_beams (`int`, *optional*): Number of beams for beam search that will be used when predicting with the generate method. 1 means no beam search. gen_kwargs: Additional `generate` specific kwargs. <Tip> If your predictions or labels have different sequence lengths (for instance because you're doing dynamic padding in a token classification task) the predictions will be padded (on the right) to allow for concatenation into one array. The padding index is -100. </Tip> Returns: *NamedTuple* A namedtuple with the following keys: - predictions (`np.ndarray`): The predictions on `test_dataset`. - label_ids (`np.ndarray`, *optional*): The labels (if the dataset contained some). - metrics (`dict[str, float]`, *optional*): The potential dictionary of metrics (if the dataset contained labels). ''' pass def prediction_step(self, model: nn.Module, inputs: dict[str, Union[torch.Tensor, Any]], prediction_loss_only: bool, ignore_keys: Optional[list[str]]=None, **gen_kwargs) -> tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: ''' Perform an evaluation step on `model` using `inputs`. Subclass and override to inject custom behavior. Args: model (`nn.Module`): The model to evaluate. inputs (`dict[str, Union[torch.Tensor, Any]]`): The inputs and targets of the model. The dictionary will be unpacked before being fed to the model. Most models expect the targets under the argument `labels`. Check your model's documentation for all accepted arguments. prediction_loss_only (`bool`): Whether or not to return the loss only. gen_kwargs: Additional `generate` specific kwargs. Return: tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: A tuple with the loss, logits and labels (each being optional). ''' pass def _pad_tensors_to_max_len(self, tensor, max_length): pass
Metrics: 9, 4, 55, 7, 30, 18, 6, 0.6, 1, 14, 1, 0, 5, 2, 6, 91, 339, 46, 183, 63, 139, 110, 94, 24, 87, 17, 1, 3, 36
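Since `load_generation_config` in the skeleton above is a `@staticmethod`, it can be exercised without building a trainer. A hedged sketch (assumes `transformers` with torch installed; validation may warn or raise for inconsistent configs):

```python
from transformers import GenerationConfig, Seq2SeqTrainer

# `load_generation_config` is a @staticmethod, so no trainer instance is needed.
# Passing a ready GenerationConfig through returns a validated copy of it.
gen_config = GenerationConfig(max_length=64, num_beams=4)
resolved = Seq2SeqTrainer.load_generation_config(gen_config)
print(resolved.num_beams)  # 4
```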
6,527
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/trainer_utils.py
transformers.trainer_utils.BestRun
from typing import Any, NamedTuple, Optional, Union


class BestRun(NamedTuple):
    """
    The best run found by a hyperparameter search (see [`~Trainer.hyperparameter_search`]).

    Parameters:
        run_id (`str`):
            The id of the best run (if models were saved, the corresponding checkpoint will be in the folder ending
            with run-{run_id}).
        objective (`float`):
            The objective that was obtained for this run.
        hyperparameters (`dict[str, Any]`):
            The hyperparameters picked to get this run.
        run_summary (`Optional[Any]`):
            A summary of tuning experiments. `ray.tune.ExperimentAnalysis` object for Ray backend.
    """

    run_id: str
    objective: Union[float, list[float]]
    hyperparameters: dict[str, Any]
    run_summary: Optional[Any] = None
class BestRun(NamedTuple):
    '''
    The best run found by a hyperparameter search (see [`~Trainer.hyperparameter_search`]).

    Parameters:
        run_id (`str`):
            The id of the best run (if models were saved, the corresponding checkpoint will be in the folder ending
            with run-{run_id}).
        objective (`float`):
            The objective that was obtained for this run.
        hyperparameters (`dict[str, Any]`):
            The hyperparameters picked to get this run.
        run_summary (`Optional[Any]`):
            A summary of tuning experiments. `ray.tune.ExperimentAnalysis` object for Ray backend.
    '''
    pass
Metrics: 1, 1, 0, 0, 0, 0, 0, 2.6, 1, 0, 0, 0, 0, 0, 0, 0, 20, 2, 5, 2, 4, 13, 5, 2, 4, 0, 1, 0, 0
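A quick sketch of the shape of this record's class; the values are made up, and in practice `Trainer.hyperparameter_search` constructs it for you:

```python
from transformers.trainer_utils import BestRun

# Constructing a BestRun by hand to show the shape of what
# `Trainer.hyperparameter_search` returns (values here are made up).
best = BestRun(run_id="7", objective=0.312, hyperparameters={"learning_rate": 3e-5})
print(best.run_id, best.objective)
print(best.hyperparameters["learning_rate"])
print(best.run_summary)  # None unless the backend (e.g. Ray) provides one
```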
6,528
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/trainer_utils.py
transformers.trainer_utils.EvalLoopOutput
import numpy as np
from typing import Any, NamedTuple, Optional, Union


class EvalLoopOutput(NamedTuple):
    predictions: Union[np.ndarray, tuple[np.ndarray]]
    label_ids: Optional[Union[np.ndarray, tuple[np.ndarray]]]
    metrics: Optional[dict[str, float]]
    num_samples: Optional[int]
class EvalLoopOutput(NamedTuple): pass
Metrics: 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 5, 0, 5, 1, 4, 0, 5, 1, 4, 0, 1, 0, 0
6,529
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/trainer_utils.py
transformers.trainer_utils.EvalPrediction
from typing import Any, NamedTuple, Optional, Union
import numpy as np


class EvalPrediction:
    """
    Evaluation output (always contains labels), to be used to compute metrics.

    Parameters:
        predictions (`np.ndarray`): Predictions of the model.
        label_ids (`np.ndarray`): Targets to be matched.
        inputs (`np.ndarray`, *optional*): Input data passed to the model.
        losses (`np.ndarray`, *optional*): Loss values computed during evaluation.
    """

    def __init__(self, predictions: Union[np.ndarray, tuple[np.ndarray]], label_ids: Union[np.ndarray, tuple[np.ndarray]], inputs: Optional[Union[np.ndarray, tuple[np.ndarray]]]=None, losses: Optional[Union[np.ndarray, tuple[np.ndarray]]]=None):
        self.predictions = predictions
        self.label_ids = label_ids
        self.inputs = inputs
        self.losses = losses
        self.elements = (self.predictions, self.label_ids)
        if self.inputs is not None:
            self.elements += (self.inputs,)
        if self.losses is not None:
            self.elements += (self.losses,)

    def __iter__(self):
        return iter(self.elements)

    def __getitem__(self, idx):
        if idx < 0 or idx >= len(self.elements):
            raise IndexError('tuple index out of range')
        return self.elements[idx]
class EvalPrediction:
    '''
    Evaluation output (always contains labels), to be used to compute metrics.

    Parameters:
        predictions (`np.ndarray`): Predictions of the model.
        label_ids (`np.ndarray`): Targets to be matched.
        inputs (`np.ndarray`, *optional*): Input data passed to the model.
        losses (`np.ndarray`, *optional*): Loss values computed during evaluation.
    '''

    def __init__(self, predictions: Union[np.ndarray, tuple[np.ndarray]], label_ids: Union[np.ndarray, tuple[np.ndarray]], inputs: Optional[Union[np.ndarray, tuple[np.ndarray]]]=None, losses: Optional[Union[np.ndarray, tuple[np.ndarray]]]=None):
        pass

    def __iter__(self):
        pass

    def __getitem__(self, idx):
        pass
Metrics: 4, 1, 7, 0, 7, 0, 2, 0.35, 0, 1, 0, 0, 3, 5, 3, 3, 35, 4, 23, 15, 13, 8, 17, 9, 13, 3, 0, 1, 6
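A toy `compute_metrics` callback showing how `EvalPrediction` is typically consumed (the logits and labels are made up):

```python
import numpy as np
from transformers.trainer_utils import EvalPrediction

# A toy `compute_metrics` for a 2-class problem; `p.predictions` holds raw logits.
def compute_metrics(p: EvalPrediction) -> dict:
    preds = np.argmax(p.predictions, axis=-1)
    return {"accuracy": float((preds == p.label_ids).mean())}

p = EvalPrediction(predictions=np.array([[0.1, 0.9], [0.8, 0.2]]), label_ids=np.array([1, 1]))
print(compute_metrics(p))  # {'accuracy': 0.5}

# EvalPrediction also supports tuple-style unpacking via __iter__:
preds_arr, labels_arr = p
```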
6,530
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/trainer_utils.py
transformers.trainer_utils.EvaluationStrategy
from .utils import ExplicitEnum, is_psutil_available, is_torch_available, is_torch_cuda_available, is_torch_hpu_available, is_torch_mlu_available, is_torch_mps_available, is_torch_musa_available, is_torch_npu_available, is_torch_xla_available, is_torch_xpu_available, requires_backends


class EvaluationStrategy(ExplicitEnum):
    NO = 'no'
    STEPS = 'steps'
    EPOCH = 'epoch'
class EvaluationStrategy(ExplicitEnum): pass
Metrics: 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 4, 0, 4, 4, 3, 0, 4, 4, 3, 0, 1, 0, 0
6,531
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/trainer_utils.py
transformers.trainer_utils.FSDPOption
from .utils import ExplicitEnum, is_psutil_available, is_torch_available, is_torch_cuda_available, is_torch_hpu_available, is_torch_mlu_available, is_torch_mps_available, is_torch_musa_available, is_torch_npu_available, is_torch_xla_available, is_torch_xpu_available, requires_backends


class FSDPOption(ExplicitEnum):
    FULL_SHARD = 'full_shard'
    SHARD_GRAD_OP = 'shard_grad_op'
    NO_SHARD = 'no_shard'
    HYBRID_SHARD = 'hybrid_shard'
    HYBRID_SHARD_ZERO2 = 'hybrid_shard_zero2'
    OFFLOAD = 'offload'
    AUTO_WRAP = 'auto_wrap'
class FSDPOption(ExplicitEnum): pass
Metrics: 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 8, 0, 8, 8, 7, 0, 8, 8, 7, 0, 1, 0, 0
6,532
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/trainer_utils.py
transformers.trainer_utils.HPSearchBackend
from .utils import ExplicitEnum, is_psutil_available, is_torch_available, is_torch_cuda_available, is_torch_hpu_available, is_torch_mlu_available, is_torch_mps_available, is_torch_musa_available, is_torch_npu_available, is_torch_xla_available, is_torch_xpu_available, requires_backends


class HPSearchBackend(ExplicitEnum):
    OPTUNA = 'optuna'
    RAY = 'ray'
    SIGOPT = 'sigopt'
    WANDB = 'wandb'
class HPSearchBackend(ExplicitEnum): pass
Metrics: 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 5, 0, 5, 5, 4, 0, 5, 5, 4, 0, 1, 0, 0
6,533
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/trainer_utils.py
transformers.trainer_utils.HubStrategy
from .utils import ExplicitEnum, is_psutil_available, is_torch_available, is_torch_cuda_available, is_torch_hpu_available, is_torch_mlu_available, is_torch_mps_available, is_torch_musa_available, is_torch_npu_available, is_torch_xla_available, is_torch_xpu_available, requires_backends


class HubStrategy(ExplicitEnum):
    END = 'end'
    EVERY_SAVE = 'every_save'
    CHECKPOINT = 'checkpoint'
    ALL_CHECKPOINTS = 'all_checkpoints'
class HubStrategy(ExplicitEnum): pass
Metrics: 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 5, 0, 5, 5, 4, 0, 5, 5, 4, 0, 1, 0, 0
6,534
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/trainer_utils.py
transformers.trainer_utils.IntervalStrategy
from .utils import ExplicitEnum, is_psutil_available, is_torch_available, is_torch_cuda_available, is_torch_hpu_available, is_torch_mlu_available, is_torch_mps_available, is_torch_musa_available, is_torch_npu_available, is_torch_xla_available, is_torch_xpu_available, requires_backends


class IntervalStrategy(ExplicitEnum):
    NO = 'no'
    STEPS = 'steps'
    EPOCH = 'epoch'
class IntervalStrategy(ExplicitEnum): pass
Metrics: 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 4, 0, 4, 4, 3, 0, 4, 4, 3, 0, 1, 0, 0
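`EvaluationStrategy`, `IntervalStrategy`, and `SaveStrategy` above are `ExplicitEnum`s, so `TrainingArguments` accepts their string values directly. A minimal sketch (assumes `transformers`, `torch`, and `accelerate` are installed; `out` is a throwaway directory):

```python
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="out",
    eval_strategy="steps",   # IntervalStrategy.STEPS
    eval_steps=500,
    save_strategy="epoch",   # SaveStrategy.EPOCH
    logging_strategy="steps",
)
print(args.eval_strategy, args.save_strategy)
```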
6,535
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/trainer_utils.py
transformers.trainer_utils.PredictionOutput
from typing import Any, NamedTuple, Optional, Union
import numpy as np


class PredictionOutput(NamedTuple):
    predictions: Union[np.ndarray, tuple[np.ndarray]]
    label_ids: Optional[Union[np.ndarray, tuple[np.ndarray]]]
    metrics: Optional[dict[str, float]]
class PredictionOutput(NamedTuple): pass
Metrics: 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 4, 0, 4, 1, 3, 0, 4, 1, 3, 0, 1, 0, 0
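`PredictionOutput` is a plain `NamedTuple`, so the result of `Trainer.predict` unpacks positionally; here it is constructed by hand with made-up values:

```python
import numpy as np
from transformers.trainer_utils import PredictionOutput

out = PredictionOutput(predictions=np.zeros((2, 3)), label_ids=np.array([0, 2]), metrics={"test_loss": 0.7})
predictions, label_ids, metrics = out
print(predictions.shape, label_ids, metrics["test_loss"])
```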
6,536
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/trainer_utils.py
transformers.trainer_utils.RemoveColumnsCollator
from typing import Any, NamedTuple, Optional, Union


class RemoveColumnsCollator:
    """Wrap the data collator to remove unused columns before they are passed to the collator."""

    def __init__(self, data_collator, signature_columns, logger=None, model_name: Optional[str]=None, description: Optional[str]=None):
        self.data_collator = data_collator
        self.signature_columns = signature_columns
        self.logger = logger
        self.description = description
        self.model_name = model_name
        self.message_logged = False

    def _remove_columns(self, feature: dict) -> dict:
        if not isinstance(feature, dict):
            return feature
        if not self.message_logged and self.logger and self.model_name:
            ignored_columns = list(set(feature.keys()) - set(self.signature_columns))
            if len(ignored_columns) > 0:
                dset_description = '' if self.description is None else f'in the {self.description} set'
                self.logger.info(f"The following columns {dset_description} don't have a corresponding argument in `{self.model_name}.forward` and have been ignored: {', '.join(ignored_columns)}. If {', '.join(ignored_columns)} are not expected by `{self.model_name}.forward`, you can safely ignore this message.")
                self.message_logged = True
        return {k: v for k, v in feature.items() if k in self.signature_columns}

    def __call__(self, features: list[dict]):
        features = [self._remove_columns(feature) for feature in features]
        return self.data_collator(features)
class RemoveColumnsCollator:
    '''Wrap the data collator to remove unused columns before they are passed to the collator.'''

    def __init__(self, data_collator, signature_columns, logger=None, model_name: Optional[str]=None, description: Optional[str]=None):
        pass

    def _remove_columns(self, feature: dict) -> dict:
        pass

    def __call__(self, features: list[dict]):
        pass
Metrics: 4, 1, 11, 0, 11, 0, 2, 0.03, 0, 4, 0, 0, 3, 6, 3, 3, 37, 3, 33, 19, 22, 1, 21, 12, 17, 5, 0, 2, 7
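A minimal sketch of `RemoveColumnsCollator` wrapping an identity collator; the column names and the stand-in collator are made up:

```python
from transformers.trainer_utils import RemoveColumnsCollator

features = [{"input_ids": [1, 2], "labels": [1], "source_file": "a.txt"}]
collator = RemoveColumnsCollator(
    data_collator=lambda feats: feats,          # stand-in for a real collator
    signature_columns=["input_ids", "labels"],  # what model.forward() accepts
)
print(collator(features))  # 'source_file' has been stripped
```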
6,537
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/trainer_utils.py
transformers.trainer_utils.SaveStrategy
from .utils import ExplicitEnum, is_psutil_available, is_torch_available, is_torch_cuda_available, is_torch_hpu_available, is_torch_mlu_available, is_torch_mps_available, is_torch_musa_available, is_torch_npu_available, is_torch_xla_available, is_torch_xpu_available, requires_backends


class SaveStrategy(ExplicitEnum):
    NO = 'no'
    STEPS = 'steps'
    EPOCH = 'epoch'
    BEST = 'best'
class SaveStrategy(ExplicitEnum): pass
Metrics: 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 5, 0, 5, 5, 4, 0, 5, 5, 4, 0, 1, 0, 0
6,538
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/trainer_utils.py
transformers.trainer_utils.SchedulerType
from .utils import ExplicitEnum, is_psutil_available, is_torch_available, is_torch_cuda_available, is_torch_hpu_available, is_torch_mlu_available, is_torch_mps_available, is_torch_musa_available, is_torch_npu_available, is_torch_xla_available, is_torch_xpu_available, requires_backends


class SchedulerType(ExplicitEnum):
    """
    Scheduler names for the parameter `lr_scheduler_type` in [`TrainingArguments`].
    By default, it uses "linear". Internally, this retrieves `get_linear_schedule_with_warmup` scheduler from [`Trainer`].
    Scheduler types:
       - "linear" = [`get_linear_schedule_with_warmup`]
       - "cosine" = [`get_cosine_schedule_with_warmup`]
       - "cosine_with_restarts" = [`get_cosine_with_hard_restarts_schedule_with_warmup`]
       - "polynomial" = [`get_polynomial_decay_schedule_with_warmup`]
       - "constant" = [`get_constant_schedule`]
       - "constant_with_warmup" = [`get_constant_schedule_with_warmup`]
       - "inverse_sqrt" = [`get_inverse_sqrt_schedule`]
       - "reduce_lr_on_plateau" = [`get_reduce_on_plateau_schedule`]
       - "cosine_with_min_lr" = [`get_cosine_with_min_lr_schedule_with_warmup`]
       - "cosine_warmup_with_min_lr" = [`get_cosine_with_min_lr_schedule_with_warmup_lr_rate`]
       - "warmup_stable_decay" = [`get_wsd_schedule`]
    """

    LINEAR = 'linear'
    COSINE = 'cosine'
    COSINE_WITH_RESTARTS = 'cosine_with_restarts'
    POLYNOMIAL = 'polynomial'
    CONSTANT = 'constant'
    CONSTANT_WITH_WARMUP = 'constant_with_warmup'
    INVERSE_SQRT = 'inverse_sqrt'
    REDUCE_ON_PLATEAU = 'reduce_lr_on_plateau'
    COSINE_WITH_MIN_LR = 'cosine_with_min_lr'
    COSINE_WARMUP_WITH_MIN_LR = 'cosine_warmup_with_min_lr'
    WARMUP_STABLE_DECAY = 'warmup_stable_decay'
class SchedulerType(ExplicitEnum):
    '''
    Scheduler names for the parameter `lr_scheduler_type` in [`TrainingArguments`].
    By default, it uses "linear". Internally, this retrieves `get_linear_schedule_with_warmup` scheduler from [`Trainer`].
    Scheduler types:
       - "linear" = [`get_linear_schedule_with_warmup`]
       - "cosine" = [`get_cosine_schedule_with_warmup`]
       - "cosine_with_restarts" = [`get_cosine_with_hard_restarts_schedule_with_warmup`]
       - "polynomial" = [`get_polynomial_decay_schedule_with_warmup`]
       - "constant" = [`get_constant_schedule`]
       - "constant_with_warmup" = [`get_constant_schedule_with_warmup`]
       - "inverse_sqrt" = [`get_inverse_sqrt_schedule`]
       - "reduce_lr_on_plateau" = [`get_reduce_on_plateau_schedule`]
       - "cosine_with_min_lr" = [`get_cosine_with_min_lr_schedule_with_warmup`]
       - "cosine_warmup_with_min_lr" = [`get_cosine_with_min_lr_schedule_with_warmup_lr_rate`]
       - "warmup_stable_decay" = [`get_wsd_schedule`]
    '''
    pass
Metrics: 1, 1, 0, 0, 0, 0, 0, 1.36, 1, 0, 0, 0, 0, 0, 0, 0, 27, 1, 11, 11, 10, 15, 11, 11, 10, 0, 1, 0, 0
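The enum's string values are what `TrainingArguments.lr_scheduler_type` expects. A small sketch (same installation assumptions as above):

```python
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="out",
    lr_scheduler_type="cosine",  # SchedulerType.COSINE
    warmup_ratio=0.1,            # illustrative warmup fraction
)
print(args.lr_scheduler_type)  # SchedulerType.COSINE
```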
6,539
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/trainer_utils.py
transformers.trainer_utils.TrainOutput
from typing import Any, NamedTuple, Optional, Union


class TrainOutput(NamedTuple):
    global_step: int
    training_loss: float
    metrics: dict[str, float]
class TrainOutput(NamedTuple): pass
Metrics: 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 4, 0, 4, 1, 3, 0, 4, 1, 3, 0, 1, 0, 0
6,540
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/trainer_utils.py
transformers.trainer_utils.TrainerMemoryTracker
from .utils import ExplicitEnum, is_psutil_available, is_torch_available, is_torch_cuda_available, is_torch_hpu_available, is_torch_mlu_available, is_torch_mps_available, is_torch_musa_available, is_torch_npu_available, is_torch_xla_available, is_torch_xpu_available, requires_backends
import threading
import gc
import inspect
import torch  # assumed: the bare `torch.cuda.is_available()` calls below need a module-level torch; the extracted import list omitted it


class TrainerMemoryTracker:
    """
    A helper class that tracks cpu and gpu memory.

    This class will silently skip unless `psutil` is available. Install with `pip install psutil`.

    When a stage completes, it can pass metrics dict to update with the memory metrics gathered during this stage.

    Example :

    ```python
    self._memory_tracker = TrainerMemoryTracker(self.args.skip_memory_metrics)
    self._memory_tracker.start()
    # code ...
    metrics = {"train_runtime": 10.5}
    self._memory_tracker.stop_and_update_metrics(metrics)
    ```

    To understand this class' intricacies please read the documentation of [`~Trainer.log_metrics`].
    """

    stages = {'__init__': 'init', 'train': 'train', '_inner_training_loop': 'train', 'evaluate': 'eval', 'predict': 'test'}

    def __init__(self, skip_memory_metrics=False):
        self.skip_memory_metrics = skip_memory_metrics
        if not is_psutil_available():
            self.skip_memory_metrics = True
        if self.skip_memory_metrics:
            return
        import psutil
        if is_torch_cuda_available() or is_torch_mlu_available() or is_torch_musa_available():
            import torch
            self.torch = torch
            self.gpu = {}
        elif is_torch_mps_available():
            import torch
            self.torch = torch
            self.gpu = {}
        elif is_torch_xpu_available():
            import torch
            self.torch = torch
            self.gpu = {}
        elif is_torch_npu_available():
            import torch
            self.torch = torch
            self.gpu = {}
        elif is_torch_hpu_available():
            import torch
            self.torch = torch
            self.gpu = {}
        else:
            self.torch = None
        self.process = psutil.Process()
        self.cur_stage = None
        self.cpu = {}
        self.init_reported = False

    def derive_stage(self):
        """derives the stage/caller name automatically"""
        caller = inspect.currentframe().f_back.f_back.f_code.co_name
        if caller in self.stages:
            return self.stages[caller]
        else:
            raise ValueError(f'was called from {caller}, but only expect to be called from one of {self.stages.keys()}')

    def cpu_mem_used(self):
        """get resident set size memory for the current process"""
        return self.process.memory_info().rss

    def peak_monitor_func(self):
        self.cpu_mem_used_peak = -1
        while True:
            self.cpu_mem_used_peak = max(self.cpu_mem_used(), self.cpu_mem_used_peak)
            if not self.peak_monitoring:
                break

    def start(self):
        """start tracking for the caller's stage"""
        if self.skip_memory_metrics:
            return
        stage = self.derive_stage()
        if self.cur_stage is not None and self.cur_stage != stage:
            return
        self.cur_stage = stage
        gc.collect()
        if self.torch is not None:
            if torch.cuda.is_available():
                self.torch.cuda.reset_peak_memory_stats()
                self.torch.cuda.empty_cache()
            elif is_torch_mlu_available():
                self.torch.mlu.reset_peak_memory_stats()
                self.torch.mlu.empty_cache()
            elif is_torch_musa_available():
                self.torch.musa.reset_peak_memory_stats()
                self.torch.musa.empty_cache()
            elif is_torch_xpu_available():
                self.torch.xpu.reset_peak_memory_stats()
                self.torch.xpu.empty_cache()
            elif is_torch_npu_available():
                self.torch.npu.reset_peak_memory_stats()
                self.torch.npu.empty_cache()
            elif is_torch_hpu_available():
                self.torch.hpu.reset_peak_memory_stats()
            elif is_torch_mps_available():
                self.torch.mps.empty_cache()
        if self.torch is not None:
            if torch.cuda.is_available():
                self.gpu_mem_used_at_start = self.torch.cuda.memory_allocated()
            elif is_torch_mlu_available():
                self.gpu_mem_used_at_start = self.torch.mlu.memory_allocated()
            elif is_torch_musa_available():
                self.gpu_mem_used_at_start = self.torch.musa.memory_allocated()
            elif is_torch_xpu_available():
                self.gpu_mem_used_at_start = self.torch.xpu.memory_allocated()
            elif is_torch_npu_available():
                self.gpu_mem_used_at_start = self.torch.npu.memory_allocated()
            elif is_torch_hpu_available():
                self.gpu_mem_used_at_start = self.torch.hpu.memory_allocated()
            elif is_torch_mps_available():
                self.gpu_mem_used_at_start = self.torch.mps.current_allocated_memory()
        self.cpu_mem_used_at_start = self.cpu_mem_used()
        self.peak_monitoring = True
        peak_monitor_thread = threading.Thread(target=self.peak_monitor_func)
        peak_monitor_thread.daemon = True
        peak_monitor_thread.start()

    def stop(self, stage):
        """stop tracking for the passed stage"""
        if self.cur_stage is not None and self.cur_stage != stage:
            return
        self.peak_monitoring = False
        gc.collect()
        if self.torch is not None:
            if torch.cuda.is_available():
                self.torch.cuda.empty_cache()
            elif is_torch_mlu_available():
                self.torch.mlu.empty_cache()
            elif is_torch_musa_available():
                self.torch.musa.empty_cache()
            elif is_torch_xpu_available():
                self.torch.xpu.empty_cache()
            elif is_torch_npu_available():
                self.torch.npu.empty_cache()
            elif is_torch_hpu_available():
                pass
            elif is_torch_mps_available():
                self.torch.mps.empty_cache()
        if self.torch is not None:
            if torch.cuda.is_available():
                self.gpu_mem_used_now = self.torch.cuda.memory_allocated()
                self.gpu_mem_used_peak = self.torch.cuda.max_memory_allocated()
            elif is_torch_mlu_available():
                self.gpu_mem_used_now = self.torch.mlu.memory_allocated()
                self.gpu_mem_used_peak = self.torch.mlu.max_memory_allocated()
            elif is_torch_musa_available():
                self.gpu_mem_used_now = self.torch.musa.memory_allocated()
                self.gpu_mem_used_peak = self.torch.musa.max_memory_allocated()
            elif is_torch_xpu_available():
                self.gpu_mem_used_now = self.torch.xpu.memory_allocated()
                self.gpu_mem_used_peak = self.torch.xpu.max_memory_allocated()
            elif is_torch_npu_available():
                self.gpu_mem_used_now = self.torch.npu.memory_allocated()
                self.gpu_mem_used_peak = self.torch.npu.max_memory_allocated()
            elif is_torch_hpu_available():
                self.gpu_mem_used_now = self.torch.hpu.memory_allocated()
                self.gpu_mem_used_peak = self.torch.hpu.max_memory_allocated()
            elif is_torch_mps_available():
                self.gpu_mem_used_now = self.torch.mps.current_allocated_memory()
                self.gpu_mem_used_peak = None
            else:
                raise ValueError('No available GPU device found!')
            self.gpu[self.cur_stage] = {'begin': self.gpu_mem_used_at_start, 'end': self.gpu_mem_used_now, 'alloc': self.gpu_mem_used_now - self.gpu_mem_used_at_start}
            if self.gpu_mem_used_peak is not None:
                self.gpu[self.cur_stage]['peaked'] = max(0, self.gpu_mem_used_peak - self.gpu_mem_used_now)
            else:
                self.gpu[self.cur_stage]['peaked'] = 'Not available'
        self.cpu_mem_used_now = self.cpu_mem_used()
        self.cpu[self.cur_stage] = {'begin': self.cpu_mem_used_at_start, 'end': self.cpu_mem_used_now, 'alloc': self.cpu_mem_used_now - self.cpu_mem_used_at_start, 'peaked': max(0, self.cpu_mem_used_peak - self.cpu_mem_used_now)}
        self.cur_stage = None

    def update_metrics(self, stage, metrics):
        """updates the metrics"""
        if self.skip_memory_metrics:
            return
        if self.cur_stage is not None and self.cur_stage != stage:
            return
        stages = [stage]
        if not self.init_reported:
            stages.insert(0, 'init')
            self.init_reported = True
        for stage in stages:
            for t in ['alloc', 'peaked']:
                if stage in self.cpu and t in self.cpu[stage]:
                    metrics[f'{stage}_mem_cpu_{t}_delta'] = self.cpu[stage][t]
                if self.torch is not None and stage in self.gpu and (t in self.gpu[stage]):
                    metrics[f'{stage}_mem_gpu_{t}_delta'] = self.gpu[stage][t]
        if stages[0] == 'init':
            metrics['before_init_mem_cpu'] = self.cpu['init']['begin']
            if self.torch is not None:
                metrics['before_init_mem_gpu'] = self.gpu['init']['begin']

    def stop_and_update_metrics(self, metrics=None):
        """combine stop and metrics update in one call for simpler code"""
        if self.skip_memory_metrics:
            return
        stage = self.derive_stage()
        self.stop(stage)
        if metrics is not None:
            self.update_metrics(stage, metrics)
null
Metrics: 9, 7, 30, 5, 21, 4, 8, 0.32, 0, 3, 0, 0, 8, 14, 8, 8, 286, 51, 179, 35, 165, 57, 135, 35, 121, 17, 0, 3, 60
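`TrainerMemoryTracker.derive_stage` inspects the *caller's* function name, so it raises outside of `Trainer` methods; the sketch below therefore only exercises the skip path. Per `update_metrics` above, the reported keys have the form `{stage}_mem_cpu_{alloc|peaked}_delta` (and `_mem_gpu_` variants), plus `before_init_mem_cpu`/`before_init_mem_gpu` for the init stage:

```python
from transformers.trainer_utils import TrainerMemoryTracker

tracker = TrainerMemoryTracker(skip_memory_metrics=True)  # no-op tracker
metrics = {"train_runtime": 10.5}
tracker.stop_and_update_metrics(metrics)  # returns immediately when skipping
print(metrics)  # unchanged: {'train_runtime': 10.5}
```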
6,541
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/training_args.py
transformers.training_args.OptimizerNames
from .utils import ACCELERATE_MIN_VERSION, ExplicitEnum, is_accelerate_available, is_apex_available, is_ipex_available, is_safetensors_available, is_sagemaker_dp_enabled, is_sagemaker_mp_enabled, is_torch_available, is_torch_bf16_gpu_available, is_torch_cuda_available, is_torch_hpu_available, is_torch_mlu_available, is_torch_mps_available, is_torch_musa_available, is_torch_neuroncore_available, is_torch_npu_available, is_torch_tf32_available, is_torch_xla_available, is_torch_xpu_available, logging, requires_backends


class OptimizerNames(ExplicitEnum):
    """
    Stores the acceptable string identifiers for optimizers.
    """

    ADAMW_TORCH = 'adamw_torch'
    ADAMW_TORCH_FUSED = 'adamw_torch_fused'
    ADAMW_TORCH_XLA = 'adamw_torch_xla'
    ADAMW_TORCH_NPU_FUSED = 'adamw_torch_npu_fused'
    ADAMW_APEX_FUSED = 'adamw_apex_fused'
    ADAFACTOR = 'adafactor'
    ADAMW_ANYPRECISION = 'adamw_anyprecision'
    ADAMW_TORCH_4BIT = 'adamw_torch_4bit'
    ADAMW_TORCH_8BIT = 'adamw_torch_8bit'
    ADEMAMIX = 'ademamix'
    SGD = 'sgd'
    ADAGRAD = 'adagrad'
    ADAMW_BNB = 'adamw_bnb_8bit'
    ADAMW_8BIT = 'adamw_8bit'
    ADEMAMIX_8BIT = 'ademamix_8bit'
    LION_8BIT = 'lion_8bit'
    LION = 'lion_32bit'
    PAGED_ADAMW = 'paged_adamw_32bit'
    PAGED_ADAMW_8BIT = 'paged_adamw_8bit'
    PAGED_ADEMAMIX = 'paged_ademamix_32bit'
    PAGED_ADEMAMIX_8BIT = 'paged_ademamix_8bit'
    PAGED_LION = 'paged_lion_32bit'
    PAGED_LION_8BIT = 'paged_lion_8bit'
    RMSPROP = 'rmsprop'
    RMSPROP_BNB = 'rmsprop_bnb'
    RMSPROP_8BIT = 'rmsprop_bnb_8bit'
    RMSPROP_32BIT = 'rmsprop_bnb_32bit'
    GALORE_ADAMW = 'galore_adamw'
    GALORE_ADAMW_8BIT = 'galore_adamw_8bit'
    GALORE_ADAFACTOR = 'galore_adafactor'
    GALORE_ADAMW_LAYERWISE = 'galore_adamw_layerwise'
    GALORE_ADAMW_8BIT_LAYERWISE = 'galore_adamw_8bit_layerwise'
    GALORE_ADAFACTOR_LAYERWISE = 'galore_adafactor_layerwise'
    LOMO = 'lomo'
    ADALOMO = 'adalomo'
    GROKADAMW = 'grokadamw'
    SCHEDULE_FREE_RADAM = 'schedule_free_radam'
    SCHEDULE_FREE_ADAMW = 'schedule_free_adamw'
    SCHEDULE_FREE_SGD = 'schedule_free_sgd'
    APOLLO_ADAMW = 'apollo_adamw'
    APOLLO_ADAMW_LAYERWISE = 'apollo_adamw_layerwise'
    STABLE_ADAMW = 'stable_adamw'
class OptimizerNames(ExplicitEnum):
    '''
    Stores the acceptable string identifiers for optimizers.
    '''
    pass
Metrics: 1, 1, 0, 0, 0, 0, 0, 0.1, 1, 0, 0, 0, 0, 0, 0, 0, 44, 1, 40, 40, 39, 4, 40, 40, 39, 0, 1, 0, 0
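Optimizer choice is likewise passed as the enum's string value (8-bit and paged variants additionally need `bitsandbytes`). A minimal sketch:

```python
from transformers import TrainingArguments

args = TrainingArguments(output_dir="out", optim="adamw_torch")
print(args.optim)  # OptimizerNames.ADAMW_TORCH
```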
6,542
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/training_args.py
transformers.training_args.ParallelMode
from enum import Enum


class ParallelMode(Enum):
    NOT_PARALLEL = 'not_parallel'
    NOT_DISTRIBUTED = 'not_distributed'
    DISTRIBUTED = 'distributed'
    SAGEMAKER_MODEL_PARALLEL = 'sagemaker_model_parallel'
    SAGEMAKER_DATA_PARALLEL = 'sagemaker_data_parallel'
    TPU = 'tpu'
class ParallelMode(Enum): pass
Metrics: 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 49, 7, 0, 7, 7, 6, 0, 7, 7, 6, 0, 4, 0, 0
6,543
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/training_args.py
transformers.training_args.TrainingArguments
from functools import cached_property import warnings from .debug_utils import DebugOption from .utils import ACCELERATE_MIN_VERSION, ExplicitEnum, is_accelerate_available, is_apex_available, is_ipex_available, is_safetensors_available, is_sagemaker_dp_enabled, is_sagemaker_mp_enabled, is_torch_available, is_torch_bf16_gpu_available, is_torch_cuda_available, is_torch_hpu_available, is_torch_mlu_available, is_torch_mps_available, is_torch_musa_available, is_torch_neuroncore_available, is_torch_npu_available, is_torch_tf32_available, is_torch_xla_available, is_torch_xpu_available, logging, requires_backends from enum import Enum import os from huggingface_hub import get_full_repo_name from datetime import timedelta import math import json from typing import Any, Optional, Union import contextlib from pathlib import Path from dataclasses import asdict, dataclass, field, fields from .trainer_utils import EvaluationStrategy, FSDPOption, HubStrategy, IntervalStrategy, SaveStrategy, SchedulerType from .utils.generic import strtobool @dataclass class TrainingArguments: """ TrainingArguments is the subset of the arguments we use in our example scripts **which relate to the training loop itself**. Using [`HfArgumentParser`] we can turn this class into [argparse](https://docs.python.org/3/library/argparse#module-argparse) arguments that can be specified on the command line. Parameters: output_dir (`str`, *optional*, defaults to `"trainer_output"`): The output directory where the model predictions and checkpoints will be written. overwrite_output_dir (`bool`, *optional*, defaults to `False`): If `True`, overwrite the content of the output directory. Use this to continue training if `output_dir` points to a checkpoint directory. do_train (`bool`, *optional*, defaults to `False`): Whether to run training or not. This argument is not directly used by [`Trainer`], it's intended to be used by your training/evaluation scripts instead. See the [example scripts](https://github.com/huggingface/transformers/tree/main/examples) for more details. do_eval (`bool`, *optional*): Whether to run evaluation on the validation set or not. Will be set to `True` if `eval_strategy` is different from `"no"`. This argument is not directly used by [`Trainer`], it's intended to be used by your training/evaluation scripts instead. See the [example scripts](https://github.com/huggingface/transformers/tree/main/examples) for more details. do_predict (`bool`, *optional*, defaults to `False`): Whether to run predictions on the test set or not. This argument is not directly used by [`Trainer`], it's intended to be used by your training/evaluation scripts instead. See the [example scripts](https://github.com/huggingface/transformers/tree/main/examples) for more details. eval_strategy (`str` or [`~trainer_utils.IntervalStrategy`], *optional*, defaults to `"no"`): The evaluation strategy to adopt during training. Possible values are: - `"no"`: No evaluation is done during training. - `"steps"`: Evaluation is done (and logged) every `eval_steps`. - `"epoch"`: Evaluation is done at the end of each epoch. prediction_loss_only (`bool`, *optional*, defaults to `False`): When performing evaluation and generating predictions, only returns the loss. per_device_train_batch_size (`int`, *optional*, defaults to 8): The batch size *per device*. The **global batch size** is computed as: `per_device_train_batch_size * number_of_devices` in multi-GPU or distributed setups. 
per_device_eval_batch_size (`int`, *optional*, defaults to 8): The batch size per device accelerator core/CPU for evaluation. gradient_accumulation_steps (`int`, *optional*, defaults to 1): Number of updates steps to accumulate the gradients for, before performing a backward/update pass. <Tip warning={true}> When using gradient accumulation, one step is counted as one step with backward pass. Therefore, logging, evaluation, save will be conducted every `gradient_accumulation_steps * xxx_step` training examples. </Tip> eval_accumulation_steps (`int`, *optional*): Number of predictions steps to accumulate the output tensors for, before moving the results to the CPU. If left unset, the whole predictions are accumulated on the device accelerator before being moved to the CPU (faster but requires more memory). eval_delay (`float`, *optional*): Number of epochs or steps to wait for before the first evaluation can be performed, depending on the eval_strategy. torch_empty_cache_steps (`int`, *optional*): Number of steps to wait before calling `torch.<device>.empty_cache()`. If left unset or set to None, cache will not be emptied. <Tip> This can help avoid CUDA out-of-memory errors by lowering peak VRAM usage at a cost of about [10% slower performance](https://github.com/huggingface/transformers/issues/31372). </Tip> learning_rate (`float`, *optional*, defaults to 5e-5): The initial learning rate for [`AdamW`] optimizer. weight_decay (`float`, *optional*, defaults to 0): The weight decay to apply (if not zero) to all layers except all bias and LayerNorm weights in [`AdamW`] optimizer. adam_beta1 (`float`, *optional*, defaults to 0.9): The beta1 hyperparameter for the [`AdamW`] optimizer. adam_beta2 (`float`, *optional*, defaults to 0.999): The beta2 hyperparameter for the [`AdamW`] optimizer. adam_epsilon (`float`, *optional*, defaults to 1e-8): The epsilon hyperparameter for the [`AdamW`] optimizer. max_grad_norm (`float`, *optional*, defaults to 1.0): Maximum gradient norm (for gradient clipping). num_train_epochs(`float`, *optional*, defaults to 3.0): Total number of training epochs to perform (if not an integer, will perform the decimal part percents of the last epoch before stopping training). max_steps (`int`, *optional*, defaults to -1): If set to a positive number, the total number of training steps to perform. Overrides `num_train_epochs`. For a finite dataset, training is reiterated through the dataset (if all data is exhausted) until `max_steps` is reached. lr_scheduler_type (`str` or [`SchedulerType`], *optional*, defaults to `"linear"`): The scheduler type to use. See the documentation of [`SchedulerType`] for all possible values. lr_scheduler_kwargs ('dict', *optional*, defaults to {}): The extra arguments for the lr_scheduler. See the documentation of each scheduler for possible values. warmup_ratio (`float`, *optional*, defaults to 0.0): Ratio of total training steps used for a linear warmup from 0 to `learning_rate`. warmup_steps (`int`, *optional*, defaults to 0): Number of steps used for a linear warmup from 0 to `learning_rate`. Overrides any effect of `warmup_ratio`. log_level (`str`, *optional*, defaults to `passive`): Logger log level to use on the main process. Possible choices are the log levels as strings: 'debug', 'info', 'warning', 'error' and 'critical', plus a 'passive' level which doesn't set anything and keeps the current log level for the Transformers library (which will be `"warning"` by default). 
log_level_replica (`str`, *optional*, defaults to `"warning"`): Logger log level to use on replicas. Same choices as `log_level`" log_on_each_node (`bool`, *optional*, defaults to `True`): In multinode distributed training, whether to log using `log_level` once per node, or only on the main node. logging_dir (`str`, *optional*): [TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***. logging_strategy (`str` or [`~trainer_utils.IntervalStrategy`], *optional*, defaults to `"steps"`): The logging strategy to adopt during training. Possible values are: - `"no"`: No logging is done during training. - `"epoch"`: Logging is done at the end of each epoch. - `"steps"`: Logging is done every `logging_steps`. logging_first_step (`bool`, *optional*, defaults to `False`): Whether to log the first `global_step` or not. logging_steps (`int` or `float`, *optional*, defaults to 500): Number of update steps between two logs if `logging_strategy="steps"`. Should be an integer or a float in range `[0,1)`. If smaller than 1, will be interpreted as ratio of total training steps. logging_nan_inf_filter (`bool`, *optional*, defaults to `True`): Whether to filter `nan` and `inf` losses for logging. If set to `True` the loss of every step that is `nan` or `inf` is filtered and the average loss of the current logging window is taken instead. <Tip> `logging_nan_inf_filter` only influences the logging of loss values, it does not change the behavior the gradient is computed or applied to the model. </Tip> save_strategy (`str` or [`~trainer_utils.SaveStrategy`], *optional*, defaults to `"steps"`): The checkpoint save strategy to adopt during training. Possible values are: - `"no"`: No save is done during training. - `"epoch"`: Save is done at the end of each epoch. - `"steps"`: Save is done every `save_steps`. - `"best"`: Save is done whenever a new `best_metric` is achieved. If `"epoch"` or `"steps"` is chosen, saving will also be performed at the very end of training, always. save_steps (`int` or `float`, *optional*, defaults to 500): Number of updates steps before two checkpoint saves if `save_strategy="steps"`. Should be an integer or a float in range `[0,1)`. If smaller than 1, will be interpreted as ratio of total training steps. save_total_limit (`int`, *optional*): If a value is passed, will limit the total amount of checkpoints. Deletes the older checkpoints in `output_dir`. When `load_best_model_at_end` is enabled, the "best" checkpoint according to `metric_for_best_model` will always be retained in addition to the most recent ones. For example, for `save_total_limit=5` and `load_best_model_at_end`, the four last checkpoints will always be retained alongside the best model. When `save_total_limit=1` and `load_best_model_at_end`, it is possible that two checkpoints are saved: the last one and the best one (if they are different). save_safetensors (`bool`, *optional*, defaults to `True`): Use [safetensors](https://huggingface.co/docs/safetensors) saving and loading for state dicts instead of default `torch.load` and `torch.save`. save_on_each_node (`bool`, *optional*, defaults to `False`): When doing multi-node distributed training, whether to save models and checkpoints on each node, or only on the main one. This should not be activated when the different nodes use the same storage as the files will be saved with the same names for each node. 
save_only_model (`bool`, *optional*, defaults to `False`): When checkpointing, whether to only save the model, or also the optimizer, scheduler & rng state. Note that when this is true, you won't be able to resume training from checkpoint. This enables you to save storage by not storing the optimizer, scheduler & rng state. You can only load the model using `from_pretrained` with this option set to `True`. restore_callback_states_from_checkpoint (`bool`, *optional*, defaults to `False`): Whether to restore the callback states from the checkpoint. If `True`, will override callbacks passed to the `Trainer` if they exist in the checkpoint." use_cpu (`bool`, *optional*, defaults to `False`): Whether or not to use cpu. If set to False, we will use cuda or mps device if available. seed (`int`, *optional*, defaults to 42): Random seed that will be set at the beginning of training. To ensure reproducibility across runs, use the [`~Trainer.model_init`] function to instantiate the model if it has some randomly initialized parameters. data_seed (`int`, *optional*): Random seed to be used with data samplers. If not set, random generators for data sampling will use the same seed as `seed`. This can be used to ensure reproducibility of data sampling, independent of the model seed. jit_mode_eval (`bool`, *optional*, defaults to `False`): Whether or not to use PyTorch jit trace for inference. bf16 (`bool`, *optional*, defaults to `False`): Whether to use bf16 16-bit (mixed) precision training instead of 32-bit training. Requires Ampere or higher NVIDIA architecture or Intel XPU or using CPU (use_cpu) or Ascend NPU. This is an experimental API and it may change. fp16 (`bool`, *optional*, defaults to `False`): Whether to use fp16 16-bit (mixed) precision training instead of 32-bit training. fp16_opt_level (`str`, *optional*, defaults to 'O1'): For `fp16` training, Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. See details on the [Apex documentation](https://nvidia.github.io/apex/amp). fp16_backend (`str`, *optional*, defaults to `"auto"`): This argument is deprecated. Use `half_precision_backend` instead. half_precision_backend (`str`, *optional*, defaults to `"auto"`): The backend to use for mixed precision training. Must be one of `"auto", "apex", "cpu_amp"`. `"auto"` will use CPU/CUDA AMP or APEX depending on the PyTorch version detected, while the other choices will force the requested backend. bf16_full_eval (`bool`, *optional*, defaults to `False`): Whether to use full bfloat16 evaluation instead of 32-bit. This will be faster and save memory but can harm metric values. This is an experimental API and it may change. fp16_full_eval (`bool`, *optional*, defaults to `False`): Whether to use full float16 evaluation instead of 32-bit. This will be faster and save memory but can harm metric values. tf32 (`bool`, *optional*): Whether to enable the TF32 mode, available in Ampere and newer GPU architectures. The default value depends on PyTorch's version default of `torch.backends.cuda.matmul.allow_tf32`. For more details please refer to the [TF32](https://huggingface.co/docs/transformers/perf_train_gpu_one#tf32) documentation. This is an experimental API and it may change. local_rank (`int`, *optional*, defaults to -1): Rank of the process during distributed training. ddp_backend (`str`, *optional*): The backend to use for distributed training. Must be one of `"nccl"`, `"mpi"`, `"ccl"`, `"gloo"`, `"hccl"`. 
tpu_num_cores (`int`, *optional*): When training on TPU, the number of TPU cores (automatically passed by launcher script). dataloader_drop_last (`bool`, *optional*, defaults to `False`): Whether to drop the last incomplete batch (if the length of the dataset is not divisible by the batch size) or not. eval_steps (`int` or `float`, *optional*): Number of update steps between two evaluations if `eval_strategy="steps"`. Will default to the same value as `logging_steps` if not set. Should be an integer or a float in range `[0,1)`. If smaller than 1, will be interpreted as ratio of total training steps. dataloader_num_workers (`int`, *optional*, defaults to 0): Number of subprocesses to use for data loading (PyTorch only). 0 means that the data will be loaded in the main process. past_index (`int`, *optional*, defaults to -1): Some models like [TransformerXL](../model_doc/transformerxl) or [XLNet](../model_doc/xlnet) can make use of the past hidden states for their predictions. If this argument is set to a positive int, the `Trainer` will use the corresponding output (usually index 2) as the past state and feed it to the model at the next training step under the keyword argument `mems`. run_name (`str`, *optional*, defaults to `output_dir`): A descriptor for the run. Typically used for [trackio](https://github.com/gradio-app/trackio), [wandb](https://www.wandb.com/), [mlflow](https://www.mlflow.org/), [comet](https://www.comet.com/site) and [swanlab](https://swanlab.cn) logging. If not specified, will be the same as `output_dir`. disable_tqdm (`bool`, *optional*): Whether or not to disable the tqdm progress bars and table of metrics produced by [`~notebook.NotebookTrainingTracker`] in Jupyter Notebooks. Will default to `True` if the logging level is set to warn or lower (default), `False` otherwise. remove_unused_columns (`bool`, *optional*, defaults to `True`): Whether or not to automatically remove the columns unused by the model forward method. label_names (`list[str]`, *optional*): The list of keys in your dictionary of inputs that correspond to the labels. Will eventually default to the list of argument names accepted by the model that contain the word "label", except if the model used is one of the `XxxForQuestionAnswering` in which case it will also include the `["start_positions", "end_positions"]` keys. You should only specify `label_names` if you're using custom label names or if your model's `forward` consumes multiple label tensors (e.g., extractive QA). load_best_model_at_end (`bool`, *optional*, defaults to `False`): Whether or not to load the best model found during training at the end of training. When this option is enabled, the best checkpoint will always be saved. See [`save_total_limit`](https://huggingface.co/docs/transformers/main_classes/trainer#transformers.TrainingArguments.save_total_limit) for more. <Tip> When set to `True`, the parameters `save_strategy` needs to be the same as `eval_strategy`, and in the case it is "steps", `save_steps` must be a round multiple of `eval_steps`. </Tip> metric_for_best_model (`str`, *optional*): Use in conjunction with `load_best_model_at_end` to specify the metric to use to compare two different models. Must be the name of a metric returned by the evaluation with or without the prefix `"eval_"`. If not specified, this will default to `"loss"` when either `load_best_model_at_end == True` or `lr_scheduler_type == SchedulerType.REDUCE_ON_PLATEAU` (to use the evaluation loss). 
If you set this value, `greater_is_better` will default to `True` unless the name ends with "loss". Don't forget to set it to `False` if your metric is better when lower. greater_is_better (`bool`, *optional*): Use in conjunction with `load_best_model_at_end` and `metric_for_best_model` to specify if better models should have a greater metric or not. Will default to: - `True` if `metric_for_best_model` is set to a value that doesn't end in `"loss"`. - `False` if `metric_for_best_model` is not set, or set to a value that ends in `"loss"`. ignore_data_skip (`bool`, *optional*, defaults to `False`): When resuming training, whether or not to skip the epochs and batches to get the data loading at the same stage as in the previous training. If set to `True`, the training will begin faster (as that skipping step can take a long time) but will not yield the same results as the interrupted training would have. fsdp (`bool`, `str` or list of [`~trainer_utils.FSDPOption`], *optional*, defaults to `''`): Use PyTorch Distributed Parallel Training (in distributed training only). A list of options along the following: - `"full_shard"`: Shard parameters, gradients and optimizer states. - `"shard_grad_op"`: Shard optimizer states and gradients. - `"hybrid_shard"`: Apply `FULL_SHARD` within a node, and replicate parameters across nodes. - `"hybrid_shard_zero2"`: Apply `SHARD_GRAD_OP` within a node, and replicate parameters across nodes. - `"offload"`: Offload parameters and gradients to CPUs (only compatible with `"full_shard"` and `"shard_grad_op"`). - `"auto_wrap"`: Automatically recursively wrap layers with FSDP using `default_auto_wrap_policy`. fsdp_config (`str` or `dict`, *optional*): Config to be used with fsdp (Pytorch Distributed Parallel Training). The value is either a location of fsdp json config file (e.g., `fsdp_config.json`) or an already loaded json file as `dict`. A List of config and its options: - min_num_params (`int`, *optional*, defaults to `0`): FSDP's minimum number of parameters for Default Auto Wrapping. (useful only when `fsdp` field is passed). - transformer_layer_cls_to_wrap (`list[str]`, *optional*): List of transformer layer class names (case-sensitive) to wrap, e.g, `BertLayer`, `GPTJBlock`, `T5Block` .... (useful only when `fsdp` flag is passed). - backward_prefetch (`str`, *optional*) FSDP's backward prefetch mode. Controls when to prefetch next set of parameters (useful only when `fsdp` field is passed). A list of options along the following: - `"backward_pre"` : Prefetches the next set of parameters before the current set of parameter's gradient computation. - `"backward_post"` : This prefetches the next set of parameters after the current set of parameter’s gradient computation. - forward_prefetch (`bool`, *optional*, defaults to `False`) FSDP's forward prefetch mode (useful only when `fsdp` field is passed). If `"True"`, then FSDP explicitly prefetches the next upcoming all-gather while executing in the forward pass. - limit_all_gathers (`bool`, *optional*, defaults to `False`) FSDP's limit_all_gathers (useful only when `fsdp` field is passed). If `"True"`, FSDP explicitly synchronizes the CPU thread to prevent too many in-flight all-gathers. - use_orig_params (`bool`, *optional*, defaults to `True`) If `"True"`, allows non-uniform `requires_grad` during init, which means support for interspersed frozen and trainable parameters. Useful in cases such as parameter-efficient fine-tuning. 
Please refer this [blog](https://dev-discuss.pytorch.org/t/rethinking-pytorch-fully-sharded-data-parallel-fsdp-from-first-principles/1019 - sync_module_states (`bool`, *optional*, defaults to `True`) If `"True"`, each individually wrapped FSDP unit will broadcast module parameters from rank 0 to ensure they are the same across all ranks after initialization - cpu_ram_efficient_loading (`bool`, *optional*, defaults to `False`) If `"True"`, only the first process loads the pretrained model checkpoint while all other processes have empty weights. When this setting as `"True"`, `sync_module_states` also must to be `"True"`, otherwise all the processes except the main process would have random weights leading to unexpected behaviour during training. - activation_checkpointing (`bool`, *optional*, defaults to `False`): If `"True"`, activation checkpointing is a technique to reduce memory usage by clearing activations of certain layers and recomputing them during a backward pass. Effectively, this trades extra computation time for reduced memory usage. - xla (`bool`, *optional*, defaults to `False`): Whether to use PyTorch/XLA Fully Sharded Data Parallel Training. This is an experimental feature and its API may evolve in the future. - xla_fsdp_settings (`dict`, *optional*) The value is a dictionary which stores the XLA FSDP wrapping parameters. For a complete list of options, please see [here]( https://github.com/pytorch/xla/blob/master/torch_xla/distributed/fsdp/xla_fully_sharded_data_parallel.py). - xla_fsdp_grad_ckpt (`bool`, *optional*, defaults to `False`): Will use gradient checkpointing over each nested XLA FSDP wrapped layer. This setting can only be used when the xla flag is set to true, and an auto wrapping policy is specified through fsdp_min_num_params or fsdp_transformer_layer_cls_to_wrap. deepspeed (`str` or `dict`, *optional*): Use [Deepspeed](https://github.com/deepspeedai/DeepSpeed). This is an experimental feature and its API may evolve in the future. The value is either the location of DeepSpeed json config file (e.g., `ds_config.json`) or an already loaded json file as a `dict`" <Tip warning={true}> If enabling any Zero-init, make sure that your model is not initialized until *after* initializing the `TrainingArguments`, else it will not be applied. </Tip> accelerator_config (`str`, `dict`, or `AcceleratorConfig`, *optional*): Config to be used with the internal `Accelerator` implementation. The value is either a location of accelerator json config file (e.g., `accelerator_config.json`), an already loaded json file as `dict`, or an instance of [`~trainer_pt_utils.AcceleratorConfig`]. A list of config and its options: - split_batches (`bool`, *optional*, defaults to `False`): Whether or not the accelerator should split the batches yielded by the dataloaders across the devices. If `True` the actual batch size used will be the same on any kind of distributed processes, but it must be a round multiple of the `num_processes` you are using. If `False`, actual batch size used will be the one set in your script multiplied by the number of processes. - dispatch_batches (`bool`, *optional*): If set to `True`, the dataloader prepared by the Accelerator is only iterated through on the main process and then the batches are split and broadcast to each process. Will default to `True` for `DataLoader` whose underlying dataset is an `IterableDataset`, `False` otherwise. 
- even_batches (`bool`, *optional*, defaults to `True`): If set to `True`, in cases where the total batch size across all processes does not exactly divide the dataset, samples at the start of the dataset will be duplicated so the batch can be divided equally among all workers. - use_seedable_sampler (`bool`, *optional*, defaults to `True`): Whether or not use a fully seedable random sampler ([`accelerate.data_loader.SeedableRandomSampler`]). Ensures training results are fully reproducible using a different sampling technique. While seed-to-seed results may differ, on average the differences are negligible when using multiple different seeds to compare. Should also be ran with [`~utils.set_seed`] for the best results. - use_configured_state (`bool`, *optional*, defaults to `False`): Whether or not to use a pre-configured `AcceleratorState` or `PartialState` defined before calling `TrainingArguments`. If `True`, an `Accelerator` or `PartialState` must be initialized. Note that by doing so, this could lead to issues with hyperparameter tuning. parallelism_config (`ParallelismConfig`, *optional*): Parallelism configuration for the training run. Requires Accelerate `1.10.1` label_smoothing_factor (`float`, *optional*, defaults to 0.0): The label smoothing factor to use. Zero means no label smoothing, otherwise the underlying onehot-encoded labels are changed from 0s and 1s to `label_smoothing_factor/num_labels` and `1 - label_smoothing_factor + label_smoothing_factor/num_labels` respectively. debug (`str` or list of [`~debug_utils.DebugOption`], *optional*, defaults to `""`): Enable one or more debug features. This is an experimental feature. Possible options are: - `"underflow_overflow"`: detects overflow in model's input/outputs and reports the last frames that led to the event - `"tpu_metrics_debug"`: print debug metrics on TPU The options should be separated by whitespaces. optim (`str` or [`training_args.OptimizerNames`], *optional*, defaults to `"adamw_torch"` (for torch>=2.8 `"adamw_torch_fused"`)): The optimizer to use, such as "adamw_torch", "adamw_torch_fused", "adamw_apex_fused", "adamw_anyprecision", "adafactor". See `OptimizerNames` in [training_args.py](https://github.com/huggingface/transformers/blob/main/src/transformers/training_args.py) for a full list of optimizers. optim_args (`str`, *optional*): Optional arguments that are supplied to optimizers such as AnyPrecisionAdamW, AdEMAMix, and GaLore. group_by_length (`bool`, *optional*, defaults to `False`): Whether or not to group together samples of roughly the same length in the training dataset (to minimize padding applied and be more efficient). Only useful if applying dynamic padding. length_column_name (`str`, *optional*, defaults to `"length"`): Column name for precomputed lengths. If the column exists, grouping by length will use these values rather than computing them on train startup. Ignored unless `group_by_length` is `True` and the dataset is an instance of `Dataset`. report_to (`str` or `list[str]`, *optional*, defaults to `"all"`): The list of integrations to report the results and logs to. Supported platforms are `"azure_ml"`, `"clearml"`, `"codecarbon"`, `"comet_ml"`, `"dagshub"`, `"dvclive"`, `"flyte"`, `"mlflow"`, `"neptune"`, `"swanlab"`, `"tensorboard"`, `"trackio"` and `"wandb"`. Use `"all"` to report to all integrations installed, `"none"` for no integrations. 
ddp_find_unused_parameters (`bool`, *optional*): When using distributed training, the value of the flag `find_unused_parameters` passed to `DistributedDataParallel`. Will default to `False` if gradient checkpointing is used, `True` otherwise. ddp_bucket_cap_mb (`int`, *optional*): When using distributed training, the value of the flag `bucket_cap_mb` passed to `DistributedDataParallel`. ddp_broadcast_buffers (`bool`, *optional*): When using distributed training, the value of the flag `broadcast_buffers` passed to `DistributedDataParallel`. Will default to `False` if gradient checkpointing is used, `True` otherwise. dataloader_pin_memory (`bool`, *optional*, defaults to `True`): Whether you want to pin memory in data loaders or not. Will default to `True`. dataloader_persistent_workers (`bool`, *optional*, defaults to `False`): If True, the data loader will not shut down the worker processes after a dataset has been consumed once. This allows to maintain the workers Dataset instances alive. Can potentially speed up training, but will increase RAM usage. Will default to `False`. dataloader_prefetch_factor (`int`, *optional*): Number of batches loaded in advance by each worker. 2 means there will be a total of 2 * num_workers batches prefetched across all workers. skip_memory_metrics (`bool`, *optional*, defaults to `True`): Whether to skip adding of memory profiler reports to metrics. This is skipped by default because it slows down the training and evaluation speed. push_to_hub (`bool`, *optional*, defaults to `False`): Whether or not to push the model to the Hub every time the model is saved. If this is activated, `output_dir` will begin a git directory synced with the repo (determined by `hub_model_id`) and the content will be pushed each time a save is triggered (depending on your `save_strategy`). Calling [`~Trainer.save_model`] will also trigger a push. <Tip warning={true}> If `output_dir` exists, it needs to be a local clone of the repository to which the [`Trainer`] will be pushed. </Tip> resume_from_checkpoint (`str`, *optional*): The path to a folder with a valid checkpoint for your model. This argument is not directly used by [`Trainer`], it's intended to be used by your training/evaluation scripts instead. See the [example scripts](https://github.com/huggingface/transformers/tree/main/examples) for more details. hub_model_id (`str`, *optional*): The name of the repository to keep in sync with the local *output_dir*. It can be a simple model ID in which case the model will be pushed in your namespace. Otherwise it should be the whole repository name, for instance `"user_name/model"`, which allows you to push to an organization you are a member of with `"organization_name/model"`. Will default to `user_name/output_dir_name` with *output_dir_name* being the name of `output_dir`. Will default to the name of `output_dir`. hub_strategy (`str` or [`~trainer_utils.HubStrategy`], *optional*, defaults to `"every_save"`): Defines the scope of what is pushed to the Hub and when. Possible values are: - `"end"`: push the model, its configuration, the processing class e.g. tokenizer (if passed along to the [`Trainer`]) and a draft of a model card when the [`~Trainer.save_model`] method is called. - `"every_save"`: push the model, its configuration, the processing class e.g. tokenizer (if passed along to the [`Trainer`]) and a draft of a model card each time there is a model save. 
The pushes are asynchronous to not block training, and in case saves are very frequent, a new push is only attempted if the previous one is finished. A last push is made with the final model at the end of training. - `"checkpoint"`: like `"every_save"` but the latest checkpoint is also pushed in a subfolder named last-checkpoint, allowing you to resume training easily with `trainer.train(resume_from_checkpoint="last-checkpoint")`. - `"all_checkpoints"`: like `"checkpoint"` but all checkpoints are pushed as they appear in the output folder (so you will get one checkpoint folder per folder in your final repository) hub_token (`str`, *optional*): The token to use to push the model to the Hub. Will default to the token in the cache folder obtained with `hf auth login`. hub_private_repo (`bool`, *optional*): Whether to make the repo private. If `None` (default), the repo will be public unless the organization's default is private. This value is ignored if the repo already exists. hub_always_push (`bool`, *optional*, defaults to `False`): Unless this is `True`, the `Trainer` will skip pushing a checkpoint when the previous push is not finished. hub_revision (`str`, *optional*): The revision to use when pushing to the Hub. Can be a branch name, a tag, or a commit hash. gradient_checkpointing (`bool`, *optional*, defaults to `False`): If True, use gradient checkpointing to save memory at the expense of slower backward pass. gradient_checkpointing_kwargs (`dict`, *optional*, defaults to `None`): Keyword arguments to be passed to the `gradient_checkpointing_enable` method. include_inputs_for_metrics (`bool`, *optional*, defaults to `False`): This argument is deprecated. Use `include_for_metrics` instead, e.g., `include_for_metrics = ["inputs"]`. include_for_metrics (`list[str]`, *optional*, defaults to `[]`): Include additional data in the `compute_metrics` function if needed for metrics computation. Possible options to add to `include_for_metrics` list: - `"inputs"`: Input data passed to the model, intended for calculating input dependent metrics. - `"loss"`: Loss values computed during evaluation, intended for calculating loss dependent metrics. eval_do_concat_batches (`bool`, *optional*, defaults to `True`): Whether to recursively concat inputs/losses/labels/predictions across batches. If `False`, will instead store them as lists, with each batch kept separate. auto_find_batch_size (`bool`, *optional*, defaults to `False`): Whether to find a batch size that will fit into memory automatically through exponential decay, avoiding CUDA Out-of-Memory errors. Requires accelerate to be installed (`pip install accelerate`). full_determinism (`bool`, *optional*, defaults to `False`): If `True`, [`enable_full_determinism`] is called instead of [`set_seed`] to ensure reproducible results in distributed training. Important: this will negatively impact the performance, so only use it for debugging. torchdynamo (`str`, *optional*): If set, the backend compiler for TorchDynamo. Possible choices are `"eager"`, `"aot_eager"`, `"inductor"`, `"nvfuser"`, `"aot_nvfuser"`, `"aot_cudagraphs"`, `"ofi"`, `"fx2trt"`, `"onnxrt"` and `"ipex"`. ray_scope (`str`, *optional*, defaults to `"last"`): The scope to use when doing hyperparameter search with Ray. By default, `"last"` will be used. Ray will then use the last checkpoint of all trials, compare those, and select the best one. However, other options are also available.
See the [Ray documentation](https://docs.ray.io/en/latest/tune/api_docs/analysis.html#ray.tune.ExperimentAnalysis.get_best_trial) for more options. ddp_timeout (`int`, *optional*, defaults to 1800): The timeout for `torch.distributed.init_process_group` calls, used to avoid GPU socket timeouts when performing slow operations in distributed runs. Please refer to the [PyTorch documentation](https://pytorch.org/docs/stable/distributed.html#torch.distributed.init_process_group) for more information. use_mps_device (`bool`, *optional*, defaults to `False`): This argument is deprecated. `mps` device will be used if it is available similar to `cuda` device. torch_compile (`bool`, *optional*, defaults to `False`): Whether or not to compile the model using PyTorch 2.0 [`torch.compile`](https://pytorch.org/get-started/pytorch-2.0/). This will use the best defaults for the [`torch.compile` API](https://pytorch.org/docs/stable/generated/torch.compile.html?highlight=torch+compile#torch.compile). You can customize the defaults with the arguments `torch_compile_backend` and `torch_compile_mode` but we don't guarantee any of them will work as support is progressively rolled out in PyTorch. This flag and the whole compile API is experimental and subject to change in future releases. torch_compile_backend (`str`, *optional*): The backend to use in `torch.compile`. If set to any value, `torch_compile` will be set to `True`. Refer to the PyTorch doc for possible values and note that they may change across PyTorch versions. This flag is experimental and subject to change in future releases. torch_compile_mode (`str`, *optional*): The mode to use in `torch.compile`. If set to any value, `torch_compile` will be set to `True`. Refer to the PyTorch doc for possible values and note that they may change across PyTorch versions. This flag is experimental and subject to change in future releases. include_tokens_per_second (`bool`, *optional*): Whether or not to compute the number of tokens per second per device for training speed metrics. This will iterate over the entire training dataloader once beforehand, and will slow down the entire process. include_num_input_tokens_seen (`bool`, *optional*): Whether or not to track the number of input tokens seen throughout training. May be slower in distributed training as gather operations must be called. neftune_noise_alpha (`Optional[float]`): If not `None`, this will activate NEFTune noise embeddings. This can drastically improve model performance for instruction fine-tuning. Check out the [original paper](https://huggingface.co/papers/2310.05914) and the [original code](https://github.com/neelsjain/NEFTune). Supports transformers `PreTrainedModel` and also `PeftModel` from peft. The original paper used values in the range [5.0, 15.0]. optim_target_modules (`Union[str, list[str]]`, *optional*): The target modules to optimize, i.e. the module names that you would like to train. Currently used for the GaLore algorithm (https://huggingface.co/papers/2403.03507) and APOLLO algorithm (https://huggingface.co/papers/2412.05270). See GaLore implementation (https://github.com/jiaweizzhao/GaLore) and APOLLO implementation (https://github.com/zhuhanqing/APOLLO) for more details. You need to make sure to pass a valid GaLore or APOLLO optimizer, e.g., one of: "apollo_adamw", "galore_adamw", "galore_adamw_8bit", "galore_adafactor" and make sure that the target modules are `nn.Linear` modules only.
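A hedged sketch of the GaLore-style setup described above (the module name patterns are hypothetical and depend on your architecture; this is not a recommendation):

```python
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="./out",                    # hypothetical path
    optim="galore_adamw",                  # must be a GaLore/APOLLO optimizer
    optim_target_modules=["attn", "mlp"],  # patterns matching `nn.Linear` modules
)
```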
batch_eval_metrics (`Optional[bool]`, defaults to `False`): If set to `True`, evaluation will call compute_metrics at the end of each batch to accumulate statistics rather than saving all eval logits in memory. When set to `True`, you must pass a compute_metrics function that takes a boolean argument `compute_result`, which, when passed `True`, will trigger the final global summary statistics from the batch-level summary statistics you've accumulated over the evaluation set. eval_on_start (`bool`, *optional*, defaults to `False`): Whether to perform an evaluation step (sanity check) before training to ensure the validation step works correctly. eval_use_gather_object (`bool`, *optional*, defaults to `False`): Whether to recursively gather objects in a nested list/tuple/dictionary of objects from all devices. This should only be enabled if users are not just returning tensors, and this is actively discouraged by PyTorch. use_liger_kernel (`bool`, *optional*, defaults to `False`): Whether to enable the [Liger](https://github.com/linkedin/Liger-Kernel) Kernel for LLM model training. It can effectively increase multi-GPU training throughput by ~20% and reduce memory usage by ~60%; it works out of the box with flash attention, PyTorch FSDP, and Microsoft DeepSpeed. Currently, it supports llama, mistral, mixtral and gemma models. liger_kernel_config (`Optional[dict]`, *optional*): Configuration to be used for Liger Kernel. When use_liger_kernel=True, this dict is passed as keyword arguments to the `_apply_liger_kernel_to_instance` function, which specifies which kernels to apply. Available options vary by model but typically include: 'rope', 'swiglu', 'cross_entropy', 'fused_linear_cross_entropy', 'rms_norm', etc. If `None`, use the default kernel configurations. average_tokens_across_devices (`bool`, *optional*, defaults to `True`): Whether or not to average tokens across devices. If enabled, will use all_reduce to synchronize num_tokens_in_batch for precise loss calculation. Reference: https://github.com/huggingface/transformers/issues/34242
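With `batch_eval_metrics` enabled, `compute_metrics` must accept the `compute_result` flag described above. A minimal hedged sketch (the accumulator below is illustrative, not part of the library):

```python
batch_accs = []

def compute_metrics(eval_pred, compute_result=False):
    # accumulate a per-batch statistic instead of keeping all logits in memory
    preds = eval_pred.predictions.argmax(-1)
    batch_accs.append(float((preds == eval_pred.label_ids).mean()))
    if compute_result:  # final call: reduce batch-level stats to global metrics
        metrics = {"accuracy": sum(batch_accs) / len(batch_accs)}
        batch_accs.clear()
        return metrics
    return {}
```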
""" _VALID_DICT_FIELDS = ['accelerator_config', 'fsdp_config', 'deepspeed', 'gradient_checkpointing_kwargs', 'lr_scheduler_kwargs'] output_dir: Optional[str] = field(default=None, metadata={'help': "The output directory where the model predictions and checkpoints will be written. Defaults to 'trainer_output' if not provided."}) overwrite_output_dir: bool = field(default=False, metadata={'help': 'Overwrite the content of the output directory. Use this to continue training if output_dir points to a checkpoint directory.'}) do_train: bool = field(default=False, metadata={'help': 'Whether to run training.'}) do_eval: bool = field(default=False, metadata={'help': 'Whether to run eval on the dev set.'}) do_predict: bool = field(default=False, metadata={'help': 'Whether to run predictions on the test set.'}) eval_strategy: Union[IntervalStrategy, str] = field(default='no', metadata={'help': 'The evaluation strategy to use.'}) prediction_loss_only: bool = field(default=False, metadata={'help': 'When performing evaluation and predictions, only returns the loss.'}) per_device_train_batch_size: int = field(default=8, metadata={'help': 'Batch size per device accelerator core/CPU for training.'}) per_device_eval_batch_size: int = field(default=8, metadata={'help': 'Batch size per device accelerator core/CPU for evaluation.'}) per_gpu_train_batch_size: Optional[int] = field(default=None, metadata={'help': 'Deprecated, the use of `--per_device_train_batch_size` is preferred. Batch size per GPU/TPU core/CPU for training.'}) per_gpu_eval_batch_size: Optional[int] = field(default=None, metadata={'help': 'Deprecated, the use of `--per_device_eval_batch_size` is preferred. Batch size per GPU/TPU core/CPU for evaluation.'}) gradient_accumulation_steps: int = field(default=1, metadata={'help': 'Number of update steps to accumulate before performing a backward/update pass.'}) eval_accumulation_steps: Optional[int] = field(default=None, metadata={'help': 'Number of prediction steps to accumulate before moving the tensors to the CPU.'}) eval_delay: Optional[float] = field(default=0, metadata={'help': 'Number of epochs or steps to wait for before the first evaluation can be performed, depending on the eval_strategy.'}) torch_empty_cache_steps: Optional[int] = field(default=None, metadata={'help': 'Number of steps to wait before calling `torch.<device>.empty_cache()`. This can help avoid CUDA out-of-memory errors by lowering peak VRAM usage at a cost of about [10% slower performance](https://github.com/huggingface/transformers/issues/31372). If left unset or set to None, cache will not be emptied.'}) learning_rate: float = field(default=5e-05, metadata={'help': 'The initial learning rate for AdamW.'}) weight_decay: float = field(default=0.0, metadata={'help': 'Weight decay for AdamW if we apply some.'}) adam_beta1: float = field(default=0.9, metadata={'help': 'Beta1 for AdamW optimizer'}) adam_beta2: float = field(default=0.999, metadata={'help': 'Beta2 for AdamW optimizer'}) adam_epsilon: float = field(default=1e-08, metadata={'help': 'Epsilon for AdamW optimizer.'}) max_grad_norm: float = field(default=1.0, metadata={'help': 'Max gradient norm.'}) num_train_epochs: float = field(default=3.0, metadata={'help': 'Total number of training epochs to perform.'}) max_steps: int = field(default=-1, metadata={'help': 'If > 0: set total number of training steps to perform. Overrides num_train_epochs.'})
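# Illustrative note (not part of the original source): the effective global batch size implied
# by the fields above is per_device_train_batch_size * gradient_accumulation_steps * num_devices.
# A hypothetical sketch:
#   args = TrainingArguments(output_dir="out", per_device_train_batch_size=8, gradient_accumulation_steps=4)
#   # on 2 GPUs this yields 8 * 4 * 2 = 64 samples per optimizer update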
lr_scheduler_type: Union[SchedulerType, str] = field(default='linear', metadata={'help': 'The scheduler type to use.'}) lr_scheduler_kwargs: Optional[Union[dict[str, Any], str]] = field(default_factory=dict, metadata={'help': "Extra parameters for the lr_scheduler such as {'num_cycles': 1} for the cosine with hard restarts."}) warmup_ratio: float = field(default=0.0, metadata={'help': 'Linear warmup over warmup_ratio fraction of total steps.'}) warmup_steps: int = field(default=0, metadata={'help': 'Linear warmup over warmup_steps.'}) log_level: str = field(default='passive', metadata={'help': "Logger log level to use on the main node. Possible choices are the log levels as strings: 'debug', 'info', 'warning', 'error' and 'critical', plus a 'passive' level which doesn't set anything and lets the application set the level. Defaults to 'passive'.", 'choices': trainer_log_levels.keys()}) log_level_replica: str = field(default='warning', metadata={'help': 'Logger log level to use on replica nodes. Same choices and defaults as ``log_level``', 'choices': trainer_log_levels.keys()}) log_on_each_node: bool = field(default=True, metadata={'help': 'When doing a multinode distributed training, whether to log once per node or just once on the main node.'}) logging_dir: Optional[str] = field(default=None, metadata={'help': 'Tensorboard log dir.'}) logging_strategy: Union[IntervalStrategy, str] = field(default='steps', metadata={'help': 'The logging strategy to use.'}) logging_first_step: bool = field(default=False, metadata={'help': 'Log the first global_step'}) logging_steps: float = field(default=500, metadata={'help': 'Log every X update steps. Should be an integer or a float in range `[0,1)`. If smaller than 1, will be interpreted as ratio of total training steps.'}) logging_nan_inf_filter: bool = field(default=True, metadata={'help': 'Filter nan and inf losses for logging.'}) save_strategy: Union[SaveStrategy, str] = field(default='steps', metadata={'help': 'The checkpoint save strategy to use.'}) save_steps: float = field(default=500, metadata={'help': 'Save checkpoint every X update steps. Should be an integer or a float in range `[0,1)`. If smaller than 1, will be interpreted as ratio of total training steps.'}) save_total_limit: Optional[int] = field(default=None, metadata={'help': "If a value is passed, will limit the total amount of checkpoints. Deletes the older checkpoints in `output_dir`. When `load_best_model_at_end` is enabled, the 'best' checkpoint according to `metric_for_best_model` will always be retained in addition to the most recent ones. For example, for `save_total_limit=5` and `load_best_model_at_end=True`, the four last checkpoints will always be retained alongside the best model. When `save_total_limit=1` and `load_best_model_at_end=True`, it is possible that two checkpoints are saved: the last one and the best one (if they are different). Default is unlimited checkpoints"})
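# Illustrative note: `logging_steps`, `save_steps` and `eval_steps` accept a float in `[0,1)`
# as a ratio of total training steps, as stated in the help strings above. A hypothetical sketch:
#   args = TrainingArguments(output_dir="out", logging_steps=0.05, save_steps=0.25)
#   # logs every 5% of total training steps and checkpoints every 25%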
save_safetensors: Optional[bool] = field(default=True, metadata={'help': 'Use safetensors saving and loading for state dicts instead of default torch.load and torch.save.'}) save_on_each_node: bool = field(default=False, metadata={'help': 'When doing multi-node distributed training, whether to save models and checkpoints on each node, or only on the main one'}) save_only_model: bool = field(default=False, metadata={'help': "When checkpointing, whether to only save the model, or also the optimizer, scheduler & rng state. Note that when this is true, you won't be able to resume training from checkpoint. This enables you to save storage by not storing the optimizer, scheduler & rng state. You can only load the model using from_pretrained with this option set to True."}) restore_callback_states_from_checkpoint: bool = field(default=False, metadata={'help': 'Whether to restore the callback states from the checkpoint. If `True`, will override callbacks passed to the `Trainer` if they exist in the checkpoint.'}) no_cuda: bool = field(default=False, metadata={'help': 'This argument is deprecated. It will be removed in version 5.0 of 🤗 Transformers.'}) use_cpu: bool = field(default=False, metadata={'help': 'Whether or not to use CPU. If left to False, we will use the available torch device/backend (cuda/mps/xpu/hpu etc.)'}) use_mps_device: bool = field(default=False, metadata={'help': 'This argument is deprecated. `mps` device will be used if available similar to `cuda` device. It will be removed in version 5.0 of 🤗 Transformers'}) seed: int = field(default=42, metadata={'help': 'Random seed that will be set at the beginning of training.'}) data_seed: Optional[int] = field(default=None, metadata={'help': 'Random seed to be used with data samplers.'}) jit_mode_eval: bool = field(default=False, metadata={'help': 'Whether or not to use PyTorch jit trace for inference'}) bf16: bool = field(default=False, metadata={'help': 'Whether to use bf16 (mixed) precision instead of 32-bit. Requires Ampere or higher NVIDIA architecture or using CPU (use_cpu) or Ascend NPU. This is an experimental API and it may change.'}) fp16: bool = field(default=False, metadata={'help': 'Whether to use fp16 (mixed) precision instead of 32-bit'}) fp16_opt_level: str = field(default='O1', metadata={'help': "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. See details at https://nvidia.github.io/apex/amp.html"}) half_precision_backend: str = field(default='auto', metadata={'help': 'The backend to be used for half precision.', 'choices': ['auto', 'apex', 'cpu_amp']}) bf16_full_eval: bool = field(default=False, metadata={'help': 'Whether to use full bfloat16 evaluation instead of 32-bit. This is an experimental API and it may change.'}) fp16_full_eval: bool = field(default=False, metadata={'help': 'Whether to use full float16 evaluation instead of 32-bit'}) tf32: Optional[bool] = field(default=None, metadata={'help': 'Whether to enable tf32 mode, available in Ampere and newer GPU architectures.
This is an experimental API and it may change.'}) local_rank: int = field(default=-1, metadata={'help': 'For distributed training: local_rank'}) ddp_backend: Optional[str] = field(default=None, metadata={'help': 'The backend to be used for distributed training', 'choices': ['nccl', 'gloo', 'mpi', 'ccl', 'hccl', 'cncl', 'mccl']}) tpu_num_cores: Optional[int] = field(default=None, metadata={'help': 'TPU: Number of TPU cores (automatically passed by launcher script)'}) tpu_metrics_debug: bool = field(default=False, metadata={'help': 'Deprecated, the use of `--debug tpu_metrics_debug` is preferred. TPU: Whether to print debug metrics'}) debug: Union[str, list[DebugOption]] = field(default='', metadata={'help': 'Whether or not to enable debug mode. Current options: `underflow_overflow` (Detect underflow and overflow in activations and weights), `tpu_metrics_debug` (print debug metrics on TPU).'}) dataloader_drop_last: bool = field(default=False, metadata={'help': 'Drop the last incomplete batch if it is not divisible by the batch size.'}) eval_steps: Optional[float] = field(default=None, metadata={'help': 'Run an evaluation every X steps. Should be an integer or a float in range `[0,1)`. If smaller than 1, will be interpreted as ratio of total training steps.'}) dataloader_num_workers: int = field(default=0, metadata={'help': 'Number of subprocesses to use for data loading (PyTorch only). 0 means that the data will be loaded in the main process.'}) dataloader_prefetch_factor: Optional[int] = field(default=None, metadata={'help': 'Number of batches loaded in advance by each worker. 2 means there will be a total of 2 * num_workers batches prefetched across all workers.'}) past_index: int = field(default=-1, metadata={'help': 'If >=0, uses the corresponding part of the output as the past state for next step.'}) run_name: Optional[str] = field(default=None, metadata={'help': 'An optional descriptor for the run. Notably used for trackio, wandb, mlflow, comet and swanlab logging.'}) disable_tqdm: Optional[bool] = field(default=None, metadata={'help': 'Whether or not to disable the tqdm progress bars.'}) remove_unused_columns: Optional[bool] = field(default=True, metadata={'help': 'Remove columns not required by the model when using an nlp.Dataset.'}) label_names: Optional[list[str]] = field(default=None, metadata={'help': 'The list of keys in your dictionary of inputs that correspond to the labels.'}) load_best_model_at_end: Optional[bool] = field(default=False, metadata={'help': 'Whether or not to load the best model found during training at the end of training. When this option is enabled, the best checkpoint will always be saved. See `save_total_limit` for more.'}) metric_for_best_model: Optional[str] = field(default=None, metadata={'help': 'The metric to use to compare two different models.'}) greater_is_better: Optional[bool] = field(default=None, metadata={'help': 'Whether the `metric_for_best_model` should be maximized or not.'}) ignore_data_skip: bool = field(default=False, metadata={'help': 'When resuming training, whether or not to skip the first epochs and batches to get to the same training data.'}) fsdp: Optional[Union[list[FSDPOption], str]] = field(default='', metadata={'help': 'Whether or not to use PyTorch Fully Sharded Data Parallel (FSDP) training (in distributed training only). The base option should be `full_shard`, `shard_grad_op` or `no_shard` and you can add CPU-offload to `full_shard` or `shard_grad_op` like this: `full_shard offload` or `shard_grad_op offload`. You can add auto-wrap to `full_shard` or `shard_grad_op` with the same syntax: `full_shard auto_wrap` or `shard_grad_op auto_wrap`.'})
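# Illustrative note: the `fsdp` string is split on whitespace in `__post_init__` below, so
# several options travel in one string. A hypothetical sketch:
#   args = TrainingArguments(output_dir="out", fsdp="full_shard auto_wrap",
#                            fsdp_config={"min_num_params": 1_000_000})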
fsdp_min_num_params: int = field(default=0, metadata={'help': "This parameter is deprecated. FSDP's minimum number of parameters for Default Auto Wrapping. (useful only when `fsdp` field is passed)."}) fsdp_config: Optional[Union[dict[str, Any], str]] = field(default=None, metadata={'help': 'Config to be used with FSDP (Pytorch Fully Sharded Data Parallel). The value is either an fsdp json config file (e.g., `fsdp_config.json`) or an already loaded json file as `dict`.'}) fsdp_transformer_layer_cls_to_wrap: Optional[str] = field(default=None, metadata={'help': 'This parameter is deprecated. Transformer layer class name (case-sensitive) to wrap, e.g., `BertLayer`, `GPTJBlock`, `T5Block` .... (useful only when `fsdp` flag is passed).'}) accelerator_config: Optional[Union[dict, str]] = field(default=None, metadata={'help': 'Config to be used with the internal Accelerator object initialization. The value is either an accelerator json config file (e.g., `accelerator_config.json`) or an already loaded json file as `dict`.'}) parallelism_config: Optional[ParallelismConfig] = field(default=None, metadata={'help': 'Parallelism configuration for the training run. Requires Accelerate `1.10.1`'}) deepspeed: Optional[Union[dict, str]] = field(default=None, metadata={'help': 'Enable deepspeed and pass the path to deepspeed json config file (e.g. `ds_config.json`) or an already loaded json file as a dict'}) label_smoothing_factor: float = field(default=0.0, metadata={'help': 'The label smoothing epsilon to apply (zero means no label smoothing).'}) default_optim = 'adamw_torch' if is_torch_available(): from .pytorch_utils import is_torch_greater_or_equal_than_2_8 if is_torch_greater_or_equal_than_2_8: default_optim = 'adamw_torch_fused' optim: Union[OptimizerNames, str] = field(default=default_optim, metadata={'help': 'The optimizer to use.'}) optim_args: Optional[str] = field(default=None, metadata={'help': 'Optional arguments to supply to optimizer.'}) adafactor: bool = field(default=False, metadata={'help': 'Whether or not to replace AdamW by Adafactor.'}) group_by_length: bool = field(default=False, metadata={'help': 'Whether or not to group samples of roughly the same length together when batching.'}) length_column_name: Optional[str] = field(default='length', metadata={'help': 'Column name with precomputed lengths to use when grouping by length.'}) report_to: Union[None, str, list[str]] = field(default=None, metadata={'help': 'The list of integrations to report the results and logs to.'}) ddp_find_unused_parameters: Optional[bool] = field(default=None, metadata={'help': 'When using distributed training, the value of the flag `find_unused_parameters` passed to `DistributedDataParallel`.'}) ddp_bucket_cap_mb: Optional[int] = field(default=None, metadata={'help': 'When using distributed training, the value of the flag `bucket_cap_mb` passed to `DistributedDataParallel`.'}) ddp_broadcast_buffers: Optional[bool] = field(default=None, metadata={'help': 'When using distributed training, the value of the flag `broadcast_buffers` passed to `DistributedDataParallel`.'}) dataloader_pin_memory: bool = field(default=True, metadata={'help': 'Whether or not to pin memory for DataLoader.'}) dataloader_persistent_workers: bool = field(default=False, metadata={'help': 'If True, the data loader will not shut down the worker processes after a dataset has been consumed once.
This keeps the workers' Dataset instances alive. Can potentially speed up training, but will increase RAM usage.'}) skip_memory_metrics: bool = field(default=True, metadata={'help': 'Whether or not to skip adding memory profiler reports to metrics.'}) use_legacy_prediction_loop: bool = field(default=False, metadata={'help': 'Whether or not to use the legacy prediction_loop in the Trainer.'}) push_to_hub: bool = field(default=False, metadata={'help': 'Whether or not to upload the trained model to the model hub after training.'}) resume_from_checkpoint: Optional[str] = field(default=None, metadata={'help': 'The path to a folder with a valid checkpoint for your model.'}) hub_model_id: Optional[str] = field(default=None, metadata={'help': 'The name of the repository to keep in sync with the local `output_dir`.'}) hub_strategy: Union[HubStrategy, str] = field(default='every_save', metadata={'help': 'The hub strategy to use when `--push_to_hub` is activated.'}) hub_token: Optional[str] = field(default=None, metadata={'help': 'The token to use to push to the Model Hub.'}) hub_private_repo: Optional[bool] = field(default=None, metadata={'help': "Whether to make the repo private. If `None` (default), the repo will be public unless the organization's default is private. This value is ignored if the repo already exists."}) hub_always_push: bool = field(default=False, metadata={'help': "Unless `True`, the Trainer will skip pushes if the previous one wasn't finished yet."}) hub_revision: Optional[str] = field(default=None, metadata={'help': 'The revision to use when pushing to the Hub. Can be a branch name, a tag, or a commit hash.'}) gradient_checkpointing: bool = field(default=False, metadata={'help': 'If True, use gradient checkpointing to save memory at the expense of slower backward pass.'}) gradient_checkpointing_kwargs: Optional[Union[dict[str, Any], str]] = field(default=None, metadata={'help': 'Gradient checkpointing keyword arguments such as `use_reentrant`. Will be passed to `torch.utils.checkpoint.checkpoint` through `model.gradient_checkpointing_enable`.'}) include_inputs_for_metrics: bool = field(default=False, metadata={'help': 'This argument is deprecated and will be removed in version 5 of 🤗 Transformers. Use `include_for_metrics` instead.'}) include_for_metrics: list[str] = field(default_factory=list, metadata={'help': "List of strings to specify additional data to include in the `compute_metrics` function. Options: 'inputs', 'loss'."}) eval_do_concat_batches: bool = field(default=True, metadata={'help': 'Whether to recursively concat inputs/losses/labels/predictions across batches. If `False`, will instead store them as lists, with each batch kept separate.'}) fp16_backend: str = field(default='auto', metadata={'help': 'Deprecated. Use half_precision_backend instead', 'choices': ['auto', 'apex', 'cpu_amp']}) push_to_hub_model_id: Optional[str] = field(default=None, metadata={'help': 'The name of the repository to which to push the `Trainer`.'}) push_to_hub_organization: Optional[str] = field(default=None, metadata={'help': 'The name of the organization to which to push the `Trainer`.'}) push_to_hub_token: Optional[str] = field(default=None, metadata={'help': 'The token to use to push to the Model Hub.'}) _n_gpu: int = field(init=False, repr=False, default=-1)
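# Illustrative note: a hypothetical Hub-enabled run; "user/my-model" is a placeholder repo id:
#   args = TrainingArguments(output_dir="out", push_to_hub=True,
#                            hub_model_id="user/my-model", hub_strategy="checkpoint")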
mp_parameters: str = field(default='', metadata={'help': 'Used by the SageMaker launcher to send mp-specific args. Ignored in Trainer'}) auto_find_batch_size: bool = field(default=False, metadata={'help': 'Whether to automatically halve the batch size and rerun the training loop each time a CUDA Out-of-Memory error is reached'}) full_determinism: bool = field(default=False, metadata={'help': 'Whether to call enable_full_determinism instead of set_seed for reproducibility in distributed training. Important: this will negatively impact the performance, so only use it for debugging.'}) torchdynamo: Optional[str] = field(default=None, metadata={'help': 'This argument is deprecated, use `--torch_compile_backend` instead.'}) ray_scope: Optional[str] = field(default='last', metadata={'help': 'The scope to use when doing hyperparameter search with Ray. By default, `"last"` will be used. Ray will then use the last checkpoint of all trials, compare those, and select the best one. However, other options are also available. See the Ray documentation (https://docs.ray.io/en/latest/tune/api_docs/analysis.html#ray.tune.ExperimentAnalysis.get_best_trial) for more options.'}) ddp_timeout: int = field(default=1800, metadata={'help': 'Overrides the default timeout for distributed training (value should be given in seconds).'}) torch_compile: bool = field(default=False, metadata={'help': 'If set to `True`, the model will be wrapped in `torch.compile`.'}) torch_compile_backend: Optional[str] = field(default=None, metadata={'help': 'Which backend to use with `torch.compile`, passing one will trigger a model compilation.'}) torch_compile_mode: Optional[str] = field(default=None, metadata={'help': 'Which mode to use with `torch.compile`, passing one will trigger a model compilation.'}) include_tokens_per_second: Optional[bool] = field(default=False, metadata={'help': 'If set to `True`, the speed metrics will include `tgs` (tokens per second per device).'}) include_num_input_tokens_seen: Optional[Union[str, bool]] = field(default=False, metadata={'help': "Whether to track the number of input tokens seen. Can be `'all'` to count all tokens, `'non_padding'` to count only non-padding tokens, or a boolean (`True` maps to `'all'`, `False` to `'no'`)."}) neftune_noise_alpha: Optional[float] = field(default=None, metadata={'help': 'Activates NEFTune noise embeddings in the model. NEFTune has been proven to drastically improve model performance for instruction fine-tuning. Check out the original paper here: https://huggingface.co/papers/2310.05914 and the original code here: https://github.com/neelsjain/NEFTune. Only supported for `PreTrainedModel` and `PeftModel` classes.'}) optim_target_modules: Union[None, str, list[str]] = field(default=None, metadata={'help': 'Target modules for the optimizer defined in the `optim` argument. Only used for the GaLore optimizer at the moment.'}) batch_eval_metrics: bool = field(default=False, metadata={'help': 'Break eval metrics calculation into batches to save memory.'}) eval_on_start: bool = field(default=False, metadata={'help': 'Whether to run through the entire `evaluation` step at the very beginning of training as a sanity check.'}) use_liger_kernel: Optional[bool] = field(default=False, metadata={'help': 'Whether or not to enable the Liger Kernel for model training.'}) liger_kernel_config: Optional[dict[str, bool]] = field(default=None, metadata={'help': "Configuration to be used for Liger Kernel. When use_liger_kernel=True, this dict is passed as keyword arguments to the `_apply_liger_kernel_to_instance` function, which specifies which kernels to apply. Available options vary by model but typically include: 'rope', 'swiglu', 'cross_entropy', 'fused_linear_cross_entropy', 'rms_norm', etc. If None, use the default kernel configurations."})
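# Illustrative note: a hypothetical Liger Kernel configuration; the available keys depend on
# the model, as described in the field help above:
#   args = TrainingArguments(output_dir="out", use_liger_kernel=True,
#                            liger_kernel_config={"rope": True, "rms_norm": True})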
eval_use_gather_object: Optional[bool] = field(default=False, metadata={'help': 'Whether to recursively gather objects in a nested list/tuple/dictionary of objects from all devices.'}) average_tokens_across_devices: Optional[bool] = field(default=True, metadata={'help': 'Whether or not to average tokens across devices. If enabled, will use all_reduce to synchronize num_tokens_in_batch for precise loss calculation. Reference: https://github.com/huggingface/transformers/issues/34242'}) def __post_init__(self): if self.output_dir is None: self.output_dir = 'trainer_output' logger.info("No output directory specified, defaulting to 'trainer_output'. To change this behavior, specify --output_dir when creating TrainingArguments.") for field in self._VALID_DICT_FIELDS: passed_value = getattr(self, field) if isinstance(passed_value, str) and passed_value.startswith('{'): loaded_dict = json.loads(passed_value) loaded_dict = _convert_str_dict(loaded_dict) setattr(self, field, loaded_dict)
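# Illustrative note: because of the loop above, every field listed in _VALID_DICT_FIELDS may
# also be passed as a JSON string (handy on the command line). Hypothetically,
#   TrainingArguments(output_dir="out", lr_scheduler_kwargs='{"num_cycles": 2}')
# is equivalent to passing lr_scheduler_kwargs={"num_cycles": 2}.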
if self.output_dir is not None: self.output_dir = os.path.expanduser(self.output_dir) if self.logging_dir is None and self.output_dir is not None: self.logging_dir = os.path.join(self.output_dir, default_logdir()) if self.logging_dir is not None: self.logging_dir = os.path.expanduser(self.logging_dir) if self.disable_tqdm is None: self.disable_tqdm = logger.getEffectiveLevel() > logging.WARN if isinstance(self.eval_strategy, EvaluationStrategy): warnings.warn('using `EvaluationStrategy` for `eval_strategy` is deprecated and will be removed in version 5 of 🤗 Transformers. Use `IntervalStrategy` instead', FutureWarning) self.eval_strategy = self.eval_strategy.value if self.no_cuda: warnings.warn('using `no_cuda` is deprecated and will be removed in version 5.0 of 🤗 Transformers. Use `use_cpu` instead', FutureWarning) self.use_cpu = self.no_cuda self.eval_strategy = IntervalStrategy(self.eval_strategy) self.logging_strategy = IntervalStrategy(self.logging_strategy) self.save_strategy = SaveStrategy(self.save_strategy) self.hub_strategy = HubStrategy(self.hub_strategy) self.lr_scheduler_type = SchedulerType(self.lr_scheduler_type) if self.do_eval is False and self.eval_strategy != IntervalStrategy.NO: self.do_eval = True if self.torch_empty_cache_steps is not None: if not (isinstance(self.torch_empty_cache_steps, int) and self.torch_empty_cache_steps > 0): raise ValueError(f'`torch_empty_cache_steps` must be an integer bigger than 0, got {self.torch_empty_cache_steps}.') if self.eval_strategy == IntervalStrategy.STEPS and (self.eval_steps is None or self.eval_steps == 0): if self.logging_steps > 0: logger.info(f'using `logging_steps` to initialize `eval_steps` to {self.logging_steps}') self.eval_steps = self.logging_steps else: raise ValueError(f'evaluation strategy {self.eval_strategy} requires either non-zero --eval_steps or --logging_steps') if self.logging_strategy == IntervalStrategy.STEPS and self.logging_steps == 0: raise ValueError(f'logging strategy {self.logging_strategy} requires non-zero --logging_steps') if self.logging_strategy == IntervalStrategy.STEPS and self.logging_steps > 1: if self.logging_steps != int(self.logging_steps): raise ValueError(f'--logging_steps must be an integer if bigger than 1: {self.logging_steps}') self.logging_steps = int(self.logging_steps) if self.eval_strategy == IntervalStrategy.STEPS and self.eval_steps > 1: if self.eval_steps != int(self.eval_steps): raise ValueError(f'--eval_steps must be an integer if bigger than 1: {self.eval_steps}') self.eval_steps = int(self.eval_steps) if self.save_strategy == SaveStrategy.STEPS and self.save_steps > 1: if self.save_steps != int(self.save_steps): raise ValueError(f'--save_steps must be an integer if bigger than 1: {self.save_steps}') self.save_steps = int(self.save_steps) if self.load_best_model_at_end and self.save_strategy != SaveStrategy.BEST: if self.eval_strategy != self.save_strategy: raise ValueError(f'--load_best_model_at_end requires the save and eval strategy to match, but found\n- Evaluation strategy: {self.eval_strategy}\n- Save strategy: {self.save_strategy}') if self.eval_strategy == IntervalStrategy.STEPS and self.save_steps % self.eval_steps != 0: if self.eval_steps < 1 or self.save_steps < 1: if not (self.eval_steps < 1 and self.save_steps < 1): raise ValueError(f'--load_best_model_at_end requires the saving steps to be a multiple of the evaluation steps, which cannot be guaranteed when mixing ratio and absolute steps for save_steps {self.save_steps} and eval_steps {self.eval_steps}.') LARGE_MULTIPLIER = 1000000 if self.save_steps * LARGE_MULTIPLIER % (self.eval_steps * LARGE_MULTIPLIER) != 0: raise ValueError(f'--load_best_model_at_end requires the saving steps to be a multiple of the evaluation steps, but found {self.save_steps}, which is not a multiple of {self.eval_steps}.') else: raise ValueError(f'--load_best_model_at_end requires the saving steps to be a round multiple of the evaluation steps, but found {self.save_steps}, which is not a round multiple of {self.eval_steps}.')
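# Illustrative note: with load_best_model_at_end=True (and matching "steps" strategies),
# save_steps must be a round multiple of eval_steps. Hypothetically, eval_steps=50 with
# save_steps=100 passes the checks above, while eval_steps=50 with save_steps=75 raises.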
safetensors_available = is_safetensors_available() if self.save_safetensors and (not safetensors_available): raise ValueError(f'--save_safetensors={self.save_safetensors} requires safetensors to be installed!') if not self.save_safetensors and safetensors_available: logger.info(f'Found safetensors installation, but --save_safetensors={self.save_safetensors}. Safetensors should be a preferred weights saving format due to security and performance reasons. If your model cannot be saved by safetensors, please feel free to open an issue at https://github.com/huggingface/safetensors!') if (self.load_best_model_at_end or self.lr_scheduler_type == SchedulerType.REDUCE_ON_PLATEAU) and self.metric_for_best_model is None: self.metric_for_best_model = 'loss' if self.greater_is_better is None and self.metric_for_best_model is not None: self.greater_is_better = not self.metric_for_best_model.endswith('loss') if is_torch_available(): if self.fp16_backend and self.fp16_backend != 'auto': warnings.warn('`fp16_backend` is deprecated and will be removed in version 5 of 🤗 Transformers. Use `half_precision_backend` instead', FutureWarning) self.half_precision_backend = self.fp16_backend if self.bf16 or self.bf16_full_eval: if self.use_cpu and (not is_torch_available()) and (not is_torch_xla_available()): raise ValueError("Your setup doesn't support bf16/(cpu, tpu, neuroncore). You need torch>=1.10") elif not self.use_cpu: if not is_torch_bf16_gpu_available() and (not is_torch_xla_available()): error_message = "Your setup doesn't support bf16/gpu." if is_torch_cuda_available(): error_message += ' You need Ampere+ GPU with cuda>=11.0' raise ValueError(error_message) if self.fp16 and self.bf16: raise ValueError('At most one of fp16 and bf16 can be True, but not both') if self.fp16_full_eval and self.bf16_full_eval: raise ValueError('At most one of fp16 and bf16 can be True for full eval, but not both') if self.bf16: if self.half_precision_backend == 'apex': raise ValueError('`--half_precision_backend apex`: GPU bf16 is not supported by apex.') if self.half_precision_backend == 'apex': if not is_apex_available(): raise ImportError('Using FP16 with APEX but APEX is not installed, please refer to https://www.github.com/nvidia/apex.') try: from apex import amp except ImportError as e: raise ImportError(f"apex.amp is deprecated in the latest version of apex, causing this error {e}. Either revert to an older version or use pytorch amp by setting half_precision_backend='auto' instead. See https://github.com/NVIDIA/apex/pull/1896") if self.lr_scheduler_type == SchedulerType.REDUCE_ON_PLATEAU: if self.eval_strategy == IntervalStrategy.NO: raise ValueError('lr_scheduler_type reduce_lr_on_plateau requires an eval strategy') if not is_torch_available(): raise ValueError('lr_scheduler_type reduce_lr_on_plateau requires torch to be installed') self.optim = OptimizerNames(self.optim) if self.adafactor: warnings.warn('`--adafactor` is deprecated and will be removed in version 5 of 🤗 Transformers. Use `--optim adafactor` instead', FutureWarning) self.optim = OptimizerNames.ADAFACTOR if is_accelerate_available(): if not isinstance(self.accelerator_config, AcceleratorConfig): if self.accelerator_config is None: self.accelerator_config = AcceleratorConfig() elif isinstance(self.accelerator_config, dict): self.accelerator_config = AcceleratorConfig(**self.accelerator_config) elif isinstance(self.accelerator_config, type): raise NotImplementedError('Tried passing in a callable to `accelerator_config`, but this is not supported.
Please pass in a fully constructed `AcceleratorConfig` object instead.') else: self.accelerator_config = AcceleratorConfig.from_json_file(self.accelerator_config) if self.accelerator_config.split_batches: logger.info('Using `split_batches=True` in `accelerator_config` will override `per_device_train_batch_size`. Batches will be split across all processes equally when using `split_batches=True`.') if is_torch_available(): self.device if self.torchdynamo is not None: warnings.warn('`torchdynamo` is deprecated and will be removed in version 5 of 🤗 Transformers. Use `torch_compile_backend` instead', FutureWarning) self.torch_compile_backend = self.torchdynamo if (self.torch_compile_mode is not None or self.torch_compile_backend is not None) and (not self.torch_compile): self.torch_compile = True if self.torch_compile and self.torch_compile_backend is None: if not self.use_cpu and is_torch_hpu_available(): self.torch_compile_backend = 'hpu_backend' else: self.torch_compile_backend = 'inductor' if self.torch_compile: prefix = 'ACCELERATE_DYNAMO_' os.environ[prefix + 'BACKEND'] = self.torch_compile_backend if self.torch_compile_mode is not None: os.environ[prefix + 'MODE'] = self.torch_compile_mode if is_torch_available() and self.torch_compile: if is_torch_tf32_available(): if self.tf32 is None and not (self.fp16 or self.bf16): device_str = 'MUSA' if is_torch_musa_available() else 'CUDA' logger.info(f"Setting TF32 in {device_str} backends to speedup torch compile, you won't see any improvement otherwise.") if is_torch_musa_available(): torch.backends.mudnn.allow_tf32 = True else: torch.backends.cuda.matmul.allow_tf32 = True torch.backends.cudnn.allow_tf32 = True else: logger.warning('The speedups for torchdynamo mostly come with GPU Ampere or higher, which was not detected here.') if is_torch_available() and self.tf32 is not None: if self.tf32: if is_torch_tf32_available(): if is_torch_musa_available(): torch.backends.mudnn.allow_tf32 = True else: torch.backends.cuda.matmul.allow_tf32 = True torch.backends.cudnn.allow_tf32 = True else: raise ValueError('--tf32 requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7') elif is_torch_tf32_available(): if is_torch_musa_available(): torch.backends.mudnn.allow_tf32 = False else: torch.backends.cuda.matmul.allow_tf32 = False torch.backends.cudnn.allow_tf32 = False if self.report_to is None: logger.info('The default value for the training argument `--report_to` will change in v5 (from all installed integrations to none). In v5, you will need to use `--report_to all` to get the same behavior as now. You should start updating your code and make this info disappear :-).') self.report_to = 'all' if self.report_to == 'all' or self.report_to == ['all']: from .integrations import get_available_reporting_integrations self.report_to = get_available_reporting_integrations() if 'codecarbon' in self.report_to and torch.version.hip: logger.warning('When using the Trainer, CodeCarbonCallback requires the `codecarbon` package, which is not compatible with AMD ROCm (https://github.com/mlco2/codecarbon/pull/490). Automatically disabling the codecarbon callback.
Reference: https://huggingface.co/docs/transformers/v4.39.3/en/main_classes/trainer#transformers.TrainingArguments.report_to.') self.report_to.remove('codecarbon') elif self.report_to == 'none' or self.report_to == ['none']: self.report_to = [] elif not isinstance(self.report_to, list): self.report_to = [self.report_to] if self.warmup_ratio < 0 or self.warmup_ratio > 1: raise ValueError('warmup_ratio must lie in range [0,1]') elif self.warmup_ratio > 0 and self.warmup_steps > 0: logger.info('Both warmup_ratio and warmup_steps given, warmup_steps will override any effect of warmup_ratio during training') if not isinstance(self.warmup_steps, int) or self.warmup_steps < 0: raise ValueError('warmup_steps must be of type int and must be 0 or a positive integer.') if isinstance(self.fsdp, bool): self.fsdp = [FSDPOption.FULL_SHARD] if self.fsdp else '' if isinstance(self.fsdp, str): self.fsdp = [FSDPOption(s) for s in self.fsdp.split()] if self.fsdp == [FSDPOption.OFFLOAD]: raise ValueError('`--fsdp offload` can\'t work on its own. It needs to be added to `--fsdp full_shard` or `--fsdp shard_grad_op`. For example, `--fsdp "full_shard offload"`.') elif FSDPOption.FULL_SHARD in self.fsdp and FSDPOption.SHARD_GRAD_OP in self.fsdp: raise ValueError('`--fsdp full_shard` is not compatible with `--fsdp shard_grad_op`.') if self.gradient_checkpointing and (FSDPOption.FULL_SHARD in self.fsdp or FSDPOption.HYBRID_SHARD in self.fsdp): logger.warning('When using FSDP full shard, instead of using `gradient_checkpointing` in TrainingArguments, please use `activation_checkpointing` in `fsdp_config`. The former introduces a redundant AllGather operation in backward pass. Reference: https://github.com/huggingface/transformers/issues/30404') if self.fsdp_config is None: self.fsdp_config = {} if isinstance(self.fsdp_config, str): if len(self.fsdp) == 0: warnings.warn('`--fsdp_config` is useful only when `--fsdp` is specified.') with open(self.fsdp_config, encoding='utf-8') as f: self.fsdp_config = json.load(f) if self.fsdp_config is not None and isinstance(self.fsdp_config, dict): for k in list(self.fsdp_config.keys()): if k.startswith('fsdp_'): v = self.fsdp_config.pop(k) self.fsdp_config[k[5:]] = v if self.fsdp_min_num_params > 0: warnings.warn('using `--fsdp_min_num_params` is deprecated. Use fsdp_config instead ', FutureWarning) self.fsdp_config['min_num_params'] = max(self.fsdp_config.get('min_num_params', 0), self.fsdp_min_num_params) if isinstance(self.fsdp_config.get('transformer_layer_cls_to_wrap', None), str): self.fsdp_config['transformer_layer_cls_to_wrap'] = [self.fsdp_config['transformer_layer_cls_to_wrap']] if self.fsdp_transformer_layer_cls_to_wrap is not None: warnings.warn('using `--fsdp_transformer_layer_cls_to_wrap` is deprecated. 
Use fsdp_config instead ', FutureWarning) self.fsdp_config['transformer_layer_cls_to_wrap'] = self.fsdp_config.get('transformer_layer_cls_to_wrap', []) + [self.fsdp_transformer_layer_cls_to_wrap] if len(self.fsdp) == 0 and self.fsdp_config['min_num_params'] > 0: warnings.warn('`min_num_params` is useful only when `--fsdp` is specified.') if len(self.fsdp) == 0 and self.fsdp_config.get('transformer_layer_cls_to_wrap', None) is not None: warnings.warn('`transformer_layer_cls_to_wrap` is useful only when `--fsdp` is specified.') if len(self.fsdp) > 0 and self.fsdp_config['min_num_params'] > 0 and (self.fsdp_config.get('transformer_layer_cls_to_wrap', None) is not None): raise ValueError('`min_num_params` and `transformer_layer_cls_to_wrap` are mutually exclusive.') self.fsdp_config['xla'] = self.fsdp_config.get('xla', False) self.fsdp_config['xla_fsdp_v2'] = self.fsdp_config.get('xla_fsdp_v2', False) self.fsdp_config['xla_fsdp_grad_ckpt'] = self.fsdp_config.get('xla_fsdp_grad_ckpt', False) if self.fsdp_config['xla']: if len(self.fsdp) > 0: self.xla_fsdp_config = self.fsdp_config.get('xla_fsdp_settings', {}).copy() if 'compute_dtype' in self.xla_fsdp_config: self.xla_fsdp_config['compute_dtype'] = getattr(torch, self.xla_fsdp_config['compute_dtype']) if 'buffer_dtype' in self.xla_fsdp_config: self.xla_fsdp_config['buffer_dtype'] = getattr(torch, self.xla_fsdp_config['buffer_dtype']) else: warnings.warn('XLA FSDP can be used only when `--fsdp` is specified.') elif self.fsdp_config['xla_fsdp_grad_ckpt']: warnings.warn('`--xla_fsdp_grad_ckpt` is useful only when `--xla` is set to true.') if len(self.fsdp) > 0 and (not self.fsdp_config['xla']): os.environ['ACCELERATE_USE_FSDP'] = 'true' from accelerate.utils.constants import FSDP_AUTO_WRAP_POLICY, FSDP_SHARDING_STRATEGY prefix = 'FSDP_' for fsdp_option in self.fsdp: if fsdp_option.upper() in FSDP_SHARDING_STRATEGY: os.environ[f'{prefix}SHARDING_STRATEGY'] = str(FSDP_SHARDING_STRATEGY.index(fsdp_option.upper()) + 1) elif fsdp_option == FSDPOption.OFFLOAD: os.environ[f'{prefix}OFFLOAD_PARAMS'] = 'true' elif fsdp_option == FSDPOption.AUTO_WRAP: os.environ[f'{prefix}AUTO_WRAP_POLICY'] = FSDP_AUTO_WRAP_POLICY[0] if self.fsdp_config['min_num_params'] > 0: os.environ[f'{prefix}MIN_NUM_PARAMS'] = str(self.fsdp_config['min_num_params']) os.environ[f'{prefix}AUTO_WRAP_POLICY'] = FSDP_AUTO_WRAP_POLICY[1] elif self.fsdp_config.get('transformer_layer_cls_to_wrap', None) is not None: os.environ[f'{prefix}TRANSFORMER_CLS_TO_WRAP'] = ','.join(self.fsdp_config['transformer_layer_cls_to_wrap']) prefetch_policy = self.fsdp_config.get('backward_prefetch', 'NO_PREFETCH') os.environ[f'{prefix}BACKWARD_PREFETCH'] = prefetch_policy.upper() os.environ[f'{prefix}FORWARD_PREFETCH'] = str(self.fsdp_config.get('forward_prefetch', 'false')).lower() sync_module_states = str(self.fsdp_config.get('sync_module_states', 'true')).lower() cpu_ram_efficient_loading = str(self.fsdp_config.get('cpu_ram_efficient_loading', 'false')).lower() if sync_module_states == 'false' and cpu_ram_efficient_loading == 'true': raise ValueError('`sync_module_states` must be `"True"` if `cpu_ram_efficient_loading` is `"True"`') os.environ[f'{prefix}SYNC_MODULE_STATES'] = sync_module_states os.environ[f'{prefix}CPU_RAM_EFFICIENT_LOADING'] = cpu_ram_efficient_loading os.environ[f'{prefix}USE_ORIG_PARAMS'] = str(self.fsdp_config.get('use_orig_params', 'true')).lower() if self.tpu_metrics_debug: warnings.warn('using `--tpu_metrics_debug` is deprecated and will be removed in version 5 of 🤗 Transformers. 
Use `--debug tpu_metrics_debug` instead', FutureWarning) if self.debug is None: self.debug = ' tpu_metrics_debug' else: self.debug += ' tpu_metrics_debug' self.tpu_metrics_debug = False if isinstance(self.debug, str): self.debug = [DebugOption(s) for s in self.debug.split()] elif self.debug is None: self.debug = [] self.deepspeed_plugin = None if self.deepspeed: if not is_accelerate_available(): raise ValueError(f"--deepspeed requires Accelerate to be installed: `pip install 'accelerate>={ACCELERATE_MIN_VERSION}'`.") from transformers.integrations.deepspeed import HfTrainerDeepSpeedConfig self.hf_deepspeed_config = HfTrainerDeepSpeedConfig(self.deepspeed) self.hf_deepspeed_config.trainer_config_process(self) from accelerate.utils import DeepSpeedPlugin os.environ['ACCELERATE_USE_DEEPSPEED'] = 'true' self.deepspeed_plugin = DeepSpeedPlugin(hf_ds_config=self.hf_deepspeed_config) elif strtobool(os.environ.get('ACCELERATE_USE_DEEPSPEED', 'false')): from accelerate.utils import DeepSpeedPlugin self.deepspeed_plugin = DeepSpeedPlugin() mixed_precision = os.environ.get('ACCELERATE_MIXED_PRECISION', 'no') self.deepspeed_plugin.set_mixed_precision(mixed_precision) self.deepspeed_plugin.set_deepspeed_weakref() if self.half_precision_backend != 'apex': mixed_precision_dtype = os.environ.get('ACCELERATE_MIXED_PRECISION', 'no') if self.fp16: mixed_precision_dtype = 'fp16' elif self.bf16: mixed_precision_dtype = 'bf16' os.environ['ACCELERATE_MIXED_PRECISION'] = mixed_precision_dtype if self.use_cpu: self.dataloader_pin_memory = False if self.dataloader_num_workers == 0 and self.dataloader_prefetch_factor is not None: raise ValueError('--dataloader_prefetch_factor can only be set when data is loaded in a different process, i.e. when --dataloader_num_workers > 0.') if self.push_to_hub_token is not None: warnings.warn('`--push_to_hub_token` is deprecated and will be removed in version 5 of 🤗 Transformers. Use `--hub_token` instead.', FutureWarning) self.hub_token = self.push_to_hub_token if self.push_to_hub_model_id is not None: self.hub_model_id = get_full_repo_name(self.push_to_hub_model_id, organization=self.push_to_hub_organization, token=self.hub_token) if self.push_to_hub_organization is not None: warnings.warn(f'`--push_to_hub_model_id` and `--push_to_hub_organization` are deprecated and will be removed in version 5 of 🤗 Transformers. Use `--hub_model_id` instead and pass the full repo name to this argument (in this case {self.hub_model_id}).', FutureWarning) else: warnings.warn(f'`--push_to_hub_model_id` is deprecated and will be removed in version 5 of 🤗 Transformers. Use `--hub_model_id` instead and pass the full repo name to this argument (in this case {self.hub_model_id}).', FutureWarning) elif self.push_to_hub_organization is not None: self.hub_model_id = f'{self.push_to_hub_organization}/{Path(self.output_dir).name}' warnings.warn(f'`--push_to_hub_organization` is deprecated and will be removed in version 5 of 🤗 Transformers. Use `--hub_model_id` instead and pass the full repo name to this argument (in this case {self.hub_model_id}).', FutureWarning) if self.eval_use_gather_object and (not is_accelerate_available('0.30.0')): raise ValueError('--eval_use_gather_object requires `accelerate` > 0.30.0. Please update your version of `accelerate`.') if self.data_seed is not None: if not is_accelerate_available('1.1.0'): raise NotImplementedError('data_seed requires `accelerate` >= 1.1.0.
Please update your version of `accelerate`.') if self.include_inputs_for_metrics: logger.warning('Using `include_inputs_for_metrics` is deprecated and will be removed in version 5 of 🤗 Transformers. Please use the `include_for_metrics` list argument instead.') self.include_for_metrics.append('inputs') if self.include_num_input_tokens_seen is True: self.include_num_input_tokens_seen = 'all' elif self.include_num_input_tokens_seen is False: self.include_num_input_tokens_seen = 'no' def __str__(self): self_as_dict = asdict(self) del self_as_dict['per_gpu_train_batch_size'] del self_as_dict['per_gpu_eval_batch_size'] self_as_dict = {k: f'<{k.upper()}>' if k.endswith('_token') else v for k, v in self_as_dict.items()} attrs_as_str = [f'{k}={v},\n' for k, v in sorted(self_as_dict.items())] return f"{self.__class__.__name__}(\n{''.join(attrs_as_str)})" __repr__ = __str__ @property def train_batch_size(self) -> int: """ The actual batch size for training (may differ from `per_gpu_train_batch_size` in distributed training). """ if self.per_gpu_train_batch_size: logger.warning('Using deprecated `--per_gpu_train_batch_size` argument which will be removed in a future version. Using `--per_device_train_batch_size` is preferred.') per_device_batch_size = self.per_gpu_train_batch_size or self.per_device_train_batch_size train_batch_size = per_device_batch_size * max(1, self.n_gpu) return train_batch_size @property def eval_batch_size(self) -> int: """ The actual batch size for evaluation (may differ from `per_gpu_eval_batch_size` in distributed training). """ if self.per_gpu_eval_batch_size: logger.warning('Using deprecated `--per_gpu_eval_batch_size` argument which will be removed in a future version. Using `--per_device_eval_batch_size` is preferred.') per_device_batch_size = self.per_gpu_eval_batch_size or self.per_device_eval_batch_size eval_batch_size = per_device_batch_size * max(1, self.n_gpu) return eval_batch_size @property def ddp_timeout_delta(self) -> timedelta: """ The actual timeout for torch.distributed.init_process_group since it expects a timedelta variable. """ return timedelta(seconds=self.ddp_timeout) @cached_property def _setup_devices(self) -> 'torch.device': requires_backends(self, ['torch']) logger.info('PyTorch: setting up devices') if not is_sagemaker_mp_enabled(): if not is_accelerate_available(): raise ImportError(f"Using the `Trainer` with `PyTorch` requires `accelerate>={ACCELERATE_MIN_VERSION}`: Please run `pip install transformers[torch]` or `pip install 'accelerate>={ACCELERATE_MIN_VERSION}'`") accelerator_state_kwargs: dict[str, Any] = {'enabled': True, 'use_configured_state': False} if isinstance(self.accelerator_config, AcceleratorConfig): accelerator_state_kwargs['use_configured_state'] = self.accelerator_config.pop('use_configured_state', False) if accelerator_state_kwargs['use_configured_state']: if PartialState._shared_state == {}: raise ValueError("Passing `'use_configured_state':True` to the AcceleratorConfig requires a pre-configured `AcceleratorState` or `PartialState` to be defined before calling `TrainingArguments`.") self.distributed_state = PartialState(cpu=self.use_cpu) if self.deepspeed and self.distributed_state.distributed_type != DistributedType.DEEPSPEED: raise RuntimeError('Tried to use an already configured `Accelerator` or `PartialState` that was not initialized for DeepSpeed, but also passed in a `deepspeed` configuration to the `TrainingArguments`.
Please set `use_configured_state:False` instead or setup your `Accelerator` or `PartialState` properly.') else: AcceleratorState._reset_state(reset_partial_state=True) self.distributed_state = None if 'ACCELERATE_USE_IPEX' not in os.environ: os.environ['ACCELERATE_USE_IPEX'] = 'false' self._n_gpu = 1 if self.use_cpu or strtobool(os.environ.get('ACCELERATE_USE_CPU', 'False')): accelerator_state_kwargs['cpu'] = True accelerator_state_kwargs['backend'] = self.ddp_backend self._n_gpu = 0 elif is_sagemaker_mp_enabled(): accelerator_state_kwargs['enabled'] = False local_rank = smp.local_rank() device = torch.device('cuda', local_rank) torch.cuda.set_device(device) elif is_sagemaker_dp_enabled(): accelerator_state_kwargs['_use_sagemaker_dp'] = True elif self.deepspeed: accelerator_state_kwargs['use_deepspeed'] = True accelerator_state_kwargs['timeout'] = timedelta(seconds=self.ddp_timeout) else: accelerator_state_kwargs['backend'] = self.ddp_backend accelerator_state_kwargs['timeout'] = timedelta(seconds=self.ddp_timeout) if accelerator_state_kwargs.pop('enabled', False) and (not accelerator_state_kwargs.pop('use_configured_state', False)): use_deepspeed = accelerator_state_kwargs.pop('use_deepspeed', False) if use_deepspeed: os.environ['ACCELERATE_USE_DEEPSPEED'] = 'true' self.distributed_state = PartialState(**accelerator_state_kwargs) if use_deepspeed: del os.environ['ACCELERATE_USE_DEEPSPEED'] if not is_sagemaker_mp_enabled(): device = self.distributed_state.device self.local_rank = self.distributed_state.local_process_index if dist.is_available() and dist.is_initialized() and (self.parallel_mode != ParallelMode.DISTRIBUTED): logger.warning('torch.distributed process group is initialized, but parallel_mode != ParallelMode.DISTRIBUTED. In order to use Torch DDP, launch your script with `python -m torch.distributed.launch') if is_torch_xla_available(): device = self.distributed_state.device self._n_gpu = 0 elif is_sagemaker_dp_enabled() or is_sagemaker_mp_enabled(): pass elif self.distributed_state.distributed_type == DistributedType.NO: if self.use_mps_device: warnings.warn('`use_mps_device` is deprecated and will be removed in version 5.0 of 🤗 Transformers. `mps` device will be used by default if available similar to the way `cuda` device is used.Therefore, no action from user is required. 
') if device.type != 'mps': raise ValueError('Either you do not have an MPS-enabled device on this machine or MacOS version is not 12.3+ or current PyTorch install was not built with MPS enabled.') if self.use_cpu: device = torch.device('cpu') elif is_torch_mps_available(): device = torch.device('mps') elif is_torch_xpu_available(): if not is_ipex_available() and (not is_accelerate_available('0.32.0.dev')): raise ImportError('Using the XPU PyTorch backend requires `accelerate>=0.32.0.dev`') device = torch.device('xpu:0') torch.xpu.set_device(device) elif is_torch_mlu_available(): device = torch.device('mlu:0') torch.mlu.set_device(device) elif is_torch_musa_available(): device = torch.device('musa:0') torch.musa.set_device(device) elif is_torch_npu_available(): device = torch.device('npu:0') torch.npu.set_device(device) elif is_torch_hpu_available(): device = torch.device('hpu:0') torch.hpu.set_device(device) else: device = torch.device('cuda:0' if torch.cuda.is_available() else os.environ.get('ACCELERATE_TORCH_DEVICE', 'cpu')) self._n_gpu = torch.cuda.device_count() if device.type == 'cuda': torch.cuda.set_device(device) return device @property def device(self) -> 'torch.device': """ The device used by this process. """ requires_backends(self, ['torch']) return self._setup_devices @property def n_gpu(self): """ The number of GPUs used by this process. Note: This will only be greater than one when you have multiple GPUs available but are not using distributed training. For distributed training, it will always be 1. """ requires_backends(self, ['torch']) if not hasattr(self, '_n_gpu'): _ = self._setup_devices return self._n_gpu @property def parallel_mode(self): """ The current mode used for parallelism if multiple GPUs/TPU cores are available. One of: - `ParallelMode.NOT_PARALLEL`: no parallelism (CPU or one GPU). - `ParallelMode.NOT_DISTRIBUTED`: several GPUs in one single process (uses `torch.nn.DataParallel`). - `ParallelMode.DISTRIBUTED`: several GPUs, each having its own process (uses `torch.nn.DistributedDataParallel`). - `ParallelMode.TPU`: several TPU cores. """ requires_backends(self, ['torch']) if is_torch_xla_available(): return ParallelMode.TPU elif is_sagemaker_mp_enabled(): return ParallelMode.SAGEMAKER_MODEL_PARALLEL elif is_sagemaker_dp_enabled(): return ParallelMode.SAGEMAKER_DATA_PARALLEL elif self.distributed_state is not None and self.distributed_state.distributed_type != DistributedType.NO or (self.distributed_state is None and self.local_rank != -1): return ParallelMode.DISTRIBUTED elif self.n_gpu > 1: return ParallelMode.NOT_DISTRIBUTED else: return ParallelMode.NOT_PARALLEL @property def world_size(self): """ The number of processes used in parallel. """ requires_backends(self, ['torch']) if self.distributed_state is not None: return self.distributed_state.num_processes elif is_sagemaker_mp_enabled(): return smp.dp_size() if not smp.state.cfg.prescaled_batch else smp.rdp_size() return 1 @property def process_index(self): """ The index of the current process used. """ requires_backends(self, ['torch']) if self.distributed_state is not None: return self.distributed_state.process_index elif is_sagemaker_mp_enabled(): return smp.dp_rank() if not smp.state.cfg.prescaled_batch else smp.rdp_rank() return 0 @property def local_process_index(self): """ The index of the local process used. 
""" requires_backends(self, ['torch']) if self.distributed_state is not None: return self.distributed_state.local_process_index elif is_sagemaker_mp_enabled(): return smp.local_rank() return 0 @property def should_log(self): """ Whether or not the current process should produce log. """ if self.log_on_each_node: return self.local_process_index == 0 elif is_sagemaker_mp_enabled(): return smp.rank() == 0 else: return self.process_index == 0 @property def should_save(self): """ Whether or not the current process should write to disk, e.g., to save models and checkpoints. """ if self.save_on_each_node: return self.local_process_index == 0 elif is_sagemaker_mp_enabled(): return smp.rank() == 0 else: return self.process_index == 0 def get_process_log_level(self): """ Returns the log level to be used depending on whether this process is the main process of node 0, main process of node non-0, or a non-main process. For the main process the log level defaults to the logging level set (`logging.WARNING` if you didn't do anything) unless overridden by `log_level` argument. For the replica processes the log level defaults to `logging.WARNING` unless overridden by `log_level_replica` argument. The choice between the main and replica process settings is made according to the return value of `should_log`. """ log_level = trainer_log_levels[self.log_level] log_level_replica = trainer_log_levels[self.log_level_replica] log_level_main_node = logging.get_verbosity() if log_level == -1 else log_level log_level_replica_node = logging.get_verbosity() if log_level_replica == -1 else log_level_replica return log_level_main_node if self.should_log else log_level_replica_node @property def place_model_on_device(self): """ Can be subclassed and overridden for some specific integrations. """ return not is_sagemaker_mp_enabled() @property def _no_sync_in_gradient_accumulation(self): """ Whether or not to use no_sync for the gradients when doing gradient accumulation. """ return not (self.deepspeed or is_sagemaker_dp_enabled() or is_sagemaker_mp_enabled() or is_torch_neuroncore_available()) @contextlib.contextmanager def main_process_first(self, local=True, desc='work'): """ A context manager for torch distributed environment where on needs to do something on the main process, while blocking replicas, and when it's finished releasing the replicas. One such use is for `datasets`'s `map` feature which to be efficient should be run once on the main process, which upon completion saves a cached version of results and which then automatically gets loaded by the replicas. Args: local (`bool`, *optional*, defaults to `True`): if `True` first means process of rank 0 of each node if `False` first means process of rank 0 of node rank 0 In multi-node environment with a shared filesystem you most likely will want to use `local=False` so that only the main process of the first node will do the processing. If however, the filesystem is not shared, then the main process of each node will need to do the processing, which is the default behavior. 
desc (`str`, *optional*, defaults to `"work"`): a work description to be used in debug logs """ if is_torch_available() and self.world_size > 1: main_process_desc = 'main local process' if local else 'main process' if self.distributed_state is not None: is_main_process = self.distributed_state.is_local_main_process if local else self.distributed_state.is_main_process elif is_sagemaker_mp_enabled(): is_main_process = smp.rank() == 0 try: if not is_main_process: logger.debug(f'{self.process_index}: waiting for the {main_process_desc} to perform {desc}') if is_torch_xla_available(): xm.rendezvous(desc) else: dist.barrier() yield finally: if is_main_process: logger.debug(f'{self.process_index}: {main_process_desc} completed {desc}, releasing all replicas') if is_torch_xla_available(): xm.rendezvous(desc) else: dist.barrier() else: yield def get_warmup_steps(self, num_training_steps: int): """ Get number of steps used for a linear warmup. """ warmup_steps = self.warmup_steps if self.warmup_steps > 0 else math.ceil(num_training_steps * self.warmup_ratio) return warmup_steps def _dict_dtype_to_str(self, d: dict[str, Any]) -> None: """ Checks whether the passed dictionary and its nested dicts have a *dtype* key and if it's not None, converts torch.dtype to a string of just the type. For example, `torch.float32` get converted into *"float32"* string, which can then be stored in the json format. """ if d.get('dtype') is not None and (not isinstance(d['dtype'], str)): d['dtype'] = str(d['dtype']).split('.')[1] for value in d.values(): if isinstance(value, dict): self._dict_dtype_to_str(value) def to_dict(self): """ Serializes this instance while replace `Enum` by their values (for JSON serialization support). It obfuscates the token values by removing their value. """ d = {field.name: getattr(self, field.name) for field in fields(self) if field.init} for k, v in d.items(): if isinstance(v, Enum): d[k] = v.value if isinstance(v, list) and len(v) > 0 and isinstance(v[0], Enum): d[k] = [x.value for x in v] if k.endswith('_token'): d[k] = f'<{k.upper()}>' if is_accelerate_available() and isinstance(v, AcceleratorConfig): d[k] = v.to_dict() if k == 'model_init_kwargs' and isinstance(v, dict) and ('quantization_config' in v): quantization_config = v.get('quantization_config') if quantization_config and (not isinstance(quantization_config, dict)): d[k]['quantization_config'] = quantization_config.to_dict() if k == 'parallelism_config' and v is not None: d[k] = v.to_json() self._dict_dtype_to_str(d) return d def to_json_string(self): """ Serializes this instance to a JSON string. """ return json.dumps(self.to_dict(), indent=2) def to_sanitized_dict(self) -> dict[str, Any]: """ Sanitized serialization to use with TensorBoard’s hparams """ d = self.to_dict() d = {**d, **{'train_batch_size': self.train_batch_size, 'eval_batch_size': self.eval_batch_size}} valid_types = [bool, int, float, str] if is_torch_available(): valid_types.append(torch.Tensor) return {k: v if type(v) in valid_types else str(v) for k, v in d.items()} def set_training(self, learning_rate: float=5e-05, batch_size: int=8, weight_decay: float=0, num_epochs: float=3, max_steps: int=-1, gradient_accumulation_steps: int=1, seed: int=42, gradient_checkpointing: bool=False): """ A method that regroups all basic arguments linked to the training. <Tip> Calling this method will automatically set `self.do_train` to `True`. </Tip> Args: learning_rate (`float`, *optional*, defaults to 5e-5): The initial learning rate for the optimizer. 
batch_size (`int` *optional*, defaults to 8): The batch size per device (GPU/TPU core/CPU...) used for training. weight_decay (`float`, *optional*, defaults to 0): The weight decay to apply (if not zero) to all layers except all bias and LayerNorm weights in the optimizer. num_train_epochs(`float`, *optional*, defaults to 3.0): Total number of training epochs to perform (if not an integer, will perform the decimal part percents of the last epoch before stopping training). max_steps (`int`, *optional*, defaults to -1): If set to a positive number, the total number of training steps to perform. Overrides `num_train_epochs`. For a finite dataset, training is reiterated through the dataset (if all data is exhausted) until `max_steps` is reached. gradient_accumulation_steps (`int`, *optional*, defaults to 1): Number of updates steps to accumulate the gradients for, before performing a backward/update pass. <Tip warning={true}> When using gradient accumulation, one step is counted as one step with backward pass. Therefore, logging, evaluation, save will be conducted every `gradient_accumulation_steps * xxx_step` training examples. </Tip> seed (`int`, *optional*, defaults to 42): Random seed that will be set at the beginning of training. To ensure reproducibility across runs, use the [`~Trainer.model_init`] function to instantiate the model if it has some randomly initialized parameters. gradient_checkpointing (`bool`, *optional*, defaults to `False`): If True, use gradient checkpointing to save memory at the expense of slower backward pass. Example: ```py >>> from transformers import TrainingArguments >>> args = TrainingArguments("working_dir") >>> args = args.set_training(learning_rate=1e-4, batch_size=32) >>> args.learning_rate 1e-4 ``` """ self.do_train = True self.learning_rate = learning_rate self.per_device_train_batch_size = batch_size self.weight_decay = weight_decay self.num_train_epochs = num_epochs self.max_steps = max_steps self.gradient_accumulation_steps = gradient_accumulation_steps self.seed = seed self.gradient_checkpointing = gradient_checkpointing return self def set_evaluate(self, strategy: Union[str, IntervalStrategy]='no', steps: int=500, batch_size: int=8, accumulation_steps: Optional[int]=None, delay: Optional[float]=None, loss_only: bool=False, jit_mode: bool=False): """ A method that regroups all arguments linked to evaluation. Args: strategy (`str` or [`~trainer_utils.IntervalStrategy`], *optional*, defaults to `"no"`): The evaluation strategy to adopt during training. Possible values are: - `"no"`: No evaluation is done during training. - `"steps"`: Evaluation is done (and logged) every `steps`. - `"epoch"`: Evaluation is done at the end of each epoch. Setting a `strategy` different from `"no"` will set `self.do_eval` to `True`. steps (`int`, *optional*, defaults to 500): Number of update steps between two evaluations if `strategy="steps"`. batch_size (`int` *optional*, defaults to 8): The batch size per device (GPU/TPU core/CPU...) used for evaluation. accumulation_steps (`int`, *optional*): Number of predictions steps to accumulate the output tensors for, before moving the results to the CPU. If left unset, the whole predictions are accumulated on GPU/TPU before being moved to the CPU (faster but requires more memory). delay (`float`, *optional*): Number of epochs or steps to wait for before the first evaluation can be performed, depending on the eval_strategy. loss_only (`bool`, *optional*, defaults to `False`): Ignores all outputs except the loss. 
jit_mode (`bool`, *optional*): Whether or not to use PyTorch jit trace for inference. Example: ```py >>> from transformers import TrainingArguments >>> args = TrainingArguments("working_dir") >>> args = args.set_evaluate(strategy="steps", steps=100) >>> args.eval_steps 100 ``` """ self.eval_strategy = IntervalStrategy(strategy) if self.eval_strategy == IntervalStrategy.STEPS and steps == 0: raise ValueError("Setting `strategy` as 'steps' requires a positive value for `steps`.") self.do_eval = self.eval_strategy != IntervalStrategy.NO self.eval_steps = steps self.per_device_eval_batch_size = batch_size self.eval_accumulation_steps = accumulation_steps self.eval_delay = delay self.prediction_loss_only = loss_only self.jit_mode_eval = jit_mode return self def set_testing(self, batch_size: int=8, loss_only: bool=False, jit_mode: bool=False): """ A method that regroups all basic arguments linked to testing on a held-out dataset. <Tip> Calling this method will automatically set `self.do_predict` to `True`. </Tip> Args: batch_size (`int` *optional*, defaults to 8): The batch size per device (GPU/TPU core/CPU...) used for testing. loss_only (`bool`, *optional*, defaults to `False`): Ignores all outputs except the loss. jit_mode (`bool`, *optional*): Whether or not to use PyTorch jit trace for inference. Example: ```py >>> from transformers import TrainingArguments >>> args = TrainingArguments("working_dir") >>> args = args.set_testing(batch_size=32) >>> args.per_device_eval_batch_size 32 ``` """ self.do_predict = True self.per_device_eval_batch_size = batch_size self.prediction_loss_only = loss_only self.jit_mode_eval = jit_mode return self def set_save(self, strategy: Union[str, IntervalStrategy]='steps', steps: int=500, total_limit: Optional[int]=None, on_each_node: bool=False): """ A method that regroups all arguments linked to checkpoint saving. Args: strategy (`str` or [`~trainer_utils.IntervalStrategy`], *optional*, defaults to `"steps"`): The checkpoint save strategy to adopt during training. Possible values are: - `"no"`: No save is done during training. - `"epoch"`: Save is done at the end of each epoch. - `"steps"`: Save is done every `save_steps`. steps (`int`, *optional*, defaults to 500): Number of updates steps before two checkpoint saves if `strategy="steps"`. total_limit (`int`, *optional*): If a value is passed, will limit the total amount of checkpoints. Deletes the older checkpoints in `output_dir`. on_each_node (`bool`, *optional*, defaults to `False`): When doing multi-node distributed training, whether to save models and checkpoints on each node, or only on the main one. This should not be activated when the different nodes use the same storage as the files will be saved with the same names for each node. 
Example: ```py >>> from transformers import TrainingArguments >>> args = TrainingArguments("working_dir") >>> args = args.set_save(strategy="steps", steps=100) >>> args.save_steps 100 ``` """ self.save_strategy = SaveStrategy(strategy) if self.save_strategy == SaveStrategy.STEPS and steps == 0: raise ValueError("Setting `strategy` as 'steps' requires a positive value for `steps`.") self.save_steps = steps self.save_total_limit = total_limit self.save_on_each_node = on_each_node return self def set_logging(self, strategy: Union[str, IntervalStrategy]='steps', steps: int=500, report_to: Union[str, list[str]]='none', level: str='passive', first_step: bool=False, nan_inf_filter: bool=False, on_each_node: bool=False, replica_level: str='passive'): """ A method that regroups all arguments linked to logging. Args: strategy (`str` or [`~trainer_utils.IntervalStrategy`], *optional*, defaults to `"steps"`): The logging strategy to adopt during training. Possible values are: - `"no"`: No logging is done during training. - `"epoch"`: Logging is done at the end of each epoch. - `"steps"`: Logging is done every `logging_steps`. steps (`int`, *optional*, defaults to 500): Number of update steps between two logs if `strategy="steps"`. level (`str`, *optional*, defaults to `"passive"`): Logger log level to use on the main process. Possible choices are the log levels as strings: `"debug"`, `"info"`, `"warning"`, `"error"` and `"critical"`, plus a `"passive"` level which doesn't set anything and lets the application set the level. report_to (`str` or `list[str]`, *optional*, defaults to `"all"`): The list of integrations to report the results and logs to. Supported platforms are `"azure_ml"`, `"clearml"`, `"codecarbon"`, `"comet_ml"`, `"dagshub"`, `"dvclive"`, `"flyte"`, `"mlflow"`, `"neptune"`, `"swanlab"`, `"tensorboard"`, `"trackio"` and `"wandb"`. Use `"all"` to report to all integrations installed, `"none"` for no integrations. first_step (`bool`, *optional*, defaults to `False`): Whether to log and evaluate the first `global_step` or not. nan_inf_filter (`bool`, *optional*, defaults to `True`): Whether to filter `nan` and `inf` losses for logging. If set to `True` the loss of every step that is `nan` or `inf` is filtered and the average loss of the current logging window is taken instead. <Tip> `nan_inf_filter` only influences the logging of loss values, it does not change the behavior the gradient is computed or applied to the model. </Tip> on_each_node (`bool`, *optional*, defaults to `True`): In multinode distributed training, whether to log using `log_level` once per node, or only on the main node. replica_level (`str`, *optional*, defaults to `"passive"`): Logger log level to use on replicas. 
Same choices as `log_level` Example: ```py >>> from transformers import TrainingArguments >>> args = TrainingArguments("working_dir") >>> args = args.set_logging(strategy="steps", steps=100) >>> args.logging_steps 100 ``` """ self.logging_strategy = IntervalStrategy(strategy) if self.logging_strategy == IntervalStrategy.STEPS and steps == 0: raise ValueError("Setting `strategy` as 'steps' requires a positive value for `steps`.") self.logging_steps = steps self.report_to = report_to self.log_level = level self.logging_first_step = first_step self.logging_nan_inf_filter = nan_inf_filter self.log_on_each_node = on_each_node self.log_level_replica = replica_level return self def set_push_to_hub(self, model_id: str, strategy: Union[str, HubStrategy]='every_save', token: Optional[str]=None, private_repo: Optional[bool]=None, always_push: bool=False, revision: Optional[str]=None): """ A method that regroups all arguments linked to synchronizing checkpoints with the Hub. <Tip> Calling this method will set `self.push_to_hub` to `True`, which means the `output_dir` will begin a git directory synced with the repo (determined by `model_id`) and the content will be pushed each time a save is triggered (depending on your `self.save_strategy`). Calling [`~Trainer.save_model`] will also trigger a push. </Tip> Args: model_id (`str`): The name of the repository to keep in sync with the local *output_dir*. It can be a simple model ID in which case the model will be pushed in your namespace. Otherwise it should be the whole repository name, for instance `"user_name/model"`, which allows you to push to an organization you are a member of with `"organization_name/model"`. strategy (`str` or [`~trainer_utils.HubStrategy`], *optional*, defaults to `"every_save"`): Defines the scope of what is pushed to the Hub and when. Possible values are: - `"end"`: push the model, its configuration, the processing_class e.g. tokenizer (if passed along to the [`Trainer`]) and a draft of a model card when the [`~Trainer.save_model`] method is called. - `"every_save"`: push the model, its configuration, the processing_class e.g. tokenizer (if passed along to the [`Trainer`]) and a draft of a model card each time there is a model save. The pushes are asynchronous to not block training, and in case the save are very frequent, a new push is only attempted if the previous one is finished. A last push is made with the final model at the end of training. - `"checkpoint"`: like `"every_save"` but the latest checkpoint is also pushed in a subfolder named last-checkpoint, allowing you to resume training easily with `trainer.train(resume_from_checkpoint="last-checkpoint")`. - `"all_checkpoints"`: like `"checkpoint"` but all checkpoints are pushed like they appear in the output folder (so you will get one checkpoint folder per folder in your final repository) token (`str`, *optional*): The token to use to push the model to the Hub. Will default to the token in the cache folder obtained with `hf auth login`. private_repo (`bool`, *optional*, defaults to `False`): Whether to make the repo private. If `None` (default), the repo will be public unless the organization's default is private. This value is ignored if the repo already exists. always_push (`bool`, *optional*, defaults to `False`): Unless this is `True`, the `Trainer` will skip pushing a checkpoint when the previous push is not finished. revision (`str`, *optional*): The revision to use when pushing to the Hub. Can be a branch name, a tag, or a commit hash. 
Example: ```py >>> from transformers import TrainingArguments >>> args = TrainingArguments("working_dir") >>> args = args.set_push_to_hub("me/awesome-model") >>> args.hub_model_id 'me/awesome-model' ``` """ self.push_to_hub = True self.hub_model_id = model_id self.hub_strategy = HubStrategy(strategy) self.hub_token = token self.hub_private_repo = private_repo self.hub_always_push = always_push self.hub_revision = revision return self def set_optimizer(self, name: Union[str, OptimizerNames]='adamw_torch', learning_rate: float=5e-05, weight_decay: float=0, beta1: float=0.9, beta2: float=0.999, epsilon: float=1e-08, args: Optional[str]=None): """ A method that regroups all arguments linked to the optimizer and its hyperparameters. Args: name (`str` or [`training_args.OptimizerNames`], *optional*, defaults to `"adamw_torch"`): The optimizer to use: `"adamw_torch"`, `"adamw_torch_fused"`, `"adamw_apex_fused"`, `"adamw_anyprecision"` or `"adafactor"`. learning_rate (`float`, *optional*, defaults to 5e-5): The initial learning rate. weight_decay (`float`, *optional*, defaults to 0): The weight decay to apply (if not zero) to all layers except all bias and LayerNorm weights. beta1 (`float`, *optional*, defaults to 0.9): The beta1 hyperparameter for the adam optimizer or its variants. beta2 (`float`, *optional*, defaults to 0.999): The beta2 hyperparameter for the adam optimizer or its variants. epsilon (`float`, *optional*, defaults to 1e-8): The epsilon hyperparameter for the adam optimizer or its variants. args (`str`, *optional*): Optional arguments that are supplied to AnyPrecisionAdamW (only useful when `optim="adamw_anyprecision"`). Example: ```py >>> from transformers import TrainingArguments >>> args = TrainingArguments("working_dir") >>> args = args.set_optimizer(name="adamw_torch", beta1=0.8) >>> args.optim 'adamw_torch' ``` """ self.optim = OptimizerNames(name) self.learning_rate = learning_rate self.weight_decay = weight_decay self.adam_beta1 = beta1 self.adam_beta2 = beta2 self.adam_epsilon = epsilon self.optim_args = args return self def set_lr_scheduler(self, name: Union[str, SchedulerType]='linear', num_epochs: float=3.0, max_steps: int=-1, warmup_ratio: float=0, warmup_steps: int=0): """ A method that regroups all arguments linked to the learning rate scheduler and its hyperparameters. Args: name (`str` or [`SchedulerType`], *optional*, defaults to `"linear"`): The scheduler type to use. See the documentation of [`SchedulerType`] for all possible values. num_epochs(`float`, *optional*, defaults to 3.0): Total number of training epochs to perform (if not an integer, will perform the decimal part percents of the last epoch before stopping training). max_steps (`int`, *optional*, defaults to -1): If set to a positive number, the total number of training steps to perform. Overrides `num_train_epochs`. For a finite dataset, training is reiterated through the dataset (if all data is exhausted) until `max_steps` is reached. warmup_ratio (`float`, *optional*, defaults to 0.0): Ratio of total training steps used for a linear warmup from 0 to `learning_rate`. warmup_steps (`int`, *optional*, defaults to 0): Number of steps used for a linear warmup from 0 to `learning_rate`. Overrides any effect of `warmup_ratio`. 
Example: ```py >>> from transformers import TrainingArguments >>> args = TrainingArguments("working_dir") >>> args = args.set_lr_scheduler(name="cosine", warmup_ratio=0.05) >>> args.warmup_ratio 0.05 ``` """ self.lr_scheduler_type = SchedulerType(name) self.num_train_epochs = num_epochs self.max_steps = max_steps self.warmup_ratio = warmup_ratio self.warmup_steps = warmup_steps return self def set_dataloader(self, train_batch_size: int=8, eval_batch_size: int=8, drop_last: bool=False, num_workers: int=0, pin_memory: bool=True, persistent_workers: bool=False, prefetch_factor: Optional[int]=None, auto_find_batch_size: bool=False, ignore_data_skip: bool=False, sampler_seed: Optional[int]=None): """ A method that regroups all arguments linked to the dataloaders creation. Args: drop_last (`bool`, *optional*, defaults to `False`): Whether to drop the last incomplete batch (if the length of the dataset is not divisible by the batch size) or not. num_workers (`int`, *optional*, defaults to 0): Number of subprocesses to use for data loading (PyTorch only). 0 means that the data will be loaded in the main process. pin_memory (`bool`, *optional*, defaults to `True`): Whether you want to pin memory in data loaders or not. Will default to `True`. persistent_workers (`bool`, *optional*, defaults to `False`): If True, the data loader will not shut down the worker processes after a dataset has been consumed once. This allows to maintain the workers Dataset instances alive. Can potentially speed up training, but will increase RAM usage. Will default to `False`. prefetch_factor (`int`, *optional*): Number of batches loaded in advance by each worker. 2 means there will be a total of 2 * num_workers batches prefetched across all workers. auto_find_batch_size (`bool`, *optional*, defaults to `False`) Whether to find a batch size that will fit into memory automatically through exponential decay, avoiding CUDA Out-of-Memory errors. Requires accelerate to be installed (`pip install accelerate`) ignore_data_skip (`bool`, *optional*, defaults to `False`): When resuming training, whether or not to skip the epochs and batches to get the data loading at the same stage as in the previous training. If set to `True`, the training will begin faster (as that skipping step can take a long time) but will not yield the same results as the interrupted training would have. sampler_seed (`int`, *optional*): Random seed to be used with data samplers. If not set, random generators for data sampling will use the same seed as `self.seed`. This can be used to ensure reproducibility of data sampling, independent of the model seed. Example: ```py >>> from transformers import TrainingArguments >>> args = TrainingArguments("working_dir") >>> args = args.set_dataloader(train_batch_size=16, eval_batch_size=64) >>> args.per_device_train_batch_size 16 ``` """ self.per_device_train_batch_size = train_batch_size self.per_device_eval_batch_size = eval_batch_size self.dataloader_drop_last = drop_last self.dataloader_num_workers = num_workers self.dataloader_pin_memory = pin_memory self.dataloader_persistent_workers = persistent_workers self.dataloader_prefetch_factor = prefetch_factor self.auto_find_batch_size = auto_find_batch_size self.ignore_data_skip = ignore_data_skip self.data_seed = sampler_seed return self
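The `set_*` helpers above each end in `return self`, so they compose into a fluent configuration chain, and `get_warmup_steps` resolves warmup from either the absolute step count or `warmup_ratio`. A minimal usage sketch, assuming a working torch install (all values are illustrative only):

```py
from transformers import TrainingArguments

args = (
    TrainingArguments("working_dir")
    .set_training(learning_rate=1e-4, batch_size=32)
    .set_lr_scheduler(name="cosine", warmup_ratio=0.1, num_epochs=5)
)

# warmup_steps stays at its default of 0, so get_warmup_steps falls back to
# the ratio: math.ceil(1000 * 0.1) == 100
assert args.get_warmup_steps(num_training_steps=1000) == 100
```

Note that each `set_*` helper resets every argument it owns (here `set_lr_scheduler` also sets `num_train_epochs`), so overlapping options should be passed to the last helper in the chain.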
null
49
30
47
5
28
14
7
0.61
0
32
10
3
32
4
32
32
2,865
275
1,605
309
1,474
987
714
214
676
131
0
4
239
6,544
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/training_args_seq2seq.py
transformers.training_args_seq2seq.Seq2SeqTrainingArguments
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union

from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings


@dataclass
@add_start_docstrings(TrainingArguments.__doc__)
class Seq2SeqTrainingArguments(TrainingArguments):
    """
    Args:
        predict_with_generate (`bool`, *optional*, defaults to `False`):
            Whether to use generate to calculate generative metrics (ROUGE, BLEU).
        generation_max_length (`int`, *optional*):
            The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default to the `max_length` value of the model configuration.
        generation_num_beams (`int`, *optional*):
            The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default to the `num_beams` value of the model configuration.
        generation_config (`str` or `Path` or [`~generation.GenerationConfig`], *optional*):
            Allows loading a [`~generation.GenerationConfig`] from the `from_pretrained` method. This can be either:

            - a string, the *model id* of a pretrained model configuration hosted inside a model repo on huggingface.co.
            - a path to a *directory* containing a configuration file saved using the [`~GenerationConfig.save_pretrained`] method, e.g., `./my_model_directory/`.
            - a [`~generation.GenerationConfig`] object.
    """
    sortish_sampler: bool = field(default=False, metadata={'help': 'Whether to use SortishSampler or not.'})
    predict_with_generate: bool = field(default=False, metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'})
    generation_max_length: Optional[int] = field(default=None, metadata={'help': 'The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default to the `max_length` value of the model configuration.'})
    generation_num_beams: Optional[int] = field(default=None, metadata={'help': 'The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default to the `num_beams` value of the model configuration.'})
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(default=None, metadata={'help': 'Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'})

    def to_dict(self):
        """
        Serializes this instance while replacing `Enum` members by their values and `GenerationConfig` instances by dictionaries (for JSON serialization support). It obfuscates the token values by removing their value.
        """
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d

@dataclass
@add_start_docstrings(TrainingArguments.__doc__)
class Seq2SeqTrainingArguments(TrainingArguments):
    '''
    Args:
        predict_with_generate (`bool`, *optional*, defaults to `False`):
            Whether to use generate to calculate generative metrics (ROUGE, BLEU).
        generation_max_length (`int`, *optional*):
            The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default to the `max_length` value of the model configuration.
        generation_num_beams (`int`, *optional*):
            The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default to the `num_beams` value of the model configuration.
        generation_config (`str` or `Path` or [`~generation.GenerationConfig`], *optional*):
            Allows loading a [`~generation.GenerationConfig`] from the `from_pretrained` method. This can be either:

            - a string, the *model id* of a pretrained model configuration hosted inside a model repo on huggingface.co.
            - a path to a *directory* containing a configuration file saved using the [`~GenerationConfig.save_pretrained`] method, e.g., `./my_model_directory/`.
            - a [`~generation.GenerationConfig`] object.
    '''

    def to_dict(self):
        '''
        Serializes this instance while replacing `Enum` members by their values and `GenerationConfig` instances by dictionaries (for JSON serialization support). It obfuscates the token values by removing their value.
        '''
        pass
4
2
11
0
6
5
3
0.66
1
2
1
0
1
0
1
33
61
3
35
9
33
23
12
9
10
3
1
2
3
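A short usage sketch for the record above, assuming torch and accelerate are installed (field values are illustrative). It exercises the one behavior `Seq2SeqTrainingArguments` adds on top of `TrainingArguments.to_dict`, namely converting a `GenerationConfig` back into a plain dict:

```py
from transformers import GenerationConfig, Seq2SeqTrainingArguments

args = Seq2SeqTrainingArguments(
    output_dir="working_dir",
    predict_with_generate=True,
    generation_max_length=128,
    generation_config=GenerationConfig(num_beams=4),
)

# to_dict() replaces the GenerationConfig object with a dict so the
# arguments stay JSON-serializable.
assert isinstance(args.to_dict()["generation_config"], dict)
```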
6,545
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/utils/backbone_utils.py
transformers.utils.backbone_utils.BackboneConfigMixin
from typing import TYPE_CHECKING, Optional, Union


class BackboneConfigMixin:
    """
    A Mixin to support handling the `out_features` and `out_indices` attributes for the backbone configurations.
    """

    @property
    def out_features(self):
        return self._out_features

    @out_features.setter
    def out_features(self, out_features: list[str]):
        """
        Set the out_features attribute. This will also update the out_indices attribute to match the new out_features.
        """
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=None, stage_names=self.stage_names
        )

    @property
    def out_indices(self):
        return self._out_indices

    @out_indices.setter
    def out_indices(self, out_indices: Union[tuple[int], list[int]]):
        """
        Set the out_indices attribute. This will also update the out_features attribute to match the new out_indices.
        """
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=None, out_indices=out_indices, stage_names=self.stage_names
        )

    def to_dict(self):
        """
        Serializes this instance to a Python dictionary. Override the default `to_dict()` from `PretrainedConfig` to
        include the `out_features` and `out_indices` attributes.
        """
        output = super().to_dict()
        output['out_features'] = output.pop('_out_features')
        output['out_indices'] = output.pop('_out_indices')
        return output

class BackboneConfigMixin:
    '''
    A Mixin to support handling the `out_features` and `out_indices` attributes for the backbone configurations.
    '''

    @property
    def out_features(self):
        pass

    @out_features.setter
    def out_features(self):
        '''
        Set the out_features attribute. This will also update the out_indices attribute to match the new out_features.
        '''
        pass

    @property
    def out_indices(self):
        pass

    @out_indices.setter
    def out_indices(self):
        '''
        Set the out_indices attribute. This will also update the out_features attribute to match the new out_indices.
        '''
        pass

    def to_dict(self):
        '''
        Serializes this instance to a Python dictionary. Override the default `to_dict()` from `PretrainedConfig` to
        include the `out_features` and `out_indices` attributes.
        '''
        pass
10
4
5
0
3
2
1
0.59
0
3
0
19
5
2
5
5
40
5
22
12
12
13
14
8
8
1
0
0
5
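To see the paired setters in action, here is a sketch with a made-up `ToyBackboneConfig` (hypothetical, for illustration; real configs also subclass `PretrainedConfig`, but the setters themselves only need `stage_names` and the two private attributes):

```py
from transformers.utils.backbone_utils import (
    BackboneConfigMixin,
    get_aligned_output_features_output_indices,
)


class ToyBackboneConfig(BackboneConfigMixin):  # hypothetical toy class
    def __init__(self):
        self.stage_names = ["stem", "stage1", "stage2", "stage3"]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=None, out_indices=None, stage_names=self.stage_names
        )


cfg = ToyBackboneConfig()
cfg.out_indices = [1, 3]       # the setter realigns out_features...
print(cfg.out_features)        # ['stage1', 'stage3']
cfg.out_features = ["stage2"]  # ...and vice versa
print(cfg.out_indices)         # [2]
```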
6,546
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/utils/backbone_utils.py
transformers.utils.backbone_utils.BackboneMixin
import inspect
from typing import TYPE_CHECKING, Optional, Union


class BackboneMixin:
    backbone_type: Optional[BackboneType] = None
    has_attentions: bool = True

    def _init_timm_backbone(self, config) -> None:
        """
        Initialize the backbone model from timm. The backbone must already be loaded to self._backbone.
        """
        if getattr(self, '_backbone', None) is None:
            raise ValueError('self._backbone must be set before calling _init_timm_backbone')
        self.stage_names = [stage['module'] for stage in self._backbone.feature_info.info]
        self.num_features = [stage['num_chs'] for stage in self._backbone.feature_info.info]
        out_indices = list(self._backbone.feature_info.out_indices)
        out_features = self._backbone.feature_info.module_name()
        verify_out_features_out_indices(out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
        self._out_features, self._out_indices = (out_features, out_indices)

    def _init_transformers_backbone(self, config) -> None:
        stage_names = getattr(config, 'stage_names')
        out_features = getattr(config, 'out_features', None)
        out_indices = getattr(config, 'out_indices', None)
        self.stage_names = stage_names
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=stage_names
        )
        self.num_features = None

    def _init_backbone(self, config) -> None:
        """
        Method to initialize the backbone. This method is called by the constructor of the base class after the
        pretrained model weights have been loaded.
        """
        self.config = config
        self.use_timm_backbone = getattr(config, 'use_timm_backbone', False)
        self.backbone_type = BackboneType.TIMM if self.use_timm_backbone else BackboneType.TRANSFORMERS
        if self.backbone_type == BackboneType.TIMM:
            self._init_timm_backbone(config)
        elif self.backbone_type == BackboneType.TRANSFORMERS:
            self._init_transformers_backbone(config)
        else:
            raise ValueError(f'backbone_type {self.backbone_type} not supported.')

    @property
    def out_features(self):
        return self._out_features

    @out_features.setter
    def out_features(self, out_features: list[str]):
        """
        Set the out_features attribute. This will also update the out_indices attribute to match the new out_features.
        """
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=None, stage_names=self.stage_names
        )

    @property
    def out_indices(self):
        return self._out_indices

    @out_indices.setter
    def out_indices(self, out_indices: Union[tuple[int], list[int]]):
        """
        Set the out_indices attribute. This will also update the out_features attribute to match the new out_indices.
        """
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=None, out_indices=out_indices, stage_names=self.stage_names
        )

    @property
    def out_feature_channels(self):
        return {stage: self.num_features[i] for i, stage in enumerate(self.stage_names)}

    @property
    def channels(self):
        return [self.out_feature_channels[name] for name in self.out_features]

    def forward_with_filtered_kwargs(self, *args, **kwargs):
        if not self.has_attentions:
            kwargs.pop('output_attentions', None)
        if self.backbone_type == BackboneType.TIMM:
            signature = dict(inspect.signature(self.forward).parameters)
            kwargs = {k: v for k, v in kwargs.items() if k in signature}
        return self(*args, **kwargs)

    def forward(self, pixel_values, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, return_dict: Optional[bool]=None):
        raise NotImplementedError('This method should be implemented by the derived class.')

    def to_dict(self):
        """
        Serializes this instance to a Python dictionary. Override the default `to_dict()` from `PretrainedConfig` to
        include the `out_features` and `out_indices` attributes.
        """
        output = super().to_dict()
        output['out_features'] = output.pop('_out_features')
        output['out_indices'] = output.pop('_out_indices')
        return output

class BackboneMixin:
    def _init_timm_backbone(self, config) -> None:
        '''
        Initialize the backbone model from timm. The backbone must already be loaded to self._backbone.
        '''
        pass

    def _init_transformers_backbone(self, config) -> None:
        pass

    def _init_backbone(self, config) -> None:
        '''
        Method to initialize the backbone. This method is called by the constructor of the base class after the
        pretrained model weights have been loaded.
        '''
        pass

    @property
    def out_features(self):
        pass

    @out_features.setter
    def out_features(self):
        '''
        Set the out_features attribute. This will also update the out_indices attribute to match the new out_features.
        '''
        pass

    @property
    def out_indices(self):
        pass

    @out_indices.setter
    def out_indices(self):
        '''
        Set the out_indices attribute. This will also update the out_features attribute to match the new out_indices.
        '''
        pass

    @property
    def out_feature_channels(self):
        pass

    @property
    def channels(self):
        pass

    def forward_with_filtered_kwargs(self, *args, **kwargs):
        pass

    def forward(self, pixel_values, output_hidden_states=None, output_attentions=None, return_dict=None):
        pass

    def to_dict(self):
        '''
        Serializes this instance to a Python dictionary. Override the default `to_dict()` from `PretrainedConfig` to
        include the `out_features` and `out_indices` attributes.
        '''
        pass
19
5
8
1
5
2
1
0.37
0
10
1
19
12
6
12
12
115
18
71
39
46
26
49
27
36
4
0
1
16
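In practice this mixin is reached through a backbone model class; a sketch with `AutoBackbone`, assuming network access to the `microsoft/resnet-50` checkpoint:

```py
from transformers import AutoBackbone

backbone = AutoBackbone.from_pretrained("microsoft/resnet-50", out_indices=[2, 4])

# _init_backbone has populated stage_names and num_features, so the
# convenience properties line up with the requested indices:
print(backbone.out_features)  # stage names for indices 2 and 4
print(backbone.channels)      # channel counts for those stages
```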
6,547
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/utils/backbone_utils.py
transformers.utils.backbone_utils.BackboneType
import enum


class BackboneType(enum.Enum):
    TIMM = 'timm'
    TRANSFORMERS = 'transformers'

class BackboneType(enum.Enum):
    pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
49
3
0
3
3
2
0
3
3
2
0
4
0
0
6,548
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/utils/chat_template_utils.py
transformers.utils.chat_template_utils.DocstringParsingException
class DocstringParsingException(Exception):
    """Exception raised for errors in parsing docstrings to generate JSON schemas"""
    pass

class DocstringParsingException(Exception):
    '''Exception raised for errors in parsing docstrings to generate JSON schemas'''
    pass
1
1
0
0
0
0
0
0.5
1
0
0
0
0
0
0
10
4
1
2
1
1
1
2
1
1
0
3
0
0
6,549
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/utils/chat_template_utils.py
transformers.utils.chat_template_utils.TypeHintParsingException
class TypeHintParsingException(Exception):
    """Exception raised for errors in parsing type hints to generate JSON schemas"""
    pass

class TypeHintParsingException(Exception):
    '''Exception raised for errors in parsing type hints to generate JSON schemas'''
    pass
1
1
0
0
0
0
0
0.5
1
0
0
0
0
0
0
10
4
1
2
1
1
1
2
1
1
0
3
0
0
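Both exception classes surface from `get_json_schema` in the same module when a function cannot be turned into a tool schema. A sketch of the failure path (here the missing type hint on `a` triggers the `TypeHintParsingException` branch; a docstring without argument descriptions would trigger the `DocstringParsingException` branch instead):

```py
from transformers.utils.chat_template_utils import (
    DocstringParsingException,
    TypeHintParsingException,
    get_json_schema,
)


def add(a, b: int) -> int:
    """
    Adds two numbers.

    Args:
        a: The first number.
        b: The second number.
    """
    return a + b


try:
    get_json_schema(add)
except (TypeHintParsingException, DocstringParsingException) as err:
    print(err)  # e.g. a complaint that `a` is missing a type hint
```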
6,550
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/utils/deprecation.py
transformers.utils.deprecation.Action
from . import ExplicitEnum, is_torch_available, is_torchdynamo_compiling


class Action(ExplicitEnum):
    NONE = 'none'
    NOTIFY = 'notify'
    NOTIFY_ALWAYS = 'notify_always'
    RAISE = 'raise'

class Action(ExplicitEnum):
    pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
5
0
5
5
4
0
5
5
4
0
1
0
0
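The enum itself is internal; judging from the module's imports, it selects how the `deprecate_kwarg` decorator in the same file reacts when a deprecated keyword is used (`NOTIFY` warns, `NOTIFY_ALWAYS` warns even under torchdynamo compilation, `RAISE` turns use into an error). A hedged sketch of the default `NOTIFY` path:

```py
from transformers.utils.deprecation import deprecate_kwarg


@deprecate_kwarg("old_size", version="5.0.0", new_name="size")
def resize(size=None):  # hypothetical function, for illustration
    return size


resize(old_size=224)  # warns about the deprecation and forwards the value to `size`
```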
6,551
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/utils/dummy_detectron2_objects.py
transformers.utils.dummy_detectron2_objects.LayoutLMv2Model
from ..utils import requires_backends


class LayoutLMv2Model:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['detectron2'])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['detectron2'])

class LayoutLMv2Model:
    def __init__(self, *args, **kwargs):
        pass

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        pass
4
0
2
0
2
0
1
0
0
0
0
0
1
0
2
2
7
1
6
4
2
0
5
3
2
1
0
0
2
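What the placeholder above buys you, sketched below: in an environment without detectron2, instantiating the class fails with an explanatory `ImportError` from `requires_backends` instead of an opaque `ModuleNotFoundError` at import time.

```py
from transformers.utils.dummy_detectron2_objects import LayoutLMv2Model

try:
    LayoutLMv2Model()
except ImportError as err:
    print(err)  # includes installation instructions for the missing backend
```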
6,552
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/utils/dummy_essentia_and_librosa_and_pretty_midi_and_scipy_and_torch_objects.py
transformers.utils.dummy_essentia_and_librosa_and_pretty_midi_and_scipy_and_torch_objects.Pop2PianoFeatureExtractor
from ..utils import DummyObject, requires_backends


class Pop2PianoFeatureExtractor(metaclass=DummyObject):
    _backends = ['essentia', 'librosa', 'pretty_midi', 'scipy', 'torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['essentia', 'librosa', 'pretty_midi', 'scipy', 'torch'])

class Pop2PianoFeatureExtractor(metaclass=DummyObject):
    def __init__(self, *args, **kwargs):
        pass
2
0
2
0
2
0
1
0
1
0
0
0
1
0
1
1
5
1
4
3
2
0
4
3
2
1
1
0
1
6,553
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/utils/dummy_essentia_and_librosa_and_pretty_midi_and_scipy_and_torch_objects.py
transformers.utils.dummy_essentia_and_librosa_and_pretty_midi_and_scipy_and_torch_objects.Pop2PianoProcessor
from ..utils import DummyObject, requires_backends


class Pop2PianoProcessor(metaclass=DummyObject):
    _backends = ['essentia', 'librosa', 'pretty_midi', 'scipy', 'torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['essentia', 'librosa', 'pretty_midi', 'scipy', 'torch'])

class Pop2PianoProcessor(metaclass=DummyObject):
    def __init__(self, *args, **kwargs):
        pass
2
0
2
0
2
0
1
0
1
0
0
0
1
0
1
1
5
1
4
3
2
0
4
3
2
1
1
0
1
6,554
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/utils/dummy_essentia_and_librosa_and_pretty_midi_and_scipy_and_torch_objects.py
transformers.utils.dummy_essentia_and_librosa_and_pretty_midi_and_scipy_and_torch_objects.Pop2PianoTokenizer
from ..utils import DummyObject, requires_backends


class Pop2PianoTokenizer(metaclass=DummyObject):
    _backends = ['essentia', 'librosa', 'pretty_midi', 'scipy', 'torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['essentia', 'librosa', 'pretty_midi', 'scipy', 'torch'])

class Pop2PianoTokenizer(metaclass=DummyObject):
    def __init__(self, *args, **kwargs):
        pass
2
0
2
0
2
0
1
0
1
0
0
0
1
0
1
1
5
1
4
3
2
0
4
3
2
1
1
0
1
6,555
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/utils/dummy_music_objects.py
transformers.utils.dummy_music_objects.Pop2PianoFeatureExtractor
from ..utils import DummyObject, requires_backends


class Pop2PianoFeatureExtractor(metaclass=DummyObject):
    _backends = ['music']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['music'])

class Pop2PianoFeatureExtractor(metaclass=DummyObject):
    def __init__(self, *args, **kwargs):
        pass
2
0
2
0
2
0
1
0
1
0
0
0
1
0
1
15
5
1
4
3
2
0
4
3
2
1
3
0
1
6,556
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/utils/dummy_music_objects.py
transformers.utils.dummy_music_objects.Pop2PianoTokenizer
from ..utils import DummyObject, requires_backends


class Pop2PianoTokenizer(metaclass=DummyObject):
    _backends = ['music']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['music'])

class Pop2PianoTokenizer(metaclass=DummyObject):
    def __init__(self, *args, **kwargs):
        pass
2
0
2
0
2
0
1
0
1
0
0
0
1
0
1
15
5
1
4
3
2
0
4
3
2
1
3
0
1
6,557
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/utils/dummy_pt_objects.py
transformers.utils.dummy_pt_objects.Adafactor
from ..utils import DummyObject, requires_backends


class Adafactor(metaclass=DummyObject):
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])

class Adafactor(metaclass=DummyObject):
    def __init__(self, *args, **kwargs):
        pass
2
0
2
0
2
0
1
0
1
0
0
0
1
0
1
1
5
1
4
3
2
0
4
3
2
1
1
0
1
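The records that follow differ from the hand-written detectron2 placeholder earlier in one respect: `DummyObject` is a metaclass, so attribute access on the class itself is intercepted and no per-method stub (e.g. for `from_pretrained`) is needed. A sketch, again only raising in an environment where torch is absent:

```py
from transformers.utils.dummy_pt_objects import Adafactor

try:
    Adafactor.from_pretrained  # plain class-level attribute access triggers requires_backends
except ImportError as err:
    print(err)
```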
6,558
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/utils/dummy_pt_objects.py
transformers.utils.dummy_pt_objects.AlternatingCodebooksLogitsProcessor
from ..utils import DummyObject, requires_backends


class AlternatingCodebooksLogitsProcessor(metaclass=DummyObject):
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])

class AlternatingCodebooksLogitsProcessor(metaclass=DummyObject):
    def __init__(self, *args, **kwargs):
        pass
2
0
2
0
2
0
1
0
1
0
0
0
1
0
1
1
5
1
4
3
2
0
4
3
2
1
1
0
1
6,559
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/utils/dummy_pt_objects.py
transformers.utils.dummy_pt_objects.BayesianDetectorConfig
from ..utils import DummyObject, requires_backends


class BayesianDetectorConfig(metaclass=DummyObject):
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])

class BayesianDetectorConfig(metaclass=DummyObject):
    def __init__(self, *args, **kwargs):
        pass
2
0
2
0
2
0
1
0
1
0
0
0
1
0
1
1
5
1
4
3
2
0
4
3
2
1
1
0
1
6,560
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/utils/dummy_pt_objects.py
transformers.utils.dummy_pt_objects.BayesianDetectorModel
from ..utils import DummyObject, requires_backends


class BayesianDetectorModel(metaclass=DummyObject):
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])

class BayesianDetectorModel(metaclass=DummyObject):
    def __init__(self, *args, **kwargs):
        pass
2
0
2
0
2
0
1
0
1
0
0
0
1
0
1
1
5
1
4
3
2
0
4
3
2
1
1
0
1
6,561
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/utils/dummy_pt_objects.py
transformers.utils.dummy_pt_objects.BeamScorer
from ..utils import DummyObject, requires_backends


class BeamScorer(metaclass=DummyObject):
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])

class BeamScorer(metaclass=DummyObject):
    def __init__(self, *args, **kwargs):
        pass
2
0
2
0
2
0
1
0
1
0
0
0
1
0
1
1
5
1
4
3
2
0
4
3
2
1
1
0
1
6,562
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/utils/dummy_pt_objects.py
transformers.utils.dummy_pt_objects.Cache
from ..utils import DummyObject, requires_backends


class Cache(metaclass=DummyObject):
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])

class Cache(metaclass=DummyObject):
    def __init__(self, *args, **kwargs):
        pass
2
0
2
0
2
0
1
0
1
0
0
0
1
0
1
1
5
1
4
3
2
0
4
3
2
1
1
0
1
6,563
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/utils/dummy_pt_objects.py
transformers.utils.dummy_pt_objects.ClassifierFreeGuidanceLogitsProcessor
from ..utils import DummyObject, requires_backends


class ClassifierFreeGuidanceLogitsProcessor(metaclass=DummyObject):
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])

class ClassifierFreeGuidanceLogitsProcessor(metaclass=DummyObject):
    def __init__(self, *args, **kwargs):
        pass
2
0
2
0
2
0
1
0
1
0
0
0
1
0
1
1
5
1
4
3
2
0
4
3
2
1
1
0
1
6,564
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/utils/dummy_pt_objects.py
transformers.utils.dummy_pt_objects.ConstrainedBeamSearchScorer
from ..utils import DummyObject, requires_backends


class ConstrainedBeamSearchScorer(metaclass=DummyObject):
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])

class ConstrainedBeamSearchScorer(metaclass=DummyObject):
    def __init__(self, *args, **kwargs):
        pass
2
0
2
0
2
0
1
0
1
0
0
0
1
0
1
1
5
1
4
3
2
0
4
3
2
1
1
0
1
6,565
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/utils/dummy_pt_objects.py
transformers.utils.dummy_pt_objects.Constraint
from ..utils import DummyObject, requires_backends


class Constraint(metaclass=DummyObject):
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])

class Constraint(metaclass=DummyObject):
    def __init__(self, *args, **kwargs):
        pass
2
0
2
0
2
0
1
0
1
0
0
0
1
0
1
1
5
1
4
3
2
0
4
3
2
1
1
0
1
6,566
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/utils/dummy_pt_objects.py
transformers.utils.dummy_pt_objects.ConstraintListState
from ..utils import DummyObject, requires_backends


class ConstraintListState(metaclass=DummyObject):
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])

class ConstraintListState(metaclass=DummyObject):
    def __init__(self, *args, **kwargs):
        pass
2
0
2
0
2
0
1
0
1
0
0
0
1
0
1
1
5
1
4
3
2
0
4
3
2
1
1
0
1
6,567
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/utils/dummy_pt_objects.py
transformers.utils.dummy_pt_objects.Conv1D
from ..utils import DummyObject, requires_backends


class Conv1D(metaclass=DummyObject):
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])

class Conv1D(metaclass=DummyObject):
    def __init__(self, *args, **kwargs):
        pass
2
0
2
0
2
0
1
0
1
0
0
0
1
0
1
1
5
1
4
3
2
0
4
3
2
1
1
0
1
6,568
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/utils/dummy_pt_objects.py
transformers.utils.dummy_pt_objects.DisjunctiveConstraint
from ..utils import DummyObject, requires_backends


class DisjunctiveConstraint(metaclass=DummyObject):
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])

class DisjunctiveConstraint(metaclass=DummyObject):
    def __init__(self, *args, **kwargs):
        pass
2
0
2
0
2
0
1
0
1
0
0
0
1
0
1
1
5
1
4
3
2
0
4
3
2
1
1
0
1
6,569
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/utils/dummy_pt_objects.py
transformers.utils.dummy_pt_objects.DynamicCache
from ..utils import DummyObject, requires_backends


class DynamicCache(metaclass=DummyObject):
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])

class DynamicCache(metaclass=DummyObject):
    def __init__(self, *args, **kwargs):
        pass
2
0
2
0
2
0
1
0
1
0
0
0
1
0
1
1
5
1
4
3
2
0
4
3
2
1
1
0
1
6,570
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/utils/dummy_pt_objects.py
transformers.utils.dummy_pt_objects.EncoderDecoderCache
from ..utils import DummyObject, requires_backends


class EncoderDecoderCache(metaclass=DummyObject):
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])

class EncoderDecoderCache(metaclass=DummyObject):
    def __init__(self, *args, **kwargs):
        pass
2
0
2
0
2
0
1
0
1
0
0
0
1
0
1
1
5
1
4
3
2
0
4
3
2
1
1
0
1
6,571
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/utils/dummy_pt_objects.py
transformers.utils.dummy_pt_objects.EncoderNoRepeatNGramLogitsProcessor
from ..utils import DummyObject, requires_backends


class EncoderNoRepeatNGramLogitsProcessor(metaclass=DummyObject):
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])

class EncoderNoRepeatNGramLogitsProcessor(metaclass=DummyObject):
    def __init__(self, *args, **kwargs):
        pass
2
0
2
0
2
0
1
0
1
0
0
0
1
0
1
1
5
1
4
3
2
0
4
3
2
1
1
0
1
6,572
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/utils/dummy_pt_objects.py
transformers.utils.dummy_pt_objects.EncoderRepetitionPenaltyLogitsProcessor
from ..utils import DummyObject, requires_backends


class EncoderRepetitionPenaltyLogitsProcessor(metaclass=DummyObject):
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])

class EncoderRepetitionPenaltyLogitsProcessor(metaclass=DummyObject):
    def __init__(self, *args, **kwargs):
        pass
2
0
2
0
2
0
1
0
1
0
0
0
1
0
1
1
5
1
4
3
2
0
4
3
2
1
1
0
1
6,573
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/utils/dummy_pt_objects.py
transformers.utils.dummy_pt_objects.EosTokenCriteria
from ..utils import DummyObject, requires_backends


class EosTokenCriteria(metaclass=DummyObject):
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])

class EosTokenCriteria(metaclass=DummyObject):
    def __init__(self, *args, **kwargs):
        pass
2
0
2
0
2
0
1
0
1
0
0
0
1
0
1
1
5
1
4
3
2
0
4
3
2
1
1
0
1
6,574
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/utils/dummy_pt_objects.py
transformers.utils.dummy_pt_objects.EpsilonLogitsWarper
from ..utils import DummyObject, requires_backends


class EpsilonLogitsWarper(metaclass=DummyObject):
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])

class EpsilonLogitsWarper(metaclass=DummyObject):
    def __init__(self, *args, **kwargs):
        pass
2
0
2
0
2
0
1
0
1
0
0
0
1
0
1
1
5
1
4
3
2
0
4
3
2
1
1
0
1
6,575
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/utils/dummy_pt_objects.py
transformers.utils.dummy_pt_objects.EtaLogitsWarper
from ..utils import DummyObject, requires_backends


class EtaLogitsWarper(metaclass=DummyObject):
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])

class EtaLogitsWarper(metaclass=DummyObject):
    def __init__(self, *args, **kwargs):
        pass
2
0
2
0
2
0
1
0
1
0
0
0
1
0
1
1
5
1
4
3
2
0
4
3
2
1
1
0
1
6,576
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/utils/dummy_pt_objects.py
transformers.utils.dummy_pt_objects.ExponentialDecayLengthPenalty
from ..utils import DummyObject, requires_backends


class ExponentialDecayLengthPenalty(metaclass=DummyObject):
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])

class ExponentialDecayLengthPenalty(metaclass=DummyObject):
    def __init__(self, *args, **kwargs):
        pass
2
0
2
0
2
0
1
0
1
0
0
0
1
0
1
1
5
1
4
3
2
0
4
3
2
1
1
0
1
6,577
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/utils/dummy_pt_objects.py
transformers.utils.dummy_pt_objects.ForcedBOSTokenLogitsProcessor
from ..utils import DummyObject, requires_backends


class ForcedBOSTokenLogitsProcessor(metaclass=DummyObject):
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])

class ForcedBOSTokenLogitsProcessor(metaclass=DummyObject):
    def __init__(self, *args, **kwargs):
        pass
2
0
2
0
2
0
1
0
1
0
0
0
1
0
1
1
5
1
4
3
2
0
4
3
2
1
1
0
1
6,578
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/utils/dummy_pt_objects.py
transformers.utils.dummy_pt_objects.ForcedEOSTokenLogitsProcessor
from ..utils import DummyObject, requires_backends


class ForcedEOSTokenLogitsProcessor(metaclass=DummyObject):
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])

class ForcedEOSTokenLogitsProcessor(metaclass=DummyObject):
    def __init__(self, *args, **kwargs):
        pass
2
0
2
0
2
0
1
0
1
0
0
0
1
0
1
1
5
1
4
3
2
0
4
3
2
1
1
0
1
6,579
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/utils/dummy_pt_objects.py
transformers.utils.dummy_pt_objects.GenerationMixin
from ..utils import DummyObject, requires_backends


class GenerationMixin(metaclass=DummyObject):
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])

class GenerationMixin(metaclass=DummyObject):
    def __init__(self, *args, **kwargs):
        pass
2
0
2
0
2
0
1
0
1
0
0
0
1
0
1
1
5
1
4
3
2
0
4
3
2
1
1
0
1
6,580
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/utils/dummy_pt_objects.py
transformers.utils.dummy_pt_objects.GlueDataTrainingArguments
from ..utils import DummyObject, requires_backends


class GlueDataTrainingArguments(metaclass=DummyObject):
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])

class GlueDataTrainingArguments(metaclass=DummyObject):
    def __init__(self, *args, **kwargs):
        pass
2
0
2
0
2
0
1
0
1
0
0
0
1
0
1
1
5
1
4
3
2
0
4
3
2
1
1
0
1
6,581
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/utils/dummy_pt_objects.py
transformers.utils.dummy_pt_objects.GlueDataset
from ..utils import DummyObject, requires_backends


class GlueDataset(metaclass=DummyObject):
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])

class GlueDataset(metaclass=DummyObject):
    def __init__(self, *args, **kwargs):
        pass
2
0
2
0
2
0
1
0
1
0
0
0
1
0
1
1
5
1
4
3
2
0
4
3
2
1
1
0
1
6,582
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/utils/dummy_pt_objects.py
transformers.utils.dummy_pt_objects.HQQQuantizedCache
from ..utils import DummyObject, requires_backends


class HQQQuantizedCache(metaclass=DummyObject):
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])

class HQQQuantizedCache(metaclass=DummyObject):
    def __init__(self, *args, **kwargs):
        pass
2
0
2
0
2
0
1
0
1
0
0
0
1
0
1
1
5
1
4
3
2
0
4
3
2
1
1
0
1
6,583
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/utils/dummy_pt_objects.py
transformers.utils.dummy_pt_objects.HybridCache
from ..utils import DummyObject, requires_backends


class HybridCache(metaclass=DummyObject):
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])

class HybridCache(metaclass=DummyObject):
    def __init__(self, *args, **kwargs):
        pass
2
0
2
0
2
0
1
0
1
0
0
0
1
0
1
1
5
1
4
3
2
0
4
3
2
1
1
0
1
6,584
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/utils/dummy_pt_objects.py
transformers.utils.dummy_pt_objects.InfNanRemoveLogitsProcessor
from ..utils import DummyObject, requires_backends


class InfNanRemoveLogitsProcessor(metaclass=DummyObject):
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])

class InfNanRemoveLogitsProcessor(metaclass=DummyObject):
    def __init__(self, *args, **kwargs):
        pass
2
0
2
0
2
0
1
0
1
0
0
0
1
0
1
1
5
1
4
3
2
0
4
3
2
1
1
0
1
6,585
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/utils/dummy_pt_objects.py
transformers.utils.dummy_pt_objects.LineByLineTextDataset
from ..utils import DummyObject, requires_backends


class LineByLineTextDataset(metaclass=DummyObject):
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])

class LineByLineTextDataset(metaclass=DummyObject):
    def __init__(self, *args, **kwargs):
        pass
2
0
2
0
2
0
1
0
1
0
0
0
1
0
1
1
5
1
4
3
2
0
4
3
2
1
1
0
1
6,586
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/utils/dummy_pt_objects.py
transformers.utils.dummy_pt_objects.LineByLineWithRefDataset
from ..utils import DummyObject, requires_backends


class LineByLineWithRefDataset(metaclass=DummyObject):
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])

class LineByLineWithRefDataset(metaclass=DummyObject):
    def __init__(self, *args, **kwargs):
        pass
2
0
2
0
2
0
1
0
1
0
0
0
1
0
1
1
5
1
4
3
2
0
4
3
2
1
1
0
1
6,587
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/utils/dummy_pt_objects.py
transformers.utils.dummy_pt_objects.LineByLineWithSOPTextDataset
from ..utils import DummyObject, requires_backends


class LineByLineWithSOPTextDataset(metaclass=DummyObject):
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])

class LineByLineWithSOPTextDataset(metaclass=DummyObject):
    def __init__(self, *args, **kwargs):
        pass
2
0
2
0
2
0
1
0
1
0
0
0
1
0
1
1
5
1
4
3
2
0
4
3
2
1
1
0
1
6,588
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/utils/dummy_pt_objects.py
transformers.utils.dummy_pt_objects.LogitNormalization
from ..utils import DummyObject, requires_backends


class LogitNormalization(metaclass=DummyObject):
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])

class LogitNormalization(metaclass=DummyObject):
    def __init__(self, *args, **kwargs):
        pass
2
0
2
0
2
0
1
0
1
0
0
0
1
0
1
1
5
1
4
3
2
0
4
3
2
1
1
0
1
6,589
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/utils/dummy_pt_objects.py
transformers.utils.dummy_pt_objects.LogitsProcessor
from ..utils import DummyObject, requires_backends


class LogitsProcessor(metaclass=DummyObject):
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])

class LogitsProcessor(metaclass=DummyObject):
    def __init__(self, *args, **kwargs):
        pass
2
0
2
0
2
0
1
0
1
0
0
0
1
0
1
1
5
1
4
3
2
0
4
3
2
1
1
0
1
6,590
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/utils/dummy_pt_objects.py
transformers.utils.dummy_pt_objects.LogitsProcessorList
from ..utils import DummyObject, requires_backends


class LogitsProcessorList(metaclass=DummyObject):
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])

class LogitsProcessorList(metaclass=DummyObject):
    def __init__(self, *args, **kwargs):
        pass
2
0
2
0
2
0
1
0
1
0
0
0
1
0
1
1
5
1
4
3
2
0
4
3
2
1
1
0
1
6,591
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/utils/dummy_pt_objects.py
transformers.utils.dummy_pt_objects.MaxLengthCriteria
from ..utils import DummyObject, requires_backends


class MaxLengthCriteria(metaclass=DummyObject):
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])

class MaxLengthCriteria(metaclass=DummyObject):
    def __init__(self, *args, **kwargs):
        pass
2
0
2
0
2
0
1
0
1
0
0
0
1
0
1
1
5
1
4
3
2
0
4
3
2
1
1
0
1
6,592
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/utils/dummy_pt_objects.py
transformers.utils.dummy_pt_objects.MaxTimeCriteria
from ..utils import DummyObject, requires_backends


class MaxTimeCriteria(metaclass=DummyObject):
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])

class MaxTimeCriteria(metaclass=DummyObject):
    def __init__(self, *args, **kwargs):
        pass
2
0
2
0
2
0
1
0
1
0
0
0
1
0
1
1
5
1
4
3
2
0
4
3
2
1
1
0
1
6,593
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/utils/dummy_pt_objects.py
transformers.utils.dummy_pt_objects.MinLengthLogitsProcessor
from ..utils import DummyObject, requires_backends


class MinLengthLogitsProcessor(metaclass=DummyObject):
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])

class MinLengthLogitsProcessor(metaclass=DummyObject):
    def __init__(self, *args, **kwargs):
        pass
2
0
2
0
2
0
1
0
1
0
0
0
1
0
1
1
5
1
4
3
2
0
4
3
2
1
1
0
1
6,594
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/utils/dummy_pt_objects.py
transformers.utils.dummy_pt_objects.MinNewTokensLengthLogitsProcessor
from ..utils import DummyObject, requires_backends


class MinNewTokensLengthLogitsProcessor(metaclass=DummyObject):
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])

class MinNewTokensLengthLogitsProcessor(metaclass=DummyObject):
    def __init__(self, *args, **kwargs):
        pass
2
0
2
0
2
0
1
0
1
0
0
0
1
0
1
1
5
1
4
3
2
0
4
3
2
1
1
0
1
6,595
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/utils/dummy_pt_objects.py
transformers.utils.dummy_pt_objects.MinPLogitsWarper
from ..utils import DummyObject, requires_backends


class MinPLogitsWarper(metaclass=DummyObject):
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])

class MinPLogitsWarper(metaclass=DummyObject):
    def __init__(self, *args, **kwargs):
        pass
2
0
2
0
2
0
1
0
1
0
0
0
1
0
1
1
5
1
4
3
2
0
4
3
2
1
1
0
1
6,596
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/utils/dummy_pt_objects.py
transformers.utils.dummy_pt_objects.NoBadWordsLogitsProcessor
from ..utils import DummyObject, requires_backends


class NoBadWordsLogitsProcessor(metaclass=DummyObject):
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])

class NoBadWordsLogitsProcessor(metaclass=DummyObject):
    def __init__(self, *args, **kwargs):
        pass
2
0
2
0
2
0
1
0
1
0
0
0
1
0
1
1
5
1
4
3
2
0
4
3
2
1
1
0
1
6,597
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/utils/dummy_pt_objects.py
transformers.utils.dummy_pt_objects.NoRepeatNGramLogitsProcessor
from ..utils import DummyObject, requires_backends


class NoRepeatNGramLogitsProcessor(metaclass=DummyObject):
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])

class NoRepeatNGramLogitsProcessor(metaclass=DummyObject):
    def __init__(self, *args, **kwargs):
        pass
2
0
2
0
2
0
1
0
1
0
0
0
1
0
1
1
5
1
4
3
2
0
4
3
2
1
1
0
1
6,598
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/utils/dummy_pt_objects.py
transformers.utils.dummy_pt_objects.OffloadedCache
from ..utils import DummyObject, requires_backends


class OffloadedCache(metaclass=DummyObject):
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])

class OffloadedCache(metaclass=DummyObject):
    def __init__(self, *args, **kwargs):
        pass
2
0
2
0
2
0
1
0
1
0
0
0
1
0
1
1
5
1
4
3
2
0
4
3
2
1
1
0
1
6,599
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/utils/dummy_pt_objects.py
transformers.utils.dummy_pt_objects.OffloadedStaticCache
from ..utils import DummyObject, requires_backends


class OffloadedStaticCache(metaclass=DummyObject):
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])

class OffloadedStaticCache(metaclass=DummyObject):
    def __init__(self, *args, **kwargs):
        pass
2
0
2
0
2
0
1
0
1
0
0
0
1
0
1
1
5
1
4
3
2
0
4
3
2
1
1
0
1