Dataset columns (type and min–max range from the preview; for string columns the range refers to string length):

| Column | Type | Min | Max |
|---|---|---|---|
| id | int64 | 0 | 328k |
| repository_name | string (length) | 7 | 58 |
| file_path | string (length) | 9 | 302 |
| class_name | string (length) | 5 | 256 |
| human_written_code | string (length) | 16 | 2.16M |
| class_skeleton | string (length) | 18 | 1.49M |
| total_program_units | int64 | 1 | 1.76k |
| total_doc_str | int64 | 0 | 771 |
| AvgCountLine | float64 | 0 | 7.89k |
| AvgCountLineBlank | float64 | 0 | 297 |
| AvgCountLineCode | float64 | 0 | 7.89k |
| AvgCountLineComment | float64 | 0 | 7.89k |
| AvgCyclomatic | float64 | 0 | 130 |
| CommentToCodeRatio | float64 | 0 | 168 |
| CountClassBase | float64 | 0 | 40 |
| CountClassCoupled | float64 | 0 | 583 |
| CountClassCoupledModified | float64 | 0 | 575 |
| CountClassDerived | float64 | 0 | 5.35k |
| CountDeclInstanceMethod | float64 | 0 | 529 |
| CountDeclInstanceVariable | float64 | 0 | 296 |
| CountDeclMethod | float64 | 0 | 599 |
| CountDeclMethodAll | float64 | 0 | 1.12k |
| CountLine | float64 | 1 | 40.4k |
| CountLineBlank | float64 | 0 | 8.16k |
| CountLineCode | float64 | 1 | 25.7k |
| CountLineCodeDecl | float64 | 1 | 8.15k |
| CountLineCodeExe | float64 | 0 | 24.2k |
| CountLineComment | float64 | 0 | 16.5k |
| CountStmt | float64 | 1 | 9.71k |
| CountStmtDecl | float64 | 1 | 8.15k |
| CountStmtExe | float64 | 0 | 9.69k |
| MaxCyclomatic | float64 | 0 | 759 |
| MaxInheritanceTree | float64 | 0 | 16 |
| MaxNesting | float64 | 0 | 34 |
| SumCyclomatic | float64 | 0 | 2.9k |
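Each record below is one row under this schema. As an illustration only, here is a minimal sketch of loading and filtering a dataset with these columns via the Hugging Face `datasets` library; the dataset identifier is a placeholder (the preview does not name the repository) and the thresholds are arbitrary.

```python
# Minimal sketch, assuming a Hugging Face dataset with the columns listed above.
# "your-org/class-metrics-corpus" is a placeholder identifier, not the real repo id.
from datasets import load_dataset

ds = load_dataset("your-org/class-metrics-corpus", split="train")

# Keep small, low-complexity classes using the numeric metric columns.
small = ds.filter(lambda row: row["CountLineCode"] <= 100 and row["SumCyclomatic"] <= 20)

for row in small.select(range(min(3, len(small)))):
    print(row["class_name"], int(row["CountLineCode"]), int(row["SumCyclomatic"]))
```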
id: 400
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/integrations/ggml.py
class_name: transformers.integrations.ggml.GGUFLlamaConverter
from .. import AddedToken from ..convert_slow_tokenizer import GemmaConverter, GPT2Converter, LlamaConverter, Qwen2Converter, T5Converter from tokenizers import Tokenizer, decoders, normalizers, pre_tokenizers, processors import numpy as np from tokenizers.models import BPE, Unigram class GGUFLlamaConverter(LlamaConverter): def __init__(self, tokenizer_dict): self.proto = GGUFTokenizerSkeleton(tokenizer_dict) self.original_tokenizer = self.proto self.additional_kwargs = {} self.is_llama_3_tokenizer = getattr(self.proto, 'tokenizer_type', 'llama') != 'llama' def vocab(self, proto): return list(zip(proto.tokens, proto.scores)) def merges(self, proto): return proto.merges def tokenizer(self, proto): vocab_scores = self.vocab(self.proto) merges = self.merges(self.proto) bpe_vocab = {word: i for i, (word, _score) in enumerate(vocab_scores)} unk_token = proto.tokens[proto.unk_token_id] if proto.unk_token_id is not None else None bos_token = proto.tokens[proto.bos_token_id] if getattr(proto, 'bos_token_id', None) is not None else None eos_token = proto.tokens[proto.bos_token_id] if getattr(proto, 'eos_token_id', None) is not None else None tokenizer = Tokenizer(BPE(bpe_vocab, merges, unk_token=unk_token, fuse_unk=True, byte_fallback=True)) special_tokens = [] if not hasattr(self.proto, 'token_type'): if unk_token is not None: special_tokens.append(AddedToken(unk_token, normalized=False, special=True)) if bos_token is not None: special_tokens.append(AddedToken(bos_token, normalized=False, special=True)) if eos_token is not None: special_tokens.append(AddedToken(eos_token, normalized=False, special=True)) else: special_tokens_idx = np.where(np.array(self.proto.token_type) == 3)[0] for idx in special_tokens_idx: special_tokens.append(AddedToken(self.proto.tokens[idx], normalized=False, special=True)) if len(special_tokens) != 0: tokenizer.add_special_tokens(special_tokens) if len(self.proto.added_tokens) != 0: tokenizer.add_tokens([AddedToken(added_token, normalized=False, special=False) for added_token in self.proto.added_tokens]) self.additional_kwargs['unk_token'] = unk_token self.additional_kwargs['eos_token'] = bos_token self.additional_kwargs['bos_token'] = eos_token if self.is_llama_3_tokenizer: self.additional_kwargs['add_prefix_space'] = None self.additional_kwargs['clean_up_tokenization_spaces'] = True self.additional_kwargs['legacy'] = False self.original_tokenizer.legacy = False return tokenizer def decoder(self, replacement, add_prefix_space): sequence = [decoders.ByteFallback(), decoders.Fuse(), decoders.Replace('▁', ' ')] if self.is_llama_3_tokenizer: sequence += [decoders.ByteLevel(add_prefix_space=False, trim_offsets=False, use_regex=True)] if add_prefix_space: sequence += [decoders.Strip(content=' ', left=1)] return decoders.Sequence(sequence) def converted(self): tokenizer = self.tokenizer(self.proto) normalizer = self.normalizer(self.proto) if normalizer is not None: tokenizer.normalizer = normalizer replacement = '▁' add_prefix_space = True if hasattr(self.original_tokenizer, 'add_prefix_space'): add_prefix_space = self.original_tokenizer.add_prefix_space pre_tokenizer = self.pre_tokenizer(replacement, add_prefix_space) if pre_tokenizer is not None: tokenizer.pre_tokenizer = pre_tokenizer tokenizer.decoder = self.decoder(replacement, add_prefix_space) post_processor = self.post_processor() if post_processor: tokenizer.post_processor = post_processor if self.is_llama_3_tokenizer: tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=False, trim_offsets=False, 
use_regex=True) tokenizer.normalizer = normalizers.Sequence([]) return tokenizer
class GGUFLlamaConverter(LlamaConverter): def __init__(self, tokenizer_dict): pass def vocab(self, proto): pass def merges(self, proto): pass def tokenizer(self, proto): pass def decoder(self, replacement, add_prefix_space): pass def converted(self): pass
total_program_units: 7, total_doc_str: 0, AvgCountLine: 19, AvgCountLineBlank: 4, AvgCountLineCode: 14, AvgCountLineComment: 1, AvgCyclomatic: 4, CommentToCodeRatio: 0.08
CountClassBase: 1, CountClassCoupled: 4, CountClassCoupledModified: 1, CountClassDerived: 0, CountDeclInstanceMethod: 6, CountDeclInstanceVariable: 4, CountDeclMethod: 6, CountDeclMethodAll: 23
CountLine: 119, CountLineBlank: 26, CountLineCode: 86, CountLineCodeDecl: 28, CountLineCodeExe: 79, CountLineComment: 7
CountStmt: 69, CountStmtDecl: 28, CountStmtExe: 62, MaxCyclomatic: 12, MaxInheritanceTree: 3, MaxNesting: 2, SumCyclomatic: 24
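Note how `class_skeleton` mirrors `human_written_code` with every method body collapsed to `pass`. Purely as an illustration (this is not the dataset's documented extraction pipeline), a skeleton of that shape could be derived with Python's `ast` module; the sketch below drops docstrings, whereas the skeletons in these rows keep them.

```python
# Sketch only: derive a pass-body class skeleton from source code with ast.
# The real class_skeleton column also preserves docstrings, which this omits.
import ast

def make_skeleton(source: str) -> str:
    tree = ast.parse(source)
    for node in ast.walk(tree):
        if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef)):
            node.body = [ast.Pass()]  # replace the implementation with `pass`
    classes = [n for n in tree.body if isinstance(n, ast.ClassDef)]
    return "\n\n".join(ast.unparse(cls) for cls in classes)
```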
id: 401
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/integrations/ggml.py
class_name: transformers.integrations.ggml.GGUFPhi3Converter
from tokenizers import Tokenizer, decoders, normalizers, pre_tokenizers, processors from ..convert_slow_tokenizer import GemmaConverter, GPT2Converter, LlamaConverter, Qwen2Converter, T5Converter from tokenizers.models import BPE, Unigram from .. import AddedToken class GGUFPhi3Converter(LlamaConverter): def __init__(self, tokenizer_dict): self.proto = GGUFTokenizerSkeleton(tokenizer_dict) self.original_tokenizer = self.proto self.additional_kwargs = {} def vocab(self, proto): return list(zip(proto.tokens, proto.scores)) def merges(self, proto): return proto.merges def tokenizer(self, proto): vocab_scores = self.vocab(self.proto) merges = self.merges(self.proto) bpe_vocab = {word: i for i, (word, _score) in enumerate(vocab_scores)} tokenizer = Tokenizer(BPE(bpe_vocab, merges)) tokenizer.add_special_tokens([AddedToken('</s>', rstrip=True, lstrip=False, normalized=False, special=True), AddedToken('<|endoftext|>', normalized=False, special=True), AddedToken('<|assistant|>', rstrip=True, normalized=False, special=True), AddedToken('<|placeholder1|>', rstrip=True, normalized=False, special=True), AddedToken('<|placeholder2|>', rstrip=True, normalized=False, special=True), AddedToken('<|placeholder3|>', rstrip=True, normalized=False, special=True), AddedToken('<|placeholder4|>', rstrip=True, normalized=False, special=True), AddedToken('<|system|>', rstrip=True, normalized=False, special=True), AddedToken('<|end|>', rstrip=True, normalized=False, special=True), AddedToken('<|placeholder5|>', rstrip=True, normalized=False, special=True), AddedToken('<|placeholder6|>', rstrip=True, normalized=False, special=True), AddedToken('<|user|>', rstrip=True, normalized=False, special=True)]) self.additional_kwargs['unk_token'] = proto.tokens[proto.unk_token_id] if proto.unk_token_id is not None else None self.additional_kwargs['eos_token'] = proto.tokens[proto.eos_token_id] if proto.eos_token_id is not None else None self.additional_kwargs['bos_token'] = proto.tokens[proto.bos_token_id] if proto.bos_token_id is not None else None self.additional_kwargs['pad_token'] = proto.tokens[proto.pad_token_id] if proto.pad_token_id is not None else None return tokenizer def decoder(self, replacement, add_prefix_space): sequence = [decoders.ByteFallback(), decoders.Fuse(), decoders.Replace(replacement, ' ')] if add_prefix_space: sequence += [decoders.Strip(content=' ', left=1)] return decoders.Sequence(sequence) def converted(self) -> Tokenizer: tokenizer = self.tokenizer(self.proto) replacement = '▁' add_prefix_space = True if hasattr(self.original_tokenizer, 'add_prefix_space'): add_prefix_space = self.original_tokenizer.add_prefix_space tokenizer.decoder = self.decoder(replacement, add_prefix_space) return tokenizer
class GGUFPhi3Converter(LlamaConverter): def __init__(self, tokenizer_dict): pass def vocab(self, proto): pass def merges(self, proto): pass def tokenizer(self, proto): pass def decoder(self, replacement, add_prefix_space): pass def converted(self) -> Tokenizer: pass
total_program_units: 7, total_doc_str: 0, AvgCountLine: 11, AvgCountLineBlank: 1, AvgCountLineCode: 10, AvgCountLineComment: 0, AvgCyclomatic: 2, CommentToCodeRatio: 0.02
CountClassBase: 1, CountClassCoupled: 4, CountClassCoupledModified: 1, CountClassDerived: 0, CountDeclInstanceMethod: 6, CountDeclInstanceVariable: 3, CountDeclMethod: 6, CountDeclMethodAll: 23
CountLine: 73, CountLineBlank: 12, CountLineCode: 60, CountLineCodeDecl: 18, CountLineCodeExe: 53, CountLineComment: 1
CountStmt: 33, CountStmtDecl: 18, CountStmtExe: 26, MaxCyclomatic: 5, MaxInheritanceTree: 3, MaxNesting: 1, SumCyclomatic: 12
id: 402
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/integrations/ggml.py
class_name: transformers.integrations.ggml.GGUFQwen2Converter
from tokenizers import Tokenizer, decoders, normalizers, pre_tokenizers, processors from ..convert_slow_tokenizer import GemmaConverter, GPT2Converter, LlamaConverter, Qwen2Converter, T5Converter from .. import AddedToken class GGUFQwen2Converter(Qwen2Converter): def __init__(self, tokenizer_dict): self.original_tokenizer = GGUFTokenizerSkeleton(tokenizer_dict) self.additional_kwargs = {} def converted(self) -> Tokenizer: vocab = {word: i for i, word in enumerate(self.original_tokenizer.tokens)} merges = self.original_tokenizer.merges tokenizer = super().converted(vocab, merges) tokenizer.add_special_tokens([AddedToken('<|endoftext|>', normalized=False, special=True), AddedToken('<|im_start|>', normalized=False, special=True), AddedToken('<|im_end|>', normalized=False, special=True)]) return tokenizer
class GGUFQwen2Converter(Qwen2Converter): def __init__(self, tokenizer_dict): pass def converted(self) -> Tokenizer: pass
total_program_units: 3, total_doc_str: 0, AvgCountLine: 8, AvgCountLineBlank: 1, AvgCountLineCode: 8, AvgCountLineComment: 0, AvgCyclomatic: 1, CommentToCodeRatio: 0
CountClassBase: 1, CountClassCoupled: 3, CountClassCoupledModified: 1, CountClassDerived: 0, CountDeclInstanceMethod: 2, CountDeclInstanceVariable: 2, CountDeclMethod: 2, CountDeclMethodAll: 5
CountLine: 18, CountLineBlank: 2, CountLineCode: 16, CountLineCodeDecl: 8, CountLineCodeExe: 13, CountLineComment: 0
CountStmt: 10, CountStmtDecl: 8, CountStmtExe: 7, MaxCyclomatic: 1, MaxInheritanceTree: 2, MaxNesting: 0, SumCyclomatic: 2
id: 403
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/integrations/ggml.py
class_name: transformers.integrations.ggml.GGUFT5Converter
from tokenizers.models import BPE, Unigram from ..convert_slow_tokenizer import GemmaConverter, GPT2Converter, LlamaConverter, Qwen2Converter, T5Converter from tokenizers import Tokenizer, decoders, normalizers, pre_tokenizers, processors class GGUFT5Converter(T5Converter): def __init__(self, tokenizer_dict): tokenizer_dict['merges'] = ['dummy text'] self.proto = GGUFTokenizerSkeleton(tokenizer_dict) self.token2id = {k: v for v, k in enumerate(self.proto.tokens)} self.original_tokenizer = self.proto self.additional_kwargs = {} def vocab(self, proto): return list(zip(proto.tokens, proto.scores)) def normalizer(self, proto): if getattr(self.original_tokenizer, 'legacy', True): sequence = [] if getattr(self.original_tokenizer, 'add_prefix_space', True): sequence += [normalizers.Prepend(prepend='▁')] sequence += [normalizers.Replace(pattern=' ', content='▁')] return normalizers.Sequence(sequence) return None def post_processor(self): return processors.TemplateProcessing(single=['$A', '</s>'], pair=['$A', '</s>', '$B', '</s>'], special_tokens=[('</s>', self.token2id['</s>'])]) def converted(self) -> Tokenizer: vocab_scores = self.vocab(self.proto) tokenizer = Tokenizer(Unigram(vocab_scores, unk_id=self.proto.unk_token_id, byte_fallback=False)) normalizer = self.normalizer(self.proto) if normalizer is not None: tokenizer.normalizer = normalizer replacement = '▁' add_prefix_space = True if hasattr(self.original_tokenizer, 'add_prefix_space'): add_prefix_space = self.original_tokenizer.add_prefix_space pre_tokenizer = self.pre_tokenizer(replacement, add_prefix_space) if pre_tokenizer is not None: tokenizer.pre_tokenizer = pre_tokenizer tokenizer.decoder = self.decoder(replacement, add_prefix_space) post_processor = self.post_processor() if post_processor: tokenizer.post_processor = post_processor return tokenizer
class GGUFT5Converter(T5Converter): def __init__(self, tokenizer_dict): pass def vocab(self, proto): pass def normalizer(self, proto): pass def post_processor(self): pass def converted(self) -> Tokenizer: pass
total_program_units: 6, total_doc_str: 0, AvgCountLine: 11, AvgCountLineBlank: 1, AvgCountLineCode: 10, AvgCountLineComment: 1, AvgCyclomatic: 2, CommentToCodeRatio: 0.06
CountClassBase: 1, CountClassCoupled: 4, CountClassCoupledModified: 1, CountClassDerived: 0, CountDeclInstanceMethod: 5, CountDeclInstanceVariable: 4, CountDeclMethod: 5, CountDeclMethodAll: 18
CountLine: 61, CountLineBlank: 10, CountLineCode: 49, CountLineCodeDecl: 18, CountLineCodeExe: 43, CountLineComment: 3
CountStmt: 37, CountStmtDecl: 18, CountStmtExe: 31, MaxCyclomatic: 5, MaxInheritanceTree: 3, MaxNesting: 2, SumCyclomatic: 11
id: 404
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/integrations/ggml.py
class_name: transformers.integrations.ggml.GGUFTokenizerSkeleton
from ..utils.logging import tqdm class GGUFTokenizerSkeleton: def __init__(self, dict_): for k, v in dict_.items(): setattr(self, k, v) if not hasattr(self, 'merges'): if not hasattr(self, 'tokens') or not hasattr(self, 'scores'): raise ValueError('tokens and scores need to be passed for a LLaMa tokenizer without merges to be instantiated.') tokens = self.tokens scores = self.scores vocab = {t: scores[i] for i, t in enumerate(tokens)} logger.warning('Merges were not in checkpoint, building merges on the fly.') merges = [] for merge, piece_score in tqdm(vocab.items()): local = [] for index in range(1, len(merge)): piece_l, piece_r = (merge[:index], merge[index:]) if piece_l in tokens and piece_r in tokens: local.append((piece_l, piece_r, piece_score)) local = sorted(local, key=lambda x: (vocab[x[0]], vocab[x[1]]), reverse=True) merges.extend(local) merges = sorted(merges, key=lambda val: val[2], reverse=True) merges = [(val[0], val[1]) for val in merges] self.merges = merges else: self.merges = [tuple(merge.split(' ')) for merge in self.merges] if not hasattr(self, 'scores'): self.scores = [None for _ in range(len(self.tokens))] if not hasattr(self, 'added_tokens'): self.added_tokens = [] if not hasattr(self, 'unk_token_id'): self.unk_token_id = None if hasattr(self, 'unknown_token_id') and self.unk_token_id is None: self.unk_token_id = self.unknown_token_id
class GGUFTokenizerSkeleton: def __init__(self, dict_): pass
total_program_units: 2, total_doc_str: 0, AvgCountLine: 40, AvgCountLineBlank: 5, AvgCountLineCode: 34, AvgCountLineComment: 1, AvgCyclomatic: 11, CommentToCodeRatio: 0.03
CountClassBase: 0, CountClassCoupled: 4, CountClassCoupledModified: 0, CountClassDerived: 0, CountDeclInstanceMethod: 1, CountDeclInstanceVariable: 4, CountDeclMethod: 1, CountDeclMethodAll: 1
CountLine: 41, CountLineBlank: 5, CountLineCode: 35, CountLineCodeDecl: 15, CountLineCodeExe: 33, CountLineComment: 1
CountStmt: 32, CountStmtDecl: 15, CountStmtExe: 30, MaxCyclomatic: 11, MaxInheritanceTree: 0, MaxNesting: 4, SumCyclomatic: 11
id: 405
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/integrations/higgs.py
class_name: transformers.integrations.higgs.HiggsLinear
from typing import Optional class HiggsLinear(torch.nn.Module): def __init__(self, in_features: int, out_features: int, num_bits: int, bias=True, dtype: Optional[torch.dtype]=None, device: Optional[torch.device]=None, group_size: int=256, hadamard_size: int=1024): super().__init__() self.in_features = in_features self.out_features = out_features self.num_bits = num_bits self.group_size = group_size self.hadamard_size = hadamard_size assert in_features % group_size == 0 assert num_bits in [2, 3, 4] self.weight = nn.Parameter(torch.empty((out_features * num_bits // 16, in_features), dtype=torch.int16, device=device), requires_grad=False) self.scales = nn.Parameter(torch.empty((out_features, in_features // group_size), dtype=dtype, device=device), requires_grad=False) self.tables = nn.Parameter(torch.empty((2 ** num_bits,), dtype=dtype, device=device), requires_grad=False) self.tables2 = nn.Parameter(torch.empty((2 ** num_bits, 2 ** num_bits, 2), dtype=dtype, device=device), requires_grad=False) if bias: self.bias = nn.Parameter(torch.empty(out_features, device=device, dtype=dtype), requires_grad=False) else: self.register_parameter('bias', None) self.workspace = None self.tune_metadata: TuneMetaData = None def forward(self, x): x = pad_to_block(x, [-1], self.hadamard_size) if self.workspace is None: raise Exception('Workspace must be set before calling forward') return qgemm_v2(x, self.weight, self.scales, self.tables, self.tables2.view(dtype=torch.float32), self.workspace, self.tune_metadata, hadamard_size=self.hadamard_size)
class HiggsLinear(torch.nn.Module): def __init__(self, in_features: int, out_features: int, num_bits: int, bias=True, dtype: Optional[torch.dtype]=None, device: Optional[torch.device]=None, group_size: int=256, hadamard_size: int=1024): pass def forward(self, x): pass
total_program_units: 3, total_doc_str: 0, AvgCountLine: 29, AvgCountLineBlank: 3, AvgCountLineCode: 26, AvgCountLineComment: 1, AvgCyclomatic: 2, CommentToCodeRatio: 0.02
CountClassBase: 1, CountClassCoupled: 3, CountClassCoupledModified: 0, CountClassDerived: 0, CountDeclInstanceMethod: 2, CountDeclInstanceVariable: 12, CountDeclMethod: 2, CountDeclMethodAll: 12
CountLine: 59, CountLineBlank: 7, CountLineCode: 52, CountLineCodeDecl: 25, CountLineCodeExe: 39, CountLineComment: 1
CountStmt: 24, CountStmtDecl: 15, CountStmtExe: 21, MaxCyclomatic: 2, MaxInheritanceTree: 1, MaxNesting: 1, SumCyclomatic: 4
id: 406
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/integrations/integration_utils.py
class_name: transformers.integrations.integration_utils.AzureMLCallback
from ..trainer_callback import ProgressCallback, TrainerCallback class AzureMLCallback(TrainerCallback): """ A [`TrainerCallback`] that sends the logs to [AzureML](https://pypi.org/project/azureml-sdk/). """ def __init__(self, azureml_run=None): if not is_azureml_available(): raise RuntimeError('AzureMLCallback requires azureml to be installed. Run `pip install azureml-sdk`.') self.azureml_run = azureml_run def on_init_end(self, args, state, control, **kwargs): from azureml.core.run import Run if self.azureml_run is None and state.is_world_process_zero: self.azureml_run = Run.get_context() def on_log(self, args, state, control, logs=None, **kwargs): if self.azureml_run and state.is_world_process_zero: for k, v in logs.items(): if isinstance(v, (int, float)): self.azureml_run.log(k, v, description=k)
class AzureMLCallback(TrainerCallback): ''' A [`TrainerCallback`] that sends the logs to [AzureML](https://pypi.org/project/azureml-sdk/). ''' def __init__(self, azureml_run=None): pass def on_init_end(self, args, state, control, **kwargs): pass def on_log(self, args, state, control, logs=None, **kwargs): pass
total_program_units: 4, total_doc_str: 1, AvgCountLine: 5, AvgCountLineBlank: 0, AvgCountLineCode: 4, AvgCountLineComment: 0, AvgCyclomatic: 3, CommentToCodeRatio: 0.21
CountClassBase: 1, CountClassCoupled: 3, CountClassCoupledModified: 0, CountClassDerived: 0, CountDeclInstanceMethod: 3, CountDeclInstanceVariable: 1, CountDeclMethod: 3, CountDeclMethodAll: 18
CountLine: 21, CountLineBlank: 4, CountLineCode: 14, CountLineCodeDecl: 7, CountLineCodeExe: 9, CountLineComment: 3
CountStmt: 14, CountStmtDecl: 7, CountStmtExe: 9, MaxCyclomatic: 4, MaxInheritanceTree: 1, MaxNesting: 3, SumCyclomatic: 8
id: 407
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/integrations/integration_utils.py
class_name: transformers.integrations.integration_utils.ClearMLCallback
import os from ..utils import ENV_VARS_TRUE_VALUES, is_torch_xla_available from ..trainer_callback import ProgressCallback, TrainerCallback from dataclasses import asdict, fields class ClearMLCallback(TrainerCallback): """ A [`TrainerCallback`] that sends the logs to [ClearML](https://clear.ml/). Environment: - **CLEARML_PROJECT** (`str`, *optional*, defaults to `HuggingFace Transformers`): ClearML project name. - **CLEARML_TASK** (`str`, *optional*, defaults to `Trainer`): ClearML task name. - **CLEARML_LOG_MODEL** (`bool`, *optional*, defaults to `False`): Whether to log models as artifacts during training. """ log_suffix = '' _hparams_section = 'Transformers' _model_config_section = 'Model Configuration' _ignore_hparams_overrides = '_ignore_hparams_ui_overrides_' _ignoge_model_config_overrides = '_ignore_model_config_ui_overrides_' _model_config_description = 'The configuration of model number {}.' _model_config_description_note = 'Note that, when cloning this task and running it remotely, the configuration might be applied to another model instead of this one. To avoid this, initialize the task externally by calling `Task.init` before the `ClearMLCallback` is instantiated.' _train_run_counter = 0 _model_connect_counter = 0 _task_created_in_callback = False _should_close_on_train_end = None def __init__(self): if is_clearml_available(): import clearml self._clearml = clearml else: raise RuntimeError("ClearMLCallback requires 'clearml' to be installed. Run `pip install clearml`.") self._initialized = False self._clearml_task = None self._log_model = False self._checkpoints_saved = [] def setup(self, args, state, model, processing_class, **kwargs): if self._clearml is None: return if self._initialized: return ClearMLCallback._train_run_counter += 1 ClearMLCallback._model_connect_counter += 1 ClearMLCallback.log_suffix = '' if ClearMLCallback._train_run_counter == 1 else '_' + str(ClearMLCallback._train_run_counter) if state.is_world_process_zero: logger.info('Automatic ClearML logging enabled.') if self._clearml_task is None: if ClearMLCallback._should_close_on_train_end is None: if not self._clearml.Task.running_locally() or self._clearml.Task.current_task(): ClearMLCallback._should_close_on_train_end = False else: ClearMLCallback._should_close_on_train_end = True if self._clearml.Task.running_locally() and self._clearml.Task.current_task(): self._clearml_task = self._clearml.Task.current_task() self._log_model = os.getenv('CLEARML_LOG_MODEL', 'FALSE' if not ClearMLCallback._task_created_in_callback else 'TRUE').upper() in ENV_VARS_TRUE_VALUES.union({'TRUE'}) logger.info('External ClearML Task has been connected.') else: self._clearml_task = self._clearml.Task.init(project_name=os.getenv('CLEARML_PROJECT', 'HuggingFace Transformers'), task_name=os.getenv('CLEARML_TASK', 'Trainer'), auto_connect_frameworks={'tensorboard': False, 'pytorch': False}, output_uri=True) self._log_model = os.getenv('CLEARML_LOG_MODEL', 'TRUE').upper() in ENV_VARS_TRUE_VALUES.union({'TRUE'}) ClearMLCallback._task_created_in_callback = True logger.info('ClearML Task has been initialized.') self._initialized = True suffixed_hparams_section = ClearMLCallback._hparams_section + ClearMLCallback.log_suffix ignore_hparams_config_section = suffixed_hparams_section + '/' + ClearMLCallback._ignore_hparams_overrides if self._clearml.Task.running_locally(): self._copy_training_args_as_hparams(args, suffixed_hparams_section) self._clearml_task.set_parameter(name=ignore_hparams_config_section, value=True, value_type=bool, 
description='If True, ignore Transformers hyperparameters overrides done in the UI/backend ' + 'when running remotely. Otherwise, the overrides will be applied when running remotely') elif not self._clearml_task.get_parameter(ignore_hparams_config_section, default=True, cast=True): self._clearml_task.connect(args, suffixed_hparams_section) else: self._copy_training_args_as_hparams(args, ClearMLCallback._hparams_section + ClearMLCallback.log_suffix) if getattr(model, 'config', None) is not None: ignore_model_config_section = suffixed_hparams_section + '/' + ClearMLCallback._ignoge_model_config_overrides configuration_object_description = ClearMLCallback._model_config_description.format(ClearMLCallback._model_connect_counter) if ClearMLCallback._model_connect_counter != ClearMLCallback._train_run_counter: configuration_object_description += ' ' + ClearMLCallback._model_config_description_note if self._clearml.Task.running_locally(): self._clearml_task.set_parameter(name=ignore_model_config_section, value=True, value_type=bool, description='If True, ignore Transformers model configuration overrides done in the UI/backend ' + 'when running remotely. Otherwise, the overrides will be applied when running remotely') self._clearml_task.set_configuration_object(name=ClearMLCallback._model_config_section + ClearMLCallback.log_suffix, config_dict=model.config.to_dict(), description=configuration_object_description) elif not self._clearml_task.get_parameter(ignore_model_config_section, default=True, cast=True): model.config = model.config.from_dict(self._clearml_task.get_configuration_object_as_dict(ClearMLCallback._model_config_section + ClearMLCallback.log_suffix)) else: self._clearml_task.set_configuration_object(name=ClearMLCallback._model_config_section + ClearMLCallback.log_suffix, config_dict=model.config.to_dict(), description=configuration_object_description) def on_train_begin(self, args, state, control, model=None, processing_class=None, **kwargs): if self._clearml is None: return self._checkpoints_saved = [] if state.is_hyper_param_search: self._initialized = False if not self._initialized: self.setup(args, state, model, processing_class, **kwargs) def on_train_end(self, args, state, control, **kwargs): if ClearMLCallback._should_close_on_train_end: self._clearml_task.close() ClearMLCallback._train_run_counter = 0 def on_log(self, args, state, control, model=None, processing_class=None, logs=None, **kwargs): if self._clearml is None: return if not self._initialized: self.setup(args, state, model, processing_class, **kwargs) if state.is_world_process_zero: eval_prefix = 'eval_' eval_prefix_len = len(eval_prefix) test_prefix = 'test_' test_prefix_len = len(test_prefix) single_value_scalars = ['train_runtime', 'train_samples_per_second', 'train_steps_per_second', 'train_loss', 'total_flos', 'epoch'] for k, v in logs.items(): if isinstance(v, (int, float)): if k in single_value_scalars: self._clearml_task.get_logger().report_single_value(name=k + ClearMLCallback.log_suffix, value=v) elif k.startswith(eval_prefix): self._clearml_task.get_logger().report_scalar(title='eval' + ClearMLCallback.log_suffix, series=k[eval_prefix_len:], value=v, iteration=state.global_step) elif k.startswith(test_prefix): self._clearml_task.get_logger().report_scalar(title='test' + ClearMLCallback.log_suffix, series=k[test_prefix_len:], value=v, iteration=state.global_step) else: self._clearml_task.get_logger().report_scalar(title='train' + ClearMLCallback.log_suffix, series=k, value=v, iteration=state.global_step) 
else: logger.warning(f'''Trainer is attempting to log a value of "{v}" of type {type(v)} for key "{k}" as a scalar. This invocation of ClearML logger's report_scalar() is incorrect so we dropped this attribute.''') def on_save(self, args, state, control, **kwargs): if self._log_model and self._clearml_task and state.is_world_process_zero: ckpt_dir = f'checkpoint-{state.global_step}' artifact_path = os.path.join(args.output_dir, ckpt_dir) name = ckpt_dir + ClearMLCallback.log_suffix logger.info(f'Logging checkpoint artifact `{name}`. This may take some time.') output_model = self._clearml.OutputModel(task=self._clearml_task, name=name) output_model.connect(task=self._clearml_task, name=name) output_model.update_weights_package(weights_path=artifact_path, target_filename=ckpt_dir, iteration=state.global_step, auto_delete_file=False) self._checkpoints_saved.append(output_model) while args.save_total_limit and args.save_total_limit < len(self._checkpoints_saved): try: self._clearml.model.Model.remove(self._checkpoints_saved[0], delete_weights_file=True, force=True, raise_on_errors=True) except Exception as e: logger.warning(f'Could not remove checkpoint `{self._checkpoints_saved[0].name}` after going over the `save_total_limit`. Error is: {e}') break self._checkpoints_saved = self._checkpoints_saved[1:] def _copy_training_args_as_hparams(self, training_args, prefix): as_dict = {field.name: getattr(training_args, field.name) for field in fields(training_args) if field.init and (not field.name.endswith('_token'))} flat_dict = {str(k): v for k, v in self._clearml.utilities.proxy_object.flatten_dictionary(as_dict).items()} self._clearml_task._arguments.copy_from_dict(flat_dict, prefix=prefix)
class ClearMLCallback(TrainerCallback): ''' A [`TrainerCallback`] that sends the logs to [ClearML](https://clear.ml/). Environment: - **CLEARML_PROJECT** (`str`, *optional*, defaults to `HuggingFace Transformers`): ClearML project name. - **CLEARML_TASK** (`str`, *optional*, defaults to `Trainer`): ClearML task name. - **CLEARML_LOG_MODEL** (`bool`, *optional*, defaults to `False`): Whether to log models as artifacts during training. ''' def __init__(self): pass def setup(self, args, state, model, processing_class, **kwargs): pass def on_train_begin(self, args, state, control, model=None, processing_class=None, **kwargs): pass def on_train_end(self, args, state, control, **kwargs): pass def on_log(self, args, state, control, model=None, processing_class=None, logs=None, **kwargs): pass def on_save(self, args, state, control, **kwargs): pass def _copy_training_args_as_hparams(self, training_args, prefix): pass
total_program_units: 8, total_doc_str: 1, AvgCountLine: 31, AvgCountLineBlank: 1, AvgCountLineCode: 29, AvgCountLineComment: 0, AvgCyclomatic: 5, CommentToCodeRatio: 0.05
CountClassBase: 1, CountClassCoupled: 7, CountClassCoupledModified: 0, CountClassDerived: 0, CountDeclInstanceMethod: 7, CountDeclInstanceVariable: 5, CountDeclMethod: 7, CountDeclMethodAll: 22
CountLine: 251, CountLineBlank: 16, CountLineCode: 223, CountLineCodeDecl: 42, CountLineCodeExe: 214, CountLineComment: 12
CountStmt: 114, CountStmtDecl: 41, CountStmtExe: 105, MaxCyclomatic: 16, MaxInheritanceTree: 1, MaxNesting: 4, SumCyclomatic: 38
id: 408
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/integrations/integration_utils.py
class_name: transformers.integrations.integration_utils.CodeCarbonCallback
from ..trainer_callback import ProgressCallback, TrainerCallback class CodeCarbonCallback(TrainerCallback): """ A [`TrainerCallback`] that tracks the CO2 emission of training. """ def __init__(self): if not is_codecarbon_available(): raise RuntimeError('CodeCarbonCallback requires `codecarbon` to be installed. Run `pip install codecarbon`.') elif torch.version.hip: raise RuntimeError('CodeCarbonCallback requires `codecarbon` package, which is not compatible with AMD ROCm (https://github.com/mlco2/codecarbon/pull/490). When using the Trainer, please specify the `report_to` argument (https://huggingface.co/docs/transformers/v4.39.3/en/main_classes/trainer#transformers.TrainingArguments.report_to) to disable CodeCarbonCallback.') import codecarbon self._codecarbon = codecarbon self.tracker = None def on_init_end(self, args, state, control, **kwargs): if self.tracker is None and state.is_local_process_zero: self.tracker = self._codecarbon.EmissionsTracker(output_dir=args.output_dir) def on_train_begin(self, args, state, control, model=None, **kwargs): if self.tracker and state.is_local_process_zero: self.tracker.start() def on_train_end(self, args, state, control, **kwargs): if self.tracker and state.is_local_process_zero: self.tracker.stop()
class CodeCarbonCallback(TrainerCallback): ''' A [`TrainerCallback`] that tracks the CO2 emission of training. ''' def __init__(self): pass def on_init_end(self, args, state, control, **kwargs): pass def on_train_begin(self, args, state, control, model=None, **kwargs): pass def on_train_end(self, args, state, control, **kwargs): pass
total_program_units: 5, total_doc_str: 1, AvgCountLine: 6, AvgCountLineBlank: 1, AvgCountLineCode: 5, AvgCountLineComment: 1, AvgCyclomatic: 2, CommentToCodeRatio: 0.23
CountClassBase: 1, CountClassCoupled: 1, CountClassCoupledModified: 0, CountClassDerived: 0, CountDeclInstanceMethod: 4, CountDeclInstanceVariable: 2, CountDeclMethod: 4, CountDeclMethodAll: 19
CountLine: 32, CountLineBlank: 6, CountLineCode: 22, CountLineCodeDecl: 8, CountLineCodeExe: 16, CountLineComment: 5
CountStmt: 17, CountStmtDecl: 8, CountStmtExe: 11, MaxCyclomatic: 3, MaxInheritanceTree: 1, MaxNesting: 1, SumCyclomatic: 9
id: 409
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/integrations/integration_utils.py
class_name: transformers.integrations.integration_utils.CometCallback
import os from ..trainer_callback import ProgressCallback, TrainerCallback class CometCallback(TrainerCallback): """ A [`TrainerCallback`] that sends the logs to [Comet ML](https://www.comet.com/site/). """ def __init__(self): if _is_comet_installed is False or _is_comet_recent_enough is False: raise RuntimeError(f'CometCallback requires comet-ml>={_MIN_COMET_VERSION} to be installed. Run `pip install comet-ml>={_MIN_COMET_VERSION}`.') self._initialized = False self._log_assets = False self._experiment = None def setup(self, args, state, model): """ Setup the optional Comet integration. Environment: - **COMET_MODE** (`str`, *optional*, default to `get_or_create`): Control whether to create and log to a new Comet experiment or append to an existing experiment. It accepts the following values: * `get_or_create`: Decides automatically depending if `COMET_EXPERIMENT_KEY` is set and whether an Experiment with that key already exists or not. * `create`: Always create a new Comet Experiment. * `get`: Always try to append to an Existing Comet Experiment. Requires `COMET_EXPERIMENT_KEY` to be set. * `ONLINE`: **deprecated**, used to create an online Experiment. Use `COMET_START_ONLINE=1` instead. * `OFFLINE`: **deprecated**, used to created an offline Experiment. Use `COMET_START_ONLINE=0` instead. * `DISABLED`: **deprecated**, used to disable Comet logging. Use the `--report_to` flag to control the integrations used for logging result instead. - **COMET_PROJECT_NAME** (`str`, *optional*): Comet project name for experiments. - **COMET_LOG_ASSETS** (`str`, *optional*, defaults to `TRUE`): Whether or not to log training assets (checkpoints, etc), to Comet. Can be `TRUE`, or `FALSE`. For a number of configurable items in the environment, see [here](https://www.comet.com/docs/v2/guides/experiment-management/configure-sdk/#explore-comet-configuration-options). 
""" self._initialized = True log_assets = os.getenv('COMET_LOG_ASSETS', 'FALSE').upper() if log_assets in {'TRUE', '1'}: self._log_assets = True if state.is_world_process_zero: comet_old_mode = os.getenv('COMET_MODE') mode = None online = None if comet_old_mode is not None: comet_old_mode = comet_old_mode.lower() if comet_old_mode == 'online': online = True elif comet_old_mode == 'offline': online = False elif comet_old_mode in ('get', 'get_or_create', 'create'): mode = comet_old_mode elif comet_old_mode: logger.warning('Invalid COMET_MODE env value %r, Comet logging is disabled', comet_old_mode) return if state.is_hyper_param_search: if mode is not None: logger.warning('Hyperparameter Search is enabled, forcing the creation of new experiments, COMET_MODE value %r is ignored', comet_old_mode) mode = 'create' import comet_ml experiment_config = comet_ml.ExperimentConfig(name=args.run_name) self._experiment = comet_ml.start(online=online, mode=mode, experiment_config=experiment_config) self._experiment.__internal_api__set_model_graph__(model, framework='transformers') params = {'args': args.to_dict()} if hasattr(model, 'config') and model.config is not None: model_config = model.config.to_dict() params['config'] = model_config if hasattr(model, 'peft_config') and model.peft_config is not None: peft_config = model.peft_config params['peft_config'] = peft_config self._experiment.__internal_api__log_parameters__(params, framework='transformers', source='manual', flatten_nested=True) if state.is_hyper_param_search: optimization_id = getattr(state, 'trial_name', None) optimization_params = getattr(state, 'trial_params', None) self._experiment.log_optimization(optimization_id=optimization_id, parameters=optimization_params) def on_train_begin(self, args, state, control, model=None, **kwargs): if not self._initialized: self.setup(args, state, model) def on_log(self, args, state, control, model=None, logs=None, **kwargs): if not self._initialized: self.setup(args, state, model) if state.is_world_process_zero: if self._experiment is not None: rewritten_logs = rewrite_logs(logs) self._experiment.__internal_api__log_metrics__(rewritten_logs, step=state.global_step, epoch=state.epoch, framework='transformers') def on_train_end(self, args, state, control, **kwargs): if self._initialized and state.is_world_process_zero: if self._experiment is not None: if self._log_assets is True: logger.info('Logging checkpoints. This may take time.') self._experiment.log_asset_folder(args.output_dir, recursive=True, log_file_name=True, step=state.global_step) if state.is_hyper_param_search: self._experiment.clean() self._initialized = False def on_predict(self, args, state, control, metrics, **kwargs): if not self._initialized: self.setup(args, state, model=None) if state.is_world_process_zero and self._experiment is not None: rewritten_metrics = rewrite_logs(metrics) self._experiment.__internal_api__log_metrics__(rewritten_metrics, step=state.global_step, epoch=state.epoch, framework='transformers')
class CometCallback(TrainerCallback): ''' A [`TrainerCallback`] that sends the logs to [Comet ML](https://www.comet.com/site/). ''' def __init__(self): pass def setup(self, args, state, model): ''' Setup the optional Comet integration. Environment: - **COMET_MODE** (`str`, *optional*, default to `get_or_create`): Control whether to create and log to a new Comet experiment or append to an existing experiment. It accepts the following values: * `get_or_create`: Decides automatically depending if `COMET_EXPERIMENT_KEY` is set and whether an Experiment with that key already exists or not. * `create`: Always create a new Comet Experiment. * `get`: Always try to append to an Existing Comet Experiment. Requires `COMET_EXPERIMENT_KEY` to be set. * `ONLINE`: **deprecated**, used to create an online Experiment. Use `COMET_START_ONLINE=1` instead. * `OFFLINE`: **deprecated**, used to created an offline Experiment. Use `COMET_START_ONLINE=0` instead. * `DISABLED`: **deprecated**, used to disable Comet logging. Use the `--report_to` flag to control the integrations used for logging result instead. - **COMET_PROJECT_NAME** (`str`, *optional*): Comet project name for experiments. - **COMET_LOG_ASSETS** (`str`, *optional*, defaults to `TRUE`): Whether or not to log training assets (checkpoints, etc), to Comet. Can be `TRUE`, or `FALSE`. For a number of configurable items in the environment, see [here](https://www.comet.com/docs/v2/guides/experiment-management/configure-sdk/#explore-comet-configuration-options). ''' pass def on_train_begin(self, args, state, control, model=None, **kwargs): pass def on_log(self, args, state, control, model=None, logs=None, **kwargs): pass def on_train_end(self, args, state, control, **kwargs): pass def on_predict(self, args, state, control, metrics, **kwargs): pass
total_program_units: 7, total_doc_str: 2, AvgCountLine: 22, AvgCountLineBlank: 3, AvgCountLineCode: 15, AvgCountLineComment: 5, AvgCyclomatic: 5, CommentToCodeRatio: 0.38
CountClassBase: 1, CountClassCoupled: 1, CountClassCoupledModified: 0, CountClassDerived: 0, CountDeclInstanceMethod: 6, CountDeclInstanceVariable: 3, CountDeclMethod: 6, CountDeclMethodAll: 21
CountLine: 142, CountLineBlank: 21, CountLineCode: 88, CountLineCodeDecl: 23, CountLineCodeExe: 80, CountLineComment: 33
CountStmt: 71, CountStmtDecl: 23, CountStmtExe: 63, MaxCyclomatic: 14, MaxInheritanceTree: 1, MaxNesting: 3, SumCyclomatic: 30
id: 410
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/integrations/integration_utils.py
class_name: transformers.integrations.integration_utils.DVCLiveCallback
from typing import TYPE_CHECKING, Any, Literal, Optional, Union import os from ..utils import ENV_VARS_TRUE_VALUES, is_torch_xla_available from ..trainer_callback import ProgressCallback, TrainerCallback class DVCLiveCallback(TrainerCallback): """ A [`TrainerCallback`] that sends the logs to [DVCLive](https://www.dvc.org/doc/dvclive). Use the environment variables below in `setup` to configure the integration. To customize this callback beyond those environment variables, see [here](https://dvc.org/doc/dvclive/ml-frameworks/huggingface). Args: live (`dvclive.Live`, *optional*, defaults to `None`): Optional Live instance. If None, a new instance will be created using **kwargs. log_model (Union[Literal["all"], bool], *optional*, defaults to `None`): Whether to use `dvclive.Live.log_artifact()` to log checkpoints created by [`Trainer`]. If set to `True`, the final checkpoint is logged at the end of training. If set to `"all"`, the entire [`TrainingArguments`]'s `output_dir` is logged at each checkpoint. """ def __init__(self, live: Optional[Any]=None, log_model: Optional[Union[Literal['all'], bool]]=None, **kwargs): if not is_dvclive_available(): raise RuntimeError('DVCLiveCallback requires dvclive to be installed. Run `pip install dvclive`.') from dvclive import Live self._initialized = False self.live = None if isinstance(live, Live): self.live = live elif live is not None: raise RuntimeError(f'Found class {live.__class__} for live, expected dvclive.Live') self._log_model = log_model if self._log_model is None: log_model_env = os.getenv('HF_DVCLIVE_LOG_MODEL', 'FALSE') if log_model_env.upper() in ENV_VARS_TRUE_VALUES: self._log_model = True elif log_model_env.lower() == 'all': self._log_model = 'all' def setup(self, args, state, model): """ Setup the optional DVCLive integration. To customize this callback beyond the environment variables below, see [here](https://dvc.org/doc/dvclive/ml-frameworks/huggingface). Environment: - **HF_DVCLIVE_LOG_MODEL** (`str`, *optional*): Whether to use `dvclive.Live.log_artifact()` to log checkpoints created by [`Trainer`]. If set to `True` or *1*, the final checkpoint is logged at the end of training. If set to `all`, the entire [`TrainingArguments`]'s `output_dir` is logged at each checkpoint. """ from dvclive import Live self._initialized = True if state.is_world_process_zero: if not self.live: self.live = Live() self.live.log_params(args.to_dict()) def on_train_begin(self, args, state, control, model=None, **kwargs): if not self._initialized: self.setup(args, state, model) def on_log(self, args, state, control, model=None, logs=None, **kwargs): if not self._initialized: self.setup(args, state, model) if state.is_world_process_zero: from dvclive.plots import Metric from dvclive.utils import standardize_metric_name for key, value in logs.items(): if Metric.could_log(value): self.live.log_metric(standardize_metric_name(key, 'dvclive.huggingface'), value) else: logger.warning(f'''Trainer is attempting to log a value of "{value}" of type {type(value)} for key "{key}" as a scalar. 
This invocation of DVCLive's Live.log_metric() is incorrect so we dropped this attribute.''') self.live.next_step() def on_save(self, args, state, control, **kwargs): if self._log_model == 'all' and self._initialized and state.is_world_process_zero: self.live.log_artifact(args.output_dir) def on_train_end(self, args, state, control, **kwargs): if self._initialized and state.is_world_process_zero: from transformers.trainer import Trainer if self._log_model is True: fake_trainer = Trainer(args=args, model=kwargs.get('model'), processing_class=kwargs.get('processing_class'), eval_dataset=['fake']) name = 'best' if args.load_best_model_at_end else 'last' output_dir = os.path.join(args.output_dir, name) fake_trainer.save_model(output_dir) self.live.log_artifact(output_dir, name=name, type='model', copy=True) self.live.end()
class DVCLiveCallback(TrainerCallback): ''' A [`TrainerCallback`] that sends the logs to [DVCLive](https://www.dvc.org/doc/dvclive). Use the environment variables below in `setup` to configure the integration. To customize this callback beyond those environment variables, see [here](https://dvc.org/doc/dvclive/ml-frameworks/huggingface). Args: live (`dvclive.Live`, *optional*, defaults to `None`): Optional Live instance. If None, a new instance will be created using **kwargs. log_model (Union[Literal["all"], bool], *optional*, defaults to `None`): Whether to use `dvclive.Live.log_artifact()` to log checkpoints created by [`Trainer`]. If set to `True`, the final checkpoint is logged at the end of training. If set to `"all"`, the entire [`TrainingArguments`]'s `output_dir` is logged at each checkpoint. ''' def __init__(self, live: Optional[Any]=None, log_model: Optional[Union[Literal['all'], bool]]=None, **kwargs): pass def setup(self, args, state, model): ''' Setup the optional DVCLive integration. To customize this callback beyond the environment variables below, see [here](https://dvc.org/doc/dvclive/ml-frameworks/huggingface). Environment: - **HF_DVCLIVE_LOG_MODEL** (`str`, *optional*): Whether to use `dvclive.Live.log_artifact()` to log checkpoints created by [`Trainer`]. If set to `True` or *1*, the final checkpoint is logged at the end of training. If set to `all`, the entire [`TrainingArguments`]'s `output_dir` is logged at each checkpoint. ''' pass def on_train_begin(self, args, state, control, model=None, **kwargs): pass def on_log(self, args, state, control, model=None, logs=None, **kwargs): pass def on_save(self, args, state, control, **kwargs): pass def on_train_end(self, args, state, control, **kwargs): pass
total_program_units: 7, total_doc_str: 2, AvgCountLine: 14, AvgCountLineBlank: 1, AvgCountLineCode: 11, AvgCountLineComment: 2, AvgCyclomatic: 4, CommentToCodeRatio: 0.31
CountClassBase: 1, CountClassCoupled: 5, CountClassCoupledModified: 0, CountClassDerived: 0, CountDeclInstanceMethod: 6, CountDeclInstanceVariable: 3, CountDeclMethod: 6, CountDeclMethodAll: 21
CountLine: 103, CountLineBlank: 14, CountLineCode: 68, CountLineCodeDecl: 25, CountLineCodeExe: 51, CountLineComment: 21
CountStmt: 50, CountStmtDecl: 20, CountStmtExe: 38, MaxCyclomatic: 7, MaxInheritanceTree: 1, MaxNesting: 3, SumCyclomatic: 23
id: 411
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/integrations/integration_utils.py
class_name: transformers.integrations.integration_utils.DagsHubCallback
from pathlib import Path import os from ..utils import ENV_VARS_TRUE_VALUES, is_torch_xla_available class DagsHubCallback(MLflowCallback): """ A [`TrainerCallback`] that logs to [DagsHub](https://dagshub.com/). Extends [`MLflowCallback`] """ def __init__(self): super().__init__() if not is_dagshub_available(): raise ImportError('DagsHubCallback requires dagshub to be installed. Run `pip install dagshub`.') from dagshub.upload import Repo self.Repo = Repo def setup(self, *args, **kwargs): """ Setup the DagsHub's Logging integration. Environment: - **HF_DAGSHUB_LOG_ARTIFACTS** (`str`, *optional*): Whether to save the data and model artifacts for the experiment. Default to `False`. """ self.log_artifacts = os.getenv('HF_DAGSHUB_LOG_ARTIFACTS', 'FALSE').upper() in ENV_VARS_TRUE_VALUES self.name = os.getenv('HF_DAGSHUB_MODEL_NAME') or 'main' self.remote = os.getenv('MLFLOW_TRACKING_URI') self.repo = self.Repo(owner=self.remote.split(os.sep)[-2], name=self.remote.split(os.sep)[-1].split('.')[0], branch=os.getenv('BRANCH') or 'main') self.path = Path('artifacts') if self.remote is None: raise RuntimeError('DagsHubCallback requires the `MLFLOW_TRACKING_URI` environment variable to be set. Did you run `dagshub.init()`?') super().setup(*args, **kwargs) def on_train_end(self, args, state, control, **kwargs): if self.log_artifacts: if getattr(self, 'train_dataloader', None): torch.save(self.train_dataloader.dataset, os.path.join(args.output_dir, 'dataset.pt')) self.repo.directory(str(self.path)).add_dir(args.output_dir)
class DagsHubCallback(MLflowCallback): ''' A [`TrainerCallback`] that logs to [DagsHub](https://dagshub.com/). Extends [`MLflowCallback`] ''' def __init__(self): pass def setup(self, *args, **kwargs): ''' Setup the DagsHub's Logging integration. Environment: - **HF_DAGSHUB_LOG_ARTIFACTS** (`str`, *optional*): Whether to save the data and model artifacts for the experiment. Default to `False`. ''' pass def on_train_end(self, args, state, control, **kwargs): pass
total_program_units: 4, total_doc_str: 2, AvgCountLine: 13, AvgCountLineBlank: 2, AvgCountLineCode: 9, AvgCountLineComment: 2, AvgCyclomatic: 2, CommentToCodeRatio: 0.32
CountClassBase: 1, CountClassCoupled: 5, CountClassCoupledModified: 0, CountClassDerived: 0, CountDeclInstanceMethod: 3, CountDeclInstanceVariable: 6, CountDeclMethod: 3, CountDeclMethodAll: 25
CountLine: 47, CountLineBlank: 10, CountLineCode: 28, CountLineCodeDecl: 11, CountLineCodeExe: 23, CountLineComment: 9
CountStmt: 21, CountStmtDecl: 11, CountStmtExe: 16, MaxCyclomatic: 3, MaxInheritanceTree: 2, MaxNesting: 2, SumCyclomatic: 7
id: 412
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/integrations/integration_utils.py
class_name: transformers.integrations.integration_utils.FlyteCallback
from ..utils import PushToHubMixin, flatten_dict, is_datasets_available, is_pandas_available, is_torch_available, logging import os from ..trainer_callback import ProgressCallback, TrainerCallback class FlyteCallback(TrainerCallback): """A [`TrainerCallback`] that sends the logs to [Flyte](https://flyte.org/). NOTE: This callback only works within a Flyte task. Args: save_log_history (`bool`, *optional*, defaults to `True`): When set to True, the training logs are saved as a Flyte Deck. sync_checkpoints (`bool`, *optional*, defaults to `True`): When set to True, checkpoints are synced with Flyte and can be used to resume training in the case of an interruption. Example: ```python # Note: This example skips over some setup steps for brevity. from flytekit import current_context, task @task def train_hf_transformer(): cp = current_context().checkpoint trainer = Trainer(..., callbacks=[FlyteCallback()]) output = trainer.train(resume_from_checkpoint=cp.restore()) ``` """ def __init__(self, save_log_history: bool=True, sync_checkpoints: bool=True): super().__init__() if not is_flytekit_available(): raise ImportError('FlyteCallback requires flytekit to be installed. Run `pip install flytekit`.') if not is_flyte_deck_standard_available() or not is_pandas_available(): logger.warning('Syncing log history requires both flytekitplugins-deck-standard and pandas to be installed. Run `pip install flytekitplugins-deck-standard pandas` to enable this feature.') save_log_history = False from flytekit import current_context self.cp = current_context().checkpoint self.save_log_history = save_log_history self.sync_checkpoints = sync_checkpoints def on_save(self, args, state, control, **kwargs): if self.sync_checkpoints and state.is_world_process_zero: ckpt_dir = f'checkpoint-{state.global_step}' artifact_path = os.path.join(args.output_dir, ckpt_dir) logger.info(f'Syncing checkpoint in {ckpt_dir} to Flyte. This may take time.') self.cp.save(artifact_path) def on_train_end(self, args, state, control, **kwargs): if self.save_log_history: import pandas as pd from flytekit import Deck from flytekitplugins.deck.renderer import TableRenderer log_history_df = pd.DataFrame(state.log_history) Deck('Log History', TableRenderer().to_html(log_history_df))
class FlyteCallback(TrainerCallback): '''A [`TrainerCallback`] that sends the logs to [Flyte](https://flyte.org/). NOTE: This callback only works within a Flyte task. Args: save_log_history (`bool`, *optional*, defaults to `True`): When set to True, the training logs are saved as a Flyte Deck. sync_checkpoints (`bool`, *optional*, defaults to `True`): When set to True, checkpoints are synced with Flyte and can be used to resume training in the case of an interruption. Example: ```python # Note: This example skips over some setup steps for brevity. from flytekit import current_context, task @task def train_hf_transformer(): cp = current_context().checkpoint trainer = Trainer(..., callbacks=[FlyteCallback()]) output = trainer.train(resume_from_checkpoint=cp.restore()) ``` ''' def __init__(self, save_log_history: bool=True, sync_checkpoints: bool=True): pass def on_save(self, args, state, control, **kwargs): pass def on_train_end(self, args, state, control, **kwargs): pass
total_program_units: 4, total_doc_str: 1, AvgCountLine: 11, AvgCountLineBlank: 2, AvgCountLineCode: 9, AvgCountLineComment: 0, AvgCyclomatic: 2, CommentToCodeRatio: 0.68
CountClassBase: 1, CountClassCoupled: 3, CountClassCoupledModified: 0, CountClassDerived: 0, CountDeclInstanceMethod: 3, CountDeclInstanceVariable: 3, CountDeclMethod: 3, CountDeclMethodAll: 18
CountLine: 61, CountLineBlank: 14, CountLineCode: 28, CountLineCodeDecl: 14, CountLineCodeExe: 20, CountLineComment: 19
CountStmt: 25, CountStmtDecl: 14, CountStmtExe: 17, MaxCyclomatic: 3, MaxInheritanceTree: 1, MaxNesting: 1, SumCyclomatic: 7
id: 413
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/integrations/integration_utils.py
class_name: transformers.integrations.integration_utils.MLflowCallback
import json import packaging.version from ..utils import PushToHubMixin, flatten_dict, is_datasets_available, is_pandas_available, is_torch_available, logging import re from ..trainer_callback import ProgressCallback, TrainerCallback import os from ..utils import ENV_VARS_TRUE_VALUES, is_torch_xla_available class MLflowCallback(TrainerCallback): """ A [`TrainerCallback`] that sends the logs to [MLflow](https://www.mlflow.org/). Can be disabled by setting environment variable `DISABLE_MLFLOW_INTEGRATION = TRUE`. """ def __init__(self): if not is_mlflow_available(): raise RuntimeError('MLflowCallback requires mlflow to be installed. Run `pip install mlflow`.') import mlflow self._MAX_PARAM_VAL_LENGTH = mlflow.utils.validation.MAX_PARAM_VAL_LENGTH self._MAX_PARAMS_TAGS_PER_BATCH = mlflow.utils.validation.MAX_PARAMS_TAGS_PER_BATCH self._initialized = False self._auto_end_run = False self._log_artifacts = False self._ml_flow = mlflow def setup(self, args, state, model): """ Setup the optional MLflow integration. Environment: - **HF_MLFLOW_LOG_ARTIFACTS** (`str`, *optional*): Whether to use MLflow `.log_artifact()` facility to log artifacts. This only makes sense if logging to a remote server, e.g. s3 or GCS. If set to `True` or *1*, will copy each saved checkpoint on each save in [`TrainingArguments`]'s `output_dir` to the local or remote artifact storage. Using it without a remote storage will just copy the files to your artifact location. - **MLFLOW_TRACKING_URI** (`str`, *optional*): Whether to store runs at a specific path or remote server. Unset by default, which skips setting the tracking URI entirely. - **MLFLOW_EXPERIMENT_NAME** (`str`, *optional*, defaults to `None`): Whether to use an MLflow experiment_name under which to launch the run. Default to `None` which will point to the `Default` experiment in MLflow. Otherwise, it is a case sensitive name of the experiment to be activated. If an experiment with this name does not exist, a new experiment with this name is created. - **MLFLOW_TAGS** (`str`, *optional*): A string dump of a dictionary of key/value pair to be added to the MLflow run as tags. Example: `os.environ['MLFLOW_TAGS']='{"release.candidate": "RC1", "release.version": "2.2.0"}'`. - **MLFLOW_NESTED_RUN** (`str`, *optional*): Whether to use MLflow nested runs. If set to `True` or *1*, will create a nested run inside the current run. - **MLFLOW_RUN_ID** (`str`, *optional*): Allow to reattach to an existing run which can be useful when resuming training from a checkpoint. When `MLFLOW_RUN_ID` environment variable is set, `start_run` attempts to resume a run with the specified run ID and other parameters are ignored. - **MLFLOW_FLATTEN_PARAMS** (`str`, *optional*, defaults to `False`): Whether to flatten the parameters dictionary before logging. - **MLFLOW_MAX_LOG_PARAMS** (`int`, *optional*): Set the maximum number of parameters to log in the run. 
""" self._log_artifacts = os.getenv('HF_MLFLOW_LOG_ARTIFACTS', 'FALSE').upper() in ENV_VARS_TRUE_VALUES self._nested_run = os.getenv('MLFLOW_NESTED_RUN', 'FALSE').upper() in ENV_VARS_TRUE_VALUES self._tracking_uri = os.getenv('MLFLOW_TRACKING_URI', None) self._experiment_name = os.getenv('MLFLOW_EXPERIMENT_NAME', None) self._flatten_params = os.getenv('MLFLOW_FLATTEN_PARAMS', 'FALSE').upper() in ENV_VARS_TRUE_VALUES self._run_id = os.getenv('MLFLOW_RUN_ID', None) self._max_log_params = os.getenv('MLFLOW_MAX_LOG_PARAMS', None) self._async_log = packaging.version.parse(self._ml_flow.__version__) >= packaging.version.parse('2.8.0') logger.debug(f'MLflow experiment_name={self._experiment_name}, run_name={args.run_name}, nested={self._nested_run}, tracking_uri={self._tracking_uri}') if state.is_world_process_zero: if not self._ml_flow.is_tracking_uri_set(): if self._tracking_uri: self._ml_flow.set_tracking_uri(self._tracking_uri) logger.debug(f'MLflow tracking URI is set to {self._tracking_uri}') else: logger.debug('Environment variable `MLFLOW_TRACKING_URI` is not provided and therefore will not be explicitly set.') else: logger.debug(f'MLflow tracking URI is set to {self._ml_flow.get_tracking_uri()}') if self._ml_flow.active_run() is None or self._nested_run or self._run_id: if self._experiment_name: self._ml_flow.set_experiment(self._experiment_name) self._ml_flow.start_run(run_name=args.run_name, nested=self._nested_run) logger.debug(f'MLflow run started with run_id={self._ml_flow.active_run().info.run_id}') self._auto_end_run = True combined_dict = args.to_dict() if hasattr(model, 'config') and model.config is not None: model_config = model.config.to_dict() combined_dict = {**model_config, **combined_dict} combined_dict = flatten_dict(combined_dict) if self._flatten_params else combined_dict for name, value in list(combined_dict.items()): if len(str(value)) > self._MAX_PARAM_VAL_LENGTH: logger.warning(f'''Trainer is attempting to log a value of "{value}" for key "{name}" as a parameter. MLflow's log_param() only accepts values no longer than 250 characters so we dropped this attribute. 
You can use `MLFLOW_FLATTEN_PARAMS` environment variable to flatten the parameters and avoid this message.''') del combined_dict[name] combined_dict_items = list(combined_dict.items()) if self._max_log_params and self._max_log_params.isdigit(): max_log_params = int(self._max_log_params) if max_log_params < len(combined_dict_items): logger.debug(f'Reducing the number of parameters to log from {len(combined_dict_items)} to {max_log_params}.') combined_dict_items = combined_dict_items[:max_log_params] for i in range(0, len(combined_dict_items), self._MAX_PARAMS_TAGS_PER_BATCH): if self._async_log: self._ml_flow.log_params(dict(combined_dict_items[i:i + self._MAX_PARAMS_TAGS_PER_BATCH]), synchronous=False) else: self._ml_flow.log_params(dict(combined_dict_items[i:i + self._MAX_PARAMS_TAGS_PER_BATCH])) mlflow_tags = os.getenv('MLFLOW_TAGS', None) if mlflow_tags: mlflow_tags = json.loads(mlflow_tags) self._ml_flow.set_tags(mlflow_tags) self._initialized = True def on_train_begin(self, args, state, control, model=None, **kwargs): if not self._initialized: self.setup(args, state, model) def on_log(self, args, state, control, logs, model=None, **kwargs): if not self._initialized: self.setup(args, state, model) if state.is_world_process_zero: metrics = {} for k, v in logs.items(): if isinstance(v, (int, float)): metrics[k] = v elif isinstance(v, torch.Tensor) and v.numel() == 1: metrics[k] = v.item() else: logger.warning(f'''Trainer is attempting to log a value of "{v}" of type {type(v)} for key "{k}" as a metric. MLflow's log_metric() only accepts float and int types so we dropped this attribute.''') sanitized_metrics = {re.sub('[^0-9A-Za-z_\\-\\.\\ :/]', '_', k): v for k, v in metrics.items()} if self._async_log: self._ml_flow.log_metrics(metrics=sanitized_metrics, step=state.global_step, synchronous=False) else: self._ml_flow.log_metrics(metrics=sanitized_metrics, step=state.global_step) def on_train_end(self, args, state, control, **kwargs): if self._initialized and state.is_world_process_zero: if self._auto_end_run and self._ml_flow.active_run(): self._ml_flow.end_run() def on_save(self, args, state, control, **kwargs): if self._initialized and state.is_world_process_zero and self._log_artifacts: ckpt_dir = f'checkpoint-{state.global_step}' artifact_path = os.path.join(args.output_dir, ckpt_dir) logger.info(f'Logging checkpoint artifacts in {ckpt_dir}. This may take time.') self._ml_flow.pyfunc.log_model(ckpt_dir, artifacts={'model_path': artifact_path}, python_model=self._ml_flow.pyfunc.PythonModel()) def __del__(self): if self._auto_end_run and callable(getattr(self._ml_flow, 'active_run', None)) and (self._ml_flow.active_run() is not None): self._ml_flow.end_run()
class MLflowCallback(TrainerCallback): ''' A [`TrainerCallback`] that sends the logs to [MLflow](https://www.mlflow.org/). Can be disabled by setting environment variable `DISABLE_MLFLOW_INTEGRATION = TRUE`. ''' def __init__(self): pass def setup(self, args, state, model): ''' Setup the optional MLflow integration. Environment: - **HF_MLFLOW_LOG_ARTIFACTS** (`str`, *optional*): Whether to use MLflow `.log_artifact()` facility to log artifacts. This only makes sense if logging to a remote server, e.g. s3 or GCS. If set to `True` or *1*, will copy each saved checkpoint on each save in [`TrainingArguments`]'s `output_dir` to the local or remote artifact storage. Using it without a remote storage will just copy the files to your artifact location. - **MLFLOW_TRACKING_URI** (`str`, *optional*): Whether to store runs at a specific path or remote server. Unset by default, which skips setting the tracking URI entirely. - **MLFLOW_EXPERIMENT_NAME** (`str`, *optional*, defaults to `None`): Whether to use an MLflow experiment_name under which to launch the run. Default to `None` which will point to the `Default` experiment in MLflow. Otherwise, it is a case sensitive name of the experiment to be activated. If an experiment with this name does not exist, a new experiment with this name is created. - **MLFLOW_TAGS** (`str`, *optional*): A string dump of a dictionary of key/value pair to be added to the MLflow run as tags. Example: `os.environ['MLFLOW_TAGS']='{"release.candidate": "RC1", "release.version": "2.2.0"}'`. - **MLFLOW_NESTED_RUN** (`str`, *optional*): Whether to use MLflow nested runs. If set to `True` or *1*, will create a nested run inside the current run. - **MLFLOW_RUN_ID** (`str`, *optional*): Allow to reattach to an existing run which can be useful when resuming training from a checkpoint. When `MLFLOW_RUN_ID` environment variable is set, `start_run` attempts to resume a run with the specified run ID and other parameters are ignored. - **MLFLOW_FLATTEN_PARAMS** (`str`, *optional*, defaults to `False`): Whether to flatten the parameters dictionary before logging. - **MLFLOW_MAX_LOG_PARAMS** (`int`, *optional*): Set the maximum number of parameters to log in the run. ''' pass def on_train_begin(self, args, state, control, model=None, **kwargs): pass def on_log(self, args, state, control, logs, model=None, **kwargs): pass def on_train_end(self, args, state, control, **kwargs): pass def on_save(self, args, state, control, **kwargs): pass def __del__(self): pass
8
2
23
1
17
6
5
0.36
1
9
0
1
7
13
7
22
176
14
119
33
110
43
87
33
78
15
1
3
33
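The MLflowCallback record above is driven entirely by the environment variables read in `setup()`. As a hedged illustration (the experiment name, tags, and output directory below are placeholders, not values taken from this record), enabling the integration from user code could look like this:

```python
# Illustrative sketch: configuring MLflowCallback through the environment
# variables documented in setup(). All names and values here are placeholders.
import os

os.environ["MLFLOW_EXPERIMENT_NAME"] = "bert-finetuning"   # case-sensitive experiment name
os.environ["MLFLOW_FLATTEN_PARAMS"] = "1"                   # flatten nested params before log_params()
os.environ["MLFLOW_MAX_LOG_PARAMS"] = "100"                 # cap how many parameters get logged
os.environ["MLFLOW_TAGS"] = '{"release.candidate": "RC1", "release.version": "2.2.0"}'
os.environ["HF_MLFLOW_LOG_ARTIFACTS"] = "1"                 # upload each checkpoint via pyfunc.log_model

from transformers import TrainingArguments

# report_to selects the logging integrations; "mlflow" activates MLflowCallback.
args = TrainingArguments(output_dir="out", report_to=["mlflow"], logging_steps=10)
```

Parameters are then logged in batches of `_MAX_PARAMS_TAGS_PER_BATCH`, and metric keys are sanitized in `on_log()` before `log_metrics()` is called, as shown in the code above.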
414
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/integrations/integration_utils.py
transformers.integrations.integration_utils.NeptuneCallback
from .. import __version__ as version from ..trainer_callback import ProgressCallback, TrainerCallback import os import tempfile from typing import TYPE_CHECKING, Any, Literal, Optional, Union import shutil import numpy as np class NeptuneCallback(TrainerCallback): """TrainerCallback that sends the logs to [Neptune](https://app.neptune.ai). Args: api_token (`str`, *optional*): Neptune API token obtained upon registration. You can leave this argument out if you have saved your token to the `NEPTUNE_API_TOKEN` environment variable (strongly recommended). See full setup instructions in the [docs](https://docs.neptune.ai/setup/installation). project (`str`, *optional*): Name of an existing Neptune project, in the form "workspace-name/project-name". You can find and copy the name in Neptune from the project settings -> Properties. If None (default), the value of the `NEPTUNE_PROJECT` environment variable is used. name (`str`, *optional*): Custom name for the run. base_namespace (`str`, *optional*, defaults to "finetuning"): In the Neptune run, the root namespace that will contain all of the metadata logged by the callback. log_parameters (`bool`, *optional*, defaults to `True`): If True, logs all Trainer arguments and model parameters provided by the Trainer. log_checkpoints (`str`, *optional*): If "same", uploads checkpoints whenever they are saved by the Trainer. If "last", uploads only the most recently saved checkpoint. If "best", uploads the best checkpoint (among the ones saved by the Trainer). If `None`, does not upload checkpoints. run (`Run`, *optional*): Pass a Neptune run object if you want to continue logging to an existing run. Read more about resuming runs in the [docs](https://docs.neptune.ai/logging/to_existing_object). **neptune_run_kwargs (*optional*): Additional keyword arguments to be passed directly to the [`neptune.init_run()`](https://docs.neptune.ai/api/neptune#init_run) function when a new run is created. For instructions and examples, see the [Transformers integration guide](https://docs.neptune.ai/integrations/transformers) in the Neptune documentation. """ integration_version_key = 'source_code/integrations/transformers' model_parameters_key = 'model_parameters' trial_name_key = 'trial' trial_params_key = 'trial_params' trainer_parameters_key = 'trainer_parameters' flat_metrics = {'train/epoch'} def __init__(self, *, api_token: Optional[str]=None, project: Optional[str]=None, name: Optional[str]=None, base_namespace: str='finetuning', run=None, log_parameters: bool=True, log_checkpoints: Optional[str]=None, **neptune_run_kwargs): if not is_neptune_available(): raise ValueError('NeptuneCallback requires the Neptune client library to be installed. 
To install the library, run `pip install neptune`.') try: from neptune import Run from neptune.internal.utils import verify_type except ImportError: from neptune.new.internal.utils import verify_type from neptune.new.metadata_containers.run import Run verify_type('api_token', api_token, (str, type(None))) verify_type('project', project, (str, type(None))) verify_type('name', name, (str, type(None))) verify_type('base_namespace', base_namespace, str) verify_type('run', run, (Run, type(None))) verify_type('log_parameters', log_parameters, bool) verify_type('log_checkpoints', log_checkpoints, (str, type(None))) self._base_namespace_path = base_namespace self._log_parameters = log_parameters self._log_checkpoints = log_checkpoints self._initial_run: Optional[Run] = run self._run = None self._is_monitoring_run = False self._run_id = None self._force_reset_monitoring_run = False self._init_run_kwargs = {'api_token': api_token, 'project': project, 'name': name, **neptune_run_kwargs} self._volatile_checkpoints_dir = None self._should_upload_checkpoint = self._log_checkpoints is not None self._recent_checkpoint_path = None if self._log_checkpoints in {'last', 'best'}: self._target_checkpoints_namespace = f'checkpoints/{self._log_checkpoints}' self._should_clean_recently_uploaded_checkpoint = True else: self._target_checkpoints_namespace = 'checkpoints' self._should_clean_recently_uploaded_checkpoint = False def _stop_run_if_exists(self): if self._run: self._run.stop() del self._run self._run = None def _initialize_run(self, **additional_neptune_kwargs): try: from neptune import init_run from neptune.exceptions import NeptuneMissingApiTokenException, NeptuneMissingProjectNameException except ImportError: from neptune.new import init_run from neptune.new.exceptions import NeptuneMissingApiTokenException, NeptuneMissingProjectNameException self._stop_run_if_exists() try: run_params = additional_neptune_kwargs.copy() run_params.update(self._init_run_kwargs) self._run = init_run(**run_params) self._run_id = self._run['sys/id'].fetch() except (NeptuneMissingProjectNameException, NeptuneMissingApiTokenException) as e: raise NeptuneMissingConfiguration() from e def _use_initial_run(self): self._run = self._initial_run self._is_monitoring_run = True self._run_id = self._run['sys/id'].fetch() self._initial_run = None def _ensure_run_with_monitoring(self): if self._initial_run is not None: self._use_initial_run() else: if not self._force_reset_monitoring_run and self._is_monitoring_run: return if self._run and (not self._is_monitoring_run) and (not self._force_reset_monitoring_run): self._initialize_run(with_id=self._run_id) self._is_monitoring_run = True else: self._initialize_run() self._force_reset_monitoring_run = False def _ensure_at_least_run_without_monitoring(self): if self._initial_run is not None: self._use_initial_run() elif not self._run: self._initialize_run(with_id=self._run_id, capture_stdout=False, capture_stderr=False, capture_hardware_metrics=False, capture_traceback=False) self._is_monitoring_run = False @property def run(self): if self._run is None: self._ensure_at_least_run_without_monitoring() return self._run @property def _metadata_namespace(self): return self.run[self._base_namespace_path] def _log_integration_version(self): self.run[NeptuneCallback.integration_version_key] = version def _log_trainer_parameters(self, args): self._metadata_namespace[NeptuneCallback.trainer_parameters_key] = args.to_sanitized_dict() def _log_model_parameters(self, model): from neptune.utils import 
stringify_unsupported if model and hasattr(model, 'config') and (model.config is not None): self._metadata_namespace[NeptuneCallback.model_parameters_key] = stringify_unsupported(model.config.to_dict()) def _log_hyper_param_search_parameters(self, state): if state and hasattr(state, 'trial_name'): self._metadata_namespace[NeptuneCallback.trial_name_key] = state.trial_name if state and hasattr(state, 'trial_params') and (state.trial_params is not None): self._metadata_namespace[NeptuneCallback.trial_params_key] = state.trial_params def _log_model_checkpoint(self, source_directory: str, checkpoint: str): target_path = relative_path = os.path.join(source_directory, checkpoint) if self._volatile_checkpoints_dir is not None: consistent_checkpoint_path = os.path.join(self._volatile_checkpoints_dir, checkpoint) try: cpkt_path = relative_path.replace('..', '').lstrip(os.path.sep) copy_path = os.path.join(consistent_checkpoint_path, cpkt_path) shutil.copytree(relative_path, copy_path) target_path = consistent_checkpoint_path except OSError as e: logger.warning(f"NeptuneCallback was unable to made a copy of checkpoint due to I/O exception: '{e}'. Could fail trying to upload.") self._metadata_namespace[self._target_checkpoints_namespace].upload_files(target_path) if self._should_clean_recently_uploaded_checkpoint and self._recent_checkpoint_path is not None: self._metadata_namespace[self._target_checkpoints_namespace].delete_files(self._recent_checkpoint_path) self._recent_checkpoint_path = relative_path def on_init_end(self, args, state, control, **kwargs): self._volatile_checkpoints_dir = None if self._log_checkpoints and (args.overwrite_output_dir or args.save_total_limit is not None): self._volatile_checkpoints_dir = tempfile.TemporaryDirectory().name if self._log_checkpoints == 'best' and (not args.load_best_model_at_end): raise ValueError('To save the best model checkpoint, the load_best_model_at_end argument must be enabled.') def on_train_begin(self, args, state, control, model=None, **kwargs): if not state.is_world_process_zero: return self._ensure_run_with_monitoring() self._force_reset_monitoring_run = True self._log_integration_version() if self._log_parameters: self._log_trainer_parameters(args) self._log_model_parameters(model) if state.is_hyper_param_search: self._log_hyper_param_search_parameters(state) def on_train_end(self, args, state, control, **kwargs): self._stop_run_if_exists() def __del__(self): if self._volatile_checkpoints_dir is not None: shutil.rmtree(self._volatile_checkpoints_dir, ignore_errors=True) self._stop_run_if_exists() def on_save(self, args, state, control, **kwargs): if self._should_upload_checkpoint: self._log_model_checkpoint(args.output_dir, f'checkpoint-{state.global_step}') def on_evaluate(self, args, state, control, metrics=None, **kwargs): if self._log_checkpoints == 'best': best_metric_name = args.metric_for_best_model if not best_metric_name.startswith('eval_'): best_metric_name = f'eval_{best_metric_name}' metric_value = metrics.get(best_metric_name) operator = np.greater if args.greater_is_better else np.less self._should_upload_checkpoint = state.best_metric is None or operator(metric_value, state.best_metric) @classmethod def get_run(cls, trainer): for callback in trainer.callback_handler.callbacks: if isinstance(callback, cls): return callback.run raise Exception("The trainer doesn't have a NeptuneCallback configured.") def on_log(self, args, state, control, logs: Optional[dict[str, float]]=None, **kwargs): if not state.is_world_process_zero: 
return if logs is not None: for name, value in rewrite_logs(logs).items(): if isinstance(value, (int, float)): if name in NeptuneCallback.flat_metrics: self._metadata_namespace[name] = value else: self._metadata_namespace[name].log(value, step=state.global_step)
class NeptuneCallback(TrainerCallback): '''TrainerCallback that sends the logs to [Neptune](https://app.neptune.ai). Args: api_token (`str`, *optional*): Neptune API token obtained upon registration. You can leave this argument out if you have saved your token to the `NEPTUNE_API_TOKEN` environment variable (strongly recommended). See full setup instructions in the [docs](https://docs.neptune.ai/setup/installation). project (`str`, *optional*): Name of an existing Neptune project, in the form "workspace-name/project-name". You can find and copy the name in Neptune from the project settings -> Properties. If None (default), the value of the `NEPTUNE_PROJECT` environment variable is used. name (`str`, *optional*): Custom name for the run. base_namespace (`str`, *optional*, defaults to "finetuning"): In the Neptune run, the root namespace that will contain all of the metadata logged by the callback. log_parameters (`bool`, *optional*, defaults to `True`): If True, logs all Trainer arguments and model parameters provided by the Trainer. log_checkpoints (`str`, *optional*): If "same", uploads checkpoints whenever they are saved by the Trainer. If "last", uploads only the most recently saved checkpoint. If "best", uploads the best checkpoint (among the ones saved by the Trainer). If `None`, does not upload checkpoints. run (`Run`, *optional*): Pass a Neptune run object if you want to continue logging to an existing run. Read more about resuming runs in the [docs](https://docs.neptune.ai/logging/to_existing_object). **neptune_run_kwargs (*optional*): Additional keyword arguments to be passed directly to the [`neptune.init_run()`](https://docs.neptune.ai/api/neptune#init_run) function when a new run is created. For instructions and examples, see the [Transformers integration guide](https://docs.neptune.ai/integrations/transformers) in the Neptune documentation. ''' def __init__(self, *, api_token: Optional[str]=None, project: Optional[str]=None, name: Optional[str]=None, base_namespace: str='finetuning', run=None, log_parameters: bool=True, log_checkpoints: Optional[str]=None, **neptune_run_kwargs): pass def _stop_run_if_exists(self): pass def _initialize_run(self, **additional_neptune_kwargs): pass def _use_initial_run(self): pass def _ensure_run_with_monitoring(self): pass def _ensure_at_least_run_without_monitoring(self): pass @property def run(self): pass @property def _metadata_namespace(self): pass def _log_integration_version(self): pass def _log_trainer_parameters(self, args): pass def _log_model_parameters(self, model): pass def _log_hyper_param_search_parameters(self, state): pass def _log_model_checkpoint(self, source_directory: str, checkpoint: str): pass def on_init_end(self, args, state, control, **kwargs): pass def on_train_begin(self, args, state, control, model=None, **kwargs): pass def on_train_end(self, args, state, control, **kwargs): pass def __del__(self): pass def on_save(self, args, state, control, **kwargs): pass def on_evaluate(self, args, state, control, metrics=None, **kwargs): pass @classmethod def get_run(cls, trainer): pass def on_log(self, args, state, control, logs: Optional[dict[str, float]]=None, **kwargs): pass
25
1
10
1
9
0
3
0.13
1
10
1
0
20
14
21
36
271
49
196
77
151
26
163
61
132
6
1
4
56
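Since the record above documents NeptuneCallback's constructor arguments, a hedged usage sketch may help. It assumes the `neptune` client is installed and `NEPTUNE_API_TOKEN` / `NEPTUNE_PROJECT` are set; the model, dataset, and output directory are placeholders:

```python
# Illustrative sketch: passing NeptuneCallback explicitly to a Trainer.
# Assumes `pip install neptune` and NEPTUNE_API_TOKEN / NEPTUNE_PROJECT are set.
from transformers import TrainingArguments, Trainer
from transformers.integrations import NeptuneCallback

neptune_callback = NeptuneCallback(
    base_namespace="finetuning",   # root namespace for everything the callback logs
    log_parameters=True,           # log TrainingArguments and the model config
    log_checkpoints="last",        # keep only the most recently saved checkpoint artifact
)

args = TrainingArguments(output_dir="out", report_to="none")  # callback is passed explicitly below

# trainer = Trainer(model=model, args=args, train_dataset=train_ds, callbacks=[neptune_callback])
# run = NeptuneCallback.get_run(trainer)   # fetch the underlying Neptune run for custom logging
```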
415
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/integrations/integration_utils.py
transformers.integrations.integration_utils.NeptuneMissingConfiguration
class NeptuneMissingConfiguration(Exception): def __init__(self): super().__init__('\n ------ Unsupported ---- We were not able to create new runs. You provided a custom Neptune run to\n `NeptuneCallback` with the `run` argument. For the integration to work fully, provide your `api_token` and\n `project` by saving them as environment variables or passing them to the callback.\n ')
class NeptuneMissingConfiguration(Exception): def __init__(self): pass
2
0
8
0
8
0
1
0
1
1
0
0
1
0
1
11
9
0
9
2
7
0
3
2
1
1
3
0
1
416
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/integrations/integration_utils.py
transformers.integrations.integration_utils.TensorBoardCallback
import os from ..trainer_callback import ProgressCallback, TrainerCallback class TensorBoardCallback(TrainerCallback): """ A [`TrainerCallback`] that sends the logs to [TensorBoard](https://www.tensorflow.org/tensorboard). Args: tb_writer (`SummaryWriter`, *optional*): The writer to use. Will instantiate one if not set. """ def __init__(self, tb_writer=None): has_tensorboard = is_tensorboard_available() if not has_tensorboard: raise RuntimeError('TensorBoardCallback requires tensorboard to be installed. Either update your PyTorch version or install tensorboardX.') if has_tensorboard: try: from torch.utils.tensorboard import SummaryWriter self._SummaryWriter = SummaryWriter except ImportError: try: from tensorboardX import SummaryWriter self._SummaryWriter = SummaryWriter except ImportError: self._SummaryWriter = None else: self._SummaryWriter = None self.tb_writer = tb_writer def _init_summary_writer(self, args, log_dir=None): log_dir = log_dir or args.logging_dir if self._SummaryWriter is not None: self.tb_writer = self._SummaryWriter(log_dir=log_dir) def on_train_begin(self, args, state, control, **kwargs): if not state.is_world_process_zero: return log_dir = None if state.is_hyper_param_search: trial_name = state.trial_name if trial_name is not None: log_dir = os.path.join(args.logging_dir, trial_name) if self.tb_writer is None: self._init_summary_writer(args, log_dir) if self.tb_writer is not None: self.tb_writer.add_text('args', args.to_json_string()) if 'model' in kwargs: model = kwargs['model'] if hasattr(model, 'config') and model.config is not None: model_config_json = model.config.to_json_string() self.tb_writer.add_text('model_config', model_config_json) def on_log(self, args, state, control, logs=None, **kwargs): if not state.is_world_process_zero: return if self.tb_writer is None: self._init_summary_writer(args) if self.tb_writer is not None: logs = rewrite_logs(logs) for k, v in logs.items(): if isinstance(v, (int, float)): self.tb_writer.add_scalar(k, v, state.global_step) elif isinstance(v, str): self.tb_writer.add_text(k, v, state.global_step) else: logger.warning(f'''Trainer is attempting to log a value of "{v}" of type {type(v)} for key "{k}" as a scalar. This invocation of Tensorboard's writer.add_scalar() is incorrect so we dropped this attribute.''') self.tb_writer.flush() def on_train_end(self, args, state, control, **kwargs): if self.tb_writer: self.tb_writer.close() self.tb_writer = None
class TensorBoardCallback(TrainerCallback): ''' A [`TrainerCallback`] that sends the logs to [TensorBoard](https://www.tensorflow.org/tensorboard). Args: tb_writer (`SummaryWriter`, *optional*): The writer to use. Will instantiate one if not set. ''' def __init__(self, tb_writer=None): pass def _init_summary_writer(self, args, log_dir=None): pass def on_train_begin(self, args, state, control, **kwargs): pass def on_log(self, args, state, control, logs=None, **kwargs): pass def on_train_end(self, args, state, control, **kwargs): pass
6
1
15
2
13
0
5
0.11
1
7
0
0
5
2
5
20
86
14
66
16
58
7
55
16
47
8
1
3
24
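As a hedged sketch of the TensorBoardCallback record above (directory names are illustrative), the callback is activated through `report_to` and writes event files to `logging_dir`:

```python
# Illustrative sketch: enabling TensorBoard logging. Requires torch's
# SummaryWriter or tensorboardX; the directories below are placeholders.
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="out",
    logging_dir="runs/bert",      # where _init_summary_writer() points SummaryWriter
    report_to=["tensorboard"],
    logging_steps=50,
)
# on_train_begin() then records `args` and the model config as text entries, and
# on_log() forwards numeric values through SummaryWriter.add_scalar().
```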
417
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/integrations/integration_utils.py
transformers.integrations.integration_utils.WandbCallback
import importlib.util import importlib.metadata from ..trainer_callback import ProgressCallback, TrainerCallback from .. import PreTrainedModel, TrainingArguments import os from ..utils import ENV_VARS_TRUE_VALUES, is_torch_xla_available import tempfile from .. import modelcard import copy import numbers from pathlib import Path class WandbCallback(TrainerCallback): """ A [`TrainerCallback`] that logs metrics, media, model checkpoints to [Weight and Biases](https://www.wandb.com/). """ def __init__(self): has_wandb = is_wandb_available() if not has_wandb: if importlib.util.find_spec('wandb') is not None: wandb_disabled = os.getenv('WANDB_DISABLED', '').upper() in ENV_VARS_TRUE_VALUES if wandb_disabled: raise RuntimeError("You specified `report_to='wandb'` but also set the `WANDB_DISABLED` environment variable.\nThis disables wandb logging, even though it was explicitly requested.\n\n- To enable wandb logging: unset `WANDB_DISABLED`.\n- To disable logging: use `report_to='none'`.\n\nNote: WANDB_DISABLED is deprecated and will be removed in v5.") raise RuntimeError('WandbCallback requires wandb to be installed. Run `pip install wandb`.') if has_wandb: import wandb self._wandb = wandb self._initialized = False self._log_model = WandbLogModel(os.getenv('WANDB_LOG_MODEL', 'false')) def setup(self, args, state, model, **kwargs): """ Setup the optional Weights & Biases (*wandb*) integration. One can subclass and override this method to customize the setup if needed. Find more information [here](https://docs.wandb.ai/guides/integrations/huggingface). You can also override the following environment variables: Environment: - **WANDB_LOG_MODEL** (`str`, *optional*, defaults to `"false"`): Whether to log model and checkpoints during training. Can be `"end"`, `"checkpoint"` or `"false"`. If set to `"end"`, the model will be uploaded at the end of training. If set to `"checkpoint"`, the checkpoint will be uploaded every `args.save_steps` . If set to `"false"`, the model will not be uploaded. Use along with [`~transformers.TrainingArguments.load_best_model_at_end`] to upload best model. <Deprecated version="5.0"> Setting `WANDB_LOG_MODEL` as `bool` will be deprecated in version 5 of 🤗 Transformers. </Deprecated> - **WANDB_WATCH** (`str`, *optional* defaults to `"false"`): Can be `"gradients"`, `"all"`, `"parameters"`, or `"false"`. Set to `"all"` to log gradients and parameters. - **WANDB_PROJECT** (`str`, *optional*, defaults to `"huggingface"`): Set this to a custom string to store results in a different project. - **WANDB_DISABLED** (`bool`, *optional*, defaults to `False`): Whether to disable wandb entirely. Set `WANDB_DISABLED=true` to disable. 
""" if self._wandb is None: return self._initialized = True from wandb.sdk.lib.config_util import ConfigError as WandbConfigError if state.is_world_process_zero: logger.info('Automatic Weights & Biases logging enabled, to disable set os.environ["WANDB_DISABLED"] = "true"') combined_dict = {**args.to_dict()} if hasattr(model, 'config') and model.config is not None: model_config = model.config if isinstance(model.config, dict) else model.config.to_dict() combined_dict = {**model_config, **combined_dict} if hasattr(model, 'peft_config') and model.peft_config is not None: peft_config = model.peft_config combined_dict = {**{'peft_config': peft_config}, **combined_dict} trial_name = state.trial_name init_args = {} if trial_name is not None: init_args['name'] = trial_name init_args['group'] = args.run_name or args.output_dir elif args.run_name is not None: init_args['name'] = args.run_name if args.run_name == args.output_dir: self._wandb.termwarn('The `run_name` is currently set to the same value as `TrainingArguments.output_dir`. If this was not intended, please specify a different run name by setting the `TrainingArguments.run_name` parameter.', repeat=False) if self._wandb.run is None: self._wandb.init(project=os.getenv('WANDB_PROJECT', 'huggingface'), **init_args) self._wandb.config.update(combined_dict or {}, allow_val_change=True) if getattr(self._wandb, 'define_metric', None): self._wandb.define_metric('train/global_step') self._wandb.define_metric('*', step_metric='train/global_step', step_sync=True) _watch_model = os.getenv('WANDB_WATCH', 'false') if not is_torch_xla_available() and _watch_model in ('all', 'parameters', 'gradients'): self._wandb.watch(model, log=_watch_model, log_freq=max(100, state.logging_steps)) self._wandb.run._label(code='transformers_trainer') try: self._wandb.config['model/num_parameters'] = model.num_parameters() except AttributeError: logger.info('Could not log the number of model parameters in Weights & Biases due to an AttributeError.') except WandbConfigError: logger.warning('A ConfigError was raised whilst setting the number of model parameters in Weights & Biases config.') if self._log_model.is_enabled: with tempfile.TemporaryDirectory() as temp_dir: model_name = f'model-{self._wandb.run.id}' if args.run_name is None or args.run_name == args.output_dir else f'model-{self._wandb.run.name}' model_artifact = self._wandb.Artifact(name=model_name, type='model', metadata={'model_config': model.config.to_dict() if hasattr(model, 'config') else None, 'num_parameters': self._wandb.config.get('model/num_parameters'), 'initial_model': True}) save_model_architecture_to_file(model, temp_dir) for f in Path(temp_dir).glob('*'): if f.is_file(): with model_artifact.new_file(f.name, mode='wb') as fa: fa.write(f.read_bytes()) self._wandb.run.log_artifact(model_artifact, aliases=['base_model']) badge_markdown = f'[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="200" height="32"/>]({self._wandb.run.url})' modelcard.AUTOGENERATED_TRAINER_COMMENT += f'\n{badge_markdown}' def on_train_begin(self, args, state, control, model=None, **kwargs): if self._wandb is None: return hp_search = state.is_hyper_param_search if hp_search: self._wandb.finish() self._initialized = False args.run_name = None if not self._initialized: self.setup(args, state, model, **kwargs) def on_train_end(self, args: TrainingArguments, state, control, model=None, processing_class=None, **kwargs): if self._wandb is None: return if 
self._log_model.is_enabled and self._initialized and state.is_world_process_zero: from ..trainer import Trainer args_for_fake = copy.deepcopy(args) args_for_fake.deepspeed = None args_for_fake.deepspeed_plugin = None fake_trainer = Trainer(args=args_for_fake, model=model, processing_class=processing_class, eval_dataset=['fake']) with tempfile.TemporaryDirectory() as temp_dir: fake_trainer.save_model(temp_dir) metadata = {k: v for k, v in dict(self._wandb.summary).items() if isinstance(v, numbers.Number) and (not k.startswith('_'))} if not args.load_best_model_at_end else {f'eval/{args.metric_for_best_model}': state.best_metric, 'train/total_floss': state.total_flos, 'model/num_parameters': self._wandb.config.get('model/num_parameters')} metadata['final_model'] = True logger.info('Logging model artifacts. ...') model_name = f'model-{self._wandb.run.id}' if args.run_name is None or args.run_name == args.output_dir else f'model-{self._wandb.run.name}' save_model_architecture_to_file(model, temp_dir) artifact = self._wandb.Artifact(name=model_name, type='model', metadata=metadata) for f in Path(temp_dir).glob('*'): if f.is_file(): with artifact.new_file(f.name, mode='wb') as fa: fa.write(f.read_bytes()) self._wandb.run.log_artifact(artifact, aliases=['final_model']) def on_log(self, args, state, control, model=None, logs=None, **kwargs): single_value_scalars = ['train_runtime', 'train_samples_per_second', 'train_steps_per_second', 'train_loss', 'total_flos'] if self._wandb is None: return if not self._initialized: self.setup(args, state, model) if state.is_world_process_zero: for k, v in logs.items(): if k in single_value_scalars: self._wandb.run.summary[k] = v non_scalar_logs = {k: v for k, v in logs.items() if k not in single_value_scalars} non_scalar_logs = rewrite_logs(non_scalar_logs) self._wandb.log({**non_scalar_logs, 'train/global_step': state.global_step}) def on_save(self, args, state, control, **kwargs): if self._log_model == WandbLogModel.CHECKPOINT and self._initialized and state.is_world_process_zero: checkpoint_metadata = {k: v for k, v in dict(self._wandb.summary).items() if isinstance(v, numbers.Number) and (not k.startswith('_'))} checkpoint_metadata['model/num_parameters'] = self._wandb.config.get('model/num_parameters') ckpt_dir = f'checkpoint-{state.global_step}' artifact_path = os.path.join(args.output_dir, ckpt_dir) logger.info(f'Logging checkpoint artifacts in {ckpt_dir}. ...') checkpoint_name = f'model-{self._wandb.run.id}' if args.run_name is None or args.run_name == args.output_dir else f'model-{self._wandb.run.name}' artifact = self._wandb.Artifact(name=checkpoint_name, type='model', metadata=checkpoint_metadata) artifact.add_dir(artifact_path) self._wandb.log_artifact(artifact, aliases=[f'epoch_{round(state.epoch, 2)}', f'checkpoint_global_step_{state.global_step}']) def on_predict(self, args, state, control, metrics, **kwargs): if self._wandb is None: return if not self._initialized: self.setup(args, state, **kwargs) if state.is_world_process_zero: metrics = rewrite_logs(metrics) self._wandb.log(metrics)
class WandbCallback(TrainerCallback): ''' A [`TrainerCallback`] that logs metrics, media, model checkpoints to [Weight and Biases](https://www.wandb.com/). ''' def __init__(self): pass def setup(self, args, state, model, **kwargs): ''' Setup the optional Weights & Biases (*wandb*) integration. One can subclass and override this method to customize the setup if needed. Find more information [here](https://docs.wandb.ai/guides/integrations/huggingface). You can also override the following environment variables: Environment: - **WANDB_LOG_MODEL** (`str`, *optional*, defaults to `"false"`): Whether to log model and checkpoints during training. Can be `"end"`, `"checkpoint"` or `"false"`. If set to `"end"`, the model will be uploaded at the end of training. If set to `"checkpoint"`, the checkpoint will be uploaded every `args.save_steps` . If set to `"false"`, the model will not be uploaded. Use along with [`~transformers.TrainingArguments.load_best_model_at_end`] to upload best model. <Deprecated version="5.0"> Setting `WANDB_LOG_MODEL` as `bool` will be deprecated in version 5 of 🤗 Transformers. </Deprecated> - **WANDB_WATCH** (`str`, *optional* defaults to `"false"`): Can be `"gradients"`, `"all"`, `"parameters"`, or `"false"`. Set to `"all"` to log gradients and parameters. - **WANDB_PROJECT** (`str`, *optional*, defaults to `"huggingface"`): Set this to a custom string to store results in a different project. - **WANDB_DISABLED** (`bool`, *optional*, defaults to `False`): Whether to disable wandb entirely. Set `WANDB_DISABLED=true` to disable. ''' pass def on_train_begin(self, args, state, control, model=None, **kwargs): pass def on_train_end(self, args: TrainingArguments, state, control, model=None, processing_class=None, **kwargs): pass def on_log(self, args, state, control, model=None, logs=None, **kwargs): pass def on_save(self, args, state, control, **kwargs): pass def on_predict(self, args, state, control, metrics, **kwargs): pass
8
2
33
3
26
4
7
0.18
1
8
2
0
7
3
7
22
244
28
183
43
172
33
121
39
110
19
1
6
46
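The WandbCallback record above is likewise configured through environment variables documented in `setup()`. A hedged sketch, with placeholder values and assuming `wandb login` has already been run:

```python
# Illustrative sketch: configuring the Weights & Biases integration via the
# environment variables documented in WandbCallback.setup(). Values are placeholders.
import os

os.environ["WANDB_PROJECT"] = "bert-finetuning"   # defaults to "huggingface" when unset
os.environ["WANDB_LOG_MODEL"] = "checkpoint"      # "end", "checkpoint" or "false"
os.environ["WANDB_WATCH"] = "gradients"           # "gradients", "all", "parameters" or "false"

from transformers import TrainingArguments

args = TrainingArguments(output_dir="out", report_to=["wandb"], run_name="bert-base-run")
```

With `WANDB_LOG_MODEL=checkpoint`, `on_save()` uploads each `checkpoint-<step>` directory as a model artifact, as in the record above.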
418
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/integrations/integration_utils.py
transformers.integrations.integration_utils.WandbLogModel
from typing import TYPE_CHECKING, Any, Literal, Optional, Union import os from ..utils import ENV_VARS_TRUE_VALUES, is_torch_xla_available from enum import Enum class WandbLogModel(str, Enum): """Enum of possible log model values in W&B.""" CHECKPOINT = 'checkpoint' END = 'end' FALSE = 'false' @property def is_enabled(self) -> bool: """Check if the value corresponds to a state where the `WANDB_LOG_MODEL` setting is enabled.""" return self in (WandbLogModel.CHECKPOINT, WandbLogModel.END) @classmethod def _missing_(cls, value: Any) -> 'WandbLogModel': if not isinstance(value, str): raise TypeError(f'Expecting to have a string `WANDB_LOG_MODEL` setting, but got {type(value)}') if value.upper() in ENV_VARS_TRUE_VALUES: raise DeprecationWarning(f"Setting `WANDB_LOG_MODEL` as {os.getenv('WANDB_LOG_MODEL')} is deprecated and will be removed in version 5 of transformers. Use one of `'end'` or `'checkpoint'` instead.") logger.info(f"Setting `WANDB_LOG_MODEL` from {os.getenv('WANDB_LOG_MODEL')} to `end` instead") return WandbLogModel.END logger.warning(f'Received unrecognized `WANDB_LOG_MODEL` setting value={value}; so disabling `WANDB_LOG_MODEL`') return WandbLogModel.FALSE
class WandbLogModel(str, Enum): '''Enum of possible log model values in W&B.''' @property def is_enabled(self) -> bool: '''Check if the value corresponds to a state where the `WANDB_LOG_MODEL` setting is enabled.''' pass @classmethod def _missing_(cls, value: Any) -> 'WandbLogModel': pass
5
2
9
0
8
1
2
0.09
2
5
0
0
1
0
2
117
27
3
22
8
17
2
15
6
12
3
4
1
4
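A small hedged sketch of how the WandbLogModel enum above normalizes the `WANDB_LOG_MODEL` setting; the import path follows this record's module, and unrecognized strings fall back through `_missing_`:

```python
# Illustrative sketch of WandbLogModel coercion; inputs are plain strings.
from transformers.integrations.integration_utils import WandbLogModel

print(WandbLogModel("checkpoint").is_enabled)  # True  -> upload an artifact on every save
print(WandbLogModel("end").is_enabled)         # True  -> upload once when training ends
print(WandbLogModel("false").is_enabled)       # False -> model upload disabled
print(WandbLogModel("bogus"))                  # unrecognized -> warns and maps to WandbLogModel.FALSE
```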
419
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/integrations/mistral.py
transformers.integrations.mistral.MistralConverter
from transformers.convert_slow_tokenizer import bytes_to_unicode from tokenizers import Regex, Tokenizer, decoders, pre_tokenizers, processors from tokenizers.models import BPE class MistralConverter: """ A general tiktoken converter. """ def __init__(self, vocab=None, pattern="(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}{1,3}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+", add_prefix_space=False, additional_special_tokens=None, *args, **kwargs): super().__init__(*args) self.vocab = vocab self.pattern = pattern self.add_prefix_space = add_prefix_space self.additional_special_tokens = additional_special_tokens def extract_vocab_merges_from_model(self, vocab: str): bpe_ranks = vocab byte_encoder = bytes_to_unicode() def token_bytes_to_string(b): return ''.join([byte_encoder[ord(char)] for char in b.decode('latin-1')]) merges = [] vocab = {} for idx, (token, rank) in enumerate(bpe_ranks.items()): if token not in self.additional_special_tokens: vocab[token_bytes_to_string(token)] = idx if len(token) == 1: continue local = [] for index in range(1, len(token)): piece_l, piece_r = (token[:index], token[index:]) if piece_l in bpe_ranks and piece_r in bpe_ranks and (piece_l + piece_r in bpe_ranks): local.append((piece_l, piece_r, rank)) local = sorted(local, key=lambda x: (bpe_ranks[x[0]], bpe_ranks[x[1]]), reverse=False) merges.extend(local) else: vocab[token] = idx merges = sorted(merges, key=lambda val: val[2], reverse=False) merges = [(token_bytes_to_string(val[0]), token_bytes_to_string(val[1])) for val in merges] return (vocab, merges) def tokenizer(self): vocab_scores, merges = self.extract_vocab_merges_from_model(self.vocab) tokenizer = Tokenizer(BPE(vocab_scores, merges, fuse_unk=False)) if hasattr(tokenizer.model, 'ignore_merges'): tokenizer.model.ignore_merges = True return tokenizer def converted(self) -> Tokenizer: tokenizer = self.tokenizer() tokenizer.pre_tokenizer = pre_tokenizers.Sequence([pre_tokenizers.Split(Regex(self.pattern), behavior='isolated', invert=False), pre_tokenizers.ByteLevel(add_prefix_space=self.add_prefix_space, use_regex=False)]) tokenizer.decoder = decoders.ByteLevel() tokenizer.add_special_tokens(self.additional_special_tokens) tokenizer.post_processor = processors.ByteLevel(trim_offsets=False) return tokenizer
class MistralConverter: ''' A general tiktoken converter. ''' def __init__(self, vocab=None, pattern="(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}{1,3}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+", add_prefix_space=False, additional_special_tokens=None, *args, **kwargs): pass def extract_vocab_merges_from_model(self, vocab: str): pass def token_bytes_to_string(b): pass def tokenizer(self): pass def converted(self) -> Tokenizer: pass
6
1
12
1
12
0
2
0.05
0
4
0
0
4
4
4
4
68
8
57
28
43
3
43
20
37
6
0
4
11
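The MistralConverter record above consumes a tiktoken-style ranking of byte tokens. A toy, hedged sketch follows: the three-token vocabulary is purely illustrative (real checkpoints supply the full mergeable-ranks table), and the import path follows this record's module:

```python
# Toy sketch: MistralConverter expects {bytes_token: rank} plus special tokens.
from transformers.integrations.mistral import MistralConverter

toy_ranks = {b"a": 0, b"b": 1, b"ab": 2}   # "ab" is a mergeable pair of "a" + "b"
converter = MistralConverter(vocab=toy_ranks, additional_special_tokens=["<s>", "</s>"])

tok = converter.converted()                # tokenizers.Tokenizer wrapping a BPE model
print(tok.encode("ab").tokens)             # the "a" + "b" merge yields ["ab"]
```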
420
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/integrations/peft.py
transformers.integrations.peft.PeftAdapterMixin
import re from typing import Any, Optional, Union import warnings import importlib from packaging import version from ..utils import check_peft_version, find_adapter_config_file, is_accelerate_available, is_peft_available, is_torch_available, logging import inspect class PeftAdapterMixin: """ A class containing all functions for loading and using adapters weights that are supported in PEFT library. For more details about adapters and injecting them on a transformer-based model, check out the documentation of PEFT library: https://huggingface.co/docs/peft/index Currently supported PEFT methods are all non-prefix tuning methods. Below is the list of supported PEFT methods that anyone can load, train and run with this mixin class: - Low Rank Adapters (LoRA): https://huggingface.co/docs/peft/conceptual_guides/lora - IA3: https://huggingface.co/docs/peft/conceptual_guides/ia3 - AdaLora: https://huggingface.co/papers/2303.10512 Other PEFT models such as prompt tuning, prompt learning are out of scope as these adapters are not "injectable" into a torch module. For using these methods, please refer to the usage guide of PEFT library. With this mixin, if the correct PEFT version is installed, it is possible to: - Load an adapter stored on a local path or in a remote Hub repository, and inject it in the model - Attach new adapters in the model and train them with Trainer or by your own. - Attach multiple adapters and iteratively activate / deactivate them - Activate / deactivate all adapters from the model. - Get the `state_dict` of the active adapter. """ _hf_peft_config_loaded = False def load_adapter(self, peft_model_id: Optional[str]=None, adapter_name: Optional[str]=None, revision: Optional[str]=None, token: Optional[str]=None, device_map: Optional[str]='auto', max_memory: Optional[str]=None, offload_folder: Optional[str]=None, offload_index: Optional[int]=None, peft_config: Optional[dict[str, Any]]=None, adapter_state_dict: Optional[dict[str, 'torch.Tensor']]=None, low_cpu_mem_usage: bool=False, is_trainable: bool=False, adapter_kwargs: Optional[dict[str, Any]]=None) -> None: """ Load adapter weights from file or remote Hub folder. If you are not familiar with adapters and PEFT methods, we invite you to read more about them on PEFT official documentation: https://huggingface.co/docs/peft Requires peft as a backend to load the adapter weights. Args: peft_model_id (`str`, *optional*): The identifier of the model to look for on the Hub, or a local path to the saved adapter config file and adapter weights. adapter_name (`str`, *optional*): The adapter name to use. If not set, will use the default adapter. revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier allowed by git. <Tip> To test a pull request you made on the Hub, you can pass `revision="refs/pr/<pr_number>"`. </Tip> token (`str`, `optional`): Whether to use authentication token to load the remote folder. Useful to load private repositories that are on HuggingFace Hub. You might need to call `hf auth login` and paste your tokens to cache it. device_map (`str` or `dict[str, Union[int, str, torch.device]]` or `int` or `torch.device`, *optional*): A map that specifies where each submodule should go. 
It doesn't need to be refined to each parameter/buffer name, once a given module name is inside, every submodule of it will be sent to the same device. If we only pass the device (*e.g.*, `"cpu"`, `"cuda:1"`, `"mps"`, or a GPU ordinal rank like `1`) on which the model will be allocated, the device map will map the entire model to this device. Passing `device_map = 0` means put the whole model on GPU 0. To have Accelerate compute the most optimized `device_map` automatically, set `device_map="auto"`. For more information about each option see [designing a device map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map). max_memory (`Dict`, *optional*): A dictionary device identifier to maximum memory. Will default to the maximum memory available for each GPU and the available CPU RAM if unset. offload_folder (`str` or `os.PathLike`, `optional`): If the `device_map` contains any value `"disk"`, the folder where we will offload weights. offload_index (`int`, `optional`): `offload_index` argument to be passed to `accelerate.dispatch_model` method. peft_config (`dict[str, Any]`, *optional*): The configuration of the adapter to add, supported adapters are non-prefix tuning and adaption prompts methods. This argument is used in case users directly pass PEFT state dicts adapter_state_dict (`dict[str, torch.Tensor]`, *optional*): The state dict of the adapter to load. This argument is used in case users directly pass PEFT state dicts low_cpu_mem_usage (`bool`, *optional*, defaults to `False`): Reduce memory usage while loading the PEFT adapter. This should also speed up the loading process. Requires PEFT version 0.13.0 or higher. is_trainable (`bool`, *optional*, defaults to `False`): Whether the adapter should be trainable or not. If `False`, the adapter will be frozen and can only be used for inference. adapter_kwargs (`dict[str, Any]`, *optional*): Additional keyword arguments passed along to the `from_pretrained` method of the adapter config and `find_adapter_config_file` method. """ check_peft_version(min_version=MIN_PEFT_VERSION) peft_load_kwargs = {} key_mapping = adapter_kwargs.pop('key_mapping', None) if adapter_kwargs is not None else None if key_mapping is None and any((allowed_name in self.__class__.__name__.lower() for allowed_name in VLMS)): key_mapping = self._checkpoint_conversion_mapping if low_cpu_mem_usage: min_version_lcmu = '0.13.0' if version.parse(importlib.metadata.version('peft')) >= version.parse(min_version_lcmu): peft_load_kwargs['low_cpu_mem_usage'] = low_cpu_mem_usage else: raise ValueError(f'The version of PEFT you are using does not support `low_cpu_mem_usage` yet, please install PEFT >= {min_version_lcmu}.') adapter_name = adapter_name if adapter_name is not None else 'default' if adapter_kwargs is None: adapter_kwargs = {} from peft import PeftConfig, inject_adapter_in_model, load_peft_weights from peft.utils import set_peft_model_state_dict if self._hf_peft_config_loaded and adapter_name in self.peft_config: raise ValueError(f'Adapter with name {adapter_name} already exists. 
Please use a different name.') if peft_model_id is None and (adapter_state_dict is None and peft_config is None): raise ValueError('You should either pass a `peft_model_id` or a `peft_config` and `adapter_state_dict` to load an adapter.') if 'device' not in adapter_kwargs: device = self.device if not hasattr(self, 'hf_device_map') else list(self.hf_device_map.values())[0] else: device = adapter_kwargs.pop('device') if isinstance(device, torch.device): device = str(device) if revision is not None and 'revision' not in adapter_kwargs: adapter_kwargs['revision'] = revision elif revision is not None and 'revision' in adapter_kwargs and (revision != adapter_kwargs['revision']): logger.error('You passed a `revision` argument both in `adapter_kwargs` and as a standalone argument. The one in `adapter_kwargs` will be used.') if 'token' in adapter_kwargs: token = adapter_kwargs.pop('token') if peft_config is None: adapter_config_file = find_adapter_config_file(peft_model_id, token=token, **adapter_kwargs) if adapter_config_file is None: raise ValueError(f'adapter model file not found in {peft_model_id}. Make sure you are passing the correct path to the adapter model.') peft_config = PeftConfig.from_pretrained(peft_model_id, token=token, **adapter_kwargs) peft_config.inference_mode = not is_trainable inject_adapter_in_model(peft_config, self, adapter_name, **peft_load_kwargs) if not self._hf_peft_config_loaded: self._hf_peft_config_loaded = True if peft_model_id is not None: adapter_state_dict = load_peft_weights(peft_model_id, token=token, device=device, **adapter_kwargs) processed_adapter_state_dict = {} prefix = 'base_model.model.' for key, value in adapter_state_dict.items(): if key.startswith(prefix): new_key = key[len(prefix):] else: new_key = key if key_mapping: for pattern, replacement in key_mapping.items(): new_key, n_replace = re.subn(pattern, replacement, new_key) if n_replace > 0: break processed_adapter_state_dict[new_key] = value incompatible_keys = set_peft_model_state_dict(self, processed_adapter_state_dict, adapter_name, **peft_load_kwargs) if incompatible_keys is not None: err_msg = '' origin_name = peft_model_id if peft_model_id is not None else 'state_dict' if hasattr(incompatible_keys, 'unexpected_keys') and len(incompatible_keys.unexpected_keys) > 0: err_msg = f"Loading adapter weights from {origin_name} led to unexpected keys not found in the model: {', '.join(incompatible_keys.unexpected_keys)}. " missing_keys = getattr(incompatible_keys, 'missing_keys', None) if missing_keys: lora_missing_keys = [k for k in missing_keys if 'lora_' in k and adapter_name in k] if lora_missing_keys: err_msg += f"Loading adapter weights from {origin_name} led to missing keys in the model: {', '.join(lora_missing_keys)}" if err_msg: logger.warning(err_msg) if peft_config.inference_mode: self.eval() if getattr(self, 'hf_device_map', None) is not None and len(set(self.hf_device_map.values()).intersection({'cpu', 'disk'})) > 0 and (len(self.peft_config) == 1): self._dispatch_accelerate_model(device_map=device_map, max_memory=max_memory, offload_folder=offload_folder, offload_index=offload_index) def add_adapter(self, adapter_config, adapter_name: Optional[str]=None) -> None: """ If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT official documentation: https://huggingface.co/docs/peft Adds a fresh new adapter to the current model for training purpose. 
If no adapter name is passed, a default name is assigned to the adapter to follow the convention of PEFT library (in PEFT we use "default" as the default adapter name). Args: adapter_config (`~peft.PeftConfig`): The configuration of the adapter to add, supported adapters are non-prefix tuning and adaption prompts methods adapter_name (`str`, *optional*, defaults to `"default"`): The name of the adapter to add. If no name is passed, a default name is assigned to the adapter. """ check_peft_version(min_version=MIN_PEFT_VERSION) from peft import PeftConfig, inject_adapter_in_model adapter_name = adapter_name or 'default' if not self._hf_peft_config_loaded: self._hf_peft_config_loaded = True elif adapter_name in self.peft_config: raise ValueError(f'Adapter with name {adapter_name} already exists. Please use a different name.') if not isinstance(adapter_config, PeftConfig): raise TypeError(f'adapter_config should be an instance of PeftConfig. Got {type(adapter_config)} instead.') adapter_config.base_model_name_or_path = self.__dict__.get('name_or_path', None) inject_adapter_in_model(adapter_config, self, adapter_name) self.set_adapter(adapter_name) def set_adapter(self, adapter_name: Union[list[str], str]) -> None: """ If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT official documentation: https://huggingface.co/docs/peft Sets a specific adapter by forcing the model to use a that adapter and disable the other adapters. Args: adapter_name (`Union[list[str], str]`): The name of the adapter to set. Can be also a list of strings to set multiple adapters. """ check_peft_version(min_version=MIN_PEFT_VERSION) if not self._hf_peft_config_loaded: raise ValueError('No adapter loaded. Please load an adapter first.') elif isinstance(adapter_name, list): missing = set(adapter_name) - set(self.peft_config) if len(missing) > 0: raise ValueError(f"Following adapter(s) could not be found: {', '.join(missing)}. Make sure you are passing the correct adapter name(s). current loaded adapters are: {list(self.peft_config.keys())}") elif adapter_name not in self.peft_config: raise ValueError(f'Adapter with name {adapter_name} not found. Please pass the correct adapter name among {list(self.peft_config.keys())}') from peft.tuners.tuners_utils import BaseTunerLayer from peft.utils import ModulesToSaveWrapper _adapters_has_been_set = False for _, module in self.named_modules(): if isinstance(module, (BaseTunerLayer, ModulesToSaveWrapper)): if hasattr(module, 'set_adapter'): module.set_adapter(adapter_name) else: module.active_adapter = adapter_name _adapters_has_been_set = True if not _adapters_has_been_set: raise ValueError('Did not succeeded in setting the adapter. Please make sure you are using a model that supports adapters.') def disable_adapters(self) -> None: """ If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT official documentation: https://huggingface.co/docs/peft Disable all adapters that are attached to the model. This leads to inferring with the base model only. """ check_peft_version(min_version=MIN_PEFT_VERSION) if not self._hf_peft_config_loaded: raise ValueError('No adapter loaded. 
Please load an adapter first.') from peft.tuners.tuners_utils import BaseTunerLayer from peft.utils import ModulesToSaveWrapper for _, module in self.named_modules(): if isinstance(module, (BaseTunerLayer, ModulesToSaveWrapper)): if hasattr(module, 'enable_adapters'): module.enable_adapters(enabled=False) else: module.disable_adapters = True def enable_adapters(self) -> None: """ If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT official documentation: https://huggingface.co/docs/peft Enable adapters that are attached to the model. """ check_peft_version(min_version=MIN_PEFT_VERSION) if not self._hf_peft_config_loaded: raise ValueError('No adapter loaded. Please load an adapter first.') from peft.tuners.tuners_utils import BaseTunerLayer for _, module in self.named_modules(): if isinstance(module, BaseTunerLayer): if hasattr(module, 'enable_adapters'): module.enable_adapters(enabled=True) else: module.disable_adapters = False def active_adapters(self) -> list[str]: """ If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT official documentation: https://huggingface.co/docs/peft Gets the current active adapters of the model. In case of multi-adapter inference (combining multiple adapters for inference) returns the list of all active adapters so that users can deal with them accordingly. For previous PEFT versions (that does not support multi-adapter inference), `module.active_adapter` will return a single string. """ check_peft_version(min_version=MIN_PEFT_VERSION) if not is_peft_available(): raise ImportError('PEFT is not available. Please install PEFT to use this function: `pip install peft`.') if not self._hf_peft_config_loaded: raise ValueError('No adapter loaded. Please load an adapter first.') from peft.tuners.tuners_utils import BaseTunerLayer for _, module in self.named_modules(): if isinstance(module, BaseTunerLayer): active_adapters = module.active_adapter break if isinstance(active_adapters, str): active_adapters = [active_adapters] return active_adapters def active_adapter(self) -> str: warnings.warn('The `active_adapter` method is deprecated and will be removed in a future version.', FutureWarning) return self.active_adapters()[0] def get_adapter_state_dict(self, adapter_name: Optional[str]=None, state_dict: Optional[dict]=None) -> dict: """ If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT official documentation: https://huggingface.co/docs/peft Gets the adapter state dict that should only contain the weights tensors of the specified adapter_name adapter. If no adapter_name is passed, the active adapter is used. Args: adapter_name (`str`, *optional*): The name of the adapter to get the state dict from. If no name is passed, the active adapter is used. state_dict (nested dictionary of `torch.Tensor`, *optional*) The state dictionary of the model. Will default to `self.state_dict()`, but can be used if special precautions need to be taken when recovering the state dictionary of a model (like when using model parallelism). """ check_peft_version(min_version=MIN_PEFT_VERSION) if not self._hf_peft_config_loaded: raise ValueError('No adapter loaded. 
Please load an adapter first.') from peft import get_peft_model_state_dict if adapter_name is None: adapter_name = self.active_adapters()[0] adapter_state_dict = get_peft_model_state_dict(self, state_dict=state_dict, adapter_name=adapter_name) return adapter_state_dict def _dispatch_accelerate_model(self, device_map: str, max_memory: Optional[int]=None, offload_folder: Optional[str]=None, offload_index: Optional[int]=None) -> None: """ Optional re-dispatch the model and attach new hooks to the model in case the model has been loaded with accelerate (i.e. with `device_map=xxx`) Args: device_map (`str` or `dict[str, Union[int, str, torch.device]]` or `int` or `torch.device`, *optional*): A map that specifies where each submodule should go. It doesn't need to be refined to each parameter/buffer name, once a given module name is inside, every submodule of it will be sent to the same device. If we only pass the device (*e.g.*, `"cpu"`, `"cuda:1"`, `"mps"`, or a GPU ordinal rank like `1`) on which the model will be allocated, the device map will map the entire model to this device. Passing `device_map = 0` means put the whole model on GPU 0. To have Accelerate compute the most optimized `device_map` automatically, set `device_map="auto"`. For more information about each option see [designing a device map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map). max_memory (`Dict`, *optional*): A dictionary device identifier to maximum memory. Will default to the maximum memory available for each GPU and the available CPU RAM if unset. offload_folder (`str` or `os.PathLike`, *optional*): If the `device_map` contains any value `"disk"`, the folder where we will offload weights. offload_index (`int`, *optional*): The offload_index argument to be passed to `accelerate.dispatch_model` method. """ dispatch_model_kwargs = {} if 'offload_index' in inspect.signature(dispatch_model).parameters: dispatch_model_kwargs['offload_index'] = offload_index no_split_module_classes = self._no_split_modules if device_map != 'sequential': max_memory = get_balanced_memory(self, max_memory=max_memory, no_split_module_classes=no_split_module_classes, low_zero=device_map == 'balanced_low_0') if isinstance(device_map, str): device_map = infer_auto_device_map(self, max_memory=max_memory, no_split_module_classes=no_split_module_classes) dispatch_model(self, device_map=device_map, offload_dir=offload_folder, **dispatch_model_kwargs) def delete_adapter(self, adapter_names: Union[list[str], str]) -> None: """ Delete an adapter's LoRA layers from the underlying model. Args: adapter_names (`Union[list[str], str]`): The name(s) of the adapter(s) to delete. Example: ```py from diffusers import AutoPipelineForText2Image import torch pipeline = AutoPipelineForText2Image.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", dtype=torch.float16 ).to("cuda") pipeline.load_lora_weights( "jbilcke-hf/sdxl-cinematic-1", weight_name="pytorch_lora_weights.safetensors", adapter_names="cinematic" ) pipeline.delete_adapters("cinematic") ``` """ check_peft_version(min_version=MIN_PEFT_VERSION) if not self._hf_peft_config_loaded: raise ValueError('No adapter loaded. 
Please load an adapter first.') from peft.tuners.tuners_utils import BaseTunerLayer if isinstance(adapter_names, str): adapter_names = [adapter_names] missing_adapters = [name for name in adapter_names if name not in self.peft_config] if missing_adapters: raise ValueError(f"The following adapter(s) are not present and cannot be deleted: {', '.join(missing_adapters)}") for adapter_name in adapter_names: for module in self.modules(): if isinstance(module, BaseTunerLayer): if hasattr(module, 'delete_adapter'): module.delete_adapter(adapter_name) else: raise ValueError('The version of PEFT you are using is not compatible, please use a version that is greater than 0.6.1') if getattr(self, '_hf_peft_config_loaded', False) and hasattr(self, 'peft_config'): self.peft_config.pop(adapter_name, None) if len(self.peft_config) == 0: del self.peft_config self._hf_peft_config_loaded = False
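A hedged sketch of the PeftAdapterMixin API surface documented above, before the class skeleton that follows; it assumes `peft` is installed, and the model checkpoint and adapter names are illustrative placeholders:

```python
# Illustrative sketch: PeftAdapterMixin methods are available on any
# PreTrainedModel when `peft` is installed. Checkpoint and adapter names are placeholders.
from transformers import AutoModelForCausalLM
from peft import LoraConfig

model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m")

# Attach a fresh, trainable LoRA adapter (add_adapter also activates it).
model.add_adapter(LoraConfig(task_type="CAUSAL_LM"), adapter_name="lora_a")

# Alternatively, load stored adapter weights from the Hub or a local path
# (hypothetical repository id shown only as a placeholder):
# model.load_adapter("some-user/opt-350m-lora", adapter_name="lora_b")

model.set_adapter("lora_a")                           # route the forward pass through lora_a
print(model.active_adapters())                        # ["lora_a"]
model.disable_adapters()                              # fall back to the base model
model.enable_adapters()
adapter_sd = model.get_adapter_state_dict("lora_a")   # only the adapter weight tensors
```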
class PeftAdapterMixin: ''' A class containing all functions for loading and using adapters weights that are supported in PEFT library. For more details about adapters and injecting them on a transformer-based model, check out the documentation of PEFT library: https://huggingface.co/docs/peft/index Currently supported PEFT methods are all non-prefix tuning methods. Below is the list of supported PEFT methods that anyone can load, train and run with this mixin class: - Low Rank Adapters (LoRA): https://huggingface.co/docs/peft/conceptual_guides/lora - IA3: https://huggingface.co/docs/peft/conceptual_guides/ia3 - AdaLora: https://huggingface.co/papers/2303.10512 Other PEFT models such as prompt tuning, prompt learning are out of scope as these adapters are not "injectable" into a torch module. For using these methods, please refer to the usage guide of PEFT library. With this mixin, if the correct PEFT version is installed, it is possible to: - Load an adapter stored on a local path or in a remote Hub repository, and inject it in the model - Attach new adapters in the model and train them with Trainer or by your own. - Attach multiple adapters and iteratively activate / deactivate them - Activate / deactivate all adapters from the model. - Get the `state_dict` of the active adapter. ''' def load_adapter(self, peft_model_id: Optional[str]=None, adapter_name: Optional[str]=None, revision: Optional[str]=None, token: Optional[str]=None, device_map: Optional[str]='auto', max_memory: Optional[str]=None, offload_folder: Optional[str]=None, offload_index: Optional[int]=None, peft_config: Optional[dict[str, Any]]=None, adapter_state_dict: Optional[dict[str, 'torch.Tensor']]=None, low_cpu_mem_usage: bool=False, is_trainable: bool=False, adapter_kwargs: Optional[dict[str, Any]]=None) -> None: ''' Load adapter weights from file or remote Hub folder. If you are not familiar with adapters and PEFT methods, we invite you to read more about them on PEFT official documentation: https://huggingface.co/docs/peft Requires peft as a backend to load the adapter weights. Args: peft_model_id (`str`, *optional*): The identifier of the model to look for on the Hub, or a local path to the saved adapter config file and adapter weights. adapter_name (`str`, *optional*): The adapter name to use. If not set, will use the default adapter. revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier allowed by git. <Tip> To test a pull request you made on the Hub, you can pass `revision="refs/pr/<pr_number>"`. </Tip> token (`str`, `optional`): Whether to use authentication token to load the remote folder. Useful to load private repositories that are on HuggingFace Hub. You might need to call `hf auth login` and paste your tokens to cache it. device_map (`str` or `dict[str, Union[int, str, torch.device]]` or `int` or `torch.device`, *optional*): A map that specifies where each submodule should go. It doesn't need to be refined to each parameter/buffer name, once a given module name is inside, every submodule of it will be sent to the same device. If we only pass the device (*e.g.*, `"cpu"`, `"cuda:1"`, `"mps"`, or a GPU ordinal rank like `1`) on which the model will be allocated, the device map will map the entire model to this device. Passing `device_map = 0` means put the whole model on GPU 0. 
To have Accelerate compute the most optimized `device_map` automatically, set `device_map="auto"`. For more information about each option see [designing a device map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map). max_memory (`Dict`, *optional*): A dictionary mapping device identifiers to their maximum memory. Will default to the maximum memory available for each GPU and the available CPU RAM if unset. offload_folder (`str` or `os.PathLike`, *optional*): If the `device_map` contains any value `"disk"`, the folder where we will offload weights. offload_index (`int`, *optional*): The `offload_index` argument to be passed to the `accelerate.dispatch_model` method. peft_config (`dict[str, Any]`, *optional*): The configuration of the adapter to add; supported adapters are non-prefix tuning and adaption prompt methods. This argument is used in case users directly pass PEFT state dicts. adapter_state_dict (`dict[str, torch.Tensor]`, *optional*): The state dict of the adapter to load. This argument is used in case users directly pass PEFT state dicts. low_cpu_mem_usage (`bool`, *optional*, defaults to `False`): Reduce memory usage while loading the PEFT adapter. This should also speed up the loading process. Requires PEFT version 0.13.0 or higher. is_trainable (`bool`, *optional*, defaults to `False`): Whether the adapter should be trainable or not. If `False`, the adapter will be frozen and can only be used for inference. adapter_kwargs (`dict[str, Any]`, *optional*): Additional keyword arguments passed along to the `from_pretrained` method of the adapter config and the `find_adapter_config_file` method. ''' pass def add_adapter(self, adapter_config, adapter_name: Optional[str]=None) -> None: ''' If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT official documentation: https://huggingface.co/docs/peft Adds a fresh new adapter to the current model for training purposes. If no adapter name is passed, a default name is assigned to the adapter to follow the convention of the PEFT library (in PEFT we use "default" as the default adapter name). Args: adapter_config (`~peft.PeftConfig`): The configuration of the adapter to add; supported adapters are non-prefix tuning and adaption prompt methods adapter_name (`str`, *optional*, defaults to `"default"`): The name of the adapter to add. If no name is passed, a default name is assigned to the adapter. ''' pass def set_adapter(self, adapter_name: Union[list[str], str]) -> None: ''' If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT official documentation: https://huggingface.co/docs/peft Sets a specific adapter by forcing the model to use that adapter and disables the other adapters. Args: adapter_name (`Union[list[str], str]`): The name of the adapter to set. Can also be a list of strings to set multiple adapters. ''' pass def disable_adapters(self) -> None: ''' If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT official documentation: https://huggingface.co/docs/peft Disable all adapters that are attached to the model. This leads to inferring with the base model only. ''' pass def enable_adapters(self) -> None: ''' If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT official documentation: https://huggingface.co/docs/peft Enable adapters that are attached to the model. 
''' pass def active_adapters(self) -> list[str]: ''' If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT official documentation: https://huggingface.co/docs/peft Gets the current active adapters of the model. In case of multi-adapter inference (combining multiple adapters for inference) returns the list of all active adapters so that users can deal with them accordingly. For previous PEFT versions (which do not support multi-adapter inference), `module.active_adapter` will return a single string. ''' pass def get_adapter_state_dict(self, adapter_name: Optional[str]=None, state_dict: Optional[dict]=None) -> dict: ''' If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT official documentation: https://huggingface.co/docs/peft Gets the adapter state dict that should only contain the weight tensors of the specified adapter. If no adapter_name is passed, the active adapter is used. Args: adapter_name (`str`, *optional*): The name of the adapter to get the state dict from. If no name is passed, the active adapter is used. state_dict (nested dictionary of `torch.Tensor`, *optional*): The state dictionary of the model. Will default to `self.state_dict()`, but can be used if special precautions need to be taken when recovering the state dictionary of a model (like when using model parallelism). ''' pass def _dispatch_accelerate_model(self, device_map: str, max_memory: Optional[int]=None, offload_folder: Optional[str]=None, offload_index: Optional[int]=None) -> None: ''' Optionally re-dispatches the model and attaches new hooks to it, in case the model has been loaded with accelerate (i.e. with `device_map=xxx`). Args: device_map (`str` or `dict[str, Union[int, str, torch.device]]` or `int` or `torch.device`, *optional*): A map that specifies where each submodule should go. It doesn't need to be refined to each parameter/buffer name; once a given module name is inside, every submodule of it will be sent to the same device. If we only pass the device (*e.g.*, `"cpu"`, `"cuda:1"`, `"mps"`, or a GPU ordinal rank like `1`) on which the model will be allocated, the device map will map the entire model to this device. Passing `device_map = 0` means put the whole model on GPU 0. To have Accelerate compute the most optimized `device_map` automatically, set `device_map="auto"`. For more information about each option see [designing a device map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map). max_memory (`Dict`, *optional*): A dictionary mapping device identifiers to their maximum memory. Will default to the maximum memory available for each GPU and the available CPU RAM if unset. offload_folder (`str` or `os.PathLike`, *optional*): If the `device_map` contains any value `"disk"`, the folder where we will offload weights. offload_index (`int`, *optional*): The offload_index argument to be passed to the `accelerate.dispatch_model` method. ''' pass def delete_adapter(self, adapter_names: Union[list[str], str]) -> None: ''' Delete an adapter's LoRA layers from the underlying model. Args: adapter_names (`Union[list[str], str]`): The name(s) of the adapter(s) to delete. 
Example: ```py from diffusers import AutoPipelineForText2Image import torch pipeline = AutoPipelineForText2Image.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16 ).to("cuda") pipeline.load_lora_weights( "jbilcke-hf/sdxl-cinematic-1", weight_name="pytorch_lora_weights.safetensors", adapter_name="cinematic" ) pipeline.delete_adapters("cinematic") ``` ''' pass
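For orientation, here is a minimal lifecycle sketch for the mixin above, assuming `peft` is installed; the adapter repo id is illustrative and any LoRA adapter trained for the base model works:

```python
# Hedged usage sketch of PeftAdapterMixin; "ybelkada/opt-350m-lora" is an
# example adapter id and can be swapped for any compatible PEFT adapter.
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m")
model.load_adapter("ybelkada/opt-350m-lora", adapter_name="lora_1")
print(model.active_adapters())  # ["lora_1"]
model.disable_adapters()        # forward passes now use the base weights only
model.enable_adapters()
model.delete_adapter("lora_1")  # strips the injected LoRA layers again
```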
11
10
51
8
26
16
7
0.68
0
12
0
0
10
0
10
10
544
97
266
70
223
181
173
49
151
27
0
4
74
421
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/kernels/falcon_mamba/selective_scan_with_ln_interface.py
transformers.kernels.falcon_mamba.selective_scan_with_ln_interface.MambaInnerFn
from einops import rearrange, repeat from torch.cuda.amp import custom_bwd, custom_fwd import torch import torch.nn.functional as F import selective_scan_cuda class MambaInnerFn(torch.autograd.Function): @staticmethod @custom_fwd def forward(ctx, xz, conv1d_weight, conv1d_bias, x_proj_weight, delta_proj_weight, out_proj_weight, out_proj_bias, A, B=None, C=None, D=None, delta_bias=None, B_proj_bias=None, C_proj_bias=None, delta_softplus=True, checkpoint_lvl=1, b_rms_weight=None, c_rms_weight=None, dt_rms_weight=None, b_c_dt_rms_eps=1e-06): """ xz: (batch, dim, seqlen) """ assert causal_conv1d_cuda is not None, 'causal_conv1d_cuda is not available. Please install causal-conv1d.' assert checkpoint_lvl in [0, 1] L = xz.shape[-1] delta_rank = delta_proj_weight.shape[1] d_state = A.shape[-1] * (1 if not A.is_complex() else 2) if torch.is_autocast_enabled(): x_proj_weight = x_proj_weight.to(dtype=torch.get_autocast_gpu_dtype()) delta_proj_weight = delta_proj_weight.to(dtype=torch.get_autocast_gpu_dtype()) out_proj_weight = out_proj_weight.to(dtype=torch.get_autocast_gpu_dtype()) out_proj_bias = out_proj_bias.to(dtype=torch.get_autocast_gpu_dtype()) if out_proj_bias is not None else None if xz.stride(-1) != 1: xz = xz.contiguous() conv1d_weight = rearrange(conv1d_weight, 'd 1 w -> d w') x, z = xz.chunk(2, dim=1) conv1d_bias = conv1d_bias.contiguous() if conv1d_bias is not None else None conv1d_out = causal_conv1d_cuda.causal_conv1d_fwd(x, conv1d_weight, conv1d_bias, None, None, None, True) x_dbl = F.linear(rearrange(conv1d_out, 'b d l -> (b l) d'), x_proj_weight) delta = rearrange(delta_proj_weight @ x_dbl[:, :delta_rank].t(), 'd (b l) -> b d l', l=L) ctx.is_variable_B = B is None ctx.is_variable_C = C is None ctx.B_proj_bias_is_None = B_proj_bias is None ctx.C_proj_bias_is_None = C_proj_bias is None if B is None: B = x_dbl[:, delta_rank:delta_rank + d_state] if B_proj_bias is not None: B = B + B_proj_bias.to(dtype=B.dtype) if not A.is_complex(): B = rearrange(B, '(b l) dstate -> b 1 dstate l', l=L).contiguous() else: B = rearrange(B, '(b l) (dstate two) -> b 1 dstate (l two)', l=L, two=2).contiguous() elif B.stride(-1) != 1: B = B.contiguous() if C is None: C = x_dbl[:, -d_state:] if C_proj_bias is not None: C = C + C_proj_bias.to(dtype=C.dtype) if not A.is_complex(): C = rearrange(C, '(b l) dstate -> b 1 dstate l', l=L).contiguous() else: C = rearrange(C, '(b l) (dstate two) -> b 1 dstate (l two)', l=L, two=2).contiguous() elif C.stride(-1) != 1: C = C.contiguous() if D is not None: D = D.contiguous() if b_rms_weight is not None: B = rearrange(B, 'b 1 dstate l -> (b l) dstate', l=L).contiguous() B = rms_norm_forward(B, b_rms_weight, bias=None, eps=b_c_dt_rms_eps) B = rearrange(B, '(b l) dstate -> b 1 dstate l', l=L).contiguous() if c_rms_weight is not None: C = rearrange(C, 'b 1 dstate l -> (b l) dstate', l=L).contiguous() C = rms_norm_forward(C, c_rms_weight, bias=None, eps=b_c_dt_rms_eps) C = rearrange(C, '(b l) dstate -> b 1 dstate l', l=L).contiguous() if dt_rms_weight is not None: delta = rearrange(delta, 'b d l -> (b l) d', l=L).contiguous() delta = rms_norm_forward(delta, dt_rms_weight, bias=None, eps=b_c_dt_rms_eps) delta = rearrange(delta, '(b l) d -> b d l', l=L).contiguous() out, scan_intermediates, out_z = selective_scan_cuda.fwd(conv1d_out, delta, A, B, C, D, z, delta_bias, delta_softplus) ctx.delta_softplus = delta_softplus ctx.out_proj_bias_is_None = out_proj_bias is None ctx.checkpoint_lvl = checkpoint_lvl ctx.b_rms_weight = b_rms_weight ctx.c_rms_weight = c_rms_weight 
ctx.dt_rms_weight = dt_rms_weight ctx.b_c_dt_rms_eps = b_c_dt_rms_eps if checkpoint_lvl >= 1: conv1d_out, delta = (None, None) ctx.save_for_backward(xz, conv1d_weight, conv1d_bias, x_dbl, x_proj_weight, delta_proj_weight, out_proj_weight, conv1d_out, delta, A, B, C, D, delta_bias, scan_intermediates, b_rms_weight, c_rms_weight, dt_rms_weight, out) return F.linear(rearrange(out_z, 'b d l -> b l d'), out_proj_weight, out_proj_bias) @staticmethod @custom_bwd def backward(ctx, dout): assert causal_conv1d_cuda is not None, 'causal_conv1d_cuda is not available. Please install causal-conv1d.' xz, conv1d_weight, conv1d_bias, x_dbl, x_proj_weight, delta_proj_weight, out_proj_weight, conv1d_out, delta, A, B, C, D, delta_bias, scan_intermediates, b_rms_weight, c_rms_weight, dt_rms_weight, out = ctx.saved_tensors L = xz.shape[-1] delta_rank = delta_proj_weight.shape[1] d_state = A.shape[-1] * (1 if not A.is_complex() else 2) x, z = xz.chunk(2, dim=1) if dout.stride(-1) != 1: dout = dout.contiguous() if ctx.checkpoint_lvl == 1: conv1d_out = causal_conv1d_cuda.causal_conv1d_fwd(x, conv1d_weight, conv1d_bias, None, None, None, True) delta = rearrange(delta_proj_weight @ x_dbl[:, :delta_rank].t(), 'd (b l) -> b d l', l=L) if dt_rms_weight is not None: delta = rearrange(delta, 'b d l -> (b l) d', l=L).contiguous() delta = rms_norm_forward(delta, ctx.dt_rms_weight, None, ctx.b_c_dt_rms_eps) delta = rearrange(delta, '(b l) d -> b d l', l=L).contiguous() if b_rms_weight is not None: B = rearrange(B, 'b 1 dstate l -> (b l) dstate', l=L).contiguous() B = rms_norm_forward(B, ctx.b_rms_weight, None, ctx.b_c_dt_rms_eps) B = rearrange(B, '(b l) dstate -> b 1 dstate l', l=L).contiguous() if c_rms_weight is not None: C = rearrange(C, 'b 1 dstate l -> (b l) dstate', l=L).contiguous() C = rms_norm_forward(C, ctx.c_rms_weight, None, ctx.b_c_dt_rms_eps) C = rearrange(C, '(b l) dstate -> b 1 dstate l', l=L).contiguous() dxz = torch.empty_like(xz) dx, dz = dxz.chunk(2, dim=1) dout = rearrange(dout, 'b l e -> e (b l)') dout_y = rearrange(out_proj_weight.t() @ dout, 'd (b l) -> b d l', l=L) dconv1d_out, ddelta, dA, dB, dC, dD, ddelta_bias, dz, out_z = selective_scan_cuda.bwd(conv1d_out, delta, A, B, C, D, z, delta_bias, dout_y, scan_intermediates, out, dz, ctx.delta_softplus, True) dout_proj_weight = torch.einsum('eB,dB->ed', dout, rearrange(out_z, 'b d l -> d (b l)')) dout_proj_bias = dout.sum(dim=(0, 1)) if not ctx.out_proj_bias_is_None else None dD = dD if D is not None else None dx_dbl = torch.empty_like(x_dbl) dB_proj_bias = None if ctx.is_variable_B: if not A.is_complex(): dB = rearrange(dB, 'b 1 dstate l -> (b l) dstate').contiguous() else: dB = rearrange(dB, 'b 1 dstate (l two) -> (b l) (dstate two)', two=2).contiguous() dB_proj_bias = dB.sum(0) if not ctx.B_proj_bias_is_None else None dx_dbl[:, delta_rank:delta_rank + d_state] = dB dB = None dC_proj_bias = None if ctx.is_variable_C: if not A.is_complex(): dC = rearrange(dC, 'b 1 dstate l -> (b l) dstate').contiguous() else: dC = rearrange(dC, 'b 1 dstate (l two) -> (b l) (dstate two)', two=2).contiguous() dC_proj_bias = dC.sum(0) if not ctx.C_proj_bias_is_None else None dx_dbl[:, -d_state:] = dC dC = None ddelta = rearrange(ddelta, 'b d l -> d (b l)') ddelta_proj_weight = torch.einsum('dB,Br->dr', ddelta, x_dbl[:, :delta_rank]) dx_dbl[:, :delta_rank] = torch.einsum('dB,dr->Br', ddelta, delta_proj_weight) dconv1d_out = rearrange(dconv1d_out, 'b d l -> d (b l)') dx_proj_weight = torch.einsum('Br,Bd->rd', dx_dbl, rearrange(conv1d_out, 'b d l -> (b l) d')) dconv1d_out = 
torch.addmm(dconv1d_out, x_proj_weight.t(), dx_dbl.t(), out=dconv1d_out) dconv1d_out = rearrange(dconv1d_out, 'd (b l) -> b d l', b=x.shape[0], l=x.shape[-1]) dx, dconv1d_weight, dconv1d_bias, *_ = causal_conv1d_cuda.causal_conv1d_bwd(x, conv1d_weight, conv1d_bias, dconv1d_out, None, None, None, dx, False, True) dconv1d_bias = dconv1d_bias if conv1d_bias is not None else None dconv1d_weight = rearrange(dconv1d_weight, 'd w -> d 1 w') return (dxz, dconv1d_weight, dconv1d_bias, dx_proj_weight, ddelta_proj_weight, dout_proj_weight, dout_proj_bias, dA, dB, dC, dD, ddelta_bias if delta_bias is not None else None, dB_proj_bias, dC_proj_bias, None, None, None, None, None, None)
class MambaInnerFn(torch.autograd.Function): @staticmethod @custom_fwd def forward(ctx, xz, conv1d_weight, conv1d_bias, x_proj_weight, delta_proj_weight, out_proj_weight, out_proj_bias, A, B=None, C=None, D=None, delta_bias=None, B_proj_bias=None, C_proj_bias=None, delta_softplus=True, checkpoint_lvl=1, b_rms_weight=None, c_rms_weight=None, dt_rms_weight=None, b_c_dt_rms_eps=1e-06): ''' xz: (batch, dim, seqlen) ''' pass @staticmethod @custom_bwd def backward(ctx, dout): pass
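Note that the flattened snippet above also relies on `causal_conv1d_cuda` and `rms_norm_forward`, which the original module obtains through conditional imports and local definitions not captured in its import list. The `checkpoint_lvl >= 1` branch implements a recompute-in-backward trade-off: `conv1d_out` and `delta` are dropped after the forward pass and rebuilt inside `backward`. A self-contained toy `autograd.Function` illustrating that pattern (names are illustrative):

```python
# Toy recompute-in-backward sketch: like MambaInnerFn with checkpoint_lvl=1,
# we avoid saving an intermediate and recompute what backward needs from the
# saved inputs instead.
import torch

class SquareThenScale(torch.autograd.Function):
    @staticmethod
    def forward(ctx, x, scale):
        y = x * x                 # intermediate we deliberately do not save
        ctx.scale = scale
        ctx.save_for_backward(x)  # only the (cheap) input is kept
        return y * scale

    @staticmethod
    def backward(ctx, grad_out):
        (x,) = ctx.saved_tensors
        # d(scale * x^2)/dx = 2 * scale * x, recomputed on the fly
        return grad_out * 2 * ctx.scale * x, None

x = torch.randn(4, requires_grad=True)
SquareThenScale.apply(x, 3.0).sum().backward()
assert torch.allclose(x.grad, 6 * x.detach())
```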
7
1
128
2
119
13
18
0.11
1
0
0
0
0
0
2
32
262
4
242
70
213
26
128
28
125
19
5
2
36
422
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/kernels/falcon_mamba/selective_scan_with_ln_interface.py
transformers.kernels.falcon_mamba.selective_scan_with_ln_interface.SelectiveScanFn
import torch.nn.functional as F import torch from einops import rearrange, repeat import selective_scan_cuda class SelectiveScanFn(torch.autograd.Function): @staticmethod def forward(ctx, u, delta, A, B, C, D=None, z=None, delta_bias=None, delta_softplus=False, return_last_state=False): if u.stride(-1) != 1: u = u.contiguous() if delta.stride(-1) != 1: delta = delta.contiguous() if D is not None: D = D.contiguous() if B.stride(-1) != 1: B = B.contiguous() if C.stride(-1) != 1: C = C.contiguous() if z is not None and z.stride(-1) != 1: z = z.contiguous() if B.dim() == 3: B = rearrange(B, 'b dstate l -> b 1 dstate l') ctx.squeeze_B = True if C.dim() == 3: C = rearrange(C, 'b dstate l -> b 1 dstate l') ctx.squeeze_C = True out, x, *rest = selective_scan_cuda.fwd(u, delta, A, B, C, D, z, delta_bias, delta_softplus) ctx.delta_softplus = delta_softplus ctx.has_z = z is not None last_state = x[:, :, -1, 1::2] if not ctx.has_z: ctx.save_for_backward(u, delta, A, B, C, D, delta_bias, x) return out if not return_last_state else (out, last_state) else: ctx.save_for_backward(u, delta, A, B, C, D, z, delta_bias, x, out) out_z = rest[0] return out_z if not return_last_state else (out_z, last_state) @staticmethod def backward(ctx, dout, *args): if not ctx.has_z: u, delta, A, B, C, D, delta_bias, x = ctx.saved_tensors z = None out = None else: u, delta, A, B, C, D, z, delta_bias, x, out = ctx.saved_tensors if dout.stride(-1) != 1: dout = dout.contiguous() du, ddelta, dA, dB, dC, dD, ddelta_bias, *rest = selective_scan_cuda.bwd(u, delta, A, B, C, D, z, delta_bias, dout, x, out, None, ctx.delta_softplus, False) dz = rest[0] if ctx.has_z else None dB = dB.squeeze(1) if getattr(ctx, 'squeeze_B', False) else dB dC = dC.squeeze(1) if getattr(ctx, 'squeeze_C', False) else dC return (du, ddelta, dA, dB, dC, dD if D is not None else None, dz, ddelta_bias if delta_bias is not None else None, None, None)
class SelectiveScanFn(torch.autograd.Function): @staticmethod def forward(ctx, u, delta, A, B, C, D=None, z=None, delta_bias=None, delta_softplus=False, return_last_state=False): pass @staticmethod def backward(ctx, dout, *args): pass
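For intuition about what the fused `selective_scan_cuda.fwd` call computes, here is a naive pure-PyTorch reference of the underlying recurrence (real-valued `A`, per-timestep `B`/`C`, no `z` gating). It is a sketch for small-scale checking only, not the kernel's exact numerics:

```python
# Naive selective-scan reference: x_t = exp(delta_t * A) * x_{t-1}
#                                       + delta_t * B_t * u_t,
#                                 y_t = <C_t, x_t> + D * u_t.
import torch

def selective_scan_ref(u, delta, A, B, C, D=None):
    # u, delta: (batch, dim, L); A: (dim, n); B, C: (batch, n, L); D: (dim,)
    batch, dim, L = u.shape
    x = u.new_zeros(batch, dim, A.shape[1])
    ys = []
    for t in range(L):
        dA = torch.exp(delta[:, :, t, None] * A)                # (batch, dim, n)
        dBu = delta[:, :, t, None] * B[:, None, :, t] * u[:, :, t, None]
        x = dA * x + dBu
        ys.append((x * C[:, None, :, t]).sum(-1))               # (batch, dim)
    y = torch.stack(ys, dim=-1)                                 # (batch, dim, L)
    return y if D is None else y + D[None, :, None] * u
```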
5
0
38
0
36
3
10
0.07
1
0
0
0
0
0
2
32
79
1
75
15
68
5
43
11
40
12
5
1
20
423
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/loss/loss_deformable_detr.py
transformers.loss.loss_deformable_detr.DeformableDetrHungarianMatcher
import torch.nn as nn from ..image_transforms import center_to_corners_format import torch from scipy.optimize import linear_sum_assignment from .loss_for_object_detection import HungarianMatcher, ImageLoss, _set_aux_loss, generalized_box_iou, sigmoid_focal_loss class DeformableDetrHungarianMatcher(HungarianMatcher): @torch.no_grad() def forward(self, outputs, targets): """ Differences: - out_prob = outputs["logits"].flatten(0, 1).sigmoid() instead of softmax - class_cost uses alpha and gamma """ batch_size, num_queries = outputs['logits'].shape[:2] out_prob = outputs['logits'].flatten(0, 1).sigmoid() out_bbox = outputs['pred_boxes'].flatten(0, 1) target_ids = torch.cat([v['class_labels'] for v in targets]) target_bbox = torch.cat([v['boxes'] for v in targets]) alpha = 0.25 gamma = 2.0 neg_cost_class = (1 - alpha) * out_prob ** gamma * -(1 - out_prob + 1e-08).log() pos_cost_class = alpha * (1 - out_prob) ** gamma * -(out_prob + 1e-08).log() class_cost = pos_cost_class[:, target_ids] - neg_cost_class[:, target_ids] bbox_cost = torch.cdist(out_bbox, target_bbox, p=1) giou_cost = -generalized_box_iou(center_to_corners_format(out_bbox), center_to_corners_format(target_bbox)) cost_matrix = self.bbox_cost * bbox_cost + self.class_cost * class_cost + self.giou_cost * giou_cost cost_matrix = cost_matrix.view(batch_size, num_queries, -1).cpu() sizes = [len(v['boxes']) for v in targets] indices = [linear_sum_assignment(c[i]) for i, c in enumerate(cost_matrix.split(sizes, -1))] return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices]
class DeformableDetrHungarianMatcher(HungarianMatcher): @torch.no_grad() def forward(self, outputs, targets): ''' Differences: - out_prob = outputs["logits"].flatten(0, 1).sigmoid() instead of softmax - class_cost uses alpha and gamma ''' pass
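The only change relative to the parent matcher is the class cost: instead of `-softmax`, it uses a focal-style cost with fixed `alpha=0.25`, `gamma=2.0`, so confident correct predictions become cheap to assign and confident wrong ones expensive. A tiny numeric illustration of that cost term (values are made up):

```python
# Focal-style class cost as in the forward above, on toy probabilities.
import torch

alpha, gamma = 0.25, 2.0
out_prob = torch.tensor([[0.9, 0.1],
                         [0.5, 0.5]])            # (num_queries, num_classes)
target_ids = torch.tensor([0, 1])
neg = (1 - alpha) * out_prob**gamma * -(1 - out_prob + 1e-8).log()
pos = alpha * (1 - out_prob)**gamma * -(out_prob + 1e-8).log()
class_cost = pos[:, target_ids] - neg[:, target_ids]
print(class_cost)  # query 0 is far cheaper to assign to class 0 than query 1
```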
3
1
36
7
18
13
1
0.65
1
1
0
0
1
0
1
13
38
7
20
18
17
13
19
17
17
1
2
0
1
424
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/loss/loss_deformable_detr.py
transformers.loss.loss_deformable_detr.DeformableDetrImageLoss
import torch.nn as nn import torch from .loss_for_object_detection import HungarianMatcher, ImageLoss, _set_aux_loss, generalized_box_iou, sigmoid_focal_loss class DeformableDetrImageLoss(ImageLoss): def __init__(self, matcher, num_classes, focal_alpha, losses): nn.Module.__init__(self) self.matcher = matcher self.num_classes = num_classes self.focal_alpha = focal_alpha self.losses = losses def loss_labels(self, outputs, targets, indices, num_boxes): """ Classification loss (Binary focal loss) targets dicts must contain the key "class_labels" containing a tensor of dim [nb_target_boxes] """ if 'logits' not in outputs: raise KeyError('No logits were found in the outputs') source_logits = outputs['logits'] idx = self._get_source_permutation_idx(indices) target_classes_o = torch.cat([t['class_labels'][J] for t, (_, J) in zip(targets, indices)]) target_classes = torch.full(source_logits.shape[:2], self.num_classes, dtype=torch.int64, device=source_logits.device) target_classes[idx] = target_classes_o target_classes_onehot = torch.zeros([source_logits.shape[0], source_logits.shape[1], source_logits.shape[2] + 1], dtype=source_logits.dtype, layout=source_logits.layout, device=source_logits.device) target_classes_onehot.scatter_(2, target_classes.unsqueeze(-1), 1) target_classes_onehot = target_classes_onehot[:, :, :-1] loss_ce = sigmoid_focal_loss(source_logits, target_classes_onehot, num_boxes, alpha=self.focal_alpha, gamma=2) * source_logits.shape[1] losses = {'loss_ce': loss_ce} return losses
class DeformableDetrImageLoss(ImageLoss): def __init__(self, matcher, num_classes, focal_alpha, losses): pass def loss_labels(self, outputs, targets, indices, num_boxes): ''' Classification loss (Binary focal loss) targets dicts must contain the key "class_labels" containing a tensor of dim [nb_target_boxes] ''' pass
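`loss_labels` relies on a small one-hot trick: targets are scattered into `num_classes + 1` channels and the last ("no object") channel is dropped, so background queries become all-zero rows for the sigmoid focal loss. Worked in isolation:

```python
# The scatter-then-drop-last-column trick from loss_labels above.
import torch

num_classes = 3
target = torch.tensor([[0, 2, num_classes]])   # last query is "no object"
onehot = torch.zeros(1, 3, num_classes + 1)
onehot.scatter_(2, target.unsqueeze(-1), 1)
print(onehot[..., :-1])
# tensor([[[1., 0., 0.],
#          [0., 0., 1.],
#          [0., 0., 0.]]])   <- background row is all zeros
```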
3
1
19
2
15
2
2
0.16
1
2
0
0
2
4
2
21
41
5
31
14
28
5
21
14
18
2
2
1
3
425
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/loss/loss_for_object_detection.py
transformers.loss.loss_for_object_detection.HungarianMatcher
import torch.nn as nn import torch from ..utils import is_accelerate_available, is_scipy_available, is_vision_available, requires_backends from ..image_transforms import center_to_corners_format from scipy.optimize import linear_sum_assignment class HungarianMatcher(nn.Module): """ This class computes an assignment between the targets and the predictions of the network. For efficiency reasons, the targets don't include the no_object. Because of this, in general, there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions, while the others are un-matched (and thus treated as non-objects). Args: class_cost: The relative weight of the classification error in the matching cost. bbox_cost: The relative weight of the L1 error of the bounding box coordinates in the matching cost. giou_cost: The relative weight of the giou loss of the bounding box in the matching cost. """ def __init__(self, class_cost: float=1, bbox_cost: float=1, giou_cost: float=1): super().__init__() requires_backends(self, ['scipy']) self.class_cost = class_cost self.bbox_cost = bbox_cost self.giou_cost = giou_cost if class_cost == 0 and bbox_cost == 0 and (giou_cost == 0): raise ValueError("All costs of the Matcher can't be 0") @torch.no_grad() def forward(self, outputs, targets): """ Args: outputs (`dict`): A dictionary that contains at least these entries: * "logits": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits * "pred_boxes": Tensor of dim [batch_size, num_queries, 4] with the predicted box coordinates. targets (`list[dict]`): A list of targets (len(targets) = batch_size), where each target is a dict containing: * "class_labels": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of ground-truth objects in the target) containing the class labels * "boxes": Tensor of dim [num_target_boxes, 4] containing the target box coordinates. Returns: `list[Tuple]`: A list of size `batch_size`, containing tuples of (index_i, index_j) where: - index_i is the indices of the selected predictions (in order) - index_j is the indices of the corresponding selected targets (in order) For each batch element, it holds: len(index_i) = len(index_j) = min(num_queries, num_target_boxes) """ batch_size, num_queries = outputs['logits'].shape[:2] out_prob = outputs['logits'].flatten(0, 1).softmax(-1) out_bbox = outputs['pred_boxes'].flatten(0, 1) target_ids = torch.cat([v['class_labels'] for v in targets]) target_bbox = torch.cat([v['boxes'] for v in targets]) class_cost = -out_prob[:, target_ids] bbox_cost = torch.cdist(out_bbox, target_bbox, p=1) giou_cost = -generalized_box_iou(center_to_corners_format(out_bbox), center_to_corners_format(target_bbox)) cost_matrix = self.bbox_cost * bbox_cost + self.class_cost * class_cost + self.giou_cost * giou_cost cost_matrix = cost_matrix.view(batch_size, num_queries, -1).cpu() sizes = [len(v['boxes']) for v in targets] indices = [linear_sum_assignment(c[i]) for i, c in enumerate(cost_matrix.split(sizes, -1))] return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices]
class HungarianMatcher(nn.Module): ''' This class computes an assignment between the targets and the predictions of the network. For efficiency reasons, the targets don't include the no_object. Because of this, in general, there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions, while the others are un-matched (and thus treated as non-objects). Args: class_cost: The relative weight of the classification error in the matching cost. bbox_cost: The relative weight of the L1 error of the bounding box coordinates in the matching cost. giou_cost: The relative weight of the giou loss of the bounding box in the matching cost. ''' def __init__(self, class_cost: float=1, bbox_cost: float=1, giou_cost: float=1): pass @torch.no_grad() def forward(self, outputs, targets): ''' Args: outputs (`dict`): A dictionary that contains at least these entries: * "logits": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits * "pred_boxes": Tensor of dim [batch_size, num_queries, 4] with the predicted box coordinates. targets (`list[dict]`): A list of targets (len(targets) = batch_size), where each target is a dict containing: * "class_labels": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of ground-truth objects in the target) containing the class labels * "boxes": Tensor of dim [num_target_boxes, 4] containing the target box coordinates. Returns: `list[Tuple]`: A list of size `batch_size`, containing tuples of (index_i, index_j) where: - index_i is the indices of the selected predictions (in order) - index_j is the indices of the corresponding selected targets (in order) For each batch element, it holds: len(index_i) = len(index_j) = min(num_queries, num_target_boxes) ''' pass
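The final assignment is solved per image by scipy's Hungarian algorithm (`linear_sum_assignment`), applied to each block of the batched cost matrix after `split(sizes, -1)`. A toy run of the solver itself:

```python
# Hungarian assignment on a toy 3x3 cost matrix.
import numpy as np
from scipy.optimize import linear_sum_assignment

cost = np.array([[4.0, 1.0, 3.0],
                 [2.0, 0.0, 5.0],
                 [3.0, 2.0, 2.0]])
rows, cols = linear_sum_assignment(cost)
# minimal-cost 1-to-1 matching: rows (0, 1, 2) -> cols (1, 0, 2), total 5.0
print(list(zip(rows, cols)), cost[rows, cols].sum())
```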
4
2
29
5
11
14
2
1.71
1
4
0
1
2
3
2
12
76
13
24
18
20
41
23
17
20
2
1
1
3
426
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/loss/loss_for_object_detection.py
transformers.loss.loss_for_object_detection.ImageLoss
import torch.nn as nn import torch from ..utils import is_accelerate_available, is_scipy_available, is_vision_available, requires_backends class ImageLoss(nn.Module): """ This class computes the losses for DetrForObjectDetection/DetrForSegmentation. The process happens in two steps: 1) we compute hungarian assignment between ground truth boxes and the outputs of the model 2) we supervise each pair of matched ground-truth / prediction (supervise class and box). A note on the `num_classes` argument (copied from original repo in detr.py): "the naming of the `num_classes` parameter of the criterion is somewhat misleading. It indeed corresponds to `max_obj_id` + 1, where `max_obj_id` is the maximum id for a class in your dataset. For example, COCO has a `max_obj_id` of 90, so we pass `num_classes` to be 91. As another example, for a dataset that has a single class with `id` 1, you should pass `num_classes` to be 2 (`max_obj_id` + 1). For more details on this, check the following discussion https://github.com/facebookresearch/detr/issues/108#issuecomment-650269223" Args: matcher (`DetrHungarianMatcher`): Module able to compute a matching between targets and proposals. num_classes (`int`): Number of object categories, omitting the special no-object category. eos_coef (`float`): Relative classification weight applied to the no-object category. losses (`list[str]`): List of all the losses to be applied. See `get_loss` for a list of all available losses. """ def __init__(self, matcher, num_classes, eos_coef, losses): super().__init__() self.matcher = matcher self.num_classes = num_classes self.eos_coef = eos_coef self.losses = losses empty_weight = torch.ones(self.num_classes + 1) empty_weight[-1] = self.eos_coef self.register_buffer('empty_weight', empty_weight) def loss_labels(self, outputs, targets, indices, num_boxes): """ Classification loss (NLL) targets dicts must contain the key "class_labels" containing a tensor of dim [nb_target_boxes] """ if 'logits' not in outputs: raise KeyError('No logits were found in the outputs') source_logits = outputs['logits'] idx = self._get_source_permutation_idx(indices) target_classes_o = torch.cat([t['class_labels'][J] for t, (_, J) in zip(targets, indices)]) target_classes = torch.full(source_logits.shape[:2], self.num_classes, dtype=torch.int64, device=source_logits.device) target_classes[idx] = target_classes_o loss_ce = nn.functional.cross_entropy(source_logits.transpose(1, 2), target_classes, self.empty_weight) losses = {'loss_ce': loss_ce} return losses @torch.no_grad() def loss_cardinality(self, outputs, targets, indices, num_boxes): """ Compute the cardinality error, i.e. the absolute error in the number of predicted non-empty boxes. This is not really a loss, it is intended for logging purposes only. It doesn't propagate gradients. """ logits = outputs['logits'] device = logits.device target_lengths = torch.as_tensor([len(v['class_labels']) for v in targets], device=device) card_pred = (logits.argmax(-1) != logits.shape[-1] - 1).sum(1) card_err = nn.functional.l1_loss(card_pred.float(), target_lengths.float()) losses = {'cardinality_error': card_err} return losses def loss_boxes(self, outputs, targets, indices, num_boxes): """ Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss. Targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4]. The target boxes are expected in format (center_x, center_y, w, h), normalized by the image size. 
""" if 'pred_boxes' not in outputs: raise KeyError('No predicted boxes found in outputs') idx = self._get_source_permutation_idx(indices) source_boxes = outputs['pred_boxes'][idx] target_boxes = torch.cat([t['boxes'][i] for t, (_, i) in zip(targets, indices)], dim=0) loss_bbox = nn.functional.l1_loss(source_boxes, target_boxes, reduction='none') losses = {} losses['loss_bbox'] = loss_bbox.sum() / num_boxes loss_giou = 1 - torch.diag(generalized_box_iou(center_to_corners_format(source_boxes), center_to_corners_format(target_boxes))) losses['loss_giou'] = loss_giou.sum() / num_boxes return losses def loss_masks(self, outputs, targets, indices, num_boxes): """ Compute the losses related to the masks: the focal loss and the dice loss. Targets dicts must contain the key "masks" containing a tensor of dim [nb_target_boxes, h, w]. """ if 'pred_masks' not in outputs: raise KeyError('No predicted masks found in outputs') source_idx = self._get_source_permutation_idx(indices) target_idx = self._get_target_permutation_idx(indices) source_masks = outputs['pred_masks'] source_masks = source_masks[source_idx] masks = [t['masks'] for t in targets] target_masks, valid = nested_tensor_from_tensor_list(masks).decompose() target_masks = target_masks.to(source_masks) target_masks = target_masks[target_idx] source_masks = nn.functional.interpolate(source_masks[:, None], size=target_masks.shape[-2:], mode='bilinear', align_corners=False) source_masks = source_masks[:, 0].flatten(1) target_masks = target_masks.flatten(1) target_masks = target_masks.view(source_masks.shape) losses = {'loss_mask': sigmoid_focal_loss(source_masks, target_masks, num_boxes), 'loss_dice': dice_loss(source_masks, target_masks, num_boxes)} return losses def _get_source_permutation_idx(self, indices): batch_idx = torch.cat([torch.full_like(source, i) for i, (source, _) in enumerate(indices)]) source_idx = torch.cat([source for source, _ in indices]) return (batch_idx, source_idx) def _get_target_permutation_idx(self, indices): batch_idx = torch.cat([torch.full_like(target, i) for i, (_, target) in enumerate(indices)]) target_idx = torch.cat([target for _, target in indices]) return (batch_idx, target_idx) def get_loss(self, loss, outputs, targets, indices, num_boxes): loss_map = {'labels': self.loss_labels, 'cardinality': self.loss_cardinality, 'boxes': self.loss_boxes, 'masks': self.loss_masks} if loss not in loss_map: raise ValueError(f'Loss {loss} not supported') return loss_map[loss](outputs, targets, indices, num_boxes) def forward(self, outputs, targets): """ This performs the loss computation. Args: outputs (`dict`, *optional*): Dictionary of tensors, see the output specification of the model for the format. targets (`list[dict]`, *optional*): List of dicts, such that `len(targets) == batch_size`. The expected keys in each dict depends on the losses applied, see each loss' doc. 
""" outputs_without_aux = {k: v for k, v in outputs.items() if k != 'auxiliary_outputs'} indices = self.matcher(outputs_without_aux, targets) num_boxes = sum((len(t['class_labels']) for t in targets)) num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device) world_size = 1 if is_accelerate_available(): if PartialState._shared_state != {}: num_boxes = reduce(num_boxes) world_size = PartialState().num_processes num_boxes = torch.clamp(num_boxes / world_size, min=1).item() losses = {} for loss in self.losses: losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes)) if 'auxiliary_outputs' in outputs: for i, auxiliary_outputs in enumerate(outputs['auxiliary_outputs']): indices = self.matcher(auxiliary_outputs, targets) for loss in self.losses: if loss == 'masks': continue l_dict = self.get_loss(loss, auxiliary_outputs, targets, indices, num_boxes) l_dict = {k + f'_{i}': v for k, v in l_dict.items()} losses.update(l_dict) return losses
null
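(Besides the imports shown, the snippet above assumes `center_to_corners_format`, `nested_tensor_from_tensor_list`, `dice_loss`, `sigmoid_focal_loss`, `PartialState`, and `reduce` are in scope; in the original module these come from same-file definitions and conditional imports.) The `_get_source_permutation_idx` helper turns the matcher's per-image index pairs into flat `(batch, query)` indices for advanced indexing; a small worked example:

```python
# What _get_source_permutation_idx builds from matcher output.
import torch

indices = [(torch.tensor([3, 7]), torch.tensor([0, 1])),  # image 0: queries 3, 7
           (torch.tensor([2]), torch.tensor([0]))]        # image 1: query 2
batch_idx = torch.cat([torch.full_like(src, i) for i, (src, _) in enumerate(indices)])
source_idx = torch.cat([src for src, _ in indices])
print(batch_idx.tolist(), source_idx.tolist())  # [0, 0, 1] [3, 7, 2]
```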
11
6
18
2
12
4
2
0.52
1
7
0
1
9
4
9
19
197
30
110
53
99
57
95
52
85
8
1
4
20
427
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/loss/loss_for_object_detection.py
transformers.loss.loss_for_object_detection.NestedTensor
from torch import Tensor from typing import Optional class NestedTensor: def __init__(self, tensors, mask: Optional[Tensor]): self.tensors = tensors self.mask = mask def to(self, device): cast_tensor = self.tensors.to(device) mask = self.mask if mask is not None: cast_mask = mask.to(device) else: cast_mask = None return NestedTensor(cast_tensor, cast_mask) def decompose(self): return (self.tensors, self.mask) def __repr__(self): return str(self.tensors)
class NestedTensor: def __init__(self, tensors, mask: Optional[Tensor]): pass def to(self, device): pass def decompose(self): pass def __repr__(self): pass
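A minimal construction-by-hand sketch of how the `NestedTensor` above is used (in DETR's convention the mask is `True` on padded positions; the shapes are illustrative):

```python
# Hand-built NestedTensor: two 'images' padded to a common 3x4 grid.
# Assumes the NestedTensor class from the snippet above is in scope.
import torch

padded = torch.zeros(2, 3, 4)
mask = torch.ones(2, 3, 4, dtype=torch.bool)  # True = padding
mask[0, :2, :3] = False                       # image 0 really occupies 2x3
mask[1, :3, :4] = False                       # image 1 fills the whole grid
nt = NestedTensor(padded, mask)
tensors, m = nt.decompose()                   # recover both pieces
nt_cpu = nt.to("cpu")                         # moves tensors and mask together
```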
5
0
4
0
4
0
1
0
0
2
0
0
4
2
4
4
19
3
16
10
11
0
15
10
10
2
0
1
5
428
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/loss/loss_rt_detr.py
transformers.loss.loss_rt_detr.RTDetrHungarianMatcher
import torch.nn as nn import torch from ..utils import is_scipy_available, is_vision_available, requires_backends from ..image_transforms import center_to_corners_format from scipy.optimize import linear_sum_assignment import torch.nn.functional as F from .loss_for_object_detection import box_iou, dice_loss, generalized_box_iou, nested_tensor_from_tensor_list, sigmoid_focal_loss class RTDetrHungarianMatcher(nn.Module): """This class computes an assignment between the targets and the predictions of the network. For efficiency reasons, the targets don't include the no_object. Because of this, in general, there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions, while the others are un-matched (and thus treated as non-objects). Args: config: RTDetrConfig """ def __init__(self, config): super().__init__() requires_backends(self, ['scipy']) self.class_cost = config.matcher_class_cost self.bbox_cost = config.matcher_bbox_cost self.giou_cost = config.matcher_giou_cost self.use_focal_loss = config.use_focal_loss self.alpha = config.matcher_alpha self.gamma = config.matcher_gamma if self.class_cost == self.bbox_cost == self.giou_cost == 0: raise ValueError("All costs of the Matcher can't be 0") @torch.no_grad() def forward(self, outputs, targets): """Performs the matching Params: outputs: This is a dict that contains at least these entries: "logits": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits "pred_boxes": Tensor of dim [batch_size, num_queries, 4] with the predicted box coordinates targets: This is a list of targets (len(targets) = batch_size), where each target is a dict containing: "class_labels": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of ground-truth objects in the target) containing the class labels "boxes": Tensor of dim [num_target_boxes, 4] containing the target box coordinates Returns: A list of size batch_size, containing tuples of (index_i, index_j) where: - index_i is the indices of the selected predictions (in order) - index_j is the indices of the corresponding selected targets (in order) For each batch element, it holds: len(index_i) = len(index_j) = min(num_queries, num_target_boxes) """ batch_size, num_queries = outputs['logits'].shape[:2] out_bbox = outputs['pred_boxes'].flatten(0, 1) target_ids = torch.cat([v['class_labels'] for v in targets]) target_bbox = torch.cat([v['boxes'] for v in targets]) if self.use_focal_loss: out_prob = F.sigmoid(outputs['logits'].flatten(0, 1)) out_prob = out_prob[:, target_ids] neg_cost_class = (1 - self.alpha) * out_prob ** self.gamma * -(1 - out_prob + 1e-08).log() pos_cost_class = self.alpha * (1 - out_prob) ** self.gamma * -(out_prob + 1e-08).log() class_cost = pos_cost_class - neg_cost_class else: out_prob = outputs['logits'].flatten(0, 1).softmax(-1) class_cost = -out_prob[:, target_ids] bbox_cost = torch.cdist(out_bbox, target_bbox, p=1) giou_cost = -generalized_box_iou(center_to_corners_format(out_bbox), center_to_corners_format(target_bbox)) cost_matrix = self.bbox_cost * bbox_cost + self.class_cost * class_cost + self.giou_cost * giou_cost cost_matrix = cost_matrix.view(batch_size, num_queries, -1).cpu() sizes = [len(v['boxes']) for v in targets] indices = [linear_sum_assignment(c[i]) for i, c in enumerate(cost_matrix.split(sizes, -1))] return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices]
class RTDetrHungarianMatcher(nn.Module): '''This class computes an assignment between the targets and the predictions of the network For efficiency reasons, the targets don't include the no_object. Because of this, in general, there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions, while the others are un-matched (and thus treated as non-objects). Args: config: RTDetrConfig ''' def __init__(self, config): pass @torch.no_grad() def forward(self, outputs, targets): '''Performs the matching Params: outputs: This is a dict that contains at least these entries: "logits": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits "pred_boxes": Tensor of dim [batch_size, num_queries, 4] with the predicted box coordinates targets: This is a list of targets (len(targets) = batch_size), where each target is a dict containing: "class_labels": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of ground-truth objects in the target) containing the class labels "boxes": Tensor of dim [num_target_boxes, 4] containing the target box coordinates Returns: A list of size batch_size, containing tuples of (index_i, index_j) where: - index_i is the indices of the selected predictions (in order) - index_j is the indices of the corresponding selected targets (in order) For each batch element, it holds: len(index_i) = len(index_j) = min(num_queries, num_target_boxes) ''' pass
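As in the other matchers, the bbox cost is a dense pairwise L1 distance over `(cx, cy, w, h)` boxes, computed in one shot with `torch.cdist(..., p=1)`; a toy view with made-up boxes:

```python
# Pairwise L1 bbox cost, as produced by torch.cdist(p=1) in the forward above.
import torch

out_bbox = torch.tensor([[0.5, 0.5, 0.2, 0.2],
                         [0.1, 0.1, 0.3, 0.3]])   # 2 predictions
tgt_bbox = torch.tensor([[0.5, 0.5, 0.2, 0.2]])   # 1 target
print(torch.cdist(out_bbox, tgt_bbox, p=1))       # tensor([[0.], [1.]])
```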
4
2
33
5
16
13
2
0.97
1
3
0
0
2
6
2
12
79
14
34
23
30
33
32
22
29
2
1
1
4
429
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/loss/loss_rt_detr.py
transformers.loss.loss_rt_detr.RTDetrLoss
import torch import torch.nn as nn import torch.nn.functional as F from .loss_for_object_detection import box_iou, dice_loss, generalized_box_iou, nested_tensor_from_tensor_list, sigmoid_focal_loss class RTDetrLoss(nn.Module): """ This class computes the losses for RTDetr. The process happens in two steps: 1) we compute hungarian assignment between ground truth boxes and the outputs of the model 2) we supervise each pair of matched ground-truth / prediction (supervise class and box). Args: matcher (`DetrHungarianMatcher`): Module able to compute a matching between targets and proposals. weight_dict (`Dict`): Dictionary relating each loss with its weights. These losses are configured in RTDetrConf as `weight_loss_vfl`, `weight_loss_bbox`, `weight_loss_giou` losses (`list[str]`): List of all the losses to be applied. See `get_loss` for a list of all available losses. alpha (`float`): Parameter alpha used to compute the focal loss. gamma (`float`): Parameter gamma used to compute the focal loss. eos_coef (`float`): Relative classification weight applied to the no-object category. num_classes (`int`): Number of object categories, omitting the special no-object category. """ def __init__(self, config): super().__init__() self.matcher = RTDetrHungarianMatcher(config) self.num_classes = config.num_labels self.weight_dict = {'loss_vfl': config.weight_loss_vfl, 'loss_bbox': config.weight_loss_bbox, 'loss_giou': config.weight_loss_giou} self.losses = ['vfl', 'boxes'] self.eos_coef = config.eos_coefficient empty_weight = torch.ones(config.num_labels + 1) empty_weight[-1] = self.eos_coef self.register_buffer('empty_weight', empty_weight) self.alpha = config.focal_loss_alpha self.gamma = config.focal_loss_gamma def loss_labels_vfl(self, outputs, targets, indices, num_boxes, log=True): if 'pred_boxes' not in outputs: raise KeyError('No predicted boxes found in outputs') if 'logits' not in outputs: raise KeyError('No predicted logits found in outputs') idx = self._get_source_permutation_idx(indices) src_boxes = outputs['pred_boxes'][idx] target_boxes = torch.cat([_target['boxes'][i] for _target, (_, i) in zip(targets, indices)], dim=0) ious, _ = box_iou(center_to_corners_format(src_boxes.detach()), center_to_corners_format(target_boxes)) ious = torch.diag(ious) src_logits = outputs['logits'] target_classes_original = torch.cat([_target['class_labels'][i] for _target, (_, i) in zip(targets, indices)]) target_classes = torch.full(src_logits.shape[:2], self.num_classes, dtype=torch.int64, device=src_logits.device) target_classes[idx] = target_classes_original target = F.one_hot(target_classes, num_classes=self.num_classes + 1)[..., :-1] target_score_original = torch.zeros_like(target_classes, dtype=src_logits.dtype) target_score_original[idx] = ious.to(target_score_original.dtype) target_score = target_score_original.unsqueeze(-1) * target pred_score = F.sigmoid(src_logits.detach()) weight = self.alpha * pred_score.pow(self.gamma) * (1 - target) + target_score loss = F.binary_cross_entropy_with_logits(src_logits, target_score, weight=weight, reduction='none') loss = loss.mean(1).sum() * src_logits.shape[1] / num_boxes return {'loss_vfl': loss} def loss_labels(self, outputs, targets, indices, num_boxes, log=True): """Classification loss (NLL) targets dicts must contain the key "class_labels" containing a tensor of dim [nb_target_boxes] """ if 'logits' not in outputs: raise KeyError('No logits were found in the outputs') src_logits = outputs['logits'] idx = self._get_source_permutation_idx(indices) 
target_classes_original = torch.cat([_target['class_labels'][i] for _target, (_, i) in zip(targets, indices)]) target_classes = torch.full(src_logits.shape[:2], self.num_classes, dtype=torch.int64, device=src_logits.device) target_classes[idx] = target_classes_original loss_ce = F.cross_entropy(src_logits.transpose(1, 2), target_classes, self.empty_weight) losses = {'loss_ce': loss_ce} return losses @torch.no_grad() def loss_cardinality(self, outputs, targets, indices, num_boxes): """ Compute the cardinality error, i.e. the absolute error in the number of predicted non-empty boxes. This is not really a loss, it is intended for logging purposes only. It doesn't propagate gradients. """ logits = outputs['logits'] device = logits.device target_lengths = torch.as_tensor([len(v['class_labels']) for v in targets], device=device) card_pred = (logits.argmax(-1) != logits.shape[-1] - 1).sum(1) card_err = nn.functional.l1_loss(card_pred.float(), target_lengths.float()) losses = {'cardinality_error': card_err} return losses def loss_boxes(self, outputs, targets, indices, num_boxes): """ Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss. Targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4]. The target boxes are expected in format (center_x, center_y, w, h), normalized by the image size. """ if 'pred_boxes' not in outputs: raise KeyError('No predicted boxes found in outputs') idx = self._get_source_permutation_idx(indices) src_boxes = outputs['pred_boxes'][idx] target_boxes = torch.cat([t['boxes'][i] for t, (_, i) in zip(targets, indices)], dim=0) losses = {} loss_bbox = F.l1_loss(src_boxes, target_boxes, reduction='none') losses['loss_bbox'] = loss_bbox.sum() / num_boxes loss_giou = 1 - torch.diag(generalized_box_iou(center_to_corners_format(src_boxes), center_to_corners_format(target_boxes))) losses['loss_giou'] = loss_giou.sum() / num_boxes return losses def loss_masks(self, outputs, targets, indices, num_boxes): """ Compute the losses related to the masks: the focal loss and the dice loss. Targets dicts must contain the key "masks" containing a tensor of dim [nb_target_boxes, h, w]. 
""" if 'pred_masks' not in outputs: raise KeyError('No predicted masks found in outputs') source_idx = self._get_source_permutation_idx(indices) target_idx = self._get_target_permutation_idx(indices) source_masks = outputs['pred_masks'] source_masks = source_masks[source_idx] masks = [t['masks'] for t in targets] target_masks, valid = nested_tensor_from_tensor_list(masks).decompose() target_masks = target_masks.to(source_masks) target_masks = target_masks[target_idx] source_masks = nn.functional.interpolate(source_masks[:, None], size=target_masks.shape[-2:], mode='bilinear', align_corners=False) source_masks = source_masks[:, 0].flatten(1) target_masks = target_masks.flatten(1) target_masks = target_masks.view(source_masks.shape) losses = {'loss_mask': sigmoid_focal_loss(source_masks, target_masks, num_boxes), 'loss_dice': dice_loss(source_masks, target_masks, num_boxes)} return losses def loss_labels_bce(self, outputs, targets, indices, num_boxes, log=True): src_logits = outputs['logits'] idx = self._get_source_permutation_idx(indices) target_classes_original = torch.cat([_target['class_labels'][i] for _target, (_, i) in zip(targets, indices)]) target_classes = torch.full(src_logits.shape[:2], self.num_classes, dtype=torch.int64, device=src_logits.device) target_classes[idx] = target_classes_original target = F.one_hot(target_classes, num_classes=self.num_classes + 1)[..., :-1] loss = F.binary_cross_entropy_with_logits(src_logits, target * 1.0, reduction='none') loss = loss.mean(1).sum() * src_logits.shape[1] / num_boxes return {'loss_bce': loss} def _get_source_permutation_idx(self, indices): batch_idx = torch.cat([torch.full_like(source, i) for i, (source, _) in enumerate(indices)]) source_idx = torch.cat([source for source, _ in indices]) return (batch_idx, source_idx) def _get_target_permutation_idx(self, indices): batch_idx = torch.cat([torch.full_like(target, i) for i, (_, target) in enumerate(indices)]) target_idx = torch.cat([target for _, target in indices]) return (batch_idx, target_idx) def loss_labels_focal(self, outputs, targets, indices, num_boxes, log=True): if 'logits' not in outputs: raise KeyError('No logits found in outputs') src_logits = outputs['logits'] idx = self._get_source_permutation_idx(indices) target_classes_original = torch.cat([_target['class_labels'][i] for _target, (_, i) in zip(targets, indices)]) target_classes = torch.full(src_logits.shape[:2], self.num_classes, dtype=torch.int64, device=src_logits.device) target_classes[idx] = target_classes_original target = F.one_hot(target_classes, num_classes=self.num_classes + 1)[..., :-1] loss = sigmoid_focal_loss(src_logits, target, self.alpha, self.gamma) loss = loss.mean(1).sum() * src_logits.shape[1] / num_boxes return {'loss_focal': loss} def get_loss(self, loss, outputs, targets, indices, num_boxes): loss_map = {'labels': self.loss_labels, 'cardinality': self.loss_cardinality, 'boxes': self.loss_boxes, 'masks': self.loss_masks, 'bce': self.loss_labels_bce, 'focal': self.loss_labels_focal, 'vfl': self.loss_labels_vfl} if loss not in loss_map: raise ValueError(f'Loss {loss} not supported') return loss_map[loss](outputs, targets, indices, num_boxes) @staticmethod def get_cdn_matched_indices(dn_meta, targets): dn_positive_idx, dn_num_group = (dn_meta['dn_positive_idx'], dn_meta['dn_num_group']) num_gts = [len(t['class_labels']) for t in targets] device = targets[0]['class_labels'].device dn_match_indices = [] for i, num_gt in enumerate(num_gts): if num_gt > 0: gt_idx = torch.arange(num_gt, dtype=torch.int64, 
device=device) gt_idx = gt_idx.tile(dn_num_group) assert len(dn_positive_idx[i]) == len(gt_idx) dn_match_indices.append((dn_positive_idx[i], gt_idx)) else: dn_match_indices.append((torch.zeros(0, dtype=torch.int64, device=device), torch.zeros(0, dtype=torch.int64, device=device))) return dn_match_indices def forward(self, outputs, targets): """ This performs the loss computation. Args: outputs (`dict`, *optional*): Dictionary of tensors, see the output specification of the model for the format. targets (`list[dict]`, *optional*): List of dicts, such that `len(targets) == batch_size`. The expected keys in each dict depends on the losses applied, see each loss' doc. """ outputs_without_aux = {k: v for k, v in outputs.items() if 'auxiliary_outputs' not in k} indices = self.matcher(outputs_without_aux, targets) num_boxes = sum((len(t['class_labels']) for t in targets)) num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device) num_boxes = torch.clamp(num_boxes, min=1).item() losses = {} for loss in self.losses: l_dict = self.get_loss(loss, outputs, targets, indices, num_boxes) l_dict = {k: l_dict[k] * self.weight_dict[k] for k in l_dict if k in self.weight_dict} losses.update(l_dict) if 'auxiliary_outputs' in outputs: for i, auxiliary_outputs in enumerate(outputs['auxiliary_outputs']): indices = self.matcher(auxiliary_outputs, targets) for loss in self.losses: if loss == 'masks': continue l_dict = self.get_loss(loss, auxiliary_outputs, targets, indices, num_boxes) l_dict = {k: l_dict[k] * self.weight_dict[k] for k in l_dict if k in self.weight_dict} l_dict = {k + f'_aux_{i}': v for k, v in l_dict.items()} losses.update(l_dict) if 'dn_auxiliary_outputs' in outputs: if 'denoising_meta_values' not in outputs: raise ValueError("The output must have the 'denoising_meta_values` key. Please, ensure that 'outputs' includes a 'denoising_meta_values' entry.") indices = self.get_cdn_matched_indices(outputs['denoising_meta_values'], targets) num_boxes = num_boxes * outputs['denoising_meta_values']['dn_num_group'] for i, auxiliary_outputs in enumerate(outputs['dn_auxiliary_outputs']): for loss in self.losses: if loss == 'masks': continue kwargs = {} l_dict = self.get_loss(loss, auxiliary_outputs, targets, indices, num_boxes, **kwargs) l_dict = {k: l_dict[k] * self.weight_dict[k] for k in l_dict if k in self.weight_dict} l_dict = {k + f'_dn_{i}': v for k, v in l_dict.items()} losses.update(l_dict) return losses
class RTDetrLoss(nn.Module): ''' This class computes the losses for RTDetr. The process happens in two steps: 1) we compute hungarian assignment between ground truth boxes and the outputs of the model 2) we supervise each pair of matched ground-truth / prediction (supervise class and box). Args: matcher (`DetrHungarianMatcher`): Module able to compute a matching between targets and proposals. weight_dict (`Dict`): Dictionary relating each loss with its weights. These losses are configured in RTDetrConf as `weight_loss_vfl`, `weight_loss_bbox`, `weight_loss_giou` losses (`list[str]`): List of all the losses to be applied. See `get_loss` for a list of all available losses. alpha (`float`): Parameter alpha used to compute the focal loss. gamma (`float`): Parameter gamma used to compute the focal loss. eos_coef (`float`): Relative classification weight applied to the no-object category. num_classes (`int`): Number of object categories, omitting the special no-object category. ''' def __init__(self, config): pass def loss_labels_vfl(self, outputs, targets, indices, num_boxes, log=True): pass def loss_labels(self, outputs, targets, indices, num_boxes, log=True): '''Classification loss (NLL) targets dicts must contain the key "class_labels" containing a tensor of dim [nb_target_boxes] ''' pass @torch.no_grad() def loss_cardinality(self, outputs, targets, indices, num_boxes): ''' Compute the cardinality error, i.e. the absolute error in the number of predicted non-empty boxes. This is not really a loss, it is intended for logging purposes only. It doesn't propagate gradients. ''' pass def loss_boxes(self, outputs, targets, indices, num_boxes): ''' Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss. Targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4]. The target boxes are expected in format (center_x, center_y, w, h), normalized by the image size. ''' pass def loss_masks(self, outputs, targets, indices, num_boxes): ''' Compute the losses related to the masks: the focal loss and the dice loss. Targets dicts must contain the key "masks" containing a tensor of dim [nb_target_boxes, h, w]. ''' pass def loss_labels_bce(self, outputs, targets, indices, num_boxes, log=True): pass def _get_source_permutation_idx(self, indices): pass def _get_target_permutation_idx(self, indices): pass def loss_labels_focal(self, outputs, targets, indices, num_boxes, log=True): pass def get_loss(self, loss, outputs, targets, indices, num_boxes): pass @staticmethod def get_cdn_matched_indices(dn_meta, targets): pass def forward(self, outputs, targets): ''' This performs the loss computation. Args: outputs (`dict`, *optional*): Dictionary of tensors, see the output specification of the model for the format. targets (`list[dict]`, *optional*): List of dicts, such that `len(targets) == batch_size`. The expected keys in each dict depends on the losses applied, see each loss' doc. ''' pass
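(The snippet additionally assumes `RTDetrHungarianMatcher` and `center_to_corners_format` are in scope; the original module defines and imports them alongside this class.) For contrastive denoising, `get_cdn_matched_indices` skips the matcher entirely and repeats each image's ground-truth ids once per denoising group; the tiling step in isolation:

```python
# Ground-truth id tiling used by get_cdn_matched_indices.
import torch

num_gt, dn_num_group = 3, 2
gt_idx = torch.arange(num_gt, dtype=torch.int64).tile(dn_num_group)
print(gt_idx.tolist())  # [0, 1, 2, 0, 1, 2] -> one copy per denoising group
```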
16
6
21
2
16
3
2
0.28
1
7
1
0
12
7
13
23
306
43
205
92
189
58
168
90
154
11
1
4
32
430
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/modelcard.py
transformers.modelcard.ModelCard
import os import json import warnings from .utils import MODEL_CARD_NAME, cached_file, is_datasets_available, is_offline_mode, is_tokenizers_available, is_torch_available, logging import copy class ModelCard: """ Structured Model Card class. Store model card as well as methods for loading/downloading/saving model cards. Please read the following paper for details and explanation on the sections: "Model Cards for Model Reporting" by Margaret Mitchell, Simone Wu, Andrew Zaldivar, Parker Barnes, Lucy Vasserman, Ben Hutchinson, Elena Spitzer, Inioluwa Deborah Raji and Timnit Gebru for the proposal behind model cards. Link: https://huggingface.co/papers/1810.03993 Note: A model card can be loaded and saved to disk. """ def __init__(self, **kwargs): warnings.warn('The class `ModelCard` is deprecated and will be removed in version 5 of Transformers', FutureWarning) self.model_details = kwargs.pop('model_details', {}) self.intended_use = kwargs.pop('intended_use', {}) self.factors = kwargs.pop('factors', {}) self.metrics = kwargs.pop('metrics', {}) self.evaluation_data = kwargs.pop('evaluation_data', {}) self.training_data = kwargs.pop('training_data', {}) self.quantitative_analyses = kwargs.pop('quantitative_analyses', {}) self.ethical_considerations = kwargs.pop('ethical_considerations', {}) self.caveats_and_recommendations = kwargs.pop('caveats_and_recommendations', {}) for key, value in kwargs.items(): try: setattr(self, key, value) except AttributeError as err: logger.error(f"Can't set {key} with value {value} for {self}") raise err def save_pretrained(self, save_directory_or_file): """Save a model card object to the directory or file `save_directory_or_file`.""" if os.path.isdir(save_directory_or_file): output_model_card_file = os.path.join(save_directory_or_file, MODEL_CARD_NAME) else: output_model_card_file = save_directory_or_file self.to_json_file(output_model_card_file) logger.info(f'Model card saved in {output_model_card_file}') @classmethod def from_pretrained(cls, pretrained_model_name_or_path, **kwargs): """ Instantiate a [`ModelCard`] from a pre-trained model model card. Parameters: pretrained_model_name_or_path: either: - a string, the *model id* of a pretrained model card hosted inside a model repo on huggingface.co. - a path to a *directory* containing a model card file saved using the [`~ModelCard.save_pretrained`] method, e.g.: `./my_model_directory/`. - a path or url to a saved model card JSON *file*, e.g.: `./my_model_directory/modelcard.json`. cache_dir: (*optional*) string: Path to a directory in which a downloaded pre-trained model card should be cached if the standard cache should not be used. kwargs: (*optional*) dict: key/value pairs with which to update the ModelCard object after loading. - The values in kwargs of any keys which are model card attributes will be used to override the loaded values. - Behavior concerning key/value pairs whose keys are *not* model card attributes is controlled by the *return_unused_kwargs* keyword parameter. proxies: (*optional*) dict, default None: A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}. The proxies are used on each request. return_unused_kwargs: (*optional*) bool: - If False, then this function returns just the final model card object. 
- If True, then this functions returns a tuple *(model card, unused_kwargs)* where *unused_kwargs* is a dictionary consisting of the key/value pairs whose keys are not model card attributes: ie the part of kwargs which has not been used to update *ModelCard* and is otherwise ignored. Examples: ```python # Download model card from huggingface.co and cache. modelcard = ModelCard.from_pretrained("google-bert/bert-base-uncased") # Model card was saved using *save_pretrained('./test/saved_model/')* modelcard = ModelCard.from_pretrained("./test/saved_model/") modelcard = ModelCard.from_pretrained("./test/saved_model/modelcard.json") modelcard = ModelCard.from_pretrained("google-bert/bert-base-uncased", output_attentions=True, foo=False) ```""" cache_dir = kwargs.pop('cache_dir', None) proxies = kwargs.pop('proxies', None) return_unused_kwargs = kwargs.pop('return_unused_kwargs', False) from_pipeline = kwargs.pop('_from_pipeline', None) user_agent = {'file_type': 'model_card'} if from_pipeline is not None: user_agent['using_pipeline'] = from_pipeline is_local = os.path.isdir(pretrained_model_name_or_path) if os.path.isfile(pretrained_model_name_or_path): resolved_model_card_file = pretrained_model_name_or_path is_local = True else: try: resolved_model_card_file = cached_file(pretrained_model_name_or_path, filename=MODEL_CARD_NAME, cache_dir=cache_dir, proxies=proxies, user_agent=user_agent) if is_local: logger.info(f'loading model card file {resolved_model_card_file}') else: logger.info(f'loading model card file {MODEL_CARD_NAME} from cache at {resolved_model_card_file}') modelcard = cls.from_json_file(resolved_model_card_file) except (OSError, json.JSONDecodeError): modelcard = cls() to_remove = [] for key, value in kwargs.items(): if hasattr(modelcard, key): setattr(modelcard, key, value) to_remove.append(key) for key in to_remove: kwargs.pop(key, None) logger.info(f'Model card: {modelcard}') if return_unused_kwargs: return (modelcard, kwargs) else: return modelcard @classmethod def from_dict(cls, json_object): """Constructs a `ModelCard` from a Python dictionary of parameters.""" return cls(**json_object) @classmethod def from_json_file(cls, json_file): """Constructs a `ModelCard` from a json file of parameters.""" with open(json_file, encoding='utf-8') as reader: text = reader.read() dict_obj = json.loads(text) return cls(**dict_obj) def __eq__(self, other): return self.__dict__ == other.__dict__ def __repr__(self): return str(self.to_json_string()) def to_dict(self): """Serializes this instance to a Python dictionary.""" output = copy.deepcopy(self.__dict__) return output def to_json_string(self): """Serializes this instance to a JSON string.""" return json.dumps(self.to_dict(), indent=2, sort_keys=True) + '\n' def to_json_file(self, json_file_path): """Save this instance to a json file.""" with open(json_file_path, 'w', encoding='utf-8') as writer: writer.write(self.to_json_string())
class ModelCard: ''' Structured Model Card class. Store model card as well as methods for loading/downloading/saving model cards. Please read the following paper for details and explanation on the sections: "Model Cards for Model Reporting" by Margaret Mitchell, Simone Wu, Andrew Zaldivar, Parker Barnes, Lucy Vasserman, Ben Hutchinson, Elena Spitzer, Inioluwa Deborah Raji and Timnit Gebru for the proposal behind model cards. Link: https://huggingface.co/papers/1810.03993 Note: A model card can be loaded and saved to disk. ''' def __init__(self, **kwargs): pass def save_pretrained(self, save_directory_or_file): '''Save a model card object to the directory or file `save_directory_or_file`.''' pass @classmethod def from_pretrained(cls, pretrained_model_name_or_path, **kwargs): ''' Instantiate a [`ModelCard`] from a pre-trained model model card. Parameters: pretrained_model_name_or_path: either: - a string, the *model id* of a pretrained model card hosted inside a model repo on huggingface.co. - a path to a *directory* containing a model card file saved using the [`~ModelCard.save_pretrained`] method, e.g.: `./my_model_directory/`. - a path or url to a saved model card JSON *file*, e.g.: `./my_model_directory/modelcard.json`. cache_dir: (*optional*) string: Path to a directory in which a downloaded pre-trained model card should be cached if the standard cache should not be used. kwargs: (*optional*) dict: key/value pairs with which to update the ModelCard object after loading. - The values in kwargs of any keys which are model card attributes will be used to override the loaded values. - Behavior concerning key/value pairs whose keys are *not* model card attributes is controlled by the *return_unused_kwargs* keyword parameter. proxies: (*optional*) dict, default None: A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}. The proxies are used on each request. return_unused_kwargs: (*optional*) bool: - If False, then this function returns just the final model card object. - If True, then this functions returns a tuple *(model card, unused_kwargs)* where *unused_kwargs* is a dictionary consisting of the key/value pairs whose keys are not model card attributes: ie the part of kwargs which has not been used to update *ModelCard* and is otherwise ignored. Examples: ```python # Download model card from huggingface.co and cache. modelcard = ModelCard.from_pretrained("google-bert/bert-base-uncased") # Model card was saved using *save_pretrained('./test/saved_model/')* modelcard = ModelCard.from_pretrained("./test/saved_model/") modelcard = ModelCard.from_pretrained("./test/saved_model/modelcard.json") modelcard = ModelCard.from_pretrained("google-bert/bert-base-uncased", output_attentions=True, foo=False) ```''' pass @classmethod def from_dict(cls, json_object): '''Constructs a `ModelCard` from a Python dictionary of parameters.''' pass @classmethod def from_json_file(cls, json_file): '''Constructs a `ModelCard` from a json file of parameters.''' pass def __eq__(self, other): pass def __repr__(self): pass def to_dict(self): '''Serializes this instance to a Python dictionary.''' pass def to_json_string(self): '''Serializes this instance to a JSON string.''' pass def to_json_file(self, json_file_path): '''Save this instance to a json file.''' pass
14
8
15
2
9
5
2
0.6
0
4
0
0
7
9
10
10
171
29
89
41
75
53
74
35
63
9
0
3
21
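A minimal round-trip sketch for the `ModelCard` class in the record above, using only the methods shown there; `my_card.json` and the section contents are illustrative, and the class emits a `FutureWarning` on construction because it is deprecated.

```python
import os

from transformers.modelcard import ModelCard

# Build a card from keyword sections, serialize it to JSON, and load it back.
card = ModelCard(
    model_details={"architecture": "bert-base-uncased"},
    intended_use={"primary_uses": "text-classification demos"},
)
card.to_json_file("my_card.json")                  # indented, key-sorted JSON
reloaded = ModelCard.from_json_file("my_card.json")
assert reloaded == card                            # __eq__ compares the instance __dict__

# from_pretrained also accepts a plain file path; kwargs that are not card
# attributes are handed back when return_unused_kwargs=True.
card2, unused = ModelCard.from_pretrained("my_card.json", return_unused_kwargs=True, foo=1)
print(unused)                                      # {'foo': 1}
os.remove("my_card.json")
```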
431
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/modelcard.py
transformers.modelcard.TrainingSummary
from .utils import MODEL_CARD_NAME, cached_file, is_datasets_available, is_offline_mode, is_tokenizers_available, is_torch_available, logging from huggingface_hub import model_info from huggingface_hub.utils import HFValidationError from . import __version__ from dataclasses import dataclass import yaml import os from typing import Any, Optional, Union from huggingface_hub.errors import OfflineModeIsEnabled from pathlib import Path import requests @dataclass class TrainingSummary: model_name: str language: Optional[Union[str, list[str]]] = None license: Optional[str] = None tags: Optional[Union[str, list[str]]] = None finetuned_from: Optional[str] = None tasks: Optional[Union[str, list[str]]] = None dataset: Optional[Union[str, list[str]]] = None dataset_tags: Optional[Union[str, list[str]]] = None dataset_args: Optional[Union[str, list[str]]] = None dataset_metadata: Optional[dict[str, Any]] = None eval_results: Optional[dict[str, float]] = None eval_lines: Optional[list[str]] = None hyperparameters: Optional[dict[str, Any]] = None source: Optional[str] = 'trainer' def __post_init__(self): if self.license is None and (not is_offline_mode()) and (self.finetuned_from is not None) and (len(self.finetuned_from) > 0): try: info = model_info(self.finetuned_from) for tag in info.tags: if tag.startswith('license:'): self.license = tag[8:] except (requests.exceptions.HTTPError, requests.exceptions.ConnectionError, HFValidationError, OfflineModeIsEnabled): pass def create_model_index(self, metric_mapping): model_index = {'name': self.model_name} dataset_names = _listify(self.dataset) dataset_tags = _listify(self.dataset_tags) dataset_args = _listify(self.dataset_args) dataset_metadata = _listify(self.dataset_metadata) if len(dataset_args) < len(dataset_tags): dataset_args = dataset_args + [None] * (len(dataset_tags) - len(dataset_args)) dataset_mapping = dict(zip(dataset_tags, dataset_names)) dataset_arg_mapping = dict(zip(dataset_tags, dataset_args)) dataset_metadata_mapping = dict(zip(dataset_tags, dataset_metadata)) task_mapping = {task: TASK_TAG_TO_NAME_MAPPING[task] for task in _listify(self.tasks) if task in TASK_TAG_TO_NAME_MAPPING} model_index['results'] = [] if len(task_mapping) == 0 and len(dataset_mapping) == 0: return [model_index] if len(task_mapping) == 0: task_mapping = {None: None} if len(dataset_mapping) == 0: dataset_mapping = {None: None} all_possibilities = [(task_tag, ds_tag) for task_tag in task_mapping for ds_tag in dataset_mapping] for task_tag, ds_tag in all_possibilities: result = {} if task_tag is not None: result['task'] = {'name': task_mapping[task_tag], 'type': task_tag} if ds_tag is not None: metadata = dataset_metadata_mapping.get(ds_tag, {}) result['dataset'] = {'name': dataset_mapping[ds_tag], 'type': ds_tag, **metadata} if dataset_arg_mapping[ds_tag] is not None: result['dataset']['args'] = dataset_arg_mapping[ds_tag] if len(metric_mapping) > 0: result['metrics'] = [] for metric_tag, metric_name in metric_mapping.items(): result['metrics'].append({'name': metric_name, 'type': metric_tag, 'value': self.eval_results[metric_name]}) if 'task' in result and 'dataset' in result and ('metrics' in result): model_index['results'].append(result) else: logger.info(f'Dropping the following result as it does not have all the necessary fields:\n{result}') return [model_index] def create_metadata(self): metric_mapping = infer_metric_tags_from_eval_results(self.eval_results) metadata = {} metadata = _insert_value(metadata, 'library_name', 'transformers') metadata = 
_insert_values_as_list(metadata, 'language', self.language) metadata = _insert_value(metadata, 'license', self.license) if self.finetuned_from is not None and isinstance(self.finetuned_from, str) and (len(self.finetuned_from) > 0): metadata = _insert_value(metadata, 'base_model', self.finetuned_from) metadata = _insert_values_as_list(metadata, 'tags', self.tags) metadata = _insert_values_as_list(metadata, 'datasets', self.dataset_tags) metadata = _insert_values_as_list(metadata, 'metrics', list(metric_mapping.keys())) metadata['model-index'] = self.create_model_index(metric_mapping) return metadata def to_model_card(self): model_card = '' metadata = yaml.dump(self.create_metadata(), sort_keys=False) if len(metadata) > 0: model_card = f'---\n{metadata}---\n' if self.source == 'trainer': model_card += AUTOGENERATED_TRAINER_COMMENT model_card += f'\n# {self.model_name}\n\n' if self.finetuned_from is None: model_card += 'This model was trained from scratch on ' else: model_card += f'This model is a fine-tuned version of [{self.finetuned_from}](https://huggingface.co/{self.finetuned_from}) on ' if self.dataset is None or (isinstance(self.dataset, list) and len(self.dataset) == 0): model_card += 'an unknown dataset.' elif isinstance(self.dataset, str): model_card += f'the {self.dataset} dataset.' elif isinstance(self.dataset, (tuple, list)) and len(self.dataset) == 1: model_card += f'the {self.dataset[0]} dataset.' else: model_card += ', '.join([f'the {ds}' for ds in self.dataset[:-1]]) + f' and the {self.dataset[-1]} datasets.' if self.eval_results is not None: model_card += '\nIt achieves the following results on the evaluation set:\n' model_card += '\n'.join([f'- {name}: {_maybe_round(value)}' for name, value in self.eval_results.items()]) model_card += '\n' model_card += '\n## Model description\n\nMore information needed\n' model_card += '\n## Intended uses & limitations\n\nMore information needed\n' model_card += '\n## Training and evaluation data\n\nMore information needed\n' model_card += '\n## Training procedure\n' model_card += '\n### Training hyperparameters\n' if self.hyperparameters is not None: model_card += '\nThe following hyperparameters were used during training:\n' model_card += '\n'.join([f'- {name}: {value}' for name, value in self.hyperparameters.items()]) model_card += '\n' else: model_card += '\nMore information needed\n' if self.eval_lines is not None: model_card += '\n### Training results\n\n' model_card += make_markdown_table(self.eval_lines) model_card += '\n' model_card += '\n### Framework versions\n\n' model_card += f'- Transformers {__version__}\n' if self.source == 'trainer' and is_torch_available(): import torch model_card += f'- Pytorch {torch.__version__}\n' if is_datasets_available(): import datasets model_card += f'- Datasets {datasets.__version__}\n' if is_tokenizers_available(): import tokenizers model_card += f'- Tokenizers {tokenizers.__version__}\n' return model_card @classmethod def from_trainer(cls, trainer, language=None, license=None, tags=None, model_name=None, finetuned_from=None, tasks=None, dataset_tags=None, dataset_metadata=None, dataset=None, dataset_args=None): one_dataset = trainer.eval_dataset if trainer.eval_dataset is not None else trainer.train_dataset if is_hf_dataset(one_dataset) and (dataset_tags is None or dataset_args is None or dataset_metadata is None): default_tag = one_dataset.builder_name if default_tag not in ['csv', 'json', 'pandas', 'parquet', 'text']: if dataset_metadata is None: dataset_metadata = [{'config': 
one_dataset.config_name, 'split': str(one_dataset.split)}] if dataset_tags is None: dataset_tags = [default_tag] if dataset_args is None: dataset_args = [one_dataset.config_name] if dataset is None and dataset_tags is not None: dataset = dataset_tags if finetuned_from is None and hasattr(trainer.model.config, '_name_or_path') and (not os.path.isdir(trainer.model.config._name_or_path)): finetuned_from = trainer.model.config._name_or_path if tasks is None: model_class_name = trainer.model.__class__.__name__ for task, mapping in TASK_MAPPING.items(): if model_class_name in _get_mapping_values(mapping): tasks = task if model_name is None: model_name = Path(trainer.args.output_dir).name if len(model_name) == 0: model_name = finetuned_from if tags is None: tags = ['generated_from_trainer'] elif isinstance(tags, str) and tags != 'generated_from_trainer': tags = [tags, 'generated_from_trainer'] elif 'generated_from_trainer' not in tags: tags.append('generated_from_trainer') _, eval_lines, eval_results = parse_log_history(trainer.state.log_history) hyperparameters = extract_hyperparameters_from_trainer(trainer) return cls(language=language, license=license, tags=tags, model_name=model_name, finetuned_from=finetuned_from, tasks=tasks, dataset=dataset, dataset_tags=dataset_tags, dataset_args=dataset_args, dataset_metadata=dataset_metadata, eval_results=eval_results, eval_lines=eval_lines, hyperparameters=hyperparameters)
@dataclass class TrainingSummary: def __post_init__(self): pass def create_model_index(self, metric_mapping): pass def create_metadata(self): pass def to_model_card(self): pass @classmethod def from_trainer(cls, trainer, language=None, license=None, tags=None, model_name=None, finetuned_from=None, tasks=None, dataset_tags=None, dataset_metadata=None, dataset=None, dataset_args=None): pass
8
0
53
7
44
4
11
0.08
0
7
0
0
4
0
6
6
343
46
282
82
243
23
183
54
172
17
0
4
65
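A hedged sketch of using the `TrainingSummary` dataclass from the record above without a `Trainer`: populate the fields by hand and render the card. All values are made up, and `finetuned_from` is left unset so `__post_init__` does not try to look up a license on the Hub.

```python
from transformers.modelcard import TrainingSummary

summary = TrainingSummary(
    model_name="my-demo-model",
    tasks="text-classification",
    dataset="imdb",
    dataset_tags="imdb",
    eval_results={"Accuracy": 0.91},
    hyperparameters={"learning_rate": 2e-5, "num_epochs": 3},
)

metadata = summary.create_metadata()   # dict with tags, datasets, metrics and a model-index entry
card_text = summary.to_model_card()    # YAML front matter followed by the markdown body
print(card_text.splitlines()[0])       # '---'
```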
432
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/modeling_attn_mask_utils.py
transformers.modeling_attn_mask_utils.AttentionMaskConverter
import torch from dataclasses import dataclass from .utils.import_utils import is_torchdynamo_compiling from typing import Optional, Union @dataclass class AttentionMaskConverter: """ A utility attention mask class that allows one to: - Create a causal 4d mask - Create a causal 4d mask with slided window - Convert a 2d attention mask (batch_size, query_length) to a 4d attention mask (batch_size, 1, query_length, key_value_length) that can be multiplied with attention scores Examples: ```python >>> import torch >>> from transformers.modeling_attn_mask_utils import AttentionMaskConverter >>> converter = AttentionMaskConverter(True) >>> converter.to_4d(torch.tensor([[0, 0, 0, 1, 1]]), 5, key_value_length=5, dtype=torch.float32) tensor([[[[-3.4028e+38, -3.4028e+38, -3.4028e+38, -3.4028e+38, -3.4028e+38], [-3.4028e+38, -3.4028e+38, -3.4028e+38, -3.4028e+38, -3.4028e+38], [-3.4028e+38, -3.4028e+38, -3.4028e+38, -3.4028e+38, -3.4028e+38], [-3.4028e+38, -3.4028e+38, -3.4028e+38, 0.0000e+00, -3.4028e+38], [-3.4028e+38, -3.4028e+38, -3.4028e+38, 0.0000e+00, 0.0000e+00]]]]) ``` Parameters: is_causal (`bool`): Whether the attention mask should be a uni-directional (causal) or bi-directional mask. sliding_window (`int`, *optional*): Optionally, the sliding window masks can be created if `sliding_window` is defined to a positive integer. """ is_causal: bool sliding_window: int def __init__(self, is_causal: bool, sliding_window: Optional[int]=None): self.is_causal = is_causal self.sliding_window = sliding_window if self.sliding_window is not None and self.sliding_window <= 0: raise ValueError(f'Make sure that when passing `sliding_window` that its value is a strictly positive integer, not `{self.sliding_window}`') def to_causal_4d(self, batch_size: int, query_length: int, key_value_length: int, dtype: torch.dtype, device: Union[torch.device, 'str']='cpu') -> Optional[torch.Tensor]: """ Creates a causal 4D mask of (bsz, head_dim=1, query_length, key_value_length) shape and adds large negative bias to upper right hand triangular matrix (causal mask). """ if not self.is_causal: raise ValueError(f'Please use `to_causal_4d` only if {self.__class__} has `is_causal` set to True.') input_shape = (batch_size, query_length) past_key_values_length = key_value_length - query_length causal_4d_mask = None if input_shape[-1] > 1 or self.sliding_window is not None: causal_4d_mask = self._make_causal_mask(input_shape, dtype, device=device, past_key_values_length=past_key_values_length, sliding_window=self.sliding_window) return causal_4d_mask def to_4d(self, attention_mask_2d: torch.Tensor, query_length: int, dtype: torch.dtype, key_value_length: Optional[int]=None) -> torch.Tensor: """ Converts 2D attention mask to 4D attention mask by expanding mask to (bsz, head_dim=1, query_length, key_value_length) shape and by adding a large negative bias to not-attended positions. If attention_mask is causal, a causal mask will be added. """ input_shape = (attention_mask_2d.shape[0], query_length) causal_4d_mask = None if (input_shape[-1] > 1 or self.sliding_window is not None) and self.is_causal: if key_value_length is None: raise ValueError('This attention mask converter is causal. 
Make sure to pass `key_value_length` to correctly create a causal mask.') past_key_values_length = key_value_length - query_length causal_4d_mask = self._make_causal_mask(input_shape, dtype, device=attention_mask_2d.device, past_key_values_length=past_key_values_length, sliding_window=self.sliding_window) elif self.sliding_window is not None: raise NotImplementedError('Sliding window is currently only implemented for causal masking') expanded_attn_mask = self._expand_mask(attention_mask_2d, dtype, tgt_len=input_shape[-1]).to(attention_mask_2d.device) if causal_4d_mask is not None: expanded_attn_mask = causal_4d_mask.masked_fill(expanded_attn_mask.bool(), torch.finfo(dtype).min) expanded_4d_mask = expanded_attn_mask return expanded_4d_mask @staticmethod def _make_causal_mask(input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int=0, sliding_window: Optional[int]=None): """ Make causal mask used for bi-directional self-attention. """ bsz, tgt_len = input_ids_shape mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device) mask_cond = torch.arange(mask.size(-1), device=device) mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0) mask = mask.to(dtype) if past_key_values_length > 0: mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1) if sliding_window is not None: diagonal = past_key_values_length - sliding_window - 1 context_mask = torch.tril(torch.ones_like(mask, dtype=torch.bool), diagonal=diagonal) if is_torchdynamo_compiling(): mask = mask.clone() mask.masked_fill_(context_mask, torch.finfo(dtype).min) return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length) @staticmethod def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int]=None): """ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. """ bsz, src_len = mask.size() tgt_len = tgt_len if tgt_len is not None else src_len expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) inverted_mask = torch.tensor(1.0, dtype=dtype) - expanded_mask return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min) @staticmethod def _unmask_unattended(expanded_mask: torch.FloatTensor, min_dtype: float): """ Attend to all tokens in masked rows from the expanded attention mask, for example the relevant first rows when using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path. Details: https://github.com/pytorch/pytorch/issues/110213 `expanded_mask` is [bsz, num_masks, tgt_seq_len, src_seq_len] or [bsz, tgt_seq_len, src_seq_len]. `attention_mask` is [bsz, src_seq_len]. The dimension num_masks of `expanded_mask` is most often 1, but it can also be the number of heads in the case of alibi attention bias. For example, if `expanded_mask` is (e.g. 
here left-padding case) ``` [[[[0, 0, 0], [0, 0, 0], [0, 0, 1]]], [[[1, 0, 0], [1, 1, 0], [1, 1, 1]]], [[[0, 0, 0], [0, 1, 0], [0, 1, 1]]]] ``` then the modified `expanded_mask` will be ``` [[[[1, 1, 1], <-- modified [1, 1, 1], <-- modified [0, 0, 1]]], [[[1, 0, 0], [1, 1, 0], [1, 1, 1]]], [[[1, 1, 1], <-- modified [0, 1, 0], [0, 1, 1]]]] ``` """ if expanded_mask.dtype == torch.bool: raise ValueError('AttentionMaskConverter._unmask_unattended expects a float `expanded_mask`, got a BoolTensor.') return expanded_mask.mul(~torch.all(expanded_mask == min_dtype, dim=-1, keepdim=True)) @staticmethod def _ignore_causal_mask_sdpa(attention_mask: Optional[torch.Tensor], inputs_embeds: torch.Tensor, past_key_values_length: int, sliding_window: Optional[int]=None, is_training: bool=False) -> bool: """ Detects whether the optional user-specified attention_mask & the automatically created causal mask can be ignored in case PyTorch's SDPA is used, rather relying on SDPA's `is_causal` argument. In case no token is masked in the `attention_mask` argument, if `query_length == 1` or `key_value_length == query_length`, we rather rely on SDPA `is_causal` argument to use causal/non-causal masks, allowing to dispatch to the flash attention kernel (that can otherwise not be used if a custom `attn_mask` is passed). """ _, query_length = (inputs_embeds.shape[0], inputs_embeds.shape[1]) key_value_length = query_length + past_key_values_length is_tracing = torch.jit.is_tracing() or isinstance(inputs_embeds, torch.fx.Proxy) or is_torchdynamo_compiling() ignore_causal_mask = False if attention_mask is None: if (is_training or not is_tracing) and (query_length == 1 or key_value_length == query_length) and (sliding_window is None or key_value_length < sliding_window): ignore_causal_mask = True elif sliding_window is None or key_value_length < sliding_window: if len(attention_mask.shape) == 4: return False elif not is_tracing and torch.all(attention_mask == 1): if query_length == 1 or key_value_length == query_length: ignore_causal_mask = True return ignore_causal_mask
@dataclass class AttentionMaskConverter: ''' A utility attention mask class that allows one to: - Create a causal 4d mask - Create a causal 4d mask with slided window - Convert a 2d attention mask (batch_size, query_length) to a 4d attention mask (batch_size, 1, query_length, key_value_length) that can be multiplied with attention scores Examples: ```python >>> import torch >>> from transformers.modeling_attn_mask_utils import AttentionMaskConverter >>> converter = AttentionMaskConverter(True) >>> converter.to_4d(torch.tensor([[0, 0, 0, 1, 1]]), 5, key_value_length=5, dtype=torch.float32) tensor([[[[-3.4028e+38, -3.4028e+38, -3.4028e+38, -3.4028e+38, -3.4028e+38], [-3.4028e+38, -3.4028e+38, -3.4028e+38, -3.4028e+38, -3.4028e+38], [-3.4028e+38, -3.4028e+38, -3.4028e+38, -3.4028e+38, -3.4028e+38], [-3.4028e+38, -3.4028e+38, -3.4028e+38, 0.0000e+00, -3.4028e+38], [-3.4028e+38, -3.4028e+38, -3.4028e+38, 0.0000e+00, 0.0000e+00]]]]) ``` Parameters: is_causal (`bool`): Whether the attention mask should be a uni-directional (causal) or bi-directional mask. sliding_window (`int`, *optional*): Optionally, the sliding window masks can be created if `sliding_window` is defined to a positive integer. ''' def __init__(self, is_causal: bool, sliding_window: Optional[int]=None): pass def to_causal_4d(self, batch_size: int, query_length: int, key_value_length: int, dtype: torch.dtype, device: Union[torch.device, 'str']='cpu') -> Optional[torch.Tensor]: ''' Creates a causal 4D mask of (bsz, head_dim=1, query_length, key_value_length) shape and adds large negative bias to upper right hand triangular matrix (causal mask). ''' pass def to_4d(self, attention_mask_2d: torch.Tensor, query_length: int, dtype: torch.dtype, key_value_length: Optional[int]=None) -> torch.Tensor: ''' Converts 2D attention mask to 4D attention mask by expanding mask to (bsz, head_dim=1, query_length, key_value_length) shape and by adding a large negative bias to not-attended positions. If attention_mask is causal, a causal mask will be added. ''' pass @staticmethod def _make_causal_mask(input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int=0, sliding_window: Optional[int]=None): ''' Make causal mask used for bi-directional self-attention. ''' pass @staticmethod def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int]=None): ''' Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. ''' pass @staticmethod def _unmask_unattended(expanded_mask: torch.FloatTensor, min_dtype: float): ''' Attend to all tokens in masked rows from the expanded attention mask, for example the relevant first rows when using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path. Details: https://github.com/pytorch/pytorch/issues/110213 `expanded_mask` is [bsz, num_masks, tgt_seq_len, src_seq_len] or [bsz, tgt_seq_len, src_seq_len]. `attention_mask` is [bsz, src_seq_len]. The dimension num_masks of `expanded_mask` is most often 1, but it can also be the number of heads in the case of alibi attention bias. For example, if `expanded_mask` is (e.g. 
here left-padding case) ``` [[[[0, 0, 0], [0, 0, 0], [0, 0, 1]]], [[[1, 0, 0], [1, 1, 0], [1, 1, 1]]], [[[0, 0, 0], [0, 1, 0], [0, 1, 1]]]] ``` then the modified `expanded_mask` will be ``` [[[[1, 1, 1], <-- modified [1, 1, 1], <-- modified [0, 0, 1]]], [[[1, 0, 0], [1, 1, 0], [1, 1, 1]]], [[[1, 1, 1], <-- modified [0, 1, 0], [0, 1, 1]]]] ``` ''' pass @staticmethod def _ignore_causal_mask_sdpa(attention_mask: Optional[torch.Tensor], inputs_embeds: torch.Tensor, past_key_values_length: int, sliding_window: Optional[int]=None, is_training: bool=False) -> bool: ''' Detects whether the optional user-specified attention_mask & the automatically created causal mask can be ignored in case PyTorch's SDPA is used, rather relying on SDPA's `is_causal` argument. In case no token is masked in the `attention_mask` argument, if `query_length == 1` or `key_value_length == query_length`, we rather rely on SDPA `is_causal` argument to use causal/non-causal masks, allowing to dispatch to the flash attention kernel (that can otherwise not be used if a custom `attn_mask` is passed). ''' pass
13
7
33
4
17
12
4
0.84
0
7
0
0
3
0
7
7
277
42
128
60
88
107
69
28
61
7
0
3
25
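A short usage sketch for the `AttentionMaskConverter` record above, extending the docstring example to the two public entry points; the shapes and the padding pattern are illustrative.

```python
import torch

from transformers.modeling_attn_mask_utils import AttentionMaskConverter

converter = AttentionMaskConverter(is_causal=True)

# 2D padding mask (1 = attend, 0 = padded) expanded to (bsz, 1, q_len, kv_len)
# with the causal bias folded in; blocked positions get dtype's most negative value.
mask_2d = torch.tensor([[0, 1, 1, 1]])
mask_4d = converter.to_4d(mask_2d, query_length=4, dtype=torch.float32, key_value_length=4)
print(mask_4d.shape)   # torch.Size([1, 1, 4, 4])

# Purely causal mask for a decoding step: 2 new tokens on top of 6 cached ones.
causal = converter.to_causal_4d(batch_size=1, query_length=2, key_value_length=8, dtype=torch.float32)
print(causal.shape)    # torch.Size([1, 1, 2, 8])
```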
433
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/modeling_flash_attention_utils.py
transformers.modeling_flash_attention_utils.FlashAttentionKwargs
import torch.nn.functional as F import torch from typing import Optional, TypedDict class FlashAttentionKwargs(TypedDict, total=False): """ Keyword arguments for Flash Attention with Compile. Attributes: cu_seq_lens_q (`torch.LongTensor`, *optional*) Gets cumulative sequence length for query state. cu_seq_lens_k (`torch.LongTensor`, *optional*) Gets cumulative sequence length for key state. max_length_q (`int`, *optional*): Maximum sequence length for query state. max_length_k (`int`, *optional*): Maximum sequence length for key state. """ cu_seq_lens_q: Optional[torch.LongTensor] cu_seq_lens_k: Optional[torch.LongTensor] max_length_q: Optional[int] max_length_k: Optional[int]
class FlashAttentionKwargs(TypedDict, total=False): ''' Keyword arguments for Flash Attention with Compile. Attributes: cu_seq_lens_q (`torch.LongTensor`, *optional*) Gets cumulative sequence length for query state. cu_seq_lens_k (`torch.LongTensor`, *optional*) Gets cumulative sequence length for key state. max_length_q (`int`, *optional*): Maximum sequence length for query state. max_length_k (`int`, *optional*): Maximum sequence length for key state. ''' pass
1
1
0
0
0
0
0
2.4
2
0
0
23
0
0
0
0
19
2
5
1
4
12
5
1
4
0
1
0
0
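A sketch of how the fields of `FlashAttentionKwargs` from the record above are typically filled for a packed batch. Only the four keys come from the record; the prefix-sum construction and the sequence lengths are assumptions about the usual flash-attention varlen convention.

```python
import torch

from transformers.modeling_flash_attention_utils import FlashAttentionKwargs

# Two packed sequences of lengths 3 and 5: cu_seq_lens_* is the exclusive prefix sum
# of the lengths, max_length_* the longest single sequence. The same values are reused
# for queries and keys because this assumes plain self-attention over one packed batch.
seq_lens = torch.tensor([3, 5])
cu_seq_lens = torch.cat([torch.zeros(1, dtype=torch.long), seq_lens.cumsum(dim=0)])

fa_kwargs: FlashAttentionKwargs = {
    "cu_seq_lens_q": cu_seq_lens,         # tensor([0, 3, 8])
    "cu_seq_lens_k": cu_seq_lens,
    "max_length_q": int(seq_lens.max()),  # 5
    "max_length_k": int(seq_lens.max()),
}
print(fa_kwargs["cu_seq_lens_q"], fa_kwargs["max_length_q"])
```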
434
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/modeling_gguf_pytorch_utils.py
transformers.modeling_gguf_pytorch_utils.BloomTensorProcessor
import numpy as np class BloomTensorProcessor(TensorProcessor): def __init__(self, config=None): super().__init__(config=config) def process(self, weights, name, **kwargs): if 'attn_qkv' in name: num_heads = self.config['n_head'] n_embed = self.config['hidden_size'] if 'weight' in name: weights = self._reverse_reshape_weights(weights, num_heads, n_embed) else: weights = self._reverse_reshape_bias(weights, num_heads, n_embed) return GGUFTensor(weights, name, {}) def _reverse_reshape_weights(self, weights: np.ndarray, n_head: int, n_embed: int): q, k, v = np.array_split(weights, 3, axis=0) q = q.reshape(n_head, n_embed // n_head, n_embed) k = k.reshape(n_head, n_embed // n_head, n_embed) v = v.reshape(n_head, n_embed // n_head, n_embed) qkv_weights = np.stack([q, k, v], axis=1) return qkv_weights.reshape(n_head * 3 * (n_embed // n_head), n_embed) def _reverse_reshape_bias(self, weights: np.ndarray, n_head: int, n_embed: int): q_bias, k_bias, v_bias = np.array_split(weights, 3) q_bias = q_bias.reshape(n_head, n_embed // n_head) k_bias = k_bias.reshape(n_head, n_embed // n_head) v_bias = v_bias.reshape(n_head, n_embed // n_head) qkv_bias = np.stack([q_bias, k_bias, v_bias], axis=1).flatten() return qkv_bias
class BloomTensorProcessor(TensorProcessor): def __init__(self, config=None): pass def process(self, weights, name, **kwargs): pass def _reverse_reshape_weights(self, weights: np.ndarray, n_head: int, n_embed: int): pass def _reverse_reshape_bias(self, weights: np.ndarray, n_head: int, n_embed: int): pass
5
0
8
1
6
1
2
0.15
1
3
1
0
4
0
4
6
37
7
26
11
21
4
25
11
20
3
1
2
6
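A quick shape check for the `BloomTensorProcessor` record above: the GGUF checkpoint stores the fused QKV weight as three stacked blocks (all Q, then all K, then all V), and the processor regroups it into the per-head interleaved layout. The head count and hidden size are toy values, and the tensor name only needs to contain `attn_qkv` and `weight`; the import assumes the class is reachable from the module named in the record.

```python
import numpy as np

from transformers.modeling_gguf_pytorch_utils import BloomTensorProcessor

n_head, n_embed = 4, 8
processor = BloomTensorProcessor(config={"n_head": n_head, "hidden_size": n_embed})

fused_qkv = np.random.randn(3 * n_embed, n_embed).astype(np.float32)
out = processor.process(fused_qkv, "transformer.h.0.self_attention.attn_qkv.weight")
print(out.weights.shape)   # (24, 8): same element count, rows regrouped per head as [q, k, v]
```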
435
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/modeling_gguf_pytorch_utils.py
transformers.modeling_gguf_pytorch_utils.GGUFTensor
import numpy as np from typing import NamedTuple, Optional class GGUFTensor(NamedTuple): weights: np.ndarray name: str metadata: dict
class GGUFTensor(NamedTuple): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
4
0
4
1
3
0
4
1
3
0
1
0
0
436
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/modeling_gguf_pytorch_utils.py
transformers.modeling_gguf_pytorch_utils.GPT2TensorProcessor
import torch import numpy as np class GPT2TensorProcessor(TensorProcessor): def __init__(self, config=None): super().__init__(config=config) def process(self, weights, name, **kwargs): if 'attn_qkv.weight' in name or 'ffn_down.weight' in name or 'ffn_up.weight' in name or ('attn_output.weight' in name): weights = weights.T if name == 'output.weight': name = 'lm_head.weight' parsed_parameters = kwargs.get('parsed_parameters', {}) parsed_parameters['tensors'][name] = torch.from_numpy(np.copy(weights)) name = None return GGUFTensor(weights, name, {})
class GPT2TensorProcessor(TensorProcessor): def __init__(self, config=None): pass def process(self, weights, name, **kwargs): pass
3
0
11
1
8
3
2
0.35
1
2
1
0
2
0
2
4
24
2
17
4
14
6
12
4
9
3
1
1
4
437
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/modeling_gguf_pytorch_utils.py
transformers.modeling_gguf_pytorch_utils.Gemma2TensorProcessor
class Gemma2TensorProcessor(TensorProcessor): def __init__(self, config=None): super().__init__(config=config) def process(self, weights, name, **kwargs): if 'norm.weight' in name: weights = weights - 1 return GGUFTensor(weights, name, {})
class Gemma2TensorProcessor(TensorProcessor): def __init__(self, config=None): pass def process(self, weights, name, **kwargs): pass
3
0
3
0
3
0
2
0.29
1
2
1
0
2
0
2
4
10
1
7
3
4
2
7
3
4
2
1
1
3
438
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/modeling_gguf_pytorch_utils.py
transformers.modeling_gguf_pytorch_utils.LlamaTensorProcessor
from typing import NamedTuple, Optional import numpy as np class LlamaTensorProcessor(TensorProcessor): def __init__(self, config=None): super().__init__(config=config) def process(self, weights, name, **kwargs): if '.attn_k.' in name or '.attn_q.' in name: num_heads = self.config.get('num_attention_heads') num_kv_heads = self.config.get('num_key_value_heads') if None in (num_heads, num_kv_heads): return GGUFTensor(weights, name, {}) if '.attn_q.' in name: weights = self._reverse_permute_weights(weights, num_heads, num_heads) elif '.attn_k.' in name: weights = self._reverse_permute_weights(weights, num_heads, num_kv_heads) return GGUFTensor(weights, name, {}) def _reverse_permute_weights(self, weights: np.ndarray, n_head: int, num_kv_heads: Optional[int]=None) -> np.ndarray: if num_kv_heads is not None and n_head != num_kv_heads: n_head = num_kv_heads dim = weights.shape[0] // n_head // 2 w = weights.reshape(n_head, dim, 2, *weights.shape[1:]) return w.swapaxes(2, 1).reshape(weights.shape)
class LlamaTensorProcessor(TensorProcessor): def __init__(self, config=None): pass def process(self, weights, name, **kwargs): pass def _reverse_permute_weights(self, weights: np.ndarray, n_head: int, num_kv_heads: Optional[int]=None) -> np.ndarray: pass
4
0
8
1
7
1
3
0.09
1
3
1
0
3
0
3
5
28
4
22
10
16
2
19
8
15
5
1
2
8
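A round-trip check for the `LlamaTensorProcessor` record above. `permute_for_gguf` is a hypothetical re-implementation of the interleaving that llama.cpp-style conversion applies to the Q/K projections; the check only verifies that `_reverse_permute_weights` (called via `process`) is its inverse for these toy shapes.

```python
import numpy as np

from transformers.modeling_gguf_pytorch_utils import LlamaTensorProcessor

def permute_for_gguf(w: np.ndarray, n_head: int) -> np.ndarray:
    # Assumed forward permutation: split each head's rows into the two rotary
    # halves and interleave them.
    dim = w.shape[0] // n_head // 2
    return w.reshape(n_head, 2, dim, *w.shape[1:]).swapaxes(1, 2).reshape(w.shape)

n_head, head_dim, hidden = 2, 4, 8
hf_weight = np.arange(n_head * head_dim * hidden, dtype=np.float32).reshape(n_head * head_dim, hidden)

processor = LlamaTensorProcessor(config={"num_attention_heads": n_head, "num_key_value_heads": n_head})
restored = processor.process(permute_for_gguf(hf_weight, n_head), "blk.0.attn_q.weight").weights
print(np.array_equal(restored, hf_weight))   # True if the two permutations are inverses
```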
439
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/modeling_gguf_pytorch_utils.py
transformers.modeling_gguf_pytorch_utils.MambaTensorProcessor
import numpy as np class MambaTensorProcessor(TensorProcessor): def __init__(self, config=None): super().__init__(config=config) def process(self, weights, name, **kwargs): if 'ssm_conv1d.weight' in name: weights = np.expand_dims(weights, axis=1) if 'ssm_a' in name: weights = np.log(-weights) return GGUFTensor(weights, name, {})
class MambaTensorProcessor(TensorProcessor): def __init__(self, config=None): pass def process(self, weights, name, **kwargs): pass
3
0
6
0
4
2
2
0.44
1
2
1
0
2
0
2
4
14
1
9
3
6
4
9
3
6
3
1
1
4
440
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/modeling_gguf_pytorch_utils.py
transformers.modeling_gguf_pytorch_utils.NemotronTensorProcessor
class NemotronTensorProcessor(TensorProcessor): def __init__(self, config=None): super().__init__(config=config) def process(self, weights, name, **kwargs): if 'norm.weight' in name: weights = weights - 1 return GGUFTensor(weights, name, {})
class NemotronTensorProcessor(TensorProcessor): def __init__(self, config=None): pass def process(self, weights, name, **kwargs): pass
3
0
3
0
3
0
2
0.14
1
2
1
0
2
0
2
4
9
1
7
3
4
1
7
3
4
2
1
1
3
441
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/modeling_gguf_pytorch_utils.py
transformers.modeling_gguf_pytorch_utils.Qwen2MoeTensorProcessor
import torch import numpy as np class Qwen2MoeTensorProcessor(TensorProcessor): def __init__(self, config=None): super().__init__(config=config) def process(self, weights, name, **kwargs): if '_exp' in name: tensor_key_mapping = kwargs.get('tensor_key_mapping') parsed_parameters = kwargs.get('parsed_parameters') if tensor_key_mapping: self._split_moe_expert_tensor(weights, parsed_parameters, name, tensor_key_mapping) return GGUFTensor(weights, None, {}) if 'ffn_gate_inp_shexp' in name: weights = np.expand_dims(weights, axis=0) return GGUFTensor(weights, name, {}) def _split_moe_expert_tensor(self, weights: np.ndarray, parsed_parameters: dict[str, dict], name: str, tensor_key_mapping: dict): name = tensor_key_mapping[name] w_counter = self.config.get('num_experts', 60) for i in range(0, w_counter): temp_name = name.replace('mlp.experts.', f'mlp.experts.{i}.') exp_weight = weights[i] parsed_parameters['tensors'][temp_name] = torch.from_numpy(np.copy(exp_weight))
class Qwen2MoeTensorProcessor(TensorProcessor): def __init__(self, config=None): pass def process(self, weights, name, **kwargs): pass def _split_moe_expert_tensor(self, weights: np.ndarray, parsed_parameters: dict[str, dict], name: str, tensor_key_mapping: dict): pass
4
0
8
0
7
1
2
0.18
1
5
1
0
3
0
3
5
28
2
22
12
16
4
20
10
16
4
1
2
7
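A sketch of the expert-splitting path in the `Qwen2MoeTensorProcessor` record above: a stacked `(num_experts, out, in)` tensor is written into `parsed_parameters['tensors']` one expert at a time, and the returned name is `None` so the caller skips the usual single-tensor store. The GGUF name, the mapped HF name, and the sizes are all illustrative.

```python
import numpy as np

from transformers.modeling_gguf_pytorch_utils import Qwen2MoeTensorProcessor

num_experts, out_dim, in_dim = 4, 6, 5
stacked = np.random.randn(num_experts, out_dim, in_dim).astype(np.float32)

processor = Qwen2MoeTensorProcessor(config={"num_experts": num_experts})
parsed = {"tensors": {}}
mapping = {"blk.0.ffn_gate_exps.weight": "model.layers.0.mlp.experts.gate_proj.weight"}

result = processor.process(stacked, "blk.0.ffn_gate_exps.weight",
                           tensor_key_mapping=mapping, parsed_parameters=parsed)
print(result.name)                 # None: the per-expert tensors were stored directly
print(sorted(parsed["tensors"]))   # one 'mlp.experts.{i}.' entry per expert
```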
442
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/modeling_gguf_pytorch_utils.py
transformers.modeling_gguf_pytorch_utils.T5TensorProcessor
class T5TensorProcessor(TensorProcessor): def __init__(self, config=None): super().__init__(config=config) def process(self, weights, name, **kwargs): bid = None for chunk in name.split('.'): if chunk.isdigit(): bid = int(chunk) break return GGUFTensor(weights, name, {'bid': bid})
class T5TensorProcessor(TensorProcessor): def __init__(self, config=None): pass def process(self, weights, name, **kwargs): pass
3
0
5
0
5
0
2
0
1
3
1
0
2
0
2
4
11
1
10
5
7
0
10
5
7
3
1
2
4
443
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/modeling_gguf_pytorch_utils.py
transformers.modeling_gguf_pytorch_utils.TensorProcessor
class TensorProcessor: def __init__(self, config=None): self.config = config or {} def process(self, weights, name, **kwargs): return GGUFTensor(weights, name, {})
class TensorProcessor: def __init__(self, config=None): pass def process(self, weights, name, **kwargs): pass
3
0
2
0
2
0
1
0
0
1
1
8
2
1
2
2
6
1
5
4
2
0
5
4
2
1
0
0
2
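A hedged sketch of how the `TensorProcessor` base class and its subclasses from the preceding records fit together: pick a processor per architecture, call `process`, and consume the returned `GGUFTensor`. The `PROCESSORS` registry and the tensor name are illustrative, not the module's actual dispatch table.

```python
import numpy as np

from transformers.modeling_gguf_pytorch_utils import (
    GGUFTensor,
    MambaTensorProcessor,
    TensorProcessor,
)

# Hypothetical per-architecture registry; the real module keeps its own mapping.
PROCESSORS = {"mamba": MambaTensorProcessor}

def process_tensor(architecture: str, weights: np.ndarray, name: str, config: dict) -> GGUFTensor:
    processor_cls = PROCESSORS.get(architecture, TensorProcessor)
    return processor_cls(config=config).process(weights, name)

# The Mamba processor converts the (negative) A values back to log space via log(-A).
out = process_tensor("mamba", np.full((4,), -2.0), "blk.0.ssm_a", config={})
print(out.name, out.weights)   # 'blk.0.ssm_a' and four values of log(2) ~ 0.693
```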
444
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/modeling_outputs.py
transformers.modeling_outputs.BackboneOutput
import torch from typing import Optional from dataclasses import dataclass from .utils import ModelOutput @dataclass class BackboneOutput(ModelOutput): """ Base class for outputs of backbones. Args: feature_maps (`tuple(torch.FloatTensor)` of shape `(batch_size, num_channels, height, width)`): Feature maps of the stages. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)` or `(batch_size, num_channels, height, width)`, depending on the backbone. Hidden-states of the model at the output of each stage plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Only applicable if the backbone uses attention. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ feature_maps: Optional[tuple[torch.FloatTensor]] = None hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None attentions: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass class BackboneOutput(ModelOutput): ''' Base class for outputs of backbones. Args: feature_maps (`tuple(torch.FloatTensor)` of shape `(batch_size, num_channels, height, width)`): Feature maps of the stages. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)` or `(batch_size, num_channels, height, width)`, depending on the backbone. Hidden-states of the model at the output of each stage plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Only applicable if the backbone uses attention. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. ''' pass
2
1
0
0
0
0
0
4
1
0
0
0
0
0
0
0
24
4
4
4
3
16
4
4
3
0
1
0
0
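A small usage sketch for the `BackboneOutput` record above; the same access pattern applies to the other `ModelOutput` dataclasses in the following records. Thanks to the `ModelOutput` base class, these outputs behave like both dataclasses and ordered dicts, and fields left as `None` are dropped from the tuple view. The tensors are random placeholders.

```python
import torch

from transformers.modeling_outputs import BackboneOutput

# Feature maps from two backbone stages; hidden_states and attentions stay None.
feature_maps = (torch.randn(1, 64, 32, 32), torch.randn(1, 128, 16, 16))
output = BackboneOutput(feature_maps=feature_maps)

print(output.feature_maps[1].shape)      # attribute access: torch.Size([1, 128, 16, 16])
print(output["feature_maps"][0].shape)   # dict-style access to the same field
print(len(output.to_tuple()))            # 1, because None-valued fields are dropped
```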
445
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/modeling_outputs.py
transformers.modeling_outputs.BaseModelOutput
from .utils import ModelOutput from typing import Optional import torch from dataclasses import dataclass @dataclass class BaseModelOutput(ModelOutput): """ Base class for model's outputs, with potential hidden states and attentions. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ last_hidden_state: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None attentions: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass class BaseModelOutput(ModelOutput): ''' Base class for model's outputs, with potential hidden states and attentions. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. ''' pass
2
1
0
0
0
0
0
3.75
1
0
0
2
0
0
0
0
23
4
4
4
3
15
4
4
3
0
1
0
0
446
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/modeling_outputs.py
transformers.modeling_outputs.BaseModelOutputWithCrossAttentions
from dataclasses import dataclass from .utils import ModelOutput import torch from typing import Optional @dataclass class BaseModelOutputWithCrossAttentions(ModelOutput): """ Base class for model's outputs, with potential hidden states and attentions. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. """ last_hidden_state: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None attentions: Optional[tuple[torch.FloatTensor, ...]] = None cross_attentions: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass class BaseModelOutputWithCrossAttentions(ModelOutput): ''' Base class for model's outputs, with potential hidden states and attentions. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. ''' pass
2
1
0
0
0
0
0
4
1
0
0
6
0
0
0
0
30
5
5
5
4
20
5
5
4
0
1
0
0
447
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/modeling_outputs.py
transformers.modeling_outputs.BaseModelOutputWithNoAttention
from .utils import ModelOutput from dataclasses import dataclass from typing import Optional import torch @dataclass class BaseModelOutputWithNoAttention(ModelOutput): """ Base class for model's outputs, with potential hidden states. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Sequence of hidden-states at the output of the last layer of the model. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, num_channels, height, width)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. """ last_hidden_state: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass class BaseModelOutputWithNoAttention(ModelOutput): ''' Base class for model's outputs, with potential hidden states. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Sequence of hidden-states at the output of the last layer of the model. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, num_channels, height, width)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. ''' pass
2
1
0
0
0
0
0
3.33
1
0
0
0
0
0
0
0
16
3
3
3
2
10
3
3
2
0
1
0
0
448
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/modeling_outputs.py
transformers.modeling_outputs.BaseModelOutputWithPast
from typing import Optional from .cache_utils import Cache, EncoderDecoderCache from .utils import ModelOutput import torch from dataclasses import dataclass @dataclass class BaseModelOutputWithPast(ModelOutput): """ Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding). Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1, hidden_size)` is output. past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if `config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ last_hidden_state: Optional[torch.FloatTensor] = None past_key_values: Optional[Cache] = None hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None attentions: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass class BaseModelOutputWithPast(ModelOutput): ''' Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding). Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1, hidden_size)` is output. past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if `config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. ''' pass
2
1
0
0
0
0
0
5
1
0
0
0
0
0
0
0
36
6
5
5
4
25
5
5
4
0
1
0
0
449
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/modeling_outputs.py
transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions
from dataclasses import dataclass from .cache_utils import Cache, EncoderDecoderCache from .utils import ModelOutput from typing import Optional import torch @dataclass class BaseModelOutputWithPastAndCrossAttentions(ModelOutput): """ Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding). Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1, hidden_size)` is output. past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if `config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. """ last_hidden_state: Optional[torch.FloatTensor] = None past_key_values: Optional[Cache] = None hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None attentions: Optional[tuple[torch.FloatTensor, ...]] = None cross_attentions: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass class BaseModelOutputWithPastAndCrossAttentions(ModelOutput): ''' Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding). Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1, hidden_size)` is output. past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if `config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. ''' pass
2
1
0
0
0
0
0
5
1
0
0
0
0
0
0
0
43
7
6
6
5
30
6
6
5
0
1
0
0
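A small sketch (assumed shapes, random tensors) of the per-layer layout of `attentions` and `cross_attentions` described in the `BaseModelOutputWithPastAndCrossAttentions` record above; it is illustrative only.

```python
import torch
from transformers.modeling_outputs import BaseModelOutputWithPastAndCrossAttentions

num_layers, batch, heads, tgt_len, src_len = 2, 1, 4, 5, 9
out = BaseModelOutputWithPastAndCrossAttentions(
    last_hidden_state=torch.randn(batch, tgt_len, 32),
    attentions=tuple(torch.rand(batch, heads, tgt_len, tgt_len) for _ in range(num_layers)),
    cross_attentions=tuple(torch.rand(batch, heads, tgt_len, src_len) for _ in range(num_layers)),
)
assert len(out.cross_attentions) == num_layers  # one tensor per decoder layer
```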
450
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/modeling_outputs.py
transformers.modeling_outputs.BaseModelOutputWithPooling
from typing import Optional from dataclasses import dataclass import torch from .utils import ModelOutput @dataclass class BaseModelOutputWithPooling(ModelOutput): """ Base class for model's outputs that also contains a pooling of the last hidden states. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`): Last layer hidden-state of the first token of the sequence (classification token) after further processing through the layers used for the auxiliary pretraining task. E.g. for BERT-family of models, this returns the classification token after processing through a linear layer and a tanh activation function. The linear layer weights are trained from the next sentence prediction (classification) objective during pretraining. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ last_hidden_state: Optional[torch.FloatTensor] = None pooler_output: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None attentions: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass class BaseModelOutputWithPooling(ModelOutput): ''' Base class for model's outputs that also contains a pooling of the last hidden states. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`): Last layer hidden-state of the first token of the sequence (classification token) after further processing through the layers used for the auxiliary pretraining task. E.g. for BERT-family of models, this returns the classification token after processing through a linear layer and a tanh activation function. The linear layer weights are trained from the next sentence prediction (classification) objective during pretraining. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. ''' pass
2
1
0
0
0
0
0
4
1
0
0
3
0
0
0
0
29
4
5
5
4
20
5
5
4
0
1
0
0
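A minimal sketch of the dict/tuple access behavior that `ModelOutput` subclasses such as `BaseModelOutputWithPooling` provide; shapes and values are invented.

```python
import torch
from transformers.modeling_outputs import BaseModelOutputWithPooling

hidden = torch.randn(2, 8, 16)   # (batch_size, sequence_length, hidden_size)
pooled = torch.randn(2, 16)      # (batch_size, hidden_size)
out = BaseModelOutputWithPooling(last_hidden_state=hidden, pooler_output=pooled)

assert out.last_hidden_state is out[0]   # index access follows field order
assert out["pooler_output"] is pooled    # key access works like a dict
print(list(out.keys()))                  # fields left as None are omitted
```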
451
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/modeling_outputs.py
transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions
from .cache_utils import Cache, EncoderDecoderCache from typing import Optional from dataclasses import dataclass import torch from .utils import ModelOutput @dataclass class BaseModelOutputWithPoolingAndCrossAttentions(ModelOutput): """ Base class for model's outputs that also contains a pooling of the last hidden states. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`): Last layer hidden-state of the first token of the sequence (classification token) after further processing through the layers used for the auxiliary pretraining task. E.g. for BERT-family of models, this returns the classification token after processing through a linear layer and a tanh activation function. The linear layer weights are trained from the next sentence prediction (classification) objective during pretraining. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if `config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. """ last_hidden_state: Optional[torch.FloatTensor] = None pooler_output: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None past_key_values: Optional[Cache] = None attentions: Optional[tuple[torch.FloatTensor, ...]] = None cross_attentions: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass class BaseModelOutputWithPoolingAndCrossAttentions(ModelOutput): ''' Base class for model's outputs that also contains a pooling of the last hidden states. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`): Last layer hidden-state of the first token of the sequence (classification token) after further processing through the layers used for the auxiliary pretraining task. E.g. for BERT-family of models, this returns the classification token after processing through a linear layer and a tanh activation function. The linear layer weights are trained from the next sentence prediction (classification) objective during pretraining. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if `config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. ''' pass
2
1
0
0
0
0
0
4.71
1
0
0
0
0
0
0
0
46
6
7
7
6
33
7
7
6
0
1
0
0
452
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/modeling_outputs.py
transformers.modeling_outputs.BaseModelOutputWithPoolingAndNoAttention
from dataclasses import dataclass from .utils import ModelOutput from typing import Optional import torch @dataclass class BaseModelOutputWithPoolingAndNoAttention(ModelOutput): """ Base class for model's outputs that also contains a pooling of the last hidden states. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Sequence of hidden-states at the output of the last layer of the model. pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`): Last layer hidden-state after a pooling operation on the spatial dimensions. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, num_channels, height, width)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. """ last_hidden_state: Optional[torch.FloatTensor] = None pooler_output: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass class BaseModelOutputWithPoolingAndNoAttention(ModelOutput): ''' Base class for model's outputs that also contains a pooling of the last hidden states. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Sequence of hidden-states at the output of the last layer of the model. pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`): Last layer hidden-state after a pooling operation on the spatial dimensions. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, num_channels, height, width)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. ''' pass
2
1
0
0
0
0
0
3
1
0
0
0
0
0
0
0
19
3
4
4
3
12
4
4
3
0
1
0
0
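An illustrative sketch for the convolutional variant above: the pooled vector is assumed here to come from global average pooling over the spatial dimensions, matching the docstring's "pooling operation on the spatial dimensions"; the feature-map shape is made up.

```python
import torch
from transformers.modeling_outputs import BaseModelOutputWithPoolingAndNoAttention

feature_map = torch.randn(2, 512, 7, 7)  # (batch_size, num_channels, height, width)
out = BaseModelOutputWithPoolingAndNoAttention(
    last_hidden_state=feature_map,
    pooler_output=feature_map.mean(dim=(2, 3)),  # global average pool -> (batch_size, hidden_size)
)
```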
453
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/modeling_outputs.py
transformers.modeling_outputs.BaseModelOutputWithPoolingAndProjection
from .utils import ModelOutput from typing import Optional from dataclasses import dataclass import torch @dataclass class BaseModelOutputWithPoolingAndProjection(ModelOutput): """ Base class for model's outputs that also contains a pooling of the last hidden states. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`): Last layer hidden-state of the first token of the sequence (classification token) after further processing through the layers used for the auxiliary pretraining task. E.g. for BERT-family of models, this returns the classification token after processing through a linear layer and a tanh activation function. The linear layer weights are trained from the next sentence prediction (classification) objective during pretraining. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. projection_state (`tuple(torch.FloatTensor)`, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` of shape `(batch_size,config.project_dim)`. Text embeddings before the projection layer, used to mimic the last hidden state of the teacher encoder. """ last_hidden_state: Optional[torch.FloatTensor] = None pooler_output: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None attentions: Optional[tuple[torch.FloatTensor, ...]] = None projection_state: Optional[tuple[torch.FloatTensor]] = None
@dataclass class BaseModelOutputWithPoolingAndProjection(ModelOutput): ''' Base class for model's outputs that also contains a pooling of the last hidden states. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`): Last layer hidden-state of the first token of the sequence (classification token) after further processing through the layers used for the auxiliary pretraining task. E.g. for BERT-family of models, this returns the classification token after processing through a linear layer and a tanh activation function. The linear layer weights are trained from the next sentence prediction (classification) objective during pretraining. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. projection_state (`tuple(torch.FloatTensor)`, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` of shape `(batch_size,config.project_dim)`. Text embeddings before the projection layer, used to mimic the last hidden state of the teacher encoder. ''' pass
2
1
0
0
0
0
0
3.83
1
0
0
0
0
0
0
0
34
5
6
6
5
23
6
6
5
0
1
0
0
454
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/modeling_outputs.py
transformers.modeling_outputs.CausalLMOutput
from typing import Optional from dataclasses import dataclass from .utils import ModelOutput import torch @dataclass class CausalLMOutput(ModelOutput): """ Base class for causal language model (or autoregressive) outputs. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss (for next-token prediction). logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None attentions: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass class CausalLMOutput(ModelOutput): ''' Base class for causal language model (or autoregressive) outputs. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss (for next-token prediction). logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. ''' pass
2
1
0
0
0
0
0
3.4
1
0
0
0
0
0
0
0
26
4
5
5
4
17
5
5
4
0
1
0
0
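A minimal sketch of reading next-token predictions from `CausalLMOutput.logits`; the logits here are random stand-ins, not model output.

```python
import torch
from transformers.modeling_outputs import CausalLMOutput

out = CausalLMOutput(logits=torch.randn(1, 5, 100))    # (batch, sequence_length, vocab_size)
next_token_id = out.logits[:, -1, :].argmax(dim=-1)    # greedy pick for the last position
```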
455
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/modeling_outputs.py
transformers.modeling_outputs.CausalLMOutputWithCrossAttentions
from .utils import ModelOutput from .cache_utils import Cache, EncoderDecoderCache from dataclasses import dataclass import torch from typing import Optional @dataclass class CausalLMOutputWithCrossAttentions(ModelOutput): """ Base class for causal language model (or autoregressive) outputs. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss (for next-token prediction). logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Cross attentions weights after the attention softmax, used to compute the weighted average in the cross-attention heads. past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. """ loss: Optional[torch.FloatTensor] = None logits: Optional[torch.FloatTensor] = None past_key_values: Optional[Cache] = None hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None attentions: Optional[tuple[torch.FloatTensor, ...]] = None cross_attentions: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass class CausalLMOutputWithCrossAttentions(ModelOutput): ''' Base class for causal language model (or autoregressive) outputs. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss (for next-token prediction). logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Cross attentions weights after the attention softmax, used to compute the weighted average in the cross-attention heads. past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. ''' pass
2
1
0
0
0
0
0
4
1
0
0
0
0
0
0
0
41
6
7
7
6
28
7
7
6
0
1
0
0
456
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/modeling_outputs.py
transformers.modeling_outputs.CausalLMOutputWithPast
from .utils import ModelOutput import torch from typing import Optional from .cache_utils import Cache, EncoderDecoderCache from dataclasses import dataclass @dataclass class CausalLMOutputWithPast(ModelOutput): """ Base class for causal language model (or autoregressive) outputs. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss (for next-token prediction). logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: Optional[torch.FloatTensor] = None past_key_values: Optional[Cache] = None hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None attentions: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass class CausalLMOutputWithPast(ModelOutput): ''' Base class for causal language model (or autoregressive) outputs. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss (for next-token prediction). logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. ''' pass
2
1
0
0
0
0
0
3.67
1
0
0
0
0
0
0
0
33
5
6
6
5
22
6
6
5
0
1
0
0
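A sketch of what an incremental decoding step reads from `CausalLMOutputWithPast`; the output is constructed by hand here, whereas in practice it would come from a model's forward pass with `use_cache=True`, and `DynamicCache` is assumed available in `transformers.cache_utils`.

```python
import torch
from transformers.cache_utils import DynamicCache
from transformers.modeling_outputs import CausalLMOutputWithPast

out = CausalLMOutputWithPast(
    logits=torch.randn(1, 1, 100),   # logits for the newest position only
    past_key_values=DynamicCache(),  # cache the model would have filled
)
next_id = out.logits[:, -1, :].argmax(dim=-1, keepdim=True)
# The next forward call would take input_ids=next_id and past_key_values=out.past_key_values.
```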
457
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/modeling_outputs.py
transformers.modeling_outputs.DepthEstimatorOutput
import torch from typing import Optional from dataclasses import dataclass from .utils import ModelOutput @dataclass class DepthEstimatorOutput(ModelOutput): """ Base class for outputs of depth estimation models. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Classification (or regression if config.num_labels==1) loss. predicted_depth (`torch.FloatTensor` of shape `(batch_size, height, width)`): Predicted depth for each pixel. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, num_channels, height, width)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, patch_size, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None predicted_depth: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None attentions: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass class DepthEstimatorOutput(ModelOutput): ''' Base class for outputs of depth estimation models. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Classification (or regression if config.num_labels==1) loss. predicted_depth (`torch.FloatTensor` of shape `(batch_size, height, width)`): Predicted depth for each pixel. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, num_channels, height, width)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, patch_size, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. ''' pass
2
1
0
0
0
0
0
3.4
1
0
0
0
0
0
0
0
27
5
5
5
4
17
5
5
4
0
1
0
0
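A sketch showing the shape of `predicted_depth` from the `DepthEstimatorOutput` record above and one common, assumed post-processing step (min-max normalization for visualization); neither the values nor the normalization is prescribed by the class itself.

```python
import torch
from transformers.modeling_outputs import DepthEstimatorOutput

out = DepthEstimatorOutput(predicted_depth=torch.rand(1, 384, 384))  # (batch_size, height, width)
depth = out.predicted_depth[0]
depth_vis = (depth - depth.min()) / (depth.max() - depth.min())      # scale to [0, 1] for display
```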
458
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/modeling_outputs.py
transformers.modeling_outputs.ImageClassifierOutput
import torch from .utils import ModelOutput from typing import Optional from dataclasses import dataclass @dataclass class ImageClassifierOutput(ModelOutput): """ Base class for outputs of image classification models. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Classification (or regression if config.num_labels==1) loss. logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`): Classification (or regression if config.num_labels==1) scores (before SoftMax). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each stage) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states (also called feature maps) of the model at the output of each stage. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, patch_size, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None attentions: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass class ImageClassifierOutput(ModelOutput): ''' Base class for outputs of image classification models. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Classification (or regression if config.num_labels==1) loss. logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`): Classification (or regression if config.num_labels==1) scores (before SoftMax). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each stage) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states (also called feature maps) of the model at the output of each stage. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, patch_size, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. ''' pass
2
1
0
0
0
0
0
3.4
1
0
0
2
0
0
0
0
25
3
5
5
4
17
5
5
4
0
1
0
0
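A minimal sketch of turning `ImageClassifierOutput.logits` into label ids; the logits are random placeholders.

```python
import torch
from transformers.modeling_outputs import ImageClassifierOutput

out = ImageClassifierOutput(logits=torch.randn(4, 10))  # (batch_size, config.num_labels)
predicted_label_ids = out.logits.argmax(dim=-1)         # one class id per image
```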
459
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/modeling_outputs.py
transformers.modeling_outputs.ImageClassifierOutputWithNoAttention
from typing import Optional from dataclasses import dataclass import torch from .utils import ModelOutput @dataclass class ImageClassifierOutputWithNoAttention(ModelOutput): """ Base class for outputs of image classification models. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Classification (or regression if config.num_labels==1) loss. logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`): Classification (or regression if config.num_labels==1) scores (before SoftMax). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each stage) of shape `(batch_size, num_channels, height, width)`. Hidden-states (also called feature maps) of the model at the output of each stage. """ loss: Optional[torch.FloatTensor] = None logits: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass class ImageClassifierOutputWithNoAttention(ModelOutput): ''' Base class for outputs of image classification models. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Classification (or regression if config.num_labels==1) loss. logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`): Classification (or regression if config.num_labels==1) scores (before SoftMax). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each stage) of shape `(batch_size, num_channels, height, width)`. Hidden-states (also called feature maps) of the model at the output of each stage. ''' pass
2
1
0
0
0
0
0
3
1
0
0
0
0
0
0
0
18
2
4
4
3
12
4
4
3
0
1
0
0
460
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/modeling_outputs.py
transformers.modeling_outputs.ImageSuperResolutionOutput
from dataclasses import dataclass import torch from .utils import ModelOutput from typing import Optional @dataclass class ImageSuperResolutionOutput(ModelOutput): """ Base class for outputs of image super resolution models. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Reconstruction loss. reconstruction (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Reconstructed images, possibly upscaled. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each stage) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states (also called feature maps) of the model at the output of each stage. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, patch_size, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None reconstruction: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None attentions: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass class ImageSuperResolutionOutput(ModelOutput): ''' Base class for outputs of image super resolution models. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Reconstruction loss. reconstruction (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Reconstructed images, possibly upscaled. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each stage) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states (also called feature maps) of the model at the output of each stage. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, patch_size, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. ''' pass
2
1
0
0
0
0
0
3.4
1
0
0
0
0
0
0
0
25
3
5
5
4
17
5
5
4
0
1
0
0
461
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/modeling_outputs.py
transformers.modeling_outputs.MaskedImageModelingOutput
import torch import warnings from dataclasses import dataclass from typing import Optional from .utils import ModelOutput @dataclass class MaskedImageModelingOutput(ModelOutput): """ Base class for outputs of masked image completion / in-painting models. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `bool_masked_pos` is provided): Reconstruction loss. reconstruction (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Reconstructed / completed images. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each stage) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states (also called feature maps) of the model at the output of each stage. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, patch_size, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None reconstruction: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None attentions: Optional[tuple[torch.FloatTensor, ...]] = None @property def logits(self): warnings.warn('logits attribute is deprecated and will be removed in version 5 of Transformers. Please use the reconstruction attribute to retrieve the final output instead.', FutureWarning) return self.reconstruction
@dataclass class MaskedImageModelingOutput(ModelOutput): ''' Base class for outputs of masked image completion / in-painting models. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `bool_masked_pos` is provided): Reconstruction loss. reconstruction (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Reconstructed / completed images. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each stage) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states (also called feature maps) of the model at the output of each stage. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, patch_size, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. ''' @property def logits(self): pass
4
1
7
0
7
0
1
1.38
1
1
0
0
1
0
1
1
34
3
13
7
10
18
8
6
6
1
1
0
1
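A short sketch of the deprecated `logits` alias that the `MaskedImageModelingOutput` record above defines as a property: it warns and returns `reconstruction`, so new code should read `reconstruction` directly. The image tensor is invented.

```python
import torch
from transformers.modeling_outputs import MaskedImageModelingOutput

out = MaskedImageModelingOutput(reconstruction=torch.randn(1, 3, 224, 224))
image = out.logits                   # emits a FutureWarning per the property above
assert image is out.reconstruction   # same tensor; prefer the reconstruction attribute
```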
462
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/modeling_outputs.py
transformers.modeling_outputs.MaskedLMOutput
from .utils import ModelOutput from dataclasses import dataclass from typing import Optional import torch @dataclass class MaskedLMOutput(ModelOutput): """ Base class for masked language models outputs. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Masked language modeling (MLM) loss. logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None attentions: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass class MaskedLMOutput(ModelOutput): ''' Base class for masked language models outputs. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Masked language modeling (MLM) loss. logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. ''' pass
2
1
0
0
0
0
0
3.4
1
0
0
0
0
0
0
0
26
4
5
5
4
17
5
5
4
0
1
0
0
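A minimal sketch of reading candidate tokens at a masked position from `MaskedLMOutput.logits`; the mask position and vocabulary size are invented.

```python
import torch
from transformers.modeling_outputs import MaskedLMOutput

out = MaskedLMOutput(logits=torch.randn(1, 7, 30522))  # (batch, sequence_length, vocab_size)
mask_position = 3                                       # hypothetical index of the masked token
top5_token_ids = out.logits[0, mask_position].topk(5).indices
```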
463
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/modeling_outputs.py
transformers.modeling_outputs.MoECausalLMOutputWithPast
from .utils import ModelOutput from dataclasses import dataclass import torch from typing import Optional from .cache_utils import Cache, EncoderDecoderCache @dataclass class MoECausalLMOutputWithPast(ModelOutput): """ Base class for causal language model (or autoregressive) outputs as well as Mixture of Expert's router hidden states terms, to train a MoE model. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss (for next-token prediction). logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. z_loss (`torch.FloatTensor`, *optional*, returned when `labels` is provided): z_loss for the sparse modules. aux_loss (`torch.FloatTensor`, *optional*, returned when `labels` is provided): aux_loss for the sparse modules. router_logits (`tuple(torch.FloatTensor)`, *optional*, returned when `output_router_logits=True` is passed or when `config.add_router_probs=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, sequence_length, num_experts)`. Router logits of the encoder model, useful to compute the auxiliary loss and the z_loss for the sparse modules. """ loss: Optional[torch.FloatTensor] = None logits: Optional[torch.FloatTensor] = None past_key_values: Optional[Cache] = None hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None attentions: Optional[tuple[torch.FloatTensor, ...]] = None z_loss: Optional[torch.FloatTensor] = None aux_loss: Optional[torch.FloatTensor] = None router_logits: Optional[tuple[torch.FloatTensor]] = None
@dataclass class MoECausalLMOutputWithPast(ModelOutput): ''' Base class for causal language model (or autoregressive) outputs as well as Mixture of Expert's router hidden states terms, to train a MoE model. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss (for next-token prediction). logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. z_loss (`torch.FloatTensor`, *optional*, returned when `labels` is provided): z_loss for the sparse modules. aux_loss (`torch.FloatTensor`, *optional*, returned when `labels` is provided): aux_loss for the sparse modules. router_logits (`tuple(torch.FloatTensor)`, *optional*, returned when `output_router_logits=True` is passed or when `config.add_router_probs=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, sequence_length, num_experts)`. Router logits of the encoder model, useful to compute the auxiliary loss and the z_loss for the sparse modules. ''' pass
2
1
0
0
0
0
0
3.44
1
0
0
0
0
0
0
0
46
6
9
9
8
31
9
9
8
0
1
0
0
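A sketch of how the router terms in `MoECausalLMOutputWithPast` might be folded into a training objective; the loss values and weighting coefficients are assumptions for illustration, not values taken from the library.

```python
import torch
from transformers.modeling_outputs import MoECausalLMOutputWithPast

out = MoECausalLMOutputWithPast(
    loss=torch.tensor(2.31),      # language modeling loss
    aux_loss=torch.tensor(0.05),  # load-balancing loss from the sparse modules
    z_loss=torch.tensor(0.01),    # router z-loss
)
total_loss = out.loss + 0.01 * out.aux_loss + 0.001 * out.z_loss  # assumed coefficients
```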
464
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/modeling_outputs.py
transformers.modeling_outputs.MoEModelOutput
from dataclasses import dataclass import torch from .utils import ModelOutput from typing import Optional @dataclass class MoEModelOutput(ModelOutput): """ Base class for model's outputs, with potential hidden states and attentions. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. router_probs (`tuple(torch.FloatTensor)`, *optional*, returned when `output_router_probs=True` and `config.add_router_probs=True` is passed or when `config.output_router_probs=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, sequence_length, num_experts)`. Raw router probabilities that are computed by MoE routers, these terms are used to compute the auxiliary loss and the z_loss for Mixture of Experts models. """ last_hidden_state: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None attentions: Optional[tuple[torch.FloatTensor, ...]] = None router_probs: Optional[tuple[torch.FloatTensor]] = None
@dataclass class MoEModelOutput(ModelOutput): ''' Base class for model's outputs, with potential hidden states and attentions. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. router_probs (`tuple(torch.FloatTensor)`, *optional*, returned when `output_router_probs=True` and `config.add_router_probs=True` is passed or when `config.output_router_probs=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, sequence_length, num_experts)`. Raw router probabilities that are computed by MoE routers, these terms are used to compute the auxiliary loss and the z_loss for Mixture of Experts models. ''' pass
2
1
0
0
0
0
0
3.8
1
0
0
0
0
0
0
0
29
5
5
5
4
19
5
5
4
0
1
0
0
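A minimal sketch of how the MoEModelOutput dataclass recorded above is typically constructed and read; the tensor shapes, layer count, and expert count are illustrative assumptions, not values taken from this row.

import torch
from transformers.modeling_outputs import MoEModelOutput

batch_size, seq_len, hidden_size, num_experts, num_layers = 2, 5, 8, 4, 3
output = MoEModelOutput(
    last_hidden_state=torch.randn(batch_size, seq_len, hidden_size),
    router_probs=tuple(
        torch.randn(batch_size, seq_len, num_experts).softmax(dim=-1)
        for _ in range(num_layers)
    ),
)
# ModelOutput subclasses allow both attribute and key-style access; fields left as
# None (here hidden_states and attentions) are simply omitted from to_tuple().
print(output.last_hidden_state.shape)      # torch.Size([2, 5, 8])
print(output["router_probs"][0].shape)     # torch.Size([2, 5, 4]), per-layer router probabilities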
465
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/modeling_outputs.py
transformers.modeling_outputs.MoEModelOutputWithPastAndCrossAttentions
import torch from typing import Optional from .cache_utils import Cache, EncoderDecoderCache from .utils import ModelOutput from dataclasses import dataclass @dataclass class MoEModelOutputWithPastAndCrossAttentions(ModelOutput): """ Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding) as well as Mixture of Expert's router hidden states terms, to train a MoE model. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1, hidden_size)` is output. past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if `config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. router_probs (`tuple(torch.FloatTensor)`, *optional*, returned when `output_router_probs=True` and `config.add_router_probs=True` is passed or when `config.output_router_probs=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, sequence_length, num_experts)`. Raw router probabilities that are computed by MoE routers, these terms are used to compute the auxiliary loss and the z_loss for Mixture of Experts models. """ last_hidden_state: Optional[torch.FloatTensor] = None past_key_values: Optional[Cache] = None hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None attentions: Optional[tuple[torch.FloatTensor, ...]] = None cross_attentions: Optional[tuple[torch.FloatTensor, ...]] = None router_probs: Optional[tuple[torch.FloatTensor]] = None
@dataclass class MoEModelOutputWithPastAndCrossAttentions(ModelOutput): ''' Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding) as well as Mixture of Expert's router hidden states terms, to train a MoE model. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1, hidden_size)` is output. past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if `config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. router_probs (`tuple(torch.FloatTensor)`, *optional*, returned when `output_router_probs=True` and `config.add_router_probs=True` is passed or when `config.output_router_probs=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, sequence_length, num_experts)`. Raw router probabilities that are computed by MoE routers, these terms are used to compute the auxiliary loss and the z_loss for Mixture of Experts models. ''' pass
2
1
0
0
0
0
0
5
1
0
0
0
0
0
0
0
50
8
7
7
6
35
7
7
6
0
1
0
0
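A short, assumption-based sketch of the past_key_values field documented on the class above: it wraps an initially empty DynamicCache, as on the first decoding step; the exact Cache API may vary between library versions.

import torch
from transformers.cache_utils import DynamicCache
from transformers.modeling_outputs import MoEModelOutputWithPastAndCrossAttentions

output = MoEModelOutputWithPastAndCrossAttentions(
    last_hidden_state=torch.randn(1, 4, 16),
    past_key_values=DynamicCache(),   # filled in by the attention layers during forward
)
# On later steps the same cache object is passed back in, so only the newest
# token's hidden state has to be recomputed.
print(output.past_key_values.get_seq_length())   # 0 before any key/value states are cached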
466
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/modeling_outputs.py
transformers.modeling_outputs.MoeCausalLMOutputWithPast
from dataclasses import dataclass from typing import Optional import torch from .cache_utils import Cache, EncoderDecoderCache from .utils import ModelOutput @dataclass class MoeCausalLMOutputWithPast(ModelOutput): """ Base class for causal language model (or autoregressive) with mixture of experts outputs. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss (for next-token prediction). logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). aux_loss (`torch.FloatTensor`, *optional*, returned when `labels` is provided): aux_loss for the sparse modules. router_logits (`tuple(torch.FloatTensor)`, *optional*, returned when `output_router_probs=True` and `config.add_router_probs=True` is passed or when `config.output_router_probs=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, sequence_length, num_experts)`. Raw router logtis (post-softmax) that are computed by MoE routers, these terms are used to compute the auxiliary loss for Mixture of Experts models. past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None aux_loss: Optional[torch.FloatTensor] = None logits: Optional[torch.FloatTensor] = None past_key_values: Optional[Cache] = None hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None attentions: Optional[tuple[torch.FloatTensor, ...]] = None router_logits: Optional[tuple[torch.FloatTensor]] = None
@dataclass class MoeCausalLMOutputWithPast(ModelOutput): ''' Base class for causal language model (or autoregressive) with mixture of experts outputs. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss (for next-token prediction). logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). aux_loss (`torch.FloatTensor`, *optional*, returned when `labels` is provided): aux_loss for the sparse modules. router_logits (`tuple(torch.FloatTensor)`, *optional*, returned when `output_router_probs=True` and `config.add_router_probs=True` is passed or when `config.output_router_probs=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, sequence_length, num_experts)`. Raw router logtis (post-softmax) that are computed by MoE routers, these terms are used to compute the auxiliary loss for Mixture of Experts models. past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. ''' pass
2
1
0
0
0
0
0
3.5
1
0
0
0
0
0
0
0
46
10
8
8
7
28
8
8
7
0
1
0
0
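A minimal sketch of how the aux_loss field above can be folded into a training objective. The coefficient is an illustrative assumption; many MoE models already add this term to `loss` inside their forward pass, so this only demonstrates the fields, it does not prescribe a recipe.

import torch
from transformers.modeling_outputs import MoeCausalLMOutputWithPast

output = MoeCausalLMOutputWithPast(
    loss=torch.tensor(2.31),         # language-modeling loss
    aux_loss=torch.tensor(0.05),     # load-balancing loss from the sparse routers
    logits=torch.randn(1, 4, 32000),
)
router_aux_loss_coef = 0.02          # illustrative value, normally read from the model config
total_loss = output.loss + router_aux_loss_coef * output.aux_loss
print(float(total_loss))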
467
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/modeling_outputs.py
transformers.modeling_outputs.MoeModelOutputWithPast
from typing import Optional from dataclasses import dataclass from .cache_utils import Cache, EncoderDecoderCache from .utils import ModelOutput import torch @dataclass class MoeModelOutputWithPast(ModelOutput): """ Base class for model's outputs, with potential hidden states and attentions. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if `config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. router_logits (`tuple(torch.FloatTensor)`, *optional*, returned when `output_router_probs=True` and `config.add_router_probs=True` is passed or when `config.output_router_probs=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, sequence_length, num_experts)`. Raw router logtis (post-softmax) that are computed by MoE routers, these terms are used to compute the auxiliary loss for Mixture of Experts models. """ last_hidden_state: Optional[torch.FloatTensor] = None past_key_values: Optional[Cache] = None hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None attentions: Optional[tuple[torch.FloatTensor, ...]] = None router_logits: Optional[tuple[torch.FloatTensor]] = None
@dataclass class MoeModelOutputWithPast(ModelOutput): ''' Base class for model's outputs, with potential hidden states and attentions. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if `config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. router_logits (`tuple(torch.FloatTensor)`, *optional*, returned when `output_router_probs=True` and `config.add_router_probs=True` is passed or when `config.output_router_probs=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, sequence_length, num_experts)`. Raw router logtis (post-softmax) that are computed by MoE routers, these terms are used to compute the auxiliary loss for Mixture of Experts models. ''' pass
2
1
0
0
0
0
0
4.5
1
0
0
0
0
0
0
0
39
6
6
6
5
27
6
6
5
0
1
0
0
468
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/modeling_outputs.py
transformers.modeling_outputs.MultipleChoiceModelOutput
from dataclasses import dataclass import torch from typing import Optional from .utils import ModelOutput @dataclass class MultipleChoiceModelOutput(ModelOutput): """ Base class for outputs of multiple choice models. Args: loss (`torch.FloatTensor` of shape *(1,)*, *optional*, returned when `labels` is provided): Classification loss. logits (`torch.FloatTensor` of shape `(batch_size, num_choices)`): *num_choices* is the second dimension of the input tensors. (see *input_ids* above). Classification scores (before SoftMax). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None attentions: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass class MultipleChoiceModelOutput(ModelOutput): ''' Base class for outputs of multiple choice models. Args: loss (`torch.FloatTensor` of shape *(1,)*, *optional*, returned when `labels` is provided): Classification loss. logits (`torch.FloatTensor` of shape `(batch_size, num_choices)`): *num_choices* is the second dimension of the input tensors. (see *input_ids* above). Classification scores (before SoftMax). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. ''' pass
2
1
0
0
0
0
0
3.6
1
0
0
0
0
0
0
0
28
5
5
5
4
18
5
5
4
0
1
0
0
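An illustrative sketch of reading the (batch_size, num_choices) logits described above to pick one choice per example; the sizes are assumptions.

import torch
from transformers.modeling_outputs import MultipleChoiceModelOutput

batch_size, num_choices = 2, 4
output = MultipleChoiceModelOutput(logits=torch.randn(batch_size, num_choices))
predicted_choice = output.logits.argmax(dim=-1)   # index of the highest-scoring choice
print(predicted_choice.shape)                     # torch.Size([2])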
469
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/modeling_outputs.py
transformers.modeling_outputs.NextSentencePredictorOutput
from typing import Optional from .utils import ModelOutput from dataclasses import dataclass import torch @dataclass class NextSentencePredictorOutput(ModelOutput): """ Base class for outputs of models predicting if two sentences are consecutive or not. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `next_sentence_label` is provided): Next sequence prediction (classification) loss. logits (`torch.FloatTensor` of shape `(batch_size, 2)`): Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None attentions: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass class NextSentencePredictorOutput(ModelOutput): ''' Base class for outputs of models predicting if two sentences are consecutive or not. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `next_sentence_label` is provided): Next sequence prediction (classification) loss. logits (`torch.FloatTensor` of shape `(batch_size, 2)`): Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. ''' pass
2
1
0
0
0
0
0
3.6
1
0
0
0
0
0
0
0
27
4
5
5
4
18
5
5
4
0
1
0
0
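A small sketch of the two-way logits carried by NextSentencePredictorOutput; the batch size is an assumption.

import torch
from transformers.modeling_outputs import NextSentencePredictorOutput

output = NextSentencePredictorOutput(logits=torch.randn(3, 2))
probs = output.logits.softmax(dim=-1)   # probabilities over the True/False continuation labels
print(probs.sum(dim=-1))                # each row sums to 1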
470
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/modeling_outputs.py
transformers.modeling_outputs.QuestionAnsweringModelOutput
from dataclasses import dataclass from .utils import ModelOutput from typing import Optional import torch @dataclass class QuestionAnsweringModelOutput(ModelOutput): """ Base class for outputs of question answering models. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Total span extraction loss is the sum of a Cross-Entropy for the start and end positions. start_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Span-start scores (before SoftMax). end_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Span-end scores (before SoftMax). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None start_logits: Optional[torch.FloatTensor] = None end_logits: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None attentions: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass class QuestionAnsweringModelOutput(ModelOutput): ''' Base class for outputs of question answering models. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Total span extraction loss is the sum of a Cross-Entropy for the start and end positions. start_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Span-start scores (before SoftMax). end_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Span-end scores (before SoftMax). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. ''' pass
2
1
0
0
0
0
0
3.17
1
0
0
0
0
0
0
0
29
4
6
6
5
19
6
6
5
0
1
0
0
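A minimal sketch of turning the start/end logits above into a predicted answer span; a real pipeline would also mask question and special tokens and enforce start <= end.

import torch
from transformers.modeling_outputs import QuestionAnsweringModelOutput

seq_len = 12
output = QuestionAnsweringModelOutput(
    start_logits=torch.randn(1, seq_len),
    end_logits=torch.randn(1, seq_len),
)
start_index = int(output.start_logits.argmax(dim=-1))
end_index = int(output.end_logits.argmax(dim=-1))
print(start_index, end_index)   # token indices of the (greedy) predicted span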
471
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/modeling_outputs.py
transformers.modeling_outputs.SemanticSegmenterOutput
from typing import Optional import torch from .utils import ModelOutput from dataclasses import dataclass @dataclass class SemanticSegmenterOutput(ModelOutput): """ Base class for outputs of semantic segmentation models. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Classification (or regression if config.num_labels==1) loss. logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels, logits_height, logits_width)`): Classification scores for each pixel. <Tip warning={true}> The logits returned do not necessarily have the same size as the `pixel_values` passed as inputs. This is to avoid doing two interpolations and lose some quality when a user needs to resize the logits to the original image size as post-processing. You should always check your logits shape and resize as needed. </Tip> hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, patch_size, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, patch_size, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None attentions: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass class SemanticSegmenterOutput(ModelOutput): ''' Base class for outputs of semantic segmentation models. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Classification (or regression if config.num_labels==1) loss. logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels, logits_height, logits_width)`): Classification scores for each pixel. <Tip warning={true}> The logits returned do not necessarily have the same size as the `pixel_values` passed as inputs. This is to avoid doing two interpolations and lose some quality when a user needs to resize the logits to the original image size as post-processing. You should always check your logits shape and resize as needed. </Tip> hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, patch_size, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, patch_size, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. ''' pass
2
1
0
0
0
0
0
4.4
1
0
0
0
0
0
0
0
35
8
5
5
4
22
5
5
4
0
1
0
0
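Because the logits documented above may be smaller than the input image, a typical post-processing step interpolates them back to the original size; the class count and image size below are assumptions.

import torch
import torch.nn.functional as F
from transformers.modeling_outputs import SemanticSegmenterOutput

output = SemanticSegmenterOutput(logits=torch.randn(1, 19, 128, 128))  # 19 illustrative classes
original_size = (512, 512)  # (height, width) of the original image
upsampled = F.interpolate(output.logits, size=original_size, mode="bilinear", align_corners=False)
segmentation_map = upsampled.argmax(dim=1)   # per-pixel class ids
print(segmentation_map.shape)                # torch.Size([1, 512, 512])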
472
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/modeling_outputs.py
transformers.modeling_outputs.Seq2SeqLMOutput
from .utils import ModelOutput from .cache_utils import Cache, EncoderDecoderCache from typing import Optional import torch from dataclasses import dataclass @dataclass class Seq2SeqLMOutput(ModelOutput): """ Base class for sequence-to-sequence language models outputs. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss. logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). past_key_values (`EncoderDecoderCache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.EncoderDecoderCache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the decoder at the output of each layer plus the initial embedding outputs. decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder of the model. encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. 
""" loss: Optional[torch.FloatTensor] = None logits: Optional[torch.FloatTensor] = None past_key_values: Optional[EncoderDecoderCache] = None decoder_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None decoder_attentions: Optional[tuple[torch.FloatTensor, ...]] = None cross_attentions: Optional[tuple[torch.FloatTensor, ...]] = None encoder_last_hidden_state: Optional[torch.FloatTensor] = None encoder_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None encoder_attentions: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass class Seq2SeqLMOutput(ModelOutput): ''' Base class for sequence-to-sequence language models outputs. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss. logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). past_key_values (`EncoderDecoderCache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.EncoderDecoderCache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the decoder at the output of each layer plus the initial embedding outputs. decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder of the model. encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. ''' pass
2
1
0
0
0
0
0
3.9
1
0
0
0
0
0
0
0
57
8
10
10
9
39
10
10
9
0
1
0
0
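An assumption-based sketch of reading a Seq2SeqLMOutput: the decoder logits drive next-token selection, while the encoder hidden states are computed once per input and can be reused across decoding steps.

import torch
from transformers.modeling_outputs import Seq2SeqLMOutput

output = Seq2SeqLMOutput(
    logits=torch.randn(1, 3, 250),                    # (batch, generated_len, vocab_size), illustrative
    encoder_last_hidden_state=torch.randn(1, 7, 16),  # (batch, source_len, hidden_size)
)
next_token_id = output.logits[:, -1, :].argmax(dim=-1)   # greedy choice for the last position
print(next_token_id, output.encoder_last_hidden_state.shape)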
473
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/modeling_outputs.py
transformers.modeling_outputs.Seq2SeqMoEModelOutput
import torch from typing import Optional from .utils import ModelOutput from dataclasses import dataclass from .cache_utils import Cache, EncoderDecoderCache @dataclass class Seq2SeqMoEModelOutput(ModelOutput): """ Base class for model encoder's outputs that also contains : pre-computed hidden states that can speed up sequential decoding. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the decoder of the model. If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1, hidden_size)` is output. past_key_values (`EncoderDecoderCache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.EncoderDecoderCache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the decoder at the output of each layer plus the optional initial embedding outputs. decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. decoder_router_logits (`tuple(torch.FloatTensor)`, *optional*, returned when `output_router_logits=True` is passed or when `config.add_router_probs=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, sequence_length, num_experts)`. Router logits of the decoder model, useful to compute the auxiliary loss for Mixture of Experts models. cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder of the model. encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each layer plus the optional initial embedding outputs. 
encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. encoder_router_logits (`tuple(torch.FloatTensor)`, *optional*, returned when `output_router_logits=True` is passed or when `config.add_router_probs=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, sequence_length, num_experts)`. Router logits of the encoder model, useful to compute the auxiliary loss and the z_loss for the sparse modules. """ last_hidden_state: Optional[torch.FloatTensor] = None past_key_values: Optional[EncoderDecoderCache] = None decoder_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None decoder_attentions: Optional[tuple[torch.FloatTensor, ...]] = None decoder_router_logits: Optional[tuple[torch.FloatTensor]] = None cross_attentions: Optional[tuple[torch.FloatTensor, ...]] = None encoder_last_hidden_state: Optional[torch.FloatTensor] = None encoder_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None encoder_attentions: Optional[tuple[torch.FloatTensor, ...]] = None encoder_router_logits: Optional[tuple[torch.FloatTensor]] = None
@dataclass class Seq2SeqMoEModelOutput(ModelOutput): ''' Base class for model encoder's outputs that also contains : pre-computed hidden states that can speed up sequential decoding. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the decoder of the model. If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1, hidden_size)` is output. past_key_values (`EncoderDecoderCache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.EncoderDecoderCache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the decoder at the output of each layer plus the optional initial embedding outputs. decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. decoder_router_logits (`tuple(torch.FloatTensor)`, *optional*, returned when `output_router_logits=True` is passed or when `config.add_router_probs=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, sequence_length, num_experts)`. Router logits of the decoder model, useful to compute the auxiliary loss for Mixture of Experts models. cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder of the model. encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each layer plus the optional initial embedding outputs. encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. 
Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. encoder_router_logits (`tuple(torch.FloatTensor)`, *optional*, returned when `output_router_logits=True` is passed or when `config.add_router_probs=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, sequence_length, num_experts)`. Router logits of the encoder model, useful to compute the auxiliary loss and the z_loss for the sparse modules. ''' pass
2
1
0
0
0
0
0
4.27
1
0
0
0
0
0
0
0
69
11
11
11
10
47
11
11
10
0
1
0
0
474
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/modeling_outputs.py
transformers.modeling_outputs.Seq2SeqMoEOutput
from typing import Optional from .cache_utils import Cache, EncoderDecoderCache import torch from dataclasses import dataclass from .utils import ModelOutput @dataclass class Seq2SeqMoEOutput(ModelOutput): """ Base class for sequence-to-sequence language models outputs. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss. logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). past_key_values (`EncoderDecoderCache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.EncoderDecoderCache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the decoder at the output of each layer plus the initial embedding outputs. decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. decoder_router_logits (`tuple(torch.FloatTensor)`, *optional*, returned when `output_router_logits=True` is passed or when `config.add_router_probs=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, sequence_length, num_experts)`. Router logits of the decoder model, useful to compute the auxiliary loss for Mixture of Experts models. cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder of the model. encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. 
encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. encoder_router_logits (`tuple(torch.FloatTensor)`, *optional*, returned when `output_router_logits=True` is passed or when `config.add_router_probs=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, sequence_length, num_experts)`. Router logits of the encoder model, useful to compute the auxiliary loss and z_loss for Mixture of Experts models. """ loss: Optional[torch.FloatTensor] = None logits: Optional[torch.FloatTensor] = None encoder_z_loss: Optional[torch.FloatTensor] = None decoder_z_loss: Optional[torch.FloatTensor] = None encoder_aux_loss: Optional[torch.FloatTensor] = None decoder_aux_loss: Optional[torch.FloatTensor] = None past_key_values: Optional[EncoderDecoderCache] = None decoder_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None decoder_attentions: Optional[tuple[torch.FloatTensor, ...]] = None decoder_router_logits: Optional[tuple[torch.FloatTensor]] = None cross_attentions: Optional[tuple[torch.FloatTensor, ...]] = None encoder_last_hidden_state: Optional[torch.FloatTensor] = None encoder_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None encoder_attentions: Optional[tuple[torch.FloatTensor, ...]] = None encoder_router_logits: Optional[tuple[torch.FloatTensor]] = None
@dataclass class Seq2SeqMoEOutput(ModelOutput): ''' Base class for sequence-to-sequence language models outputs. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss. logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). past_key_values (`EncoderDecoderCache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.EncoderDecoderCache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the decoder at the output of each layer plus the initial embedding outputs. decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. decoder_router_logits (`tuple(torch.FloatTensor)`, *optional*, returned when `output_router_logits=True` is passed or when `config.add_router_probs=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, sequence_length, num_experts)`. Router logits of the decoder model, useful to compute the auxiliary loss for Mixture of Experts models. cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder of the model. encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. 
Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. encoder_router_logits (`tuple(torch.FloatTensor)`, *optional*, returned when `output_router_logits=True` is passed or when `config.add_router_probs=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, sequence_length, num_experts)`. Router logits of the encoder model, useful to compute the auxiliary loss and z_loss for Mixture of Experts models. ''' pass
2
1
0
0
0
0
0
2.88
1
0
0
0
0
0
0
0
72
10
16
16
15
46
16
16
15
0
1
0
0
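A hedged sketch of how the router loss terms exposed by Seq2SeqMoEOutput can be combined; the coefficients are illustrative, and models that compute `loss` internally may already include these terms.

import torch
from transformers.modeling_outputs import Seq2SeqMoEOutput

output = Seq2SeqMoEOutput(
    loss=torch.tensor(3.10),
    encoder_z_loss=torch.tensor(0.40), decoder_z_loss=torch.tensor(0.30),
    encoder_aux_loss=torch.tensor(0.02), decoder_aux_loss=torch.tensor(0.03),
)
router_z_loss_coef = 0.001     # illustrative coefficients, normally taken from the model config
router_aux_loss_coef = 0.001
total_loss = (
    output.loss
    + router_z_loss_coef * (output.encoder_z_loss + output.decoder_z_loss)
    + router_aux_loss_coef * (output.encoder_aux_loss + output.decoder_aux_loss)
)
print(float(total_loss))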
475
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/modeling_outputs.py
transformers.modeling_outputs.Seq2SeqModelOutput
from typing import Optional from .cache_utils import Cache, EncoderDecoderCache import torch from .utils import ModelOutput from dataclasses import dataclass @dataclass class Seq2SeqModelOutput(ModelOutput): """ Base class for model encoder's outputs that also contains : pre-computed hidden states that can speed up sequential decoding. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the decoder of the model. If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1, hidden_size)` is output. past_key_values (`EncoderDecoderCache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.EncoderDecoderCache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the decoder at the output of each layer plus the optional initial embedding outputs. decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder of the model. encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each layer plus the optional initial embedding outputs. encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. 
""" last_hidden_state: Optional[torch.FloatTensor] = None past_key_values: Optional[EncoderDecoderCache] = None decoder_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None decoder_attentions: Optional[tuple[torch.FloatTensor, ...]] = None cross_attentions: Optional[tuple[torch.FloatTensor, ...]] = None encoder_last_hidden_state: Optional[torch.FloatTensor] = None encoder_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None encoder_attentions: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass class Seq2SeqModelOutput(ModelOutput): ''' Base class for model encoder's outputs that also contains : pre-computed hidden states that can speed up sequential decoding. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the decoder of the model. If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1, hidden_size)` is output. past_key_values (`EncoderDecoderCache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.EncoderDecoderCache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the decoder at the output of each layer plus the optional initial embedding outputs. decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder of the model. encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each layer plus the optional initial embedding outputs. encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. ''' pass
2
1
0
0
0
0
0
4.44
1
0
0
4
0
0
0
0
58
9
9
9
8
40
9
9
8
0
1
0
0
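The Seq2SeqModelOutput above is a plain ModelOutput container; a minimal sketch of how such an output behaves, assuming `transformers` and `torch` are installed (the tensor shapes below are illustrative only):

import torch
from transformers.modeling_outputs import Seq2SeqModelOutput

out = Seq2SeqModelOutput(
    last_hidden_state=torch.randn(2, 7, 16),          # (batch_size, sequence_length, hidden_size)
    encoder_last_hidden_state=torch.randn(2, 9, 16),
)

# Fields can be read as attributes or dict-style keys; unset fields stay None.
assert torch.equal(out.last_hidden_state, out["last_hidden_state"])
assert out.decoder_attentions is None

# to_tuple() keeps only the fields that are not None, in declaration order.
print(len(out.to_tuple()))  # 2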
476
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/modeling_outputs.py
transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput
from .cache_utils import Cache, EncoderDecoderCache from .utils import ModelOutput import torch from dataclasses import dataclass from typing import Optional @dataclass class Seq2SeqQuestionAnsweringModelOutput(ModelOutput): """ Base class for outputs of sequence-to-sequence question answering models. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Total span extraction loss is the sum of a Cross-Entropy for the start and end positions. start_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Span-start scores (before SoftMax). end_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Span-end scores (before SoftMax). past_key_values (`EncoderDecoderCache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.EncoderDecoderCache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the decoder at the output of each layer plus the initial embedding outputs. decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder of the model. encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. 
""" loss: Optional[torch.FloatTensor] = None start_logits: Optional[torch.FloatTensor] = None end_logits: Optional[torch.FloatTensor] = None past_key_values: Optional[EncoderDecoderCache] = None decoder_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None decoder_attentions: Optional[tuple[torch.FloatTensor, ...]] = None cross_attentions: Optional[tuple[torch.FloatTensor, ...]] = None encoder_last_hidden_state: Optional[torch.FloatTensor] = None encoder_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None encoder_attentions: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass class Seq2SeqQuestionAnsweringModelOutput(ModelOutput): ''' Base class for outputs of sequence-to-sequence question answering models. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Total span extraction loss is the sum of a Cross-Entropy for the start and end positions. start_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Span-start scores (before SoftMax). end_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Span-end scores (before SoftMax). past_key_values (`EncoderDecoderCache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.EncoderDecoderCache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the decoder at the output of each layer plus the initial embedding outputs. decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder of the model. encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. ''' pass
2
1
0
0
0
0
0
3.73
1
0
0
0
0
0
0
0
60
8
11
11
10
41
11
11
10
0
1
0
0
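A hypothetical post-processing step for the Seq2SeqQuestionAnsweringModelOutput documented above: picking the most likely answer span from the start/end logits. The random logits stand in for a real model's output.

import torch
from transformers.modeling_outputs import Seq2SeqQuestionAnsweringModelOutput

out = Seq2SeqQuestionAnsweringModelOutput(
    start_logits=torch.randn(1, 12),   # (batch_size, sequence_length)
    end_logits=torch.randn(1, 12),
)

start = out.start_logits.argmax(dim=-1)  # index of the most likely span start
end = out.end_logits.argmax(dim=-1)      # index of the most likely span end
print(start.item(), end.item())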
477
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/modeling_outputs.py
transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput
from typing import Optional from dataclasses import dataclass from .cache_utils import Cache, EncoderDecoderCache from .utils import ModelOutput import torch @dataclass class Seq2SeqSequenceClassifierOutput(ModelOutput): """ Base class for outputs of sequence-to-sequence sentence classification models. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `label` is provided): Classification (or regression if config.num_labels==1) loss. logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`): Classification (or regression if config.num_labels==1) scores (before SoftMax). past_key_values (`EncoderDecoderCache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.EncoderDecoderCache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the decoder at the output of each layer plus the initial embedding outputs. decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder of the model. encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. 
""" loss: Optional[torch.FloatTensor] = None logits: Optional[torch.FloatTensor] = None past_key_values: Optional[EncoderDecoderCache] = None decoder_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None decoder_attentions: Optional[tuple[torch.FloatTensor, ...]] = None cross_attentions: Optional[tuple[torch.FloatTensor, ...]] = None encoder_last_hidden_state: Optional[torch.FloatTensor] = None encoder_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None encoder_attentions: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass class Seq2SeqSequenceClassifierOutput(ModelOutput): ''' Base class for outputs of sequence-to-sequence sentence classification models. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `label` is provided): Classification (or regression if config.num_labels==1) loss. logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`): Classification (or regression if config.num_labels==1) scores (before SoftMax). past_key_values (`EncoderDecoderCache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.EncoderDecoderCache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the decoder at the output of each layer plus the initial embedding outputs. decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder of the model. encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. ''' pass
2
1
0
0
0
0
0
3.9
1
0
0
0
0
0
0
0
57
8
10
10
9
39
10
10
9
0
1
0
0
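An illustrative read of the Seq2SeqSequenceClassifierOutput above: its `logits` are pre-SoftMax scores, so probabilities are obtained by applying SoftMax afterwards. Shapes are made up.

import torch
from transformers.modeling_outputs import Seq2SeqSequenceClassifierOutput

num_labels = 3
out = Seq2SeqSequenceClassifierOutput(logits=torch.randn(4, num_labels))

probs = out.logits.softmax(dim=-1)   # (batch_size, num_labels)
pred = probs.argmax(dim=-1)          # predicted label per example
print(pred.tolist())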
478
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/modeling_outputs.py
transformers.modeling_outputs.Seq2SeqSpectrogramOutput
import torch from .cache_utils import Cache, EncoderDecoderCache from typing import Optional from .utils import ModelOutput from dataclasses import dataclass @dataclass class Seq2SeqSpectrogramOutput(ModelOutput): """ Base class for sequence-to-sequence spectrogram outputs. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Spectrogram generation loss. spectrogram (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_bins)`): The predicted spectrogram. past_key_values (`EncoderDecoderCache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.EncoderDecoderCache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the decoder at the output of each layer plus the initial embedding outputs. decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder of the model. encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. 
""" loss: Optional[torch.FloatTensor] = None spectrogram: Optional[torch.FloatTensor] = None past_key_values: Optional[EncoderDecoderCache] = None decoder_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None decoder_attentions: Optional[tuple[torch.FloatTensor, ...]] = None cross_attentions: Optional[tuple[torch.FloatTensor, ...]] = None encoder_last_hidden_state: Optional[torch.FloatTensor] = None encoder_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None encoder_attentions: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass class Seq2SeqSpectrogramOutput(ModelOutput): ''' Base class for sequence-to-sequence spectrogram outputs. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Spectrogram generation loss. spectrogram (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_bins)`): The predicted spectrogram. past_key_values (`EncoderDecoderCache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.EncoderDecoderCache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the decoder at the output of each layer plus the initial embedding outputs. decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder of the model. encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. ''' pass
2
1
0
0
0
0
0
3.9
1
0
0
0
0
0
0
0
57
8
10
10
9
39
10
10
9
0
1
0
0
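A sketch of consuming the Seq2SeqSpectrogramOutput described above, e.g. before handing the spectrogram to a vocoder; the tensor here is a stand-in for real model output.

import torch
from transformers.modeling_outputs import Seq2SeqSpectrogramOutput

out = Seq2SeqSpectrogramOutput(spectrogram=torch.randn(1, 120, 80))  # (batch, sequence_length, num_bins)
batch, frames, bins = out.spectrogram.shape
print(f"{frames} frames with {bins} bins each")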
479
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/modeling_outputs.py
transformers.modeling_outputs.Seq2SeqTSModelOutput
from dataclasses import dataclass import torch from typing import Optional from .cache_utils import Cache, EncoderDecoderCache from .utils import ModelOutput @dataclass class Seq2SeqTSModelOutput(ModelOutput): """ Base class for time series model's encoder outputs that also contains pre-computed hidden states that can speed up sequential decoding. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the decoder of the model. If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1, hidden_size)` is output. past_key_values (`EncoderDecoderCache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.EncoderDecoderCache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the decoder at the output of each layer plus the optional initial embedding outputs. decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder of the model. encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each layer plus the optional initial embedding outputs. encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. 
loc (`torch.FloatTensor` of shape `(batch_size,)` or `(batch_size, input_size)`, *optional*): Shift values of each time series' context window which is used to give the model inputs of the same magnitude and then used to shift back to the original magnitude. scale (`torch.FloatTensor` of shape `(batch_size,)` or `(batch_size, input_size)`, *optional*): Scaling values of each time series' context window which is used to give the model inputs of the same magnitude and then used to rescale back to the original magnitude. static_features (`torch.FloatTensor` of shape `(batch_size, feature size)`, *optional*): Static features of each time series' in a batch which are copied to the covariates at inference time. """ last_hidden_state: Optional[torch.FloatTensor] = None past_key_values: Optional[EncoderDecoderCache] = None decoder_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None decoder_attentions: Optional[tuple[torch.FloatTensor, ...]] = None cross_attentions: Optional[tuple[torch.FloatTensor, ...]] = None encoder_last_hidden_state: Optional[torch.FloatTensor] = None encoder_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None encoder_attentions: Optional[tuple[torch.FloatTensor, ...]] = None loc: Optional[torch.FloatTensor] = None scale: Optional[torch.FloatTensor] = None static_features: Optional[torch.FloatTensor] = None
@dataclass class Seq2SeqTSModelOutput(ModelOutput): ''' Base class for time series model's encoder outputs that also contains pre-computed hidden states that can speed up sequential decoding. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the decoder of the model. If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1, hidden_size)` is output. past_key_values (`EncoderDecoderCache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.EncoderDecoderCache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the decoder at the output of each layer plus the optional initial embedding outputs. decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder of the model. encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each layer plus the optional initial embedding outputs. encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. 
loc (`torch.FloatTensor` of shape `(batch_size,)` or `(batch_size, input_size)`, *optional*): Shift values of each time series' context window which is used to give the model inputs of the same magnitude and then used to shift back to the original magnitude. scale (`torch.FloatTensor` of shape `(batch_size,)` or `(batch_size, input_size)`, *optional*): Scaling values of each time series' context window which is used to give the model inputs of the same magnitude and then used to rescale back to the original magnitude. static_features (`torch.FloatTensor` of shape `(batch_size, feature size)`, *optional*): Static features of each time series' in a batch which are copied to the covariates at inference time. ''' pass
2
1
0
0
0
0
0
4
1
0
0
0
0
0
0
0
69
9
12
12
11
48
12
12
11
0
1
0
0
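A hedged example of how the `loc`/`scale` fields of the Seq2SeqTSModelOutput above could be used to map values back from the model's normalized space to each series' original magnitude. All tensors are synthetic; a real model fills these fields itself.

import torch
from transformers.modeling_outputs import Seq2SeqTSModelOutput

out = Seq2SeqTSModelOutput(
    last_hidden_state=torch.randn(2, 24, 32),
    loc=torch.tensor([[10.0], [250.0]]),   # per-series shift
    scale=torch.tensor([[2.0], [40.0]]),   # per-series scale
)

normalized_forecast = torch.randn(2, 24, 1)                              # e.g. sampled in normalized space
rescaled = normalized_forecast * out.scale.unsqueeze(1) + out.loc.unsqueeze(1)
print(rescaled.shape)  # torch.Size([2, 24, 1])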
480
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/modeling_outputs.py
transformers.modeling_outputs.Seq2SeqTSPredictionOutput
from dataclasses import dataclass import torch from .utils import ModelOutput from typing import Optional from .cache_utils import Cache, EncoderDecoderCache @dataclass class Seq2SeqTSPredictionOutput(ModelOutput): """ Base class for time series model's decoder outputs that also contain the loss as well as the parameters of the chosen distribution. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when a `future_values` is provided): Distributional loss. params (`torch.FloatTensor` of shape `(batch_size, num_samples, num_params)`): Parameters of the chosen distribution. past_key_values (`EncoderDecoderCache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.EncoderDecoderCache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the decoder at the output of each layer plus the initial embedding outputs. decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder of the model. encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. 
loc (`torch.FloatTensor` of shape `(batch_size,)` or `(batch_size, input_size)`, *optional*): Shift values of each time series' context window which is used to give the model inputs of the same magnitude and then used to shift back to the original magnitude. scale (`torch.FloatTensor` of shape `(batch_size,)` or `(batch_size, input_size)`, *optional*): Scaling values of each time series' context window which is used to give the model inputs of the same magnitude and then used to rescale back to the original magnitude. static_features (`torch.FloatTensor` of shape `(batch_size, feature size)`, *optional*): Static features of each time series' in a batch which are copied to the covariates at inference time. """ loss: Optional[torch.FloatTensor] = None params: Optional[tuple[torch.FloatTensor]] = None past_key_values: Optional[EncoderDecoderCache] = None decoder_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None decoder_attentions: Optional[tuple[torch.FloatTensor, ...]] = None cross_attentions: Optional[tuple[torch.FloatTensor, ...]] = None encoder_last_hidden_state: Optional[torch.FloatTensor] = None encoder_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None encoder_attentions: Optional[tuple[torch.FloatTensor, ...]] = None loc: Optional[torch.FloatTensor] = None scale: Optional[torch.FloatTensor] = None static_features: Optional[torch.FloatTensor] = None
@dataclass class Seq2SeqTSPredictionOutput(ModelOutput): ''' Base class for time series model's decoder outputs that also contain the loss as well as the parameters of the chosen distribution. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when a `future_values` is provided): Distributional loss. params (`torch.FloatTensor` of shape `(batch_size, num_samples, num_params)`): Parameters of the chosen distribution. past_key_values (`EncoderDecoderCache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.EncoderDecoderCache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the decoder at the output of each layer plus the initial embedding outputs. decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder of the model. encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. loc (`torch.FloatTensor` of shape `(batch_size,)` or `(batch_size, input_size)`, *optional*): Shift values of each time series' context window which is used to give the model inputs of the same magnitude and then used to shift back to the original magnitude. 
scale (`torch.FloatTensor` of shape `(batch_size,)` or `(batch_size, input_size)`, *optional*): Scaling values of each time series' context window which is used to give the model inputs of the same magnitude and then used to rescale back to the original magnitude. static_features (`torch.FloatTensor` of shape `(batch_size, feature size)`, *optional*): Static features of each time series' in a batch which are copied to the covariates at inference time. ''' pass
2
1
0
0
0
0
0
3.69
1
0
0
0
0
0
0
0
69
8
13
13
12
48
13
13
12
0
1
0
0
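Illustrative handling of the Seq2SeqTSPredictionOutput above: `params` carries the parameters of the chosen output distribution, while `loc`/`scale` undo the internal normalization. The values below are placeholders, not real model output.

import torch
from transformers.modeling_outputs import Seq2SeqTSPredictionOutput

mean, std = torch.randn(2, 24), torch.rand(2, 24) + 0.1
out = Seq2SeqTSPredictionOutput(
    params=(mean, std),            # e.g. parameters of a Normal output head
    loc=torch.zeros(2, 1),
    scale=torch.ones(2, 1),
)
pred_mean = out.params[0] * out.scale + out.loc   # de-normalized point forecast
print(pred_mean.shape)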
481
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/modeling_outputs.py
transformers.modeling_outputs.SequenceClassifierOutput
from dataclasses import dataclass import torch from typing import Optional from .utils import ModelOutput @dataclass class SequenceClassifierOutput(ModelOutput): """ Base class for outputs of sentence classification models. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Classification (or regression if config.num_labels==1) loss. logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`): Classification (or regression if config.num_labels==1) scores (before SoftMax). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None attentions: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass class SequenceClassifierOutput(ModelOutput): ''' Base class for outputs of sentence classification models. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Classification (or regression if config.num_labels==1) loss. logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`): Classification (or regression if config.num_labels==1) scores (before SoftMax). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. ''' pass
2
1
0
0
0
0
0
3.4
1
0
0
0
0
0
0
0
26
4
5
5
4
17
5
5
4
0
1
0
0
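A minimal sketch of reading the SequenceClassifierOutput above; the id2label mapping is a hypothetical config detail and the logits are random stand-ins.

import torch
from transformers.modeling_outputs import SequenceClassifierOutput

id2label = {0: "negative", 1: "positive"}
out = SequenceClassifierOutput(logits=torch.randn(1, 2))
label_id = out.logits.argmax(dim=-1).item()
print(id2label[label_id])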
482
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/modeling_outputs.py
transformers.modeling_outputs.SequenceClassifierOutputWithPast
from typing import Optional import torch from .cache_utils import Cache, EncoderDecoderCache from dataclasses import dataclass from .utils import ModelOutput @dataclass class SequenceClassifierOutputWithPast(ModelOutput): """ Base class for outputs of sentence classification models. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Classification (or regression if config.num_labels==1) loss. logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`): Classification (or regression if config.num_labels==1) scores (before SoftMax). past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: Optional[torch.FloatTensor] = None past_key_values: Optional[Cache] = None hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None attentions: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass class SequenceClassifierOutputWithPast(ModelOutput): ''' Base class for outputs of sentence classification models. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Classification (or regression if config.num_labels==1) loss. logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`): Classification (or regression if config.num_labels==1) scores (before SoftMax). past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. ''' pass
2
1
0
0
0
0
0
3.67
1
0
0
0
0
0
0
0
33
5
6
6
5
22
6
6
5
0
1
0
0
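A minimal sketch of the extra field SequenceClassifierOutputWithPast adds: with `use_cache=True` a causal classifier returns its KV cache in `past_key_values`, and ModelOutput keeps None fields out of tuple conversion. The DynamicCache below is only a stand-in for a cache a real model would return.

import torch
from transformers.cache_utils import DynamicCache
from transformers.modeling_outputs import SequenceClassifierOutputWithPast

out = SequenceClassifierOutputWithPast(
    logits=torch.randn(1, 2),
    past_key_values=DynamicCache(),
)
print(type(out.past_key_values).__name__)   # DynamicCache
print(len(out.to_tuple()))                  # 2 -- loss, hidden_states and attentions are None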
483
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/modeling_outputs.py
transformers.modeling_outputs.TokenClassifierOutput
from typing import Optional from .utils import ModelOutput from dataclasses import dataclass import torch @dataclass class TokenClassifierOutput(ModelOutput): """ Base class for outputs of token classification models. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided) : Classification loss. logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`): Classification scores (before SoftMax). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None attentions: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass class TokenClassifierOutput(ModelOutput): ''' Base class for outputs of token classification models. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided) : Classification loss. logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`): Classification scores (before SoftMax). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. ''' pass
2
1
0
0
0
0
0
3.4
1
0
0
0
0
0
0
0
26
4
5
5
4
17
5
5
4
0
1
0
0
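An illustrative read of the TokenClassifierOutput above: one label id per token, taken as the argmax over the label dimension. The logits are random placeholders with a made-up tag-set size.

import torch
from transformers.modeling_outputs import TokenClassifierOutput

out = TokenClassifierOutput(logits=torch.randn(1, 6, 5))   # (batch, sequence_length, num_labels)
per_token_labels = out.logits.argmax(dim=-1)               # (batch, sequence_length)
print(per_token_labels.tolist())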
484
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/modeling_outputs.py
transformers.modeling_outputs.Wav2Vec2BaseModelOutput
from .utils import ModelOutput from typing import Optional import torch from dataclasses import dataclass @dataclass class Wav2Vec2BaseModelOutput(ModelOutput): """ Base class for models that have been trained with the Wav2Vec2 loss objective. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. extract_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, conv_dim[-1])`): Sequence of extracted feature vectors of the last convolutional layer of the model. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ last_hidden_state: Optional[torch.FloatTensor] = None extract_features: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None attentions: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass class Wav2Vec2BaseModelOutput(ModelOutput): ''' Base class for models that have been trained with the Wav2Vec2 loss objective. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. extract_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, conv_dim[-1])`): Sequence of extracted feature vectors of the last convolutional layer of the model. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. ''' pass
2
1
0
0
0
0
0
3.4
1
0
0
0
0
0
0
0
26
4
5
5
4
17
5
5
4
0
1
0
0
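The `Wav2Vec2BaseModelOutput` record above describes the two main tensors produced by the Wav2Vec2 encoder. Below is a hedged usage sketch, not part of the dataset; the checkpoint name `facebook/wav2vec2-base-960h` and the one-second random waveform are illustrative assumptions.

```python
import torch
from transformers import Wav2Vec2Model

model = Wav2Vec2Model.from_pretrained("facebook/wav2vec2-base-960h")
model.eval()

# One second of fake 16 kHz mono audio standing in for a real waveform.
input_values = torch.randn(1, 16000)

with torch.no_grad():
    out = model(input_values, output_hidden_states=True)

print(out.last_hidden_state.shape)  # (batch, frames, hidden_size)
print(out.extract_features.shape)   # (batch, frames, conv_dim[-1])
print(len(out.hidden_states))       # embedding output + one entry per layer
```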
485
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/modeling_outputs.py
transformers.modeling_outputs.XVectorOutput
from typing import Optional import torch from dataclasses import dataclass from .utils import ModelOutput @dataclass class XVectorOutput(ModelOutput): """ Output type of [`Wav2Vec2ForXVector`]. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Classification loss. logits (`torch.FloatTensor` of shape `(batch_size, config.xvector_output_dim)`): Classification hidden states before AMSoftmax. embeddings (`torch.FloatTensor` of shape `(batch_size, config.xvector_output_dim)`): Utterance embeddings used for vector similarity-based retrieval. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: Optional[torch.FloatTensor] = None embeddings: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None attentions: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass class XVectorOutput(ModelOutput): ''' Output type of [`Wav2Vec2ForXVector`]. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Classification loss. logits (`torch.FloatTensor` of shape `(batch_size, config.xvector_output_dim)`): Classification hidden states before AMSoftmax. embeddings (`torch.FloatTensor` of shape `(batch_size, config.xvector_output_dim)`): Utterance embeddings used for vector similarity-based retrieval. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. ''' pass
2
1
0
0
0
0
0
3.17
1
0
0
0
0
0
0
0
29
4
6
6
5
19
6
6
5
0
1
0
0
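`XVectorOutput.embeddings` is documented above as the vector used for similarity-based retrieval. The sketch below is not part of the dataset: random embeddings stand in for real model outputs, and the decision threshold is an arbitrary placeholder.

```python
import torch
from transformers.modeling_outputs import XVectorOutput

dim = 512  # illustrative xvector_output_dim
out_a = XVectorOutput(logits=torch.randn(1, dim), embeddings=torch.randn(1, dim))
out_b = XVectorOutput(logits=torch.randn(1, dim), embeddings=torch.randn(1, dim))

# Compare two utterance embeddings with cosine similarity; in practice the
# threshold would be tuned on a validation set.
similarity = torch.nn.functional.cosine_similarity(
    out_a.embeddings, out_b.embeddings, dim=-1
)
same_speaker = similarity.item() > 0.86  # placeholder threshold
print(float(similarity), same_speaker)
```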
486
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/modeling_utils.py
transformers.modeling_utils.ModuleUtilsMixin
from torch import Tensor, nn from .utils import ADAPTER_SAFE_WEIGHTS_NAME, ADAPTER_WEIGHTS_NAME, CONFIG_NAME, DUMMY_INPUTS, SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ContextManagers, PushToHubMixin, cached_file, check_torch_load_is_safe, copy_func, download_url, extract_commit_hash, has_file, is_accelerate_available, is_bitsandbytes_available, is_flash_attn_2_available, is_flash_attn_3_available, is_kernels_available, is_offline_mode, is_optimum_available, is_peft_available, is_remote_url, is_safetensors_available, is_torch_flex_attn_available, is_torch_greater_or_equal, is_torch_mlu_available, is_torch_npu_available, is_torch_xla_available, is_torch_xpu_available, is_torchao_available, logging import os import warnings from typing import Any, Callable, Optional, TypeVar, Union, get_type_hints import torch class ModuleUtilsMixin: """ A few utilities for `torch.nn.Modules`, to be used as a mixin. """ @staticmethod def _hook_rss_memory_pre_forward(module, *args, **kwargs): try: import psutil except ImportError: raise ImportError('You need to install psutil (pip install psutil) to use memory tracing.') process = psutil.Process(os.getpid()) mem = process.memory_info() module.mem_rss_pre_forward = mem.rss return None @staticmethod def _hook_rss_memory_post_forward(module, *args, **kwargs): try: import psutil except ImportError: raise ImportError('You need to install psutil (pip install psutil) to use memory tracing.') process = psutil.Process(os.getpid()) mem = process.memory_info() module.mem_rss_post_forward = mem.rss mem_rss_diff = module.mem_rss_post_forward - module.mem_rss_pre_forward module.mem_rss_diff = mem_rss_diff + (module.mem_rss_diff if hasattr(module, 'mem_rss_diff') else 0) return None def add_memory_hooks(self): """ Add a memory hook before and after each sub-module forward pass to record increase in memory consumption. Increase in memory consumption is stored in a `mem_rss_diff` attribute for each module and can be reset to zero with `model.reset_memory_hooks_state()`. """ for module in self.modules(): module.register_forward_pre_hook(self._hook_rss_memory_pre_forward) module.register_forward_hook(self._hook_rss_memory_post_forward) self.reset_memory_hooks_state() def reset_memory_hooks_state(self): """ Reset the `mem_rss_diff` attribute of each module (see [`~modeling_utils.ModuleUtilsMixin.add_memory_hooks`]). """ for module in self.modules(): module.mem_rss_diff = 0 module.mem_rss_post_forward = 0 module.mem_rss_pre_forward = 0 @property def device(self) -> torch.device: """ `torch.device`: The device on which the module is (assuming that all the module parameters are on the same device). """ return get_parameter_device(self) @property def dtype(self) -> torch.dtype: """ `torch.dtype`: The dtype of the module (assuming that all the module parameters have the same dtype). """ return get_parameter_dtype(self) def invert_attention_mask(self, encoder_attention_mask: Tensor) -> Tensor: """ Invert an attention mask (e.g., switches 0. and 1.). Args: encoder_attention_mask (`torch.Tensor`): An attention mask. Returns: `torch.Tensor`: The inverted attention mask. 
""" if encoder_attention_mask.dim() == 3: encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :] if encoder_attention_mask.dim() == 2: encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :] encoder_extended_attention_mask = encoder_extended_attention_mask.to(dtype=self.dtype) encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * torch.finfo(self.dtype).min return encoder_extended_attention_mask @staticmethod def create_extended_attention_mask_for_decoder(input_shape, attention_mask, device=None): if device is not None: warnings.warn('The `device` argument is deprecated and will be removed in v5 of Transformers.', FutureWarning) else: device = attention_mask.device batch_size, seq_length = input_shape seq_ids = torch.arange(seq_length, device=device) causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None] causal_mask = causal_mask.to(attention_mask.dtype) if causal_mask.shape[1] < attention_mask.shape[1]: prefix_seq_len = attention_mask.shape[1] - causal_mask.shape[1] causal_mask = torch.cat([torch.ones((batch_size, seq_length, prefix_seq_len), device=device, dtype=causal_mask.dtype), causal_mask], axis=-1) extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :] return extended_attention_mask def get_extended_attention_mask(self, attention_mask: Tensor, input_shape: tuple[int], device: Optional[torch.device]=None, dtype: Optional[torch.dtype]=None) -> Tensor: """ Makes broadcastable attention and causal masks so that future and masked tokens are ignored. Arguments: attention_mask (`torch.Tensor`): Mask with ones indicating tokens to attend to, zeros for tokens to ignore. input_shape (`tuple[int]`): The shape of the input to the model. Returns: `torch.Tensor` The extended attention mask, with a the same dtype as `attention_mask.dtype`. """ if dtype is None: dtype = self.dtype if not (attention_mask.dim() == 2 and self.config.is_decoder): if device is not None: warnings.warn('The `device` argument is deprecated and will be removed in v5 of Transformers.', FutureWarning) if attention_mask.dim() == 3: extended_attention_mask = attention_mask[:, None, :, :] elif attention_mask.dim() == 2: if self.config.is_decoder: extended_attention_mask = ModuleUtilsMixin.create_extended_attention_mask_for_decoder(input_shape, attention_mask, device) else: extended_attention_mask = attention_mask[:, None, None, :] else: raise ValueError(f'Wrong shape for input_ids (shape {input_shape}) or attention_mask (shape {attention_mask.shape})') extended_attention_mask = extended_attention_mask.to(dtype=dtype) extended_attention_mask = (1.0 - extended_attention_mask) * torch.finfo(dtype).min return extended_attention_mask def get_head_mask(self, head_mask: Optional[Tensor], num_hidden_layers: int, is_attention_chunked: bool=False) -> Tensor: """ Prepare the head mask if needed. Args: head_mask (`torch.Tensor` with shape `[num_heads]` or `[num_hidden_layers x num_heads]`, *optional*): The mask indicating if we should keep the heads or not (1.0 for keep, 0.0 for discard). num_hidden_layers (`int`): The number of hidden layers in the model. is_attention_chunked (`bool`, *optional*, defaults to `False`): Whether or not the attentions scores are computed by chunks or not. Returns: `torch.Tensor` with shape `[num_hidden_layers x batch x num_heads x seq_length x seq_length]` or list with `[None]` for each layer. 
""" if head_mask is not None: head_mask = self._convert_head_mask_to_5d(head_mask, num_hidden_layers) if is_attention_chunked is True: head_mask = head_mask.unsqueeze(-1) else: head_mask = [None] * num_hidden_layers return head_mask def _convert_head_mask_to_5d(self, head_mask, num_hidden_layers): """-> [num_hidden_layers x batch x num_heads x seq_length x seq_length]""" if head_mask.dim() == 1: head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1) head_mask = head_mask.expand(num_hidden_layers, -1, -1, -1, -1) elif head_mask.dim() == 2: head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1) assert head_mask.dim() == 5, f'head_mask.dim != 5, instead {head_mask.dim()}' head_mask = head_mask.to(dtype=self.dtype) return head_mask def num_parameters(self, only_trainable: bool=False, exclude_embeddings: bool=False) -> int: """ Get number of (optionally, trainable or non-embeddings) parameters in the module. Args: only_trainable (`bool`, *optional*, defaults to `False`): Whether or not to return only the number of trainable parameters exclude_embeddings (`bool`, *optional*, defaults to `False`): Whether or not to return only the number of non-embeddings parameters Returns: `int`: The number of parameters. """ if exclude_embeddings: embedding_param_names = [f'{name}.weight' for name, module_type in self.named_modules() if isinstance(module_type, nn.Embedding)] total_parameters = [parameter for name, parameter in self.named_parameters() if name not in embedding_param_names] else: total_parameters = list(self.parameters()) total_numel = [] is_loaded_in_4bit = getattr(self, 'is_loaded_in_4bit', False) if is_loaded_in_4bit: if is_bitsandbytes_available(): import bitsandbytes as bnb else: raise ValueError('bitsandbytes is not installed but it seems that the model has been loaded in 4bit precision, something went wrong make sure to install bitsandbytes with `pip install bitsandbytes`. You also need a GPU. ') for param in total_parameters: if param.requires_grad or not only_trainable: if is_loaded_in_4bit and isinstance(param, bnb.nn.Params4bit): if hasattr(param, 'element_size'): num_bytes = param.element_size() elif hasattr(param, 'quant_storage'): num_bytes = param.quant_storage.itemsize else: num_bytes = 1 total_numel.append(param.numel() * 2 * num_bytes) else: total_numel.append(param.numel()) return sum(total_numel) def estimate_tokens(self, input_dict: dict[str, Union[torch.Tensor, Any]]) -> int: """ Helper function to estimate the total number of tokens from the model inputs. Args: inputs (`dict`): The model inputs. Returns: `int`: The total number of tokens. """ if not hasattr(self, 'warnings_issued'): self.warnings_issued = {} if self.main_input_name in input_dict: return input_dict[self.main_input_name].numel() elif 'estimate_tokens' not in self.warnings_issued: logger.warning('Could not estimate the number of tokens of the input, floating-point operations will not be computed') self.warnings_issued['estimate_tokens'] = True return 0 def floating_point_ops(self, input_dict: dict[str, Union[torch.Tensor, Any]], exclude_embeddings: bool=True) -> int: """ Get number of (optionally, non-embeddings) floating-point operations for the forward and backward passes of a batch with this transformer model. Default approximation neglects the quadratic dependency on the number of tokens (valid if `12 * d_model << sequence_length`) as laid out in [this paper](https://huggingface.co/papers/2001.08361) section 2.1. Should be overridden for transformers with parameter re-use e.g. 
Albert or Universal Transformers, or if doing long-range modeling with very high sequence lengths. Args: batch_size (`int`): The batch size for the forward pass. sequence_length (`int`): The number of tokens in each line of the batch. exclude_embeddings (`bool`, *optional*, defaults to `True`): Whether or not to count embedding and softmax operations. Returns: `int`: The number of floating-point operations. """ return 6 * self.estimate_tokens(input_dict) * self.num_parameters(exclude_embeddings=exclude_embeddings)
class ModuleUtilsMixin: ''' A few utilities for `torch.nn.Modules`, to be used as a mixin. ''' @staticmethod def _hook_rss_memory_pre_forward(module, *args, **kwargs): pass @staticmethod def _hook_rss_memory_post_forward(module, *args, **kwargs): pass def add_memory_hooks(self): ''' Add a memory hook before and after each sub-module forward pass to record increase in memory consumption. Increase in memory consumption is stored in a `mem_rss_diff` attribute for each module and can be reset to zero with `model.reset_memory_hooks_state()`. ''' pass def reset_memory_hooks_state(self): ''' Reset the `mem_rss_diff` attribute of each module (see [`~modeling_utils.ModuleUtilsMixin.add_memory_hooks`]). ''' pass @property def device(self) -> torch.device: ''' `torch.device`: The device on which the module is (assuming that all the module parameters are on the same device). ''' pass @property def dtype(self) -> torch.dtype: ''' `torch.dtype`: The dtype of the module (assuming that all the module parameters have the same dtype). ''' pass def invert_attention_mask(self, encoder_attention_mask: Tensor) -> Tensor: ''' Invert an attention mask (e.g., switches 0. and 1.). Args: encoder_attention_mask (`torch.Tensor`): An attention mask. Returns: `torch.Tensor`: The inverted attention mask. ''' pass @staticmethod def create_extended_attention_mask_for_decoder(input_shape, attention_mask, device=None): pass def get_extended_attention_mask(self, attention_mask: Tensor, input_shape: tuple[int], device: Optional[torch.device]=None, dtype: Optional[torch.dtype]=None) -> Tensor: ''' Makes broadcastable attention and causal masks so that future and masked tokens are ignored. Arguments: attention_mask (`torch.Tensor`): Mask with ones indicating tokens to attend to, zeros for tokens to ignore. input_shape (`tuple[int]`): The shape of the input to the model. Returns: `torch.Tensor` The extended attention mask, with a the same dtype as `attention_mask.dtype`. ''' pass def get_head_mask(self, head_mask: Optional[Tensor], num_hidden_layers: int, is_attention_chunked: bool=False) -> Tensor: ''' Prepare the head mask if needed. Args: head_mask (`torch.Tensor` with shape `[num_heads]` or `[num_hidden_layers x num_heads]`, *optional*): The mask indicating if we should keep the heads or not (1.0 for keep, 0.0 for discard). num_hidden_layers (`int`): The number of hidden layers in the model. is_attention_chunked (`bool`, *optional*, defaults to `False`): Whether or not the attentions scores are computed by chunks or not. Returns: `torch.Tensor` with shape `[num_hidden_layers x batch x num_heads x seq_length x seq_length]` or list with `[None]` for each layer. ''' pass def _convert_head_mask_to_5d(self, head_mask, num_hidden_layers): '''-> [num_hidden_layers x batch x num_heads x seq_length x seq_length]''' pass def num_parameters(self, only_trainable: bool=False, exclude_embeddings: bool=False) -> int: ''' Get number of (optionally, trainable or non-embeddings) parameters in the module. Args: only_trainable (`bool`, *optional*, defaults to `False`): Whether or not to return only the number of trainable parameters exclude_embeddings (`bool`, *optional*, defaults to `False`): Whether or not to return only the number of non-embeddings parameters Returns: `int`: The number of parameters. ''' pass def estimate_tokens(self, input_dict: dict[str, Union[torch.Tensor, Any]]) -> int: ''' Helper function to estimate the total number of tokens from the model inputs. Args: inputs (`dict`): The model inputs. 
Returns: `int`: The total number of tokens. ''' pass def floating_point_ops(self, input_dict: dict[str, Union[torch.Tensor, Any]], exclude_embeddings: bool=True) -> int: ''' Get number of (optionally, non-embeddings) floating-point operations for the forward and backward passes of a batch with this transformer model. Default approximation neglects the quadratic dependency on the number of tokens (valid if `12 * d_model << sequence_length`) as laid out in [this paper](https://huggingface.co/papers/2001.08361) section 2.1. Should be overridden for transformers with parameter re-use e.g. Albert or Universal Transformers, or if doing long-range modeling with very high sequence lengths. Args: batch_size (`int`): The batch size for the forward pass. sequence_length (`int`): The number of tokens in each line of the batch. exclude_embeddings (`bool`, *optional*, defaults to `True`): Whether or not to count embedding and softmax operations. Returns: `int`: The number of floating-point operations. ''' pass
20
12
20
2
11
7
3
0.65
0
11
0
2
11
2
14
14
309
44
163
52
134
106
117
39
99
9
0
4
44
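The `ModuleUtilsMixin` record above documents several introspection helpers (`num_parameters`, `estimate_tokens`, `floating_point_ops`) that every `PreTrainedModel` inherits. The following sketch is not part of the dataset and exercises them on a tiny randomly initialised BERT; the config sizes and input shape are illustrative assumptions.

```python
import torch
from transformers import BertConfig, BertModel

config = BertConfig(hidden_size=64, num_hidden_layers=2,
                    num_attention_heads=2, intermediate_size=128)
model = BertModel(config)  # randomly initialised, no download needed

input_ids = torch.randint(0, config.vocab_size, (2, 16))

print(model.num_parameters())                           # all parameters
print(model.num_parameters(exclude_embeddings=True))    # without embedding matrices
print(model.estimate_tokens({"input_ids": input_ids}))  # 2 * 16 = 32 tokens
# Reflects the documented approximation: ~6 * tokens * non-embedding parameters.
print(model.floating_point_ops({"input_ids": input_ids}))
```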
487
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/modeling_utils.py
transformers.modeling_utils.PreTrainedModel
from .loss.loss_utils import LOSS_MAPPING from torch.utils.checkpoint import checkpoint import sys from torch.distributions import constraints import shutil import re from torch import Tensor, nn import os from .integrations.tensor_parallel import _get_parameter_tp_plan, distribute_model, initialize_tensor_parallelism, repack_weights, replace_state_dict_local_with_dtensor, shard_and_distribute_module, verify_tp_plan from packaging import version import json from huggingface_hub import split_torch_state_dict_into_shards import itertools import torch import inspect from .integrations.hub_kernels import is_kernel, load_and_register_kernel import importlib.metadata import gc import functools import copy from typing import Any, Callable, Optional, TypeVar, Union, get_type_hints from .utils.quantization_config import BitsAndBytesConfig, QuantizationMethod import collections from functools import partial, wraps from .integrations.accelerate import find_tied_parameters, init_empty_weights from .utils import ADAPTER_SAFE_WEIGHTS_NAME, ADAPTER_WEIGHTS_NAME, CONFIG_NAME, DUMMY_INPUTS, SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ContextManagers, PushToHubMixin, cached_file, check_torch_load_is_safe, copy_func, download_url, extract_commit_hash, has_file, is_accelerate_available, is_bitsandbytes_available, is_flash_attn_2_available, is_flash_attn_3_available, is_kernels_available, is_offline_mode, is_optimum_available, is_peft_available, is_remote_url, is_safetensors_available, is_torch_flex_attn_available, is_torch_greater_or_equal, is_torch_mlu_available, is_torch_npu_available, is_torch_xla_available, is_torch_xpu_available, is_torchao_available, logging from .utils.import_utils import ENV_VARS_TRUE_VALUES, is_huggingface_hub_greater_or_equal, is_sagemaker_mp_enabled, is_torch_fx_proxy, is_torchdynamo_compiling from .utils.hub import create_and_tag_model_card, get_checkpoint_shard_files from .quantizers.quantizers_utils import get_module_from_name from .integrations import PeftAdapterMixin, deepspeed_config, is_deepspeed_zero3_enabled, is_fsdp_enabled from .quantizers.auto import get_hf_quantizer from .generation import CompileConfig, GenerationConfig from .utils.generic import _CAN_RECORD_REGISTRY, GeneralInterface, OutputRecorder from .quantizers import HfQuantizer from .dynamic_module_utils import custom_object_save from .pytorch_utils import id_tensor_storage from .distributed import DistributedConfig import warnings from .modeling_flash_attention_utils import lazy_import_flash_attention from .configuration_utils import PretrainedConfig import tempfile class PreTrainedModel(nn.Module, EmbeddingAccessMixin, ModuleUtilsMixin, PushToHubMixin, PeftAdapterMixin): """ Base class for all models. [`PreTrainedModel`] takes care of storing the configuration of the models and handles methods for loading, downloading and saving models as well as a few methods common to all models to: - resize the input embeddings, - prune heads in the self-attention heads. Class attributes (overridden by derived classes): - **config_class** ([`PretrainedConfig`]) -- A subclass of [`PretrainedConfig`] to use as configuration class for this model architecture. - **base_model_prefix** (`str`) -- A string indicating the attribute associated to the base model in derived classes of the same architecture adding modules on top of the base model. - **is_parallelizable** (`bool`) -- A flag indicating whether this model supports model parallelization. 
- **main_input_name** (`str`) -- The name of the principal input to the model (often `input_ids` for NLP models, `pixel_values` for vision models and `input_values` for speech models). - **can_record_outputs** (dict): """ config_class = None base_model_prefix = '' main_input_name = 'input_ids' model_tags = None _checkpoint_conversion_mapping = {} _auto_class = None _no_split_modules = None _skip_keys_device_placement = None _keep_in_fp32_modules = None _keep_in_fp32_modules_strict = None _keys_to_ignore_on_load_missing = None _keys_to_ignore_on_load_unexpected = None _keys_to_ignore_on_save = None _tied_weights_keys = None is_parallelizable = False supports_gradient_checkpointing = False _is_stateful = False _supports_flash_attn = False _supports_sdpa = False _supports_flex_attn = False _can_compile_fullgraph = False _tp_plan = None _tp_size = None _pp_plan = None _supports_attention_backend = False _can_record_outputs = None @property @torch._dynamo.allow_in_graph def can_record_outputs(self) -> dict[str, OutputRecorder]: """ Maps output names (e.g., "attentions", "hidden_states") to either: - A module class (e.g., `LlamaDecoderLayer`), using default index conventions: * index=0 for "hidden_states" * index=1 for "attentions" - Or an `OutputRecorder(...)` with `target_class`, optional `index`, and `layer_name`. Examples: These two are equivalent: ```python _can_record_outputs = { "attentions": LlamaAttention, "hidden_states": LlamaDecoderLayer } _can_record_outputs = { "attentions": OutputRecorder(LlamaAttention, index=1), "hidden_states": OutputRecorder(LlamaDecoderLayer, index=0) } ``` This means you can record outputs from the same class, by specifying a layer name. Before collecting outputs, we check that they come from this layer. If you have cross attention that come from `LlamaAttention` and self attention that also come from `LlamaAttention` but from `self_attn` you can do this: ```python class LlamaModel(PreTrainedModel): _can_record_outputs = { "attentions": OutputRecorder(LlamaAttention, index=1, layer-name="self_attn"), "cross_attentions": OutputRecorder(LlamaAttention, index=1, layer_name="cross_attn") } ``` """ return self._can_record_outputs or {} @property def dummy_inputs(self) -> dict[str, torch.Tensor]: """ `dict[str, torch.Tensor]`: Dummy inputs to do a forward pass in the network. """ return {'input_ids': torch.tensor(DUMMY_INPUTS)} def __init_subclass__(cls, **kwargs): super().__init_subclass__(**kwargs) child_annotation = cls.__dict__.get('__annotations__', {}).get('config', None) child_attribute = cls.__dict__.get('config_class', None) full_annotation = get_type_hints(cls).get('config', None) full_attribute = cls.config_class if child_attribute is not None: cls.config_class = child_attribute elif child_annotation is not None: cls.config_class = child_annotation elif full_attribute is not None: cls.config_class = full_attribute elif full_annotation is not None: cls.config_class = full_annotation def __init__(self, config: PretrainedConfig, *inputs, **kwargs): super().__init__() if not isinstance(config, PretrainedConfig): raise TypeError(f'Parameter config in `{self.__class__.__name__}(config)` should be an instance of class `PretrainedConfig`. 
To create a model from a pretrained model use `model = {self.__class__.__name__}.from_pretrained(PRETRAINED_MODEL_NAME)`') self.config = config self.config._attn_implementation_internal = self._check_and_adjust_attn_implementation(self.config._attn_implementation, is_init_check=True) loss_type = self.__class__.__name__ if loss_type not in LOSS_MAPPING: loss_groups = f"({'|'.join(LOSS_MAPPING)})" loss_type = re.findall(loss_groups, self.__class__.__name__) if len(loss_type) > 0: loss_type = loss_type[0] else: loss_type = None self.loss_type = loss_type self.name_or_path = config.name_or_path self.warnings_issued = {} self.generation_config = GenerationConfig.from_model_config(config) if self.can_generate() else None self._keep_in_fp32_modules = copy.copy(self.__class__._keep_in_fp32_modules) self._keep_in_fp32_modules_strict = copy.copy(self.__class__._keep_in_fp32_modules_strict) self._no_split_modules = self._no_split_modules or [] _CAN_RECORD_REGISTRY[str(self.__class__)] = self._can_record_outputs def post_init(self): """ A method executed at the end of each Transformer model initialization, to execute code that needs the model's modules properly initialized (such as weight initialization). This is also used when the user is running distributed code. We add hooks to the modules here, according to the model's tp_plan! """ self.init_weights() self._backward_compatibility_gradient_checkpointing() if self._keep_in_fp32_modules is not None or self._keep_in_fp32_modules_strict is not None: all_parameters = {name for name, _ in self.named_parameters() if len(name) > 0} unique_module_names = set() for param in all_parameters: unique_module_names.update([name for name in param.split('.') if not name.isnumeric() and name not in ['weight', 'bias']]) if self._keep_in_fp32_modules is not None: for module in self._keep_in_fp32_modules: if module not in unique_module_names: raise ValueError(f'{module} was specified in the `_keep_in_fp32_modules` list, but is not part of the modules in {self.__class__.__name__}') if self._keep_in_fp32_modules_strict is not None: for module in self._keep_in_fp32_modules_strict: if module not in unique_module_names: raise ValueError(f'{module} was specified in the `_keep_in_fp32_modules_strict` list, but is not part of the modules in {self.__class__.__name__}') self._pp_plan = self.config.base_model_pp_plan.copy() if self.config.base_model_pp_plan is not None else {} self._tp_plan = self.config.base_model_tp_plan.copy() if self.config.base_model_tp_plan is not None else {} self._ep_plan = self.config.base_model_ep_plan.copy() if self.config.base_model_ep_plan is not None else {} for name, module in self.named_children(): if (plan := getattr(module, '_ep_plan', None)): self._ep_plan.update({f'{name}.{k}': v for k, v in plan.copy().items()}) if (plan := getattr(module, '_tp_plan', None)): self._tp_plan.update({f'{name}.{k}': v for k, v in plan.copy().items()}) if (plan := getattr(module, '_pp_plan', None)): self._pp_plan.update({f'{name}.{k}': v for k, v in plan.copy().items()}) @property def tp_plan(self) -> dict[str, str]: """ The full tp plan for the model's modules """ if hasattr(self.config, 'distributed_config') and self.config.distributed_config.enable_expert_parallel: return self._ep_plan return self._tp_plan @property def pp_plan(self) -> dict[str, tuple[str, str]]: return self._pp_plan @tp_plan.setter def tp_plan(self, plan: dict[str, str]): if plan is not None: from .integrations.tensor_parallel import ALL_PARALLEL_STYLES for layer_pattern, parallel_style in 
plan.items(): if parallel_style not in ALL_PARALLEL_STYLES: raise ValueError(f"Unsupported tensor parallel style '{parallel_style}' for layer '{layer_pattern}'. Supported styles are {list(ALL_PARALLEL_STYLES.keys())}") if hasattr(self, 'named_parameters'): model_param_names = [name for name, _ in self.named_parameters()] if model_param_names: import re for layer_pattern in plan.keys(): regex_pattern = layer_pattern.replace('*', '\\d+') pattern_matched = False for param_name in model_param_names: if re.match(regex_pattern, param_name): pattern_matched = True break if not pattern_matched: pattern_parts = layer_pattern.split('.') flexible_matched = False for param_name in model_param_names: param_parts = param_name.split('.') if len(pattern_parts) <= len(param_parts): match_count = 0 for i, pattern_part in enumerate(pattern_parts): if pattern_part == '*': match_count += 1 elif i < len(param_parts) and pattern_part == param_parts[i]: match_count += 1 if match_count == len(pattern_parts): flexible_matched = True break if not flexible_matched: import warnings warnings.warn(f"Layer pattern '{layer_pattern}' does not match any parameters in the model. This rule may not be applied during tensor parallelization.") self._tp_plan = plan if plan is not None else {} @pp_plan.setter def pp_plan(self, plan: dict[str, tuple[str, str]]): self._pp_plan = plan def dequantize(self): """ Potentially dequantize the model in case it has been quantized by a quantization method that support dequantization. """ hf_quantizer = getattr(self, 'hf_quantizer', None) if hf_quantizer is None: raise ValueError('You need to first quantize your model in order to dequantize it') return hf_quantizer.dequantize(self) def _backward_compatibility_gradient_checkpointing(self): if self.supports_gradient_checkpointing and getattr(self.config, 'gradient_checkpointing', False): self.gradient_checkpointing_enable() delattr(self.config, 'gradient_checkpointing') def add_model_tags(self, tags: Union[list[str], str]) -> None: """ Add custom tags into the model that gets pushed to the Hugging Face Hub. Will not overwrite existing tags in the model. Args: tags (`Union[list[str], str]`): The desired tags to inject in the model Examples: ```python from transformers import AutoModel model = AutoModel.from_pretrained("google-bert/bert-base-cased") model.add_model_tags(["custom", "custom-bert"]) # Push the model to your namespace with the name "my-custom-bert". model.push_to_hub("my-custom-bert") ``` """ if isinstance(tags, str): tags = [tags] if self.model_tags is None: self.model_tags = [] for tag in tags: if tag not in self.model_tags: self.model_tags.append(tag) @classmethod @restore_default_dtype def _from_config(cls, config, **kwargs): """ All context managers that the model should be initialized under go here. Args: dtype (`torch.dtype`, *optional*): Override the default `dtype` and load the model under this dtype. """ dtype = kwargs.pop('dtype', config.dtype) if (torch_dtype := kwargs.pop('torch_dtype', None)) is not None: logger.warning_once('`torch_dtype` is deprecated! 
Use `dtype` instead!') dtype = dtype if dtype != config.dtype else torch_dtype if isinstance(dtype, str): dtype = getattr(torch, dtype) dtype_orig = None if dtype is not None: dtype_orig = cls._set_default_dtype(dtype) if 'attn_implementation' in kwargs: config._attn_implementation = kwargs.pop('attn_implementation') if is_deepspeed_zero3_enabled() and (not _is_quantized) and (not _is_ds_init_called): logger.info('Detected DeepSpeed ZeRO-3: activating zero.init() for this model') import deepspeed init_contexts = [deepspeed.zero.Init(config_dict_or_path=deepspeed_config()), set_zero3_state()] with ContextManagers(init_contexts): model = cls(config, **kwargs) else: model = cls(config, **kwargs) if dtype_orig is not None: torch.set_default_dtype(dtype_orig) return model @classmethod def _set_default_dtype(cls, dtype: torch.dtype) -> torch.dtype: """ Change the default dtype and return the previous one. This is needed when wanting to instantiate the model under specific dtype. Args: dtype (`torch.dtype`): a floating dtype to set to. Returns: `torch.dtype`: the original `dtype` that can be used to restore `torch.set_default_dtype(dtype)` if it was modified. If it wasn't, returns `None`. Note `set_default_dtype` currently only works with floating-point types and asserts if for example, `torch.int64` is passed. So if a non-float `dtype` is passed this functions will throw an exception. """ if not dtype.is_floating_point: raise ValueError(f"Can't instantiate {cls.__name__} model under dtype={dtype} since it is not a floating point dtype") logger.info(f'Instantiating {cls.__name__} model under default dtype {dtype}.') dtype_orig = torch.get_default_dtype() torch.set_default_dtype(dtype) return dtype_orig @property def base_model(self) -> nn.Module: """ `torch.nn.Module`: The main body of the model. """ return getattr(self, self.base_model_prefix, self) @classmethod def can_generate(cls) -> bool: """ Returns whether this model can generate sequences with `.generate()` from the `GenerationMixin`. Under the hood, on classes where this function returns True, some generation-specific changes are triggered: for instance, the model instance will have a populated `generation_config` attribute. Returns: `bool`: Whether this model can generate sequences with `.generate()`. """ if 'GenerationMixin' in str(cls.__bases__): return True for base in cls.__bases__: if not hasattr(base, 'can_generate'): continue if 'PreTrainedModel' not in str(base) and base.can_generate(): return True if hasattr(cls, 'prepare_inputs_for_generation'): logger.warning(f"{cls.__name__} has generative capabilities, as `prepare_inputs_for_generation` is explicitly defined. However, it doesn't directly inherit from `GenerationMixin`. From 👉v4.50👈 onwards, `PreTrainedModel` will NOT inherit from `GenerationMixin`, and this model will lose the ability to call `generate` and other related functions.\n - If you're using `trust_remote_code=True`, you can get rid of this warning by loading the model with an auto class. 
See https://huggingface.co/docs/transformers/en/model_doc/auto#auto-classes\n - If you are the owner of the model architecture code, please modify your model class such that it inherits from `GenerationMixin` (after `PreTrainedModel`, otherwise you'll get an exception).\n - If you are not the owner of the model architecture class, please contact the model code owner to update it.") return False def _flash_attn_2_can_dispatch(self, is_init_check: bool=False) -> bool: """ Check the availability of Flash Attention 2 for a given model. Args: is_init_check (`bool`, *optional*): Whether this check is performed early, i.e. at __init__ time, or later when the model and its weights are fully instantiated. This is needed as we also check the devices of the weights, and/or if the model uses BetterTransformer, which are only available later after __init__. This allows to raise proper exceptions early before instantiating the full models if we know that the model does not support the requested attention. """ dtype = self.config.dtype if not (self._supports_flash_attn or getattr(self, '_supports_flash_attn_2', False)): raise ValueError(f'{self.__class__.__name__} does not support Flash Attention 2.0 yet. Please request to add support where the model is hosted, on its model hub page: https://huggingface.co/{self.config._name_or_path}/discussions/new or in the Transformers GitHub repo: https://github.com/huggingface/transformers/issues/new') if not is_flash_attn_2_available(): preface = 'FlashAttention2 has been toggled on, but it cannot be used due to the following error:' install_message = 'Please refer to the documentation of https://huggingface.co/docs/transformers/perf_infer_gpu_one#flashattention-2 to install Flash Attention 2.' if is_torch_npu_available(): logger.info('Detect using FlashAttention2 on Ascend NPU.') return True if importlib.util.find_spec('flash_attn') is None: raise ImportError(f'{preface} the package flash_attn seems to be not installed. {install_message}') else: flash_attention_version = version.parse(importlib.metadata.version('flash_attn')) if torch.version.cuda: if flash_attention_version < version.parse('2.1.0'): raise ImportError(f'{preface} you need flash_attn package version to be greater or equal than 2.1.0. Detected version {flash_attention_version}. {install_message}') elif not torch.cuda.is_available(): raise ValueError(f'{preface} Flash Attention 2 is not available on CPU. Please make sure torch can access a CUDA device.') else: raise ImportError(f'{preface} Flash Attention 2 is not available. {install_message}') elif torch.version.hip: if flash_attention_version < version.parse('2.0.4'): raise ImportError(f'{preface} you need flash_attn package version to be greater or equal than 2.0.4. Detected version {flash_attention_version}. {install_message}') else: raise ImportError(f'{preface} Flash Attention 2 is not available. {install_message}') if dtype is None: logger.warning_once('You are attempting to use Flash Attention 2 without specifying a torch dtype. This might lead to unexpected behaviour') elif dtype is not None and dtype not in [torch.float16, torch.bfloat16]: logger.warning_once(f"""Flash Attention 2 only supports torch.float16 and torch.bfloat16 dtypes, but the current dype in {self.__class__.__name__} is {dtype}. You should run training or inference using Automatic Mixed-Precision via the `with torch.autocast(device_type='torch_device'):` decorator, or load the model with the `dtype` argument. 
Example: `model = AutoModel.from_pretrained("openai/whisper-tiny", attn_implementation="flash_attention_2", dtype=torch.float16)`""") if not is_init_check: if getattr(self, 'use_bettertransformer', False): raise ValueError('Flash Attention 2 and BetterTransformer API are not compatible. Please make sure to disable BetterTransformers by doing model.reverse_bettertransformer()') param_devices = list({param.device for param in self.parameters()}) if len(param_devices) == 1 and param_devices[0].type == 'cpu': if torch.cuda.is_available(): logger.warning_once("You are attempting to use Flash Attention 2 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`.") elif is_torch_mlu_available(): logger.warning_once("You are attempting to use Flash Attention 2 with a model not initialized on MLU. Make sure to move the model to MLU after initializing it on CPU with `model.to('mlu')`.") else: raise ValueError('You are attempting to use Flash Attention 2 with a model not initialized on GPU and with no GPU available. This is not supported yet. Please make sure to have access to a GPU and either initialise the model on a GPU by passing a device_map or initialising the model on CPU and then moving it to GPU.') return True def _flash_attn_3_can_dispatch(self, is_init_check: bool=False) -> bool: """ Check the availability of Flash Attention 3 for a given model. Args: is_init_check (`bool`, *optional*): Whether this check is performed early, i.e. at __init__ time, or later when the model and its weights are fully instantiated. This is needed as we also check the devices of the weights, and/or if the model uses BetterTransformer, which are only available later after __init__. This allows to raise proper exceptions early before instantiating the full models if we know that the model does not support the requested attention. """ dtype = self.config.dtype if not self._supports_flash_attn: raise ValueError(f'{self.__class__.__name__} does not support Flash Attention 3 yet. Please request to add support where the model is hosted, on its model hub page: https://huggingface.co/{self.config._name_or_path}/discussions/new or in the Transformers GitHub repo: https://github.com/huggingface/transformers/issues/new') if not is_flash_attn_3_available(): preface = 'FlashAttention3 has been toggled on, but it cannot be used due to the following error:' if importlib.util.find_spec('flash_attn_3') is None: raise ImportError(f'{preface} the package flash_attn_3 seems to be not installed.') if torch.cuda.is_available(): major, _ = torch.cuda.get_device_capability() if major < 9: raise ValueError(f'{preface} Flash Attention 3 requires compute capability >= 9.0, but found {torch.cuda.get_device_capability()} with compute capability {major}.0.') else: raise ImportError(f'{preface} Flash Attention 3 is not available.') else: raise ValueError(f'{preface} Flash Attention 3 is not available on CPU. Please make sure torch can access a CUDA device.') if dtype is None: logger.warning_once('You are attempting to use Flash Attention 3 without specifying a torch dtype. This might lead to unexpected behaviour') elif dtype is not None and dtype not in [torch.float16, torch.bfloat16]: logger.warning_once(f"""Flash Attention 3 only supports torch.float16 and torch.bfloat16 dtypes, but the current dype in {self.__class__.__name__} is {dtype}. 
You should run training or inference using Automatic Mixed-Precision via the `with torch.autocast(device_type='torch_device'):` decorator, or load the model with the `dtype` argument. Example: `model = AutoModel.from_pretrained("meta-llama/Llama-3.2-1B", attn_implementation="flash_attention_3", dtype=torch.float16)`""") if getattr(self.config, 'alibi', False) or getattr(self.config, 'use_alibi', False): raise ValueError('Model is configured to use ALiBi, which is not supported by Flash Attention 3.') if hasattr(self.config, 'attention_dropout') and self.config.attention_dropout > 0: raise ValueError(f'Model has attention_dropout={self.config.attention_dropout}, which is not supported by Flash Attention 3.') if not is_init_check: param_devices = list({param.device for param in self.parameters()}) if len(param_devices) == 1 and param_devices[0].type == 'cpu': if torch.cuda.is_available(): logger.warning_once("You are attempting to use Flash Attention 3 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`.") else: raise ValueError('You are attempting to use Flash Attention 3 with a model not initialized on GPU and with no GPU available. This is not supported yet. Please make sure to have access to a GPU and either initialise the model on a GPU by passing a device_map or initialising the model on CPU and then moving it to GPU.') return True def _sdpa_can_dispatch(self, is_init_check: bool=False) -> bool: """ Check the availability of SDPA for a given model. Args: is_init_check (`bool`, *optional*): Whether this check is performed early, i.e. at __init__ time, or later when the model and its weights are fully instantiated. This is needed as we also check the devices of the weights, and/or if the model uses BetterTransformer, which are only available later after __init__. This allows to raise proper exceptions early before instantiating the full models if we know that the model does not support the requested attention. """ if not self._supports_sdpa: raise ValueError(f'{self.__class__.__name__} does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation="eager"` meanwhile. Example: `model = AutoModel.from_pretrained("openai/whisper-tiny", attn_implementation="eager")`') if torch.version.hip is not None and torch.cuda.device_count() > 1 and (version.parse(torch.__version__) < version.parse('2.4.1')): logger.warning_once('Using the `SDPA` attention implementation on multi-gpu setup with ROCM may lead to performance issues due to the FA backend. Disabling it to use alternative backends.') torch.backends.cuda.enable_flash_sdp(False) if not is_init_check: if getattr(self, 'use_bettertransformer', False): raise ValueError('SDPA and BetterTransformer API are not compatible. Please make sure to disable BetterTransformers by doing model.reverse_bettertransformer()') return True def _flex_attn_can_dispatch(self, is_init_check: bool=False) -> bool: """ Check the availability of Flex Attention for a given model. Args: is_init_check (`bool`, *optional*): Whether this check is performed early, i.e. at __init__ time, or later when the model and its weights are fully instantiated. 
This is needed as we also check the devices of the weights, and/or if the model uses BetterTransformer, which are only available later after __init__. This allows to raise proper exceptions early before instantiating the full models if we know that the model does not support the requested attention. """ if not self._supports_flex_attn: raise ValueError(f"""{self.__class__.__name__} does not support an attention implementation through torch's flex_attention. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/34809. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation="eager"` meanwhile. Example: `model = AutoModel.from_pretrained("openai/whisper-tiny", attn_implementation="eager")`""") if not is_torch_flex_attn_available(): raise ImportError('PyTorch Flex Attention requirements in Transformers are not met. Please install torch>=2.5.0.') if not is_init_check: if getattr(self, 'use_bettertransformer', False): raise ValueError('FlexAttention and BetterTransformer API are not compatible. Please make sure to disable BetterTransformers by doing model.reverse_bettertransformer()') return True def _check_and_adjust_attn_implementation(self, attn_implementation: Optional[str], is_init_check: bool=False) -> str: """ Check that the `attn_implementation` exists and is supported by the models, and try to get the kernel from hub if it matches hf kernels pattern. Args: attn_implementation (`str` or `None`): The attention implementation to check for existence/validity. is_init_check (`bool`, *optional*): Whether this check is performed early, i.e. at __init__ time, or later when the model and its weights are fully instantiated. This is needed as we also check the devices of the weights, and/or if the model uses BetterTransformer, which are only available later after __init__. This allows to raise proper exceptions early before instantiating the full models if we know that the model does not support the requested attention. Returns: `str`: The final attention implementation to use, including potential fallbacks from sdpa to eager, or from None to sdpa (to potentially eager). 
""" applicable_attn_implementation = attn_implementation if applicable_attn_implementation == 'flash_attention_2' and self._supports_flash_attn and (not is_flash_attn_2_available()) and is_kernels_available(): applicable_attn_implementation = 'kernels-community/flash-attn' if is_kernel(applicable_attn_implementation): try: load_and_register_kernel(applicable_attn_implementation) if attn_implementation == 'flash_attention_2': logger.warning_once('You do not have `flash_attn` installed, using `kernels-community/flash-attn` from the `kernels` library instead!') except Exception as e: if attn_implementation == 'flash_attention_2': self._flash_attn_2_can_dispatch() logger.warning_once(f'Could not find a kernel matching `{applicable_attn_implementation}` compatible with your device in the hub:\n{e}.\nUsing default attention implementation instead (sdpa if available, eager otherwise).') try: self._sdpa_can_dispatch(is_init_check) applicable_attn_implementation = 'sdpa' except (ValueError, ImportError) as e: applicable_attn_implementation = 'eager' else: applicable_attn_implementation = self.get_correct_attn_implementation(applicable_attn_implementation, is_init_check) if applicable_attn_implementation.startswith('flash_attention'): lazy_import_flash_attention(applicable_attn_implementation) return applicable_attn_implementation def get_correct_attn_implementation(self, requested_attention: Optional[str], is_init_check: bool=False) -> str: applicable_attention = 'sdpa' if requested_attention is None else requested_attention if applicable_attention not in ['eager'] + ALL_ATTENTION_FUNCTIONS.valid_keys(): message = f'Specified `attn_implementation="{applicable_attention}"` is not supported. The only possible arguments are `attn_implementation="eager"`' if self._supports_flash_attn or getattr(self, '_supports_flash_attn_2', False): message += ', `"attn_implementation=flash_attention_3"`, `"attn_implementation=flash_attention_2"`' if self._supports_sdpa: message += ', `"attn_implementation=sdpa"' if self._supports_flex_attn: message += ', `"attn_implementation=flex_attention"`' raise ValueError(message + '.') if applicable_attention == 'flash_attention_2': self._flash_attn_2_can_dispatch(is_init_check) elif applicable_attention == 'flash_attention_3': self._flash_attn_3_can_dispatch(is_init_check) elif applicable_attention == 'flex_attention': self._flex_attn_can_dispatch(is_init_check) elif applicable_attention == 'sdpa': try: self._sdpa_can_dispatch(is_init_check) except (ValueError, ImportError) as e: if requested_attention == 'sdpa': raise e applicable_attention = 'eager' return applicable_attention @classmethod def _can_set_attn_implementation(cls) -> bool: """Detect whether the class supports setting its attention implementation dynamically. It is an ugly check based on opening the file, but avoids maintaining yet another property flag. """ class_file = sys.modules[cls.__module__].__file__ with open(class_file, 'r') as f: code = f.read() if re.search('class \\w+Attention\\(nn.Module\\)', code): return 'eager_attention_forward' in code and 'ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]' in code else: return True def set_attn_implementation(self, attn_implementation: Union[str, dict]): """ Set the requested `attn_implementation` for this model. Args: attn_implementation (`str` or `dict`): The attention implementation to set for this model. 
It can be either a `str`, in which case it will be dispatched to all submodels if relevant, or a `dict` where keys are the sub_configs name, in which case each submodel will dispatch the corresponding value. """ requested_implementation = attn_implementation if not isinstance(attn_implementation, dict) else attn_implementation.get('', self.config._attn_implementation) if requested_implementation != self.config._attn_implementation: if not self._can_set_attn_implementation(): logger.warning(f'{self.__class__.__name__} does not support setting its attention implementation dynamically, because it does not follow the functional approach based on AttentionInterface (see https://huggingface.co/docs/transformers/en/attention_interface)') else: requested_implementation = self._check_and_adjust_attn_implementation(requested_implementation, is_init_check=False) self.config._attn_implementation_internal = requested_implementation for submodule in self.modules(): if submodule is not self and isinstance(submodule, PreTrainedModel) and (submodule.config.__class__ != self.config.__class__) and (not hasattr(submodule.config, '_attn_was_changed')): if not submodule._can_set_attn_implementation(): logger.warning(f'{submodule.__class__.__name__} does not support setting its attention implementation dynamically, because it does not follow the functional approach based on AttentionInterface (see https://huggingface.co/docs/transformers/en/attention_interface)') else: sub_implementation = requested_implementation if isinstance(attn_implementation, dict): for subconfig_key in self.config.sub_configs: if getattr(self.config, subconfig_key) is submodule.config: sub_implementation = attn_implementation.get(subconfig_key, submodule.config._attn_implementation) break sub_implementation = submodule.get_correct_attn_implementation(sub_implementation) submodule.config._attn_implementation_internal = sub_implementation submodule.config._attn_was_changed = True for subconfig_key in self.config.sub_configs: subconfig = getattr(self.config, subconfig_key) sub_implementation = requested_implementation if not isinstance(attn_implementation, dict) else attn_implementation.get(subconfig_key, subconfig._attn_implementation) if not hasattr(subconfig, '_attn_was_changed') and sub_implementation != subconfig._attn_implementation: if sub_implementation not in ['eager'] + ALL_ATTENTION_FUNCTIONS.valid_keys(): raise ValueError(f'Specified `attn_implementation="{sub_implementation}"` is not supported for {subconfig_key}. The only possible arguments are "eager" (manual attention implementation)or one of the following: {list(ALL_ATTENTION_FUNCTIONS.valid_keys())}') subconfig._attn_implementation_internal = sub_implementation logger.warning(f'We set the attention implementation for the sub-config `{subconfig_key}` to `{sub_implementation}` without finding the associated sub-model. For this reason we could not check if the model supports it. You may encounter undefined behavior.') elif hasattr(subconfig, '_attn_was_changed'): del subconfig._attn_was_changed def enable_input_require_grads(self): """ Enables the gradients for the input embeddings. This is useful for fine-tuning adapter weights while keeping the model weights fixed. """ def make_inputs_require_grads(module, input, output): output.requires_grad_(True) self._require_grads_hook = self.get_input_embeddings().register_forward_hook(make_inputs_require_grads) def disable_input_require_grads(self): """ Removes the `_require_grads_hook`. 
""" self._require_grads_hook.remove() def get_decoder(self): """ Best-effort lookup of the *decoder* module. Order of attempts (covers ~85 % of current usages): 1. `self.decoder` 2. `self.model` (many wrappers store the decoder here) 3. `self.model.get_decoder()` (nested wrappers) 4. fallback: raise for the few exotic models that need a bespoke rule """ if hasattr(self, 'decoder'): return self.decoder if hasattr(self, 'model'): inner = self.model if hasattr(inner, 'get_decoder') and type(inner) is not type(self): return inner.get_decoder() return inner return self def set_decoder(self, decoder): """ Symmetric setter. Mirrors the lookup logic used in `get_decoder`. """ if hasattr(self, 'decoder'): self.decoder = decoder return if hasattr(self, 'model'): inner = self.model if hasattr(inner, 'set_decoder'): inner.set_decoder(decoder) else: self.model = decoder return return def _init_weights(self, module): """ Initialize the weights. This is quite general on purpose, in the spirit of what we usually do. For more complex initialization scheme, it should be overridden by the derived `PreTrainedModel` class. In case a model adds an explicit `nn.Parameter`, this method should also be overridden in order to initialize it correctly. """ if hasattr(self.config, 'initializer_range'): std = self.config.initializer_range else: std = getattr(self.config.get_text_config(), 'initializer_range', 0.02) if isinstance(module, (nn.Linear, nn.Conv1d, nn.Conv2d, nn.Conv3d, nn.ConvTranspose1d, nn.ConvTranspose2d)): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.MultiheadAttention): module._reset_parameters() elif isinstance(module, (nn.GroupNorm, nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)) or 'LayerNorm' in module.__class__.__name__ or 'RMSNorm' in module.__class__.__name__: if hasattr(module, 'weight') and module.weight is not None: module.weight.data.fill_(1.0) if hasattr(module, 'bias') and module.bias is not None: module.bias.data.zero_() def _initialize_weights(self, module): """ Initialize the weights if they are not already initialized. """ if getattr(module, '_is_hf_initialized', False): return self._init_weights(module) module._is_hf_initialized = True @torch.no_grad() def initialize_weights(self): """ This is equivalent to calling `self.apply(self._initialize_weights)`, but correctly handles composite models. This function dynamically dispatches the correct `init_weights` function to the modules as we advance in the module graph along the recursion. It can handle an arbitrary number of sub-models. Without it, every composite model would have to recurse a second time on all sub-models explicitly in the outer-most `_init_weights`, which is extremely error prone and inefficient. Note that the `torch.no_grad()` decorator is very important as well, as most of our `_init_weights` do not use `torch.nn.init` functions (which are all no_grad by default), but simply do in-place ops such as `module.weight.data.zero_()`. 
""" if not hasattr(torch.nn.Module, 'smart_apply'): def smart_apply(self, fn): for module in self.children(): if isinstance(module, PreTrainedModel): module.smart_apply(module._initialize_weights) else: module.smart_apply(fn) fn(self) return self torch.nn.Module.smart_apply = smart_apply self.smart_apply(self._initialize_weights) def tie_embeddings_and_encoder_decoder(self): """ If set in the config, tie the weights between the input embeddings and the output embeddings, and the encoder and decoder. If the `torchscript` flag is set in the configuration, can't handle parameter sharing so we are cloning the weights instead. """ if getattr(self.config.get_text_config(decoder=True), 'tie_word_embeddings', True): output_embeddings = self.get_output_embeddings() if output_embeddings is not None: self._tie_or_clone_weights(output_embeddings, self.get_input_embeddings()) if getattr(self.config, 'is_encoder_decoder', False) and getattr(self.config, 'tie_encoder_decoder', False): if hasattr(self, self.base_model_prefix): self = getattr(self, self.base_model_prefix) tied_weights = self._tie_encoder_decoder_weights(self.encoder, self.decoder, self.base_model_prefix, 'encoder') self._dynamic_tied_weights_keys = tied_weights def tie_weights(self): """ Recursively (for all submodels) tie all the weights of the model. """ for module in self.modules(): if isinstance(module, PreTrainedModel): module.tie_embeddings_and_encoder_decoder() if hasattr(module, '_tie_weights'): module._tie_weights() @staticmethod def _tie_encoder_decoder_weights(encoder: nn.Module, decoder: nn.Module, base_model_prefix: str, base_encoder_name: str): uninitialized_encoder_weights: list[str] = [] tied_weights: list[str] = [] if decoder.__class__ != encoder.__class__: logger.info(f'{decoder.__class__} and {encoder.__class__} are not equal. In this case make sure that all encoder weights are correctly initialized.') def tie_encoder_to_decoder_recursively(decoder_pointer: nn.Module, encoder_pointer: nn.Module, module_name: str, base_encoder_name: str, uninitialized_encoder_weights: list[str], depth=0, total_decoder_name='', total_encoder_name=''): assert isinstance(decoder_pointer, nn.Module) and isinstance(encoder_pointer, nn.Module), f'{decoder_pointer} and {encoder_pointer} have to be of type nn.Module' if hasattr(decoder_pointer, 'weight'): assert hasattr(encoder_pointer, 'weight') encoder_pointer.weight = decoder_pointer.weight tied_weights.append(f'{base_encoder_name}{total_encoder_name}.weight') if hasattr(decoder_pointer, 'bias'): assert hasattr(encoder_pointer, 'bias') tied_weights.append(f'{base_encoder_name}{total_encoder_name}.bias') encoder_pointer.bias = decoder_pointer.bias return encoder_modules = encoder_pointer._modules decoder_modules = decoder_pointer._modules if len(decoder_modules) > 0: assert len(encoder_modules) > 0, f'Encoder module {encoder_pointer} does not match decoder module {decoder_pointer}' all_encoder_weights = {module_name + '/' + sub_name for sub_name in encoder_modules} encoder_layer_pos = 0 for name in decoder_modules: if name.isdigit(): encoder_name = str(int(name) + encoder_layer_pos) decoder_name = name if not isinstance(decoder_modules[decoder_name], type(encoder_modules[encoder_name])) and len(encoder_modules) != len(decoder_modules): encoder_layer_pos -= 1 continue elif name not in encoder_modules: continue elif depth > 500: raise ValueError('Max depth of recursive function `tie_encoder_to_decoder` reached. 
It seems that there is a circular dependency between two or more `nn.Modules` of your model.') else: decoder_name = encoder_name = name tie_encoder_to_decoder_recursively(decoder_modules[decoder_name], encoder_modules[encoder_name], module_name + '/' + name, base_encoder_name, uninitialized_encoder_weights, depth=depth + 1, total_encoder_name=f'{total_encoder_name}.{encoder_name}', total_decoder_name=f'{total_decoder_name}.{decoder_name}') all_encoder_weights.remove(module_name + '/' + encoder_name) uninitialized_encoder_weights += list(all_encoder_weights) tie_encoder_to_decoder_recursively(decoder, encoder, base_model_prefix, base_encoder_name, uninitialized_encoder_weights) if len(uninitialized_encoder_weights) > 0: logger.warning(f'The following encoder weights were not tied to the decoder {uninitialized_encoder_weights}') return tied_weights def _tie_or_clone_weights(self, output_embeddings, input_embeddings): """Tie or clone module weights depending of whether we are using TorchScript or not""" if self.config.torchscript: output_embeddings.weight = nn.Parameter(input_embeddings.weight.clone()) else: output_embeddings.weight = input_embeddings.weight if hasattr(input_embeddings, '_is_hooked') and getattr(input_embeddings, '_hf_tp_plan', None): output_embeddings._is_hooked = input_embeddings._is_hooked output_embeddings._hf_tp_plan = input_embeddings._hf_tp_plan output_embeddings._forward_hooks = input_embeddings._forward_hooks output_embeddings._forward_pre_hooks = input_embeddings._forward_pre_hooks output_embeddings.__repr__ = lambda: f'{output_embeddings.__repr__()}\nTP Plan: {output_embeddings._hf_tp_plan}' if getattr(output_embeddings, 'bias', None) is not None: output_embeddings.bias.data = nn.functional.pad(output_embeddings.bias.data, (0, output_embeddings.weight.shape[0] - output_embeddings.bias.shape[0]), 'constant', 0) if hasattr(output_embeddings, 'out_features') and hasattr(input_embeddings, 'num_embeddings'): output_embeddings.out_features = input_embeddings.num_embeddings def _get_no_split_modules(self, device_map: str): """ Get the modules of the model that should not be spit when using device_map. We iterate through the modules to get the underlying `_no_split_modules`. Args: device_map (`str`): The device map value. Options are ["auto", "balanced", "balanced_low_0", "sequential"] Returns: `list[str]`: List of modules that should not be split """ _no_split_modules = set() modules_to_check = [self] while len(modules_to_check) > 0: module = modules_to_check.pop(-1) if module.__class__.__name__ not in _no_split_modules: if isinstance(module, PreTrainedModel): if module._no_split_modules is None: raise ValueError(f"{module.__class__.__name__} does not support `device_map='{device_map}'`. To implement support, the model class needs to implement the `_no_split_modules` attribute.") else: _no_split_modules = _no_split_modules | set(module._no_split_modules) modules_to_check += list(module.children()) return list(_no_split_modules) def resize_token_embeddings(self, new_num_tokens: Optional[int]=None, pad_to_multiple_of: Optional[int]=None, mean_resizing: bool=True) -> nn.Embedding: """ Resizes input token embeddings matrix of the model if `new_num_tokens != config.vocab_size`. Takes care of tying weights embeddings afterwards if the model class has a `tie_weights()` method. Arguments: new_num_tokens (`int`, *optional*): The new number of tokens in the embedding matrix. Increasing the size will add newly initialized vectors at the end. 
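# --- Editor-added usage sketch: `resize_token_embeddings` ---------------------
# Typical flow after extending a tokenizer's vocabulary, following the argument
# descriptions in this docstring. The checkpoint name reuses the one from the
# `from_pretrained` examples later in this file.
#
#   from transformers import AutoModel, AutoTokenizer
#
#   tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
#   model = AutoModel.from_pretrained("google-bert/bert-base-uncased")
#   tokenizer.add_tokens(["<new_token_1>", "<new_token_2>"])
#   model.resize_token_embeddings(
#       len(tokenizer),
#       pad_to_multiple_of=64,   # round the new vocab size up (Tensor Cores)
#       mean_resizing=True,      # sample new rows from the old mean/covariance
#   )
# ------------------------------------------------------------------------------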
Reducing the size will remove vectors from the end. If not provided or `None`, just returns a pointer to the input tokens `torch.nn.Embedding` module of the model without doing anything. pad_to_multiple_of (`int`, *optional*): If set will pad the embedding matrix to a multiple of the provided value.If `new_num_tokens` is set to `None` will just pad the embedding to a multiple of `pad_to_multiple_of`. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128. For more details about this, or help on choosing the correct value for resizing, refer to this guide: https://docs.nvidia.com/deeplearning/performance/dl-performance-matrix-multiplication/index.html#requirements-tc mean_resizing (`bool`): Whether to initialize the added embeddings from a multivariate normal distribution that has old embeddings' mean and covariance or to initialize them with a normal distribution that has a mean of zero and std equals `config.initializer_range`. Setting `mean_resizing` to `True` is useful when increasing the size of the embeddings of causal language models, where the generated tokens' probabilities won't be affected by the added embeddings because initializing the new embeddings with the old embeddings' mean will reduce the kl-divergence between the next token probability before and after adding the new embeddings. Refer to this article for more information: https://nlp.stanford.edu/~johnhew/vocab-expansion.html Return: `torch.nn.Embedding`: Pointer to the input tokens Embeddings Module of the model. """ model_embeds = self._resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing) if new_num_tokens is None and pad_to_multiple_of is None: return model_embeds is_quantized = hasattr(self, 'hf_quantizer') and self.hf_quantizer is not None if is_deepspeed_zero3_enabled() and (not is_quantized): import deepspeed with deepspeed.zero.GatheredParameters(model_embeds.weight, modifier_rank=None): vocab_size = model_embeds.weight.shape[0] else: vocab_size = model_embeds.weight.shape[0] self.config.get_text_config().vocab_size = vocab_size self.vocab_size = vocab_size self.tie_weights() return model_embeds def _resize_token_embeddings(self, new_num_tokens, pad_to_multiple_of=None, mean_resizing=True): old_embeddings = self.get_input_embeddings() new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens, pad_to_multiple_of, mean_resizing) if hasattr(old_embeddings, '_hf_hook'): hook = old_embeddings._hf_hook add_hook_to_module(new_embeddings, hook) old_embeddings_requires_grad = old_embeddings.weight.requires_grad new_embeddings.requires_grad_(old_embeddings_requires_grad) self.set_input_embeddings(new_embeddings) is_quantized = hasattr(self, 'hf_quantizer') and self.hf_quantizer is not None if pad_to_multiple_of is not None: if is_deepspeed_zero3_enabled() and (not is_quantized): import deepspeed with deepspeed.zero.GatheredParameters(new_embeddings.weight, modifier_rank=None): new_num_tokens = new_embeddings.weight.shape[0] else: new_num_tokens = new_embeddings.weight.shape[0] if self.get_output_embeddings() is not None and (not self.config.get_text_config(decoder=True).tie_word_embeddings): old_lm_head = self.get_output_embeddings() if isinstance(old_lm_head, torch.nn.Embedding): new_lm_head = self._get_resized_embeddings(old_lm_head, new_num_tokens, mean_resizing=mean_resizing) else: new_lm_head = self._get_resized_lm_head(old_lm_head, 
new_num_tokens, mean_resizing=mean_resizing) if hasattr(old_lm_head, '_hf_hook'): hook = old_lm_head._hf_hook add_hook_to_module(new_lm_head, hook) old_lm_head_requires_grad = old_lm_head.weight.requires_grad new_lm_head.requires_grad_(old_lm_head_requires_grad) self.set_output_embeddings(new_lm_head) return self.get_input_embeddings() def _get_resized_embeddings(self, old_embeddings: nn.Embedding, new_num_tokens: Optional[int]=None, pad_to_multiple_of: Optional[int]=None, mean_resizing: bool=True) -> nn.Embedding: """ Build a resized Embedding Module from a provided token Embedding Module. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end Args: old_embeddings (`torch.nn.Embedding`): Old embeddings to be resized. new_num_tokens (`int`, *optional*): New number of tokens in the embedding matrix. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end. If not provided or `None`, just returns a pointer to the input tokens `torch.nn.Embedding` module of the model without doing anything. pad_to_multiple_of (`int`, *optional*): If set will pad the embedding matrix to a multiple of the provided value. If `new_num_tokens` is set to `None` will just pad the embedding to a multiple of `pad_to_multiple_of`. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128. For more details about this, or help on choosing the correct value for resizing, refer to this guide: https://docs.nvidia.com/deeplearning/performance/dl-performance-matrix-multiplication/index.html#requirements-tc mean_resizing (`bool`): Whether to initialize the added embeddings from a multivariate normal distribution that has old embeddings' mean and covariance or to initialize them with a normal distribution that has a mean of zero and std equals `config.initializer_range`. Setting `mean_resizing` to `True` is useful when increasing the size of the embeddings of causal language models, where the generated tokens' probabilities will not be affected by the added embeddings because initializing the new embeddings with the old embeddings' mean will reduce the kl-divergence between the next token probability before and after adding the new embeddings. Refer to this article for more information: https://nlp.stanford.edu/~johnhew/vocab-expansion.html Return: `torch.nn.Embedding`: Pointer to the resized Embedding Module or the old Embedding Module if `new_num_tokens` is `None` """ if pad_to_multiple_of is not None: if not isinstance(pad_to_multiple_of, int): raise ValueError(f'Asking to pad the embedding matrix to a multiple of `{pad_to_multiple_of}`, which is not and integer. Please make sure to pass an integer') if new_num_tokens is None: new_num_tokens = old_embeddings.weight.shape[0] new_num_tokens = (new_num_tokens + pad_to_multiple_of - 1) // pad_to_multiple_of * pad_to_multiple_of else: logger.info(f'You are resizing the embedding layer without providing a `pad_to_multiple_of` parameter. This means that the new embedding dimension will be {new_num_tokens}. This might induce some performance reduction as *Tensor Cores* will not be available. 
For more details about this, or help on choosing the correct value for resizing, refer to this guide: https://docs.nvidia.com/deeplearning/performance/dl-performance-matrix-multiplication/index.html#requirements-tc') if new_num_tokens is None: return old_embeddings is_quantized = hasattr(self, 'hf_quantizer') and self.hf_quantizer is not None if is_deepspeed_zero3_enabled() and (not is_quantized): import deepspeed with deepspeed.zero.GatheredParameters(old_embeddings.weight, modifier_rank=None): old_num_tokens, old_embedding_dim = old_embeddings.weight.size() else: old_num_tokens, old_embedding_dim = old_embeddings.weight.size() if old_num_tokens == new_num_tokens and (not is_deepspeed_zero3_enabled()): return old_embeddings if not isinstance(old_embeddings, nn.Embedding): raise TypeError(f'Old embeddings are of type {type(old_embeddings)}, which is not an instance of {nn.Embedding}. You should either use a different resize function or make sure that `old_embeddings` are an instance of {nn.Embedding}.') new_embeddings = nn.Embedding(new_num_tokens, old_embedding_dim, device=old_embeddings.weight.device, dtype=old_embeddings.weight.dtype) if new_num_tokens > old_num_tokens and (not mean_resizing): self._init_weights(new_embeddings) elif new_num_tokens > old_num_tokens and mean_resizing: logger.warning_once("The new embeddings will be initialized from a multivariate normal distribution that has old embeddings' mean and covariance. As described in this article: https://nlp.stanford.edu/~johnhew/vocab-expansion.html. To disable this, use `mean_resizing=False`") added_num_tokens = new_num_tokens - old_num_tokens if is_deepspeed_zero3_enabled() and (not is_quantized): import deepspeed with deepspeed.zero.GatheredParameters([old_embeddings.weight], modifier_rank=None): self._init_added_embeddings_weights_with_mean(old_embeddings, new_embeddings, old_embedding_dim, old_num_tokens, added_num_tokens) else: self._init_added_embeddings_weights_with_mean(old_embeddings, new_embeddings, old_embedding_dim, old_num_tokens, added_num_tokens) n = min(old_num_tokens, new_num_tokens) if is_deepspeed_zero3_enabled() and (not is_quantized): import deepspeed params = [old_embeddings.weight, new_embeddings.weight] with deepspeed.zero.GatheredParameters(params, modifier_rank=0): new_embeddings.weight.data[:n, :] = old_embeddings.weight.data[:n, :] else: new_embeddings.weight.data[:n, :] = old_embeddings.weight.data[:n, :] if is_deepspeed_zero3_enabled() and (not is_quantized): import deepspeed params = [old_embeddings.weight, new_embeddings.weight] with deepspeed.zero.GatheredParameters(params, modifier_rank=0): old_embeddings.weight = new_embeddings.weight old_embeddings.num_embeddings = new_embeddings.weight.data.shape[0] if old_embeddings.padding_idx is not None and new_num_tokens - 1 < old_embeddings.padding_idx: old_embeddings.padding_idx = None else: old_embeddings.weight.data = new_embeddings.weight.data old_embeddings.num_embeddings = new_embeddings.weight.data.shape[0] if old_embeddings.padding_idx is not None and new_num_tokens - 1 < old_embeddings.padding_idx: old_embeddings.padding_idx = None return old_embeddings def _get_resized_lm_head(self, old_lm_head: nn.Linear, new_num_tokens: Optional[int]=None, transposed: Optional[bool]=False, mean_resizing: bool=True) -> nn.Linear: """ Build a resized Linear Module from a provided old Linear Module. Increasing the size will add newly initialized vectors at the end. 
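# --- Editor-added sketch: what "mean resizing" computes -----------------------
# Standalone illustration of the initialization performed by
# `_init_added_embeddings_weights_with_mean` (called above, defined further
# below): added rows are sampled from a multivariate normal whose mean and
# (epsilon-scaled) covariance come from the old embedding matrix. Shapes here
# are arbitrary; the 1e-9 factor mirrors the code above.
#
#   import torch
#
#   old = torch.randn(1000, 64)          # existing embedding matrix (vocab, dim)
#   mu = old.mean(dim=0)
#   centered = old - mu
#   cov = centered.T @ centered / old.shape[0]
#   dist = torch.distributions.MultivariateNormal(mu, covariance_matrix=1e-9 * cov)
#   new_rows = dist.sample((8,))         # rows for 8 newly added tokens
# ------------------------------------------------------------------------------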
Reducing the size will remove vectors from the end Args: old_lm_head (`torch.nn.Linear`): Old lm head liner layer to be resized. new_num_tokens (`int`, *optional*): New number of tokens in the linear matrix. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end. If not provided or `None`, just returns a pointer to the input tokens `torch.nn.Linear` module of the model without doing anything. transposed (`bool`, *optional*, defaults to `False`): Whether `old_lm_head` is transposed or not. If True `old_lm_head.size()` is `lm_head_dim, vocab_size` else `vocab_size, lm_head_dim`. mean_resizing (`bool`): Whether to initialize the added embeddings from a multivariate normal distribution that has old embeddings' mean and covariance or to initialize them with a normal distribution that has a mean of zero and std equals `config.initializer_range`. Setting `mean_resizing` to `True` is useful when increasing the size of the embeddings of causal language models, where the generated tokens' probabilities will not be affected by the added embeddings because initializing the new embeddings with the old embeddings' mean will reduce the kl-divergence between the next token probability before and after adding the new embeddings. Refer to this article for more information: https://nlp.stanford.edu/~johnhew/vocab-expansion.html Return: `torch.nn.Linear`: Pointer to the resized Linear Module or the old Linear Module if `new_num_tokens` is `None` """ if new_num_tokens is None: return old_lm_head is_quantized = hasattr(self, 'hf_quantizer') and self.hf_quantizer is not None if is_deepspeed_zero3_enabled() and (not is_quantized): import deepspeed with deepspeed.zero.GatheredParameters(old_lm_head.weight, modifier_rank=None): old_num_tokens, old_lm_head_dim = old_lm_head.weight.size() if not transposed else old_lm_head.weight.t().size() else: old_num_tokens, old_lm_head_dim = old_lm_head.weight.size() if not transposed else old_lm_head.weight.t().size() if old_num_tokens == new_num_tokens and (not is_deepspeed_zero3_enabled()): return old_lm_head if not isinstance(old_lm_head, nn.Linear): raise TypeError(f'Old language model head is of type {type(old_lm_head)}, which is not an instance of {nn.Linear}. You should either use a different resize function or make sure that `old_lm_head` are an instance of {nn.Linear}.') new_lm_head_shape = (old_lm_head_dim, new_num_tokens) if not transposed else (new_num_tokens, old_lm_head_dim) has_new_lm_head_bias = old_lm_head.bias is not None new_lm_head = nn.Linear(*new_lm_head_shape, bias=has_new_lm_head_bias, device=old_lm_head.weight.device, dtype=old_lm_head.weight.dtype) if new_num_tokens > old_num_tokens and (not mean_resizing): self._init_weights(new_lm_head) elif new_num_tokens > old_num_tokens and mean_resizing: logger.warning_once("The new lm_head weights will be initialized from a multivariate normal distribution that has old embeddings' mean and covariance. As described in this article: https://nlp.stanford.edu/~johnhew/vocab-expansion.html. 
To disable this, use `mean_resizing=False`") added_num_tokens = new_num_tokens - old_num_tokens if is_deepspeed_zero3_enabled() and (not is_quantized): import deepspeed params = [old_lm_head.weight] if has_new_lm_head_bias: params += [old_lm_head.bias] with deepspeed.zero.GatheredParameters(params, modifier_rank=None): self._init_added_lm_head_weights_with_mean(old_lm_head, new_lm_head, old_lm_head_dim, old_num_tokens, added_num_tokens, transposed) if has_new_lm_head_bias: self._init_added_lm_head_bias_with_mean(old_lm_head, new_lm_head, added_num_tokens) else: self._init_added_lm_head_weights_with_mean(old_lm_head, new_lm_head, old_lm_head_dim, old_num_tokens, added_num_tokens, transposed) if has_new_lm_head_bias: self._init_added_lm_head_bias_with_mean(old_lm_head, new_lm_head, added_num_tokens) num_tokens_to_copy = min(old_num_tokens, new_num_tokens) if is_deepspeed_zero3_enabled() and (not is_quantized): import deepspeed params = [old_lm_head.weight, old_lm_head.bias, new_lm_head.weight, new_lm_head.bias] with deepspeed.zero.GatheredParameters(params, modifier_rank=0): self._copy_lm_head_original_to_resized(new_lm_head, old_lm_head, num_tokens_to_copy, transposed, has_new_lm_head_bias) else: self._copy_lm_head_original_to_resized(new_lm_head, old_lm_head, num_tokens_to_copy, transposed, has_new_lm_head_bias) return new_lm_head def _init_added_embeddings_weights_with_mean(self, old_embeddings, new_embeddings, old_embedding_dim, old_num_tokens, added_num_tokens): old_embeddings_weight = old_embeddings.weight.data.to(torch.float32) mean_embeddings = torch.mean(old_embeddings_weight, axis=0) old_centered_embeddings = old_embeddings_weight - mean_embeddings covariance = old_centered_embeddings.T @ old_centered_embeddings / old_num_tokens epsilon = 1e-09 is_covariance_psd = constraints.positive_definite.check(epsilon * covariance).all() if is_covariance_psd: distribution = torch.distributions.multivariate_normal.MultivariateNormal(mean_embeddings, covariance_matrix=epsilon * covariance) new_embeddings.weight.data[-1 * added_num_tokens:, :] = distribution.sample(sample_shape=(added_num_tokens,)).to(old_embeddings.weight.dtype) else: new_embeddings.weight.data[-1 * added_num_tokens:, :] = mean_embeddings[None, :].repeat(added_num_tokens, 1).to(old_embeddings.weight.dtype) def _init_added_lm_head_weights_with_mean(self, old_lm_head, new_lm_head, old_lm_head_dim, old_num_tokens, added_num_tokens, transposed=False): if transposed: new_lm_head.weight.data = new_lm_head.weight.data.T old_lm_head.weight.data = old_lm_head.weight.data.T self._init_added_embeddings_weights_with_mean(old_lm_head, new_lm_head, old_lm_head_dim, old_num_tokens, added_num_tokens) if transposed: new_lm_head.weight.data = new_lm_head.weight.data.T old_lm_head.weight.data = old_lm_head.weight.data.T def _init_added_lm_head_bias_with_mean(self, old_lm_head, new_lm_head, added_num_tokens): bias_mean = torch.mean(old_lm_head.bias.data, axis=0, dtype=torch.float32) bias_std = torch.std(old_lm_head.bias.data, axis=0).to(torch.float32) new_lm_head.bias.data[-1 * added_num_tokens:].normal_(mean=bias_mean, std=1e-09 * bias_std) def _copy_lm_head_original_to_resized(self, new_lm_head, old_lm_head, num_tokens_to_copy, transposed, has_new_lm_head_bias): if not transposed: new_lm_head.weight.data[:num_tokens_to_copy, :] = old_lm_head.weight.data[:num_tokens_to_copy, :] else: new_lm_head.weight.data[:, :num_tokens_to_copy] = old_lm_head.weight.data[:, :num_tokens_to_copy] if has_new_lm_head_bias: new_lm_head.bias.data[:num_tokens_to_copy] 
= old_lm_head.bias.data[:num_tokens_to_copy] def resize_position_embeddings(self, new_num_position_embeddings: int): raise NotImplementedError(f'`resize_position_embeddings` is not implemented for {self.__class__}`. To implement it, you should overwrite this method in the class {self.__class__} in `modeling_{self.__class__.__module__}.py`') def get_position_embeddings(self) -> Union[nn.Embedding, tuple[nn.Embedding]]: raise NotImplementedError(f'`get_position_embeddings` is not implemented for {self.__class__}`. To implement it, you should overwrite this method in the class {self.__class__} in `modeling_{self.__class__.__module__}.py`') def init_weights(self): """ If needed prunes and maybe initializes weights. If using a custom `PreTrainedModel`, you need to implement any initialization logic in `_init_weights`. """ if self.config.pruned_heads: self.prune_heads(self.config.pruned_heads) if _init_weights: self.initialize_weights() self.tie_weights() def prune_heads(self, heads_to_prune: dict[int, list[int]]): """ Prunes heads of the base model. Arguments: heads_to_prune (`dict[int, list[int]]`): Dictionary with keys being selected layer indices (`int`) and associated values being the list of heads to prune in said layer (list of `int`). For instance {1: [0, 2], 2: [2, 3]} will prune heads 0 and 2 on layer 1 and heads 2 and 3 on layer 2. """ for layer, heads in heads_to_prune.items(): union_heads = set(self.config.pruned_heads.get(layer, [])) | set(heads) self.config.pruned_heads[layer] = list(union_heads) self.base_model._prune_heads(heads_to_prune) def gradient_checkpointing_enable(self, gradient_checkpointing_kwargs=None): """ Activates gradient checkpointing for the current model. We pass the `__call__` method of the modules instead of `forward` because `__call__` attaches all the hooks of the module. https://discuss.pytorch.org/t/any-different-between-model-input-and-model-forward-input/3690/2 Args: gradient_checkpointing_kwargs (dict, *optional*): Additional keyword arguments passed along to the `torch.utils.checkpoint.checkpoint` function. """ if not self.supports_gradient_checkpointing: raise ValueError(f'{self.__class__.__name__} does not support gradient checkpointing.') if gradient_checkpointing_kwargs is None: gradient_checkpointing_kwargs = {'use_reentrant': True} gradient_checkpointing_func = functools.partial(checkpoint, **gradient_checkpointing_kwargs) _is_using_old_format = 'value' in inspect.signature(self._set_gradient_checkpointing).parameters if not _is_using_old_format: self._set_gradient_checkpointing(enable=True, gradient_checkpointing_func=gradient_checkpointing_func) else: self.apply(partial(self._set_gradient_checkpointing, value=True)) logger.warning('You are using an old version of the checkpointing format that is deprecated (We will also silently ignore `gradient_checkpointing_kwargs` in case you passed it).Please update to the new format on your modeling file. 
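# --- Editor-added usage sketch: gradient checkpointing ------------------------
# The non-deprecated path passes `gradient_checkpointing_kwargs`, which are
# forwarded to `torch.utils.checkpoint.checkpoint` as described above.
# `use_reentrant=False` is shown only as an example of a forwarded kwarg, not
# as a recommendation.
#
#   model.gradient_checkpointing_enable(
#       gradient_checkpointing_kwargs={"use_reentrant": False}
#   )
#   assert model.is_gradient_checkpointing
#   model.gradient_checkpointing_disable()
# ------------------------------------------------------------------------------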
To use the new format, you need to completely remove the definition of the method `_set_gradient_checkpointing` in your model.') if getattr(self, '_hf_peft_config_loaded', False): self.enable_input_require_grads() def _set_gradient_checkpointing(self, enable: bool=True, gradient_checkpointing_func: Callable=checkpoint): is_gradient_checkpointing_set = False if hasattr(self, 'gradient_checkpointing'): self._gradient_checkpointing_func = gradient_checkpointing_func self.gradient_checkpointing = enable is_gradient_checkpointing_set = True for module in self.modules(): if hasattr(module, 'gradient_checkpointing'): module._gradient_checkpointing_func = gradient_checkpointing_func module.gradient_checkpointing = enable is_gradient_checkpointing_set = True if not is_gradient_checkpointing_set: raise ValueError(f'{self.__class__.__name__} is not compatible with gradient checkpointing. Make sure all the architecture support it by setting a boolean attribute `gradient_checkpointing` to modules of the model that uses checkpointing.') def gradient_checkpointing_disable(self): """ Deactivates gradient checkpointing for the current model. """ if self.supports_gradient_checkpointing: _is_using_old_format = 'value' in inspect.signature(self._set_gradient_checkpointing).parameters if not _is_using_old_format: self._set_gradient_checkpointing(enable=False) else: logger.warning('You are using an old version of the checkpointing format that is deprecated (We will also silently ignore `gradient_checkpointing_kwargs` in case you passed it).Please update to the new format on your modeling file. To use the new format, you need to completely remove the definition of the method `_set_gradient_checkpointing` in your model.') self.apply(partial(self._set_gradient_checkpointing, value=False)) if getattr(self, '_hf_peft_config_loaded', False): self.disable_input_require_grads() @property def is_gradient_checkpointing(self) -> bool: """ Whether gradient checkpointing is activated for this model or not. """ return any((hasattr(m, 'gradient_checkpointing') and m.gradient_checkpointing for m in self.modules())) def save_pretrained(self, save_directory: Union[str, os.PathLike], is_main_process: bool=True, state_dict: Optional[dict]=None, save_function: Callable=torch.save, push_to_hub: bool=False, max_shard_size: Union[int, str]='5GB', safe_serialization: bool=True, variant: Optional[str]=None, token: Optional[Union[str, bool]]=None, save_peft_format: bool=True, **kwargs): """ Save a model and its configuration file to a directory, so that it can be re-loaded using the [`~PreTrainedModel.from_pretrained`] class method. Arguments: save_directory (`str` or `os.PathLike`): Directory to which to save. Will be created if it doesn't exist. is_main_process (`bool`, *optional*, defaults to `True`): Whether the process calling this is the main process or not. Useful when in distributed training like TPUs and need to call this function on all processes. In this case, set `is_main_process=True` only on the main process to avoid race conditions. state_dict (nested dictionary of `torch.Tensor`): The state dictionary of the model to save. Will default to `self.state_dict()`, but can be used to only save parts of the model or if special precautions need to be taken when recovering the state dictionary of a model (like when using model parallelism). save_function (`Callable`): The function to use to save the state dictionary. Useful on distributed training like TPUs when one need to replace `torch.save` by another method. 
push_to_hub (`bool`, *optional*, defaults to `False`): Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the repository you want to push to with `repo_id` (will default to the name of `save_directory` in your namespace). max_shard_size (`int` or `str`, *optional*, defaults to `"5GB"`): The maximum size for a checkpoint before being sharded. Checkpoints shard will then be each of size lower than this size. If expressed as a string, needs to be digits followed by a unit (like `"5MB"`). We default it to 5GB in order for models to be able to run easily on free-tier google colab instances without CPU OOM issues. <Tip warning={true}> If a single weight of the model is bigger than `max_shard_size`, it will be in its own checkpoint shard which will be bigger than `max_shard_size`. </Tip> safe_serialization (`bool`, *optional*, defaults to `True`): Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`). variant (`str`, *optional*): If specified, weights are saved in the format pytorch_model.<variant>.bin. token (`str` or `bool`, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, or not specified, will use the token generated when running `hf auth login` (stored in `~/.huggingface`). save_peft_format (`bool`, *optional*, defaults to `True`): For backward compatibility with PEFT library, in case adapter weights are attached to the model, all keys of the state dict of adapters needs to be prepended with `base_model.model`. Advanced users can disable this behaviours by setting `save_peft_format` to `False`. kwargs (`dict[str, Any]`, *optional*): Additional key word arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method. """ use_auth_token = kwargs.pop('use_auth_token', None) ignore_metadata_errors = kwargs.pop('ignore_metadata_errors', False) if use_auth_token is not None: warnings.warn('The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.', FutureWarning) if token is not None: raise ValueError('`token` and `use_auth_token` are both specified. Please set only the argument `token`.') token = use_auth_token if token is not None: kwargs['token'] = token _hf_peft_config_loaded = getattr(self, '_hf_peft_config_loaded', False) hf_quantizer = getattr(self, 'hf_quantizer', None) quantization_serializable = hf_quantizer is not None and isinstance(hf_quantizer, HfQuantizer) and hf_quantizer.is_serializable(safe_serialization=safe_serialization) if hf_quantizer is not None and (not _hf_peft_config_loaded) and (not quantization_serializable): raise ValueError(f'The model is quantized with {hf_quantizer.quantization_config.quant_method} and is not serializable - check out the warnings from the logger on the traceback to understand the reason why the quantized model is not serializable.') if 'save_config' in kwargs: warnings.warn('`save_config` is deprecated and will be removed in v5 of Transformers. 
Use `is_main_process` instead.') is_main_process = kwargs.pop('save_config') if safe_serialization and (not is_safetensors_available()): raise ImportError('`safe_serialization` requires the `safetensors library: `pip install safetensors`.') if self._tp_size is not None and (not is_huggingface_hub_greater_or_equal('0.31.4')): raise ImportError('Saving a model with tensor parallelism requires `huggingface_hub` version 0.31.4 or higher.') if os.path.isfile(save_directory): logger.error(f'Provided path ({save_directory}) should be a directory, not a file') return os.makedirs(save_directory, exist_ok=True) if push_to_hub: commit_message = kwargs.pop('commit_message', None) repo_id = kwargs.pop('repo_id', save_directory.split(os.path.sep)[-1]) create_pr = kwargs.pop('create_pr', False) repo_id = self._create_repo(repo_id, **kwargs) files_timestamps = self._get_files_timestamps(save_directory) metadata = {} if hf_quantizer is not None: state_dict, metadata = hf_quantizer.get_state_dict_and_metadata(self, safe_serialization) metadata['format'] = 'pt' model_to_save = unwrap_model(self) dtype = get_parameter_dtype(model_to_save) model_to_save.config.dtype = str(dtype).split('.')[1] model_to_save.config.architectures = [model_to_save.__class__.__name__.removeprefix('FSDP')] if self._auto_class is not None: custom_object_save(self, save_directory, config=self.config) if is_main_process: if not _hf_peft_config_loaded: misplaced_generation_parameters = model_to_save.config._get_non_default_generation_parameters() if self.can_generate() and len(misplaced_generation_parameters) > 0: warnings.warn(f"Moving the following attributes in the config to the generation config: {misplaced_generation_parameters}. You are seeing this warning because you've set generation parameters in the model config, as opposed to in the generation config.", UserWarning) for param_name, param_value in misplaced_generation_parameters.items(): setattr(model_to_save.generation_config, param_name, param_value) setattr(model_to_save.config, param_name, None) model_to_save.config.save_pretrained(save_directory) if self.can_generate(): model_to_save.generation_config.save_pretrained(save_directory) if _hf_peft_config_loaded: logger.info('Detected adapters on the model, saving the model in the PEFT format, only adapter weights will be saved.') state_dict = model_to_save.get_adapter_state_dict(state_dict=state_dict) if save_peft_format: logger.info('To match the expected format of the PEFT library, all keys of the state dict of adapters will be prepended with `base_model.model`.') peft_state_dict = {} for key, value in state_dict.items(): peft_state_dict[f'base_model.model.{key}'] = value state_dict = peft_state_dict active_adapter = self.active_adapters() if len(active_adapter) > 1: raise ValueError('Multiple active adapters detected, saving multiple active adapters is not supported yet. You can save adapters separately one by one by iteratively calling `model.set_adapter(adapter_name)` then `model.save_pretrained(...)`') active_adapter = active_adapter[0] current_peft_config = self.peft_config[active_adapter] current_peft_config.save_pretrained(save_directory) module_map = {} if state_dict is None: if hasattr(self, 'hf_device_map') and len(set(self.hf_device_map.values())) > 1 and ('cpu' in self.hf_device_map.values() or 'disk' in self.hf_device_map.values()): warnings.warn('Attempting to save a model with offloaded modules. 
Ensure that unallocated cpu memory exceeds the `shard_size` (5GB default)') for name, module in model_to_save.named_modules(): if name == '': continue module_state_dict = module.state_dict() for key in module_state_dict: module_map[name + f'.{key}'] = module state_dict = model_to_save.state_dict() if any((allowed_name in class_name.__name__.lower() for class_name in self.__class__.__mro__[:-1] for allowed_name in VLMS)): reverse_key_mapping = {v: k for k, v in self._checkpoint_conversion_mapping.items()} original_state_dict = {} for key, value in state_dict.items(): for pattern, replacement in reverse_key_mapping.items(): replacement = replacement.lstrip('^') replacement = re.sub('\\(.*\\)', '', replacement) key, n_replace = re.subn(pattern, replacement, key) if n_replace > 0: break original_state_dict[key] = value state_dict = original_state_dict if IS_SAGEMAKER_MP_POST_1_10: for smp_to_hf, _ in smp.state.module_manager.translate_functions: state_dict = smp_to_hf(state_dict) if self._keys_to_ignore_on_save is not None: for ignore_key in self._keys_to_ignore_on_save: if ignore_key in state_dict: del state_dict[ignore_key] state_dict = self._fix_state_dict_keys_on_save(state_dict) if self._tp_size is not None: state_dict = replace_state_dict_local_with_dtensor(state_dict, self._tp_plan, self._device_mesh) if safe_serialization: ptrs = collections.defaultdict(list) for name, tensor in state_dict.items(): if not isinstance(tensor, torch.Tensor): ptrs[id(tensor)].append(name) elif tensor.device.type == 'meta': tensor = self.get_parameter(name) ptrs[id(tensor)].append(name) else: ptrs[id_tensor_storage(tensor)].append(name) shared_ptrs = {ptr: names for ptr, names in ptrs.items() if len(names) > 1} _tied_weights_keys = _get_tied_weight_keys(self) error_names = [] to_delete_names = set() for names in shared_ptrs.values(): if _tied_weights_keys is not None: found = 0 for name in sorted(names): matches_pattern = any((re.search(pat, name) for pat in _tied_weights_keys)) if matches_pattern and name in state_dict: found += 1 if found < len(names): to_delete_names.add(name) shared_names, disjoint_names = _find_disjoint(shared_ptrs.values(), state_dict) for name in disjoint_names: state_dict[name] = state_dict[name].clone() shared_names, identical_names = _find_identical(shared_names, state_dict) for inames in identical_names: known = inames.intersection(to_delete_names) for name in known: del state_dict[name] unknown = inames.difference(to_delete_names) if len(unknown) > 1: error_names.append(unknown) if shared_names: error_names.extend(shared_names) if len(error_names) > 0: raise RuntimeError(f'The weights trying to be saved contained shared tensors {error_names} that are mismatching the transformers base configuration. 
Try saving using `safe_serialization=False`, setting the `_dynamic_tied_weights_keys` attribute for affected modules, or remove this tensor sharing.') if not _hf_peft_config_loaded: weights_name = SAFE_WEIGHTS_NAME if safe_serialization else WEIGHTS_NAME weights_name = _add_variant(weights_name, variant) else: weights_name = ADAPTER_SAFE_WEIGHTS_NAME if safe_serialization else ADAPTER_WEIGHTS_NAME filename_pattern = weights_name.replace('.bin', '{suffix}.bin').replace('.safetensors', '{suffix}.safetensors') state_dict_split = split_torch_state_dict_into_shards(state_dict, filename_pattern=filename_pattern, max_shard_size=max_shard_size) index = None if state_dict_split.is_sharded: index = {'metadata': {'total_parameters': self.num_parameters(), **state_dict_split.metadata}, 'weight_map': state_dict_split.tensor_to_filename} for filename in os.listdir(save_directory): full_filename = os.path.join(save_directory, filename) weights_no_suffix = weights_name.replace('.bin', '').replace('.safetensors', '') filename_no_suffix = filename.replace('.bin', '').replace('.safetensors', '') reg = re.compile('(.*?)-\\d{5}-of-\\d{5}') if filename.startswith(weights_no_suffix) and os.path.isfile(full_filename) and (filename not in state_dict_split.filename_to_tensors) and is_main_process and (reg.fullmatch(filename_no_suffix) is not None): os.remove(full_filename) filename_to_tensors = state_dict_split.filename_to_tensors.items() if module_map: filename_to_tensors = logging.tqdm(filename_to_tensors, desc='Saving checkpoint shards') for shard_file, tensors in filename_to_tensors: shard = {} for tensor in tensors: if _is_dtensor_available and isinstance(state_dict[tensor], DTensor): full_tensor = state_dict[tensor].full_tensor() if _get_parameter_tp_plan(tensor, self._tp_plan) in ('local_packed_rowwise',): full_tensor = repack_weights(full_tensor, -1, self._tp_size, 2) shard[tensor] = full_tensor.contiguous() else: shard[tensor] = state_dict[tensor].contiguous() del state_dict[tensor] if module_map: if accelerate_version < version.parse('0.31'): raise ImportError(f'You need accelerate version to be greater or equal than 0.31 to save models with offloaded parameters. Detected version {accelerate_version}. Please upgrade accelerate with `pip install -U accelerate`') shard_state_dict = dict.fromkeys(shard, '') for module_name in shard: tensor = shard_state_dict[module_name] if tensor == '' or (isinstance(tensor, torch.Tensor) and tensor.device.type == 'meta'): module = module_map[module_name] shard_state_dict = get_state_dict_from_offload(module, module_name, shard_state_dict) shard = shard_state_dict del shard_state_dict gc.collect() if safe_serialization: safe_save_file(shard, os.path.join(save_directory, shard_file), metadata=metadata) else: save_function(shard, os.path.join(save_directory, shard_file)) del state_dict if index is None: path_to_weights = os.path.join(save_directory, weights_name) logger.info(f'Model weights saved in {path_to_weights}') else: save_index_file = SAFE_WEIGHTS_INDEX_NAME if safe_serialization else WEIGHTS_INDEX_NAME save_index_file = os.path.join(save_directory, _add_variant(save_index_file, variant)) with open(save_index_file, 'w', encoding='utf-8') as f: content = json.dumps(index, indent=2, sort_keys=True) + '\n' f.write(content) logger.info(f'The model is bigger than the maximum size per checkpoint ({max_shard_size}) and is going to be split in {len(state_dict_split.filename_to_tensors)} checkpoint shards. 
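# --- Editor-added sketch: on-disk layout of a sharded save --------------------
# With the defaults above (`safe_serialization=True`, `max_shard_size="5GB"`),
# a model too large for a single shard is written roughly as follows. File
# names follow the `{weights_name}-{NNNNN}-of-{NNNNN}` pattern built above;
# exact names depend on `variant` and the serialization format.
#
#   model.save_pretrained("./my-model")
#   # ./my-model/config.json
#   # ./my-model/model-00001-of-00002.safetensors
#   # ./my-model/model-00002-of-00002.safetensors
#   # ./my-model/model.safetensors.index.json   # "weight_map" + "metadata"
# ------------------------------------------------------------------------------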
You can find where each parameters has been saved in the index located at {save_index_file}.') if push_to_hub: model_card = create_and_tag_model_card(repo_id, self.model_tags, token=token, ignore_metadata_errors=ignore_metadata_errors) model_card.save(os.path.join(save_directory, 'README.md')) self._upload_modified_files(save_directory, repo_id, files_timestamps, commit_message=commit_message, token=token, create_pr=create_pr) @wraps(PushToHubMixin.push_to_hub) def push_to_hub(self, *args, **kwargs): tags = self.model_tags if self.model_tags is not None else [] tags_kwargs = kwargs.get('tags', []) if isinstance(tags_kwargs, str): tags_kwargs = [tags_kwargs] for tag in tags_kwargs: if tag not in tags: tags.append(tag) if tags: kwargs['tags'] = tags return super().push_to_hub(*args, **kwargs) def get_memory_footprint(self, return_buffers=True): """ Get the memory footprint of a model. This will return the memory footprint of the current model in bytes. Useful to benchmark the memory footprint of the current model and design some tests. Solution inspired from the PyTorch discussions: https://discuss.pytorch.org/t/gpu-memory-that-model-uses/56822/2 Arguments: return_buffers (`bool`, *optional*, defaults to `True`): Whether to return the size of the buffer tensors in the computation of the memory footprint. Buffers are tensors that do not require gradients and not registered as parameters. E.g. mean and std in batch norm layers. Please see: https://discuss.pytorch.org/t/what-pytorch-means-by-buffers/120266/2 """ mem = sum([param.nelement() * param.element_size() for param in self.parameters()]) if return_buffers: mem_bufs = sum([buf.nelement() * buf.element_size() for buf in self.buffers()]) mem = mem + mem_bufs return mem @wraps(torch.nn.Module.cuda) def cuda(self, *args, **kwargs): if getattr(self, 'quantization_method', None) == QuantizationMethod.HQQ: from hqq.core.quantize import HQQLinear super().cuda(*args, **kwargs) for module in self.modules(): if isinstance(module, HQQLinear): if len(args) > 0: device = args[0] else: device = kwargs.get('device', 'cuda') module.cuda(device) return self if getattr(self, 'quantization_method', None) == QuantizationMethod.BITS_AND_BYTES: if getattr(self, 'is_loaded_in_8bit', False): raise ValueError('Calling `cuda()` is not supported for `8-bit` quantized models. Please use the model as it is, since the model has already been set to the correct devices.') elif version.parse(importlib.metadata.version('bitsandbytes')) < version.parse('0.43.2'): raise ValueError(f'Calling `cuda()` is not supported for `4-bit` quantized models with the installed version of bitsandbytes. The current device is `{self.device}`. 
If you intended to move the model, please install bitsandbytes >= 0.43.2.') return super().cuda(*args, **kwargs) @wraps(torch.nn.Module.to) def to(self, *args, **kwargs): dtype_present_in_args = 'dtype' in kwargs if not dtype_present_in_args: for arg in args: if isinstance(arg, torch.dtype): dtype_present_in_args = True break if getattr(self, 'quantization_method', None) == QuantizationMethod.HQQ: from hqq.core.quantize import HQQLinear super().to(*args, **kwargs) for module in self.modules(): if isinstance(module, HQQLinear): if 'device' in kwargs: device = kwargs['device'] else: device = args[0] if 'dtype' in kwargs: dtype = kwargs['dtype'] elif dtype_present_in_args: dtype = arg else: dtype = None if dtype is not None: module.compute_dtype = dtype module.cuda(device) return self if dtype_present_in_args and getattr(self, 'quantization_method', None) == QuantizationMethod.QUARK: raise ValueError('Casting a Quark quantized model to a new `dtype` is not supported.') if getattr(self, 'quantization_method', None) == QuantizationMethod.BITS_AND_BYTES: if dtype_present_in_args: raise ValueError('You cannot cast a bitsandbytes model in a new `dtype`. Make sure to load the model using `from_pretrained` using the desired `dtype` by passing the correct `dtype` argument.') if getattr(self, 'is_loaded_in_8bit', False): raise ValueError('`.to` is not supported for `8-bit` bitsandbytes models. Please use the model as it is, since the model has already been set to the correct devices and casted to the correct `dtype`.') elif version.parse(importlib.metadata.version('bitsandbytes')) < version.parse('0.43.2'): raise ValueError(f'Calling `to()` is not supported for `4-bit` quantized models with the installed version of bitsandbytes. The current device is `{self.device}`. If you intended to move the model, please install bitsandbytes >= 0.43.2.') elif getattr(self, 'quantization_method', None) == QuantizationMethod.GPTQ: if dtype_present_in_args: raise ValueError('You cannot cast a GPTQ model in a new `dtype`. Make sure to load the model using `from_pretrained` using the desired `dtype` by passing the correct `dtype` argument.') return super().to(*args, **kwargs) def half(self, *args): if getattr(self, 'is_quantized', False): raise ValueError('`.half()` is not supported for quantized model. Please use the model as it is, since the model has already been casted to the correct `dtype`.') else: return super().half(*args) def float(self, *args): if getattr(self, 'is_quantized', False): raise ValueError('`.float()` is not supported for quantized model. 
Please use the model as it is, since the model has already been casted to the correct `dtype`.') else: return super().float(*args) @classmethod def get_init_context(cls, is_quantized: bool, _is_ds_init_called: bool): if is_deepspeed_zero3_enabled(): import deepspeed init_contexts = [no_init_weights()] if not is_quantized and (not _is_ds_init_called): logger.info('Detected DeepSpeed ZeRO-3: activating zero.init() for this model') init_contexts.extend([deepspeed.zero.Init(config_dict_or_path=deepspeed_config()), set_zero3_state()]) elif is_quantized: init_contexts.extend([init_empty_weights(), set_quantized_state()]) else: init_contexts = [no_init_weights(), init_empty_weights()] return init_contexts @classmethod @restore_default_dtype def from_pretrained(cls: type[SpecificPreTrainedModelType], pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], *model_args, config: Optional[Union[PretrainedConfig, str, os.PathLike]]=None, cache_dir: Optional[Union[str, os.PathLike]]=None, ignore_mismatched_sizes: bool=False, force_download: bool=False, local_files_only: bool=False, token: Optional[Union[str, bool]]=None, revision: str='main', use_safetensors: Optional[bool]=None, weights_only: bool=True, **kwargs) -> SpecificPreTrainedModelType: """ Instantiate a pretrained pytorch model from a pre-trained model configuration. The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated). To train the model, you should first set it back in training mode with `model.train()`. The warning *Weights from XXX not initialized from pretrained model* means that the weights of XXX do not come pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning task. The warning *Weights from XXX not used in YYY* means that the layer XXX is not used by YYY, therefore those weights are discarded. Parameters: pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*): Can be either: - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co. - A path to a *directory* containing model weights saved using [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`. - `None` if you are both providing the configuration and state dictionary (resp. with keyword arguments `config` and `state_dict`). model_args (sequence of positional arguments, *optional*): All remaining positional arguments will be passed to the underlying model's `__init__` method. config (`Union[PretrainedConfig, str, os.PathLike]`, *optional*): Can be either: - an instance of a class derived from [`PretrainedConfig`], - a string or path valid as input to [`~PretrainedConfig.from_pretrained`]. Configuration for the model to use instead of an automatically loaded configuration. Configuration can be automatically loaded when: - The model is a model provided by the library (loaded with the *model id* string of a pretrained model). - The model was saved using [`~PreTrainedModel.save_pretrained`] and is reloaded by supplying the save directory. - The model is loaded by supplying a local directory as `pretrained_model_name_or_path` and a configuration JSON file named *config.json* is found in the directory. state_dict (`dict[str, torch.Tensor]`, *optional*): A state dictionary to use instead of a state dictionary loaded from saved weights file. This option can be used if you want to create a model from a pretrained configuration but load your own weights. 
In this case though, you should check if using [`~PreTrainedModel.save_pretrained`] and [`~PreTrainedModel.from_pretrained`] is not a simpler option. cache_dir (`Union[str, os.PathLike]`, *optional*): Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used. ignore_mismatched_sizes (`bool`, *optional*, defaults to `False`): Whether or not to raise an error if some of the weights from the checkpoint do not have the same size as the weights of the model (if for instance, you are instantiating a model with 10 labels from a checkpoint with 3 labels). force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. resume_download: Deprecated and ignored. All downloads are now resumed by default when possible. Will be removed in v5 of Transformers. proxies (`dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. output_loading_info(`bool`, *optional*, defaults to `False`): Whether ot not to also return a dictionary containing missing keys, unexpected keys and error messages. local_files_only(`bool`, *optional*, defaults to `False`): Whether or not to only look at local files (i.e., do not try to download the model). token (`str` or `bool`, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, or not specified, will use the token generated when running `hf auth login` (stored in `~/.huggingface`). revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier allowed by git. <Tip> To test a pull request you made on the Hub, you can pass `revision="refs/pr/<pr_number>"`. </Tip> attn_implementation (`str`, *optional*): The attention implementation to use in the model (if relevant). Can be any of `"eager"` (manual implementation of the attention), `"sdpa"` (using [`F.scaled_dot_product_attention`](https://pytorch.org/docs/master/generated/torch.nn.functional.scaled_dot_product_attention.html)), `"flash_attention_2"` (using [Dao-AILab/flash-attention](https://github.com/Dao-AILab/flash-attention)), or `"flash_attention_3"` (using [Dao-AILab/flash-attention/hopper](https://github.com/Dao-AILab/flash-attention/tree/main/hopper)). By default, if available, SDPA will be used for torch>=2.1.1. The default is otherwise the manual `"eager"` implementation. Accept HF kernel references in the form: <namespace>/<repo_name>[@<revision>][:<kernel_name>] - <namespace> and <repo_name> are any non-"/" and non-":" sequences. - "@<revision>" is optional (branch, tag, or commit-ish), e.g. "@main", "@v1.2.0", "@abc123". - ":<kernel_name>" is optional and selects a function inside the kernel repo. - Both options can appear together and in this order only: @revision first, then :kernel_name. - We intentionally allow a leading "<wrapper>|" prefix (e.g., "flash|...") because the code strips it before loading; '|' is not excluded in the character classes here. 
Examples that match: "org/model" "org/model@main" "org/model:custom_kernel" "org/model@v1.2.3:custom_kernel" > Parameters for big model inference dtype (`str` or `torch.dtype`, *optional*): Override the default `torch_dtype` and load the model under a specific `dtype`. The different options are: 1. `torch.float16` or `torch.bfloat16` or `torch.float`: load in a specified `dtype`, ignoring the model's `config.dtype` if one exists. If not specified - the model will get loaded in `torch.float` (fp32). 2. `"auto"` - A `dtype` or `torch_dtype` entry in the `config.json` file of the model will be attempted to be used. If this entry isn't found then next check the `dtype` of the first weight in the checkpoint that's of a floating point type and use that as `dtype`. This will load the model using the `dtype` it was saved in at the end of the training. It can't be used as an indicator of how the model was trained. Since it could be trained in one of half precision dtypes, but saved in fp32. 3. A string that is a valid `torch.dtype`. E.g. "float32" loads the model in `torch.float32`, "float16" loads in `torch.float16` etc. <Tip> For some models the `dtype` they were trained in is unknown - you may try to check the model's paper or reach out to the authors and ask them to add this information to the model's card and to insert the `dtype` or `torch_dtype` entry in `config.json` on the hub. </Tip> device_map (`str` or `dict[str, Union[int, str, torch.device]]` or `int` or `torch.device`, *optional*): A map that specifies where each submodule should go. It doesn't need to be refined to each parameter/buffer name, once a given module name is inside, every submodule of it will be sent to the same device. If we only pass the device (*e.g.*, `"cpu"`, `"cuda:1"`, `"mps"`, or a GPU ordinal rank like `1`) on which the model will be allocated, the device map will map the entire model to this device. Passing `device_map = 0` means put the whole model on GPU 0. To have Accelerate compute the most optimized `device_map` automatically, set `device_map="auto"`. For more information about each option see [designing a device map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map). max_memory (`Dict`, *optional*): A dictionary device identifier to maximum memory if using `device_map`. Will default to the maximum memory available for each GPU and the available CPU RAM if unset. tp_plan (`str`, *optional*): A torch tensor parallel plan, see [here](https://pytorch.org/tutorials/intermediate/TP_tutorial.html). Currently, it only accepts `tp_plan="auto"` to use predefined plan based on the model. Note that if you use it, you should launch your script accordingly with `torchrun [args] script.py`. This will be much faster than using a `device_map`, but has limitations. tp_size (`str`, *optional*): A torch tensor parallel degree. If not provided would default to world size. device_mesh (`torch.distributed.DeviceMesh`, *optional*): A torch device mesh. If not provided would default to world size. Used only for tensor parallel for now. If provided, it has to contain dimension named `"tp"` in case it's > 1 dimensional, this dimension will be used for tensor parallelism offload_folder (`str` or `os.PathLike`, *optional*): If the `device_map` contains any value `"disk"`, the folder where we will offload weights. 
offload_state_dict (`bool`, *optional*): If `True`, will temporarily offload the CPU state dict to the hard drive to avoid getting out of CPU RAM if the weight of the CPU state dict + the biggest shard of the checkpoint does not fit. Defaults to `True` when there is some disk offload. offload_buffers (`bool`, *optional*): Whether or not to offload the buffers with the model parameters. quantization_config (`Union[QuantizationConfigMixin,Dict]`, *optional*): A dictionary of configuration parameters or a QuantizationConfigMixin object for quantization (e.g bitsandbytes, gptq). There may be other quantization-related kwargs, including `load_in_4bit` and `load_in_8bit`, which are parsed by QuantizationConfigParser. Supported only for bitsandbytes quantizations and not preferred. consider inserting all such arguments into quantization_config instead. subfolder (`str`, *optional*, defaults to `""`): In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can specify the folder name here. variant (`str`, *optional*): If specified load weights from `variant` filename, *e.g.* pytorch_model.<variant>.bin. use_safetensors (`bool`, *optional*, defaults to `None`): Whether or not to use `safetensors` checkpoints. Defaults to `None`. If not specified and `safetensors` is not installed, it will be set to `False`. weights_only (`bool`, *optional*, defaults to `True`): Indicates whether unpickler should be restricted to loading only tensors, primitive types, dictionaries and any types added via torch.serialization.add_safe_globals(). When set to False, we can load wrapper tensor subclass weights. key_mapping (`dict[str, str], *optional*): A potential mapping of the weight names if using a model on the Hub which is compatible to a Transformers architecture, but was not converted accordingly. kwargs (remaining dictionary of keyword arguments, *optional*): Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., `output_attentions=True`). Behaves differently depending on whether a `config` is provided or automatically loaded: - If a configuration is provided with `config`, `**kwargs` will be directly passed to the underlying model's `__init__` method (we assume all relevant updates to the configuration have already been done) - If a configuration is not provided, `kwargs` will be first passed to the configuration class initialization function ([`~PretrainedConfig.from_pretrained`]). Each key of `kwargs` that corresponds to a configuration attribute will be used to override said attribute with the supplied `kwargs` value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model's `__init__` function. <Tip> Activate the special ["offline-mode"](https://huggingface.co/transformers/installation.html#offline-mode) to use this method in a firewalled environment. </Tip> Examples: ```python >>> from transformers import BertConfig, BertModel >>> # Download model and configuration from huggingface.co and cache. >>> model = BertModel.from_pretrained("google-bert/bert-base-uncased") >>> # Model was saved using *save_pretrained('./test/saved_model/')* (for example purposes, not runnable). >>> model = BertModel.from_pretrained("./test/saved_model/") >>> # Update configuration during loading. 
>>> model = BertModel.from_pretrained("google-bert/bert-base-uncased", output_attentions=True) >>> assert model.config.output_attentions == True ``` """ state_dict = kwargs.pop('state_dict', None) proxies = kwargs.pop('proxies', None) output_loading_info = kwargs.pop('output_loading_info', False) use_auth_token = kwargs.pop('use_auth_token', None) from_pipeline = kwargs.pop('_from_pipeline', None) from_auto_class = kwargs.pop('_from_auto', False) dtype = kwargs.pop('dtype', None) torch_dtype = kwargs.pop('torch_dtype', None) device_map = kwargs.pop('device_map', None) max_memory = kwargs.pop('max_memory', None) offload_folder = kwargs.pop('offload_folder', None) offload_state_dict = kwargs.pop('offload_state_dict', False) offload_buffers = kwargs.pop('offload_buffers', False) load_in_8bit = kwargs.pop('load_in_8bit', False) load_in_4bit = kwargs.pop('load_in_4bit', False) quantization_config = kwargs.pop('quantization_config', None) subfolder = kwargs.pop('subfolder', '') commit_hash = kwargs.pop('_commit_hash', None) variant = kwargs.pop('variant', None) adapter_kwargs = kwargs.pop('adapter_kwargs', {}) adapter_name = kwargs.pop('adapter_name', 'default') generation_config = kwargs.pop('generation_config', None) gguf_file = kwargs.pop('gguf_file', None) tp_plan = kwargs.pop('tp_plan', None) tp_size = kwargs.pop('tp_size', None) distributed_config: DistributedConfig = kwargs.pop('distributed_config', None) device_mesh = kwargs.pop('device_mesh', None) trust_remote_code = kwargs.pop('trust_remote_code', None) use_kernels = kwargs.pop('use_kernels', False) key_mapping = kwargs.pop('key_mapping', None) if key_mapping is None and any((allowed_name in class_name.__name__.lower() for class_name in cls.__mro__[:-1] for allowed_name in VLMS)): key_mapping = cls._checkpoint_conversion_mapping if distributed_config is not None: tp_plan = 'auto' _ = kwargs.pop('resume_download', None) _ = kwargs.pop('mirror', None) _ = kwargs.pop('_fast_init', None) _ = kwargs.pop('low_cpu_mem_usage', None) _ = kwargs.pop('from_tf', None) _ = kwargs.pop('from_flax', None) if torch_dtype is not None: logger.warning_once('`torch_dtype` is deprecated! Use `dtype` instead!') dtype = dtype if dtype is not None else torch_dtype if state_dict is not None and (pretrained_model_name_or_path is not None or gguf_file is not None): raise ValueError('`state_dict` cannot be passed together with a model name or a `gguf_file`. Use one of the two loading strategies.') if tp_size is not None and tp_plan is None: raise ValueError('tp_plan has to be set when tp_size is passed.') if tp_plan is not None and tp_plan != 'auto': raise ValueError(f"tp_plan supports 'auto' only for now but got {tp_plan}.") if tp_plan is not None and device_map is not None: raise ValueError('`tp_plan` and `device_map` are mutually exclusive. Choose either one for parallelization.') if device_map == 'auto' and int(os.environ.get('WORLD_SIZE', '0')): logger.info("You've set device_map=`auto` while triggering a distributed run with torchrun. This might lead to unexpected behavior. If your plan is to load the model on each device, you should set device_map={: PartialState().process_index} where PartialState comes from accelerate library") if tp_plan is not None: if device_mesh is None: tp_plan, device_map, device_mesh, tp_size = initialize_tensor_parallelism(tp_plan, tp_size=tp_size) else: if device_mesh.ndim > 1: if 'tp' not in device_mesh.mesh_dim_names: raise ValueError("When using `tp_plan` and n-d `device_mesh`, it must contain a 'tp' dimension. 
Please provide a valid `device_mesh`.") device_mesh = device_mesh['tp'] tp_size = device_mesh.size() device_map = torch.device(f"{device_mesh.device_type}:{int(os.environ['LOCAL_RANK'])}") if tp_size is None: tp_size = torch.distributed.get_world_size() if use_auth_token is not None: warnings.warn('The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.', FutureWarning) if token is not None: raise ValueError('`token` and `use_auth_token` are both specified. Please set only the argument `token`.') token = use_auth_token if token is not None and adapter_kwargs is not None and ('token' not in adapter_kwargs): adapter_kwargs['token'] = token if use_safetensors is None and (not is_safetensors_available()): use_safetensors = False if gguf_file is not None and (not is_accelerate_available()): raise ValueError('accelerate is required when loading a GGUF file `pip install accelerate`.') if commit_hash is None: if not isinstance(config, PretrainedConfig): resolved_config_file = cached_file(pretrained_model_name_or_path, CONFIG_NAME, cache_dir=cache_dir, force_download=force_download, proxies=proxies, local_files_only=local_files_only, token=token, revision=revision, subfolder=subfolder, _raise_exceptions_for_gated_repo=False, _raise_exceptions_for_missing_entries=False, _raise_exceptions_for_connection_errors=False) commit_hash = extract_commit_hash(resolved_config_file, commit_hash) else: commit_hash = getattr(config, '_commit_hash', None) if is_peft_available(): _adapter_model_path = adapter_kwargs.pop('_adapter_model_path', None) if _adapter_model_path is None: _adapter_model_path = find_adapter_config_file(pretrained_model_name_or_path, cache_dir=cache_dir, force_download=force_download, proxies=proxies, local_files_only=local_files_only, _commit_hash=commit_hash, **adapter_kwargs) if _adapter_model_path is not None and os.path.isfile(_adapter_model_path): with open(_adapter_model_path, 'r', encoding='utf-8') as f: _adapter_model_path = pretrained_model_name_or_path pretrained_model_name_or_path = json.load(f)['base_model_name_or_path'] else: _adapter_model_path = None if device_map is None and (not is_deepspeed_zero3_enabled()): device_in_context = get_torch_context_manager_or_global_device() if device_in_context == torch.device('meta'): raise RuntimeError("You are using `from_pretrained` with a meta device context manager or `torch.set_default_device('meta')`.\nThis is an anti-pattern as `from_pretrained` wants to load existing weights.\nIf you want to initialize an empty model on the meta device, use the context manager or global device with `from_config`, or `ModelClass(config)`") device_map = device_in_context if isinstance(device_map, torch.device): device_map = {'': device_map} elif isinstance(device_map, str) and device_map not in ['auto', 'balanced', 'balanced_low_0', 'sequential']: try: device_map = {'': torch.device(device_map)} except RuntimeError: raise ValueError(f"When passing device_map as a string, the value needs to be a device name (e.g. cpu, cuda:0) or 'auto', 'balanced', 'balanced_low_0', 'sequential' but found {device_map}.") elif isinstance(device_map, int): if device_map < 0: raise ValueError("You can't pass device_map as a negative int. 
If you want to put the model on the cpu, pass device_map = 'cpu' ") else: device_map = {'': device_map} if device_map is not None: if is_deepspeed_zero3_enabled(): raise ValueError('DeepSpeed Zero-3 is not compatible with passing a `device_map`.') if not is_accelerate_available(): raise ValueError('Using a `device_map`, `tp_plan`, `torch.device` context manager or setting `torch.set_default_device(device)` requires `accelerate`. You can install it with `pip install accelerate`') if load_in_4bit or load_in_8bit: if quantization_config is not None: raise ValueError("You can't pass `load_in_4bit`or `load_in_8bit` as a kwarg when passing `quantization_config` argument at the same time.") config_dict = {k: v for k, v in kwargs.items() if k in inspect.signature(BitsAndBytesConfig).parameters} config_dict = {**config_dict, 'load_in_4bit': load_in_4bit, 'load_in_8bit': load_in_8bit} quantization_config, kwargs = BitsAndBytesConfig.from_dict(config_dict=config_dict, return_unused_kwargs=True, **kwargs) logger.warning('The `load_in_4bit` and `load_in_8bit` arguments are deprecated and will be removed in the future versions. Please, pass a `BitsAndBytesConfig` object in `quantization_config` argument instead.') user_agent = {'file_type': 'model', 'framework': 'pytorch', 'from_auto_class': from_auto_class} if from_pipeline is not None: user_agent['using_pipeline'] = from_pipeline if is_offline_mode() and (not local_files_only): logger.info('Offline mode: forcing local_files_only=True') local_files_only = True if not isinstance(config, PretrainedConfig): config_path = config if config is not None else pretrained_model_name_or_path config, model_kwargs = cls.config_class.from_pretrained(config_path, cache_dir=cache_dir, return_unused_kwargs=True, force_download=force_download, proxies=proxies, local_files_only=local_files_only, token=token, revision=revision, subfolder=subfolder, gguf_file=gguf_file, _from_auto=from_auto_class, _from_pipeline=from_pipeline, **kwargs) if 'gguf_file' in model_kwargs: model_kwargs.pop('gguf_file') else: config = copy.deepcopy(config) model_kwargs = kwargs if 'attn_implementation' in kwargs: config._attn_implementation = kwargs.pop('attn_implementation') transformers_explicit_filename = getattr(config, 'transformers_weights', None) if transformers_explicit_filename is not None: if not transformers_explicit_filename.endswith('.safetensors') and (not transformers_explicit_filename.endswith('.safetensors.index.json')): raise ValueError(f'The transformers file in the config seems to be incorrect: it is neither a safetensors file (*.safetensors) nor a safetensors index file (*.safetensors.index.json): {transformers_explicit_filename}') hf_quantizer, config, dtype, device_map = get_hf_quantizer(config, quantization_config, dtype, device_map, weights_only, user_agent) if gguf_file is not None and hf_quantizer is not None: raise ValueError('You cannot combine Quantization and loading a model from a GGUF file, try again by making sure you did not passed a `quantization_config` or that you did not load a quantized model from the Hub.') if gguf_file and device_map is not None and (isinstance(device_map, dict) and 'disk' in device_map.values() or 'disk' in device_map): raise RuntimeError('One or more modules is configured to be mapped to disk. 
Disk offload is not supported for models loaded from GGUF files.') checkpoint_files, sharded_metadata = _get_resolved_checkpoint_files(pretrained_model_name_or_path=pretrained_model_name_or_path, subfolder=subfolder, variant=variant, gguf_file=gguf_file, use_safetensors=use_safetensors, cache_dir=cache_dir, force_download=force_download, proxies=proxies, local_files_only=local_files_only, token=token, user_agent=user_agent, revision=revision, commit_hash=commit_hash, is_remote_code=cls._auto_class is not None, transformers_explicit_filename=transformers_explicit_filename) is_quantized = hf_quantizer is not None is_from_file = pretrained_model_name_or_path is not None or gguf_file is not None if is_safetensors_available() and is_from_file and checkpoint_files[0].endswith('.safetensors'): with safe_open(checkpoint_files[0], framework='pt') as f: metadata = f.metadata() if metadata is not None and metadata.get('format') in ['tf', 'flax']: logger.warning("The safetensors checkpoint found has format `tf` or `flax`. This mean that the keys will verylikely not match to the model you are trying to load, and will be newly initialized. If it's the case another warning will be raised later. Consider converting your checkpoint to the correct format.") if gguf_file: from .modeling_gguf_pytorch_utils import load_gguf_checkpoint with torch.device('meta'): dummy_model = cls(config) state_dict = load_gguf_checkpoint(checkpoint_files[0], return_tensors=True, model_to_load=dummy_model)['tensors'] config, dtype, dtype_orig = _get_dtype(cls, dtype, checkpoint_files, config, sharded_metadata, state_dict, weights_only) config.name_or_path = pretrained_model_name_or_path model_init_context = cls.get_init_context(is_quantized, _is_ds_init_called) config = copy.deepcopy(config) with ContextManagers(model_init_context): model = cls(config, *model_args, **model_kwargs) model.tie_weights() config = model.config keep_in_fp32_modules = [] if model._keep_in_fp32_modules is not None and (dtype == torch.float16 or getattr(hf_quantizer, 'use_keep_in_fp32_modules', False)): keep_in_fp32_modules.extend(model._keep_in_fp32_modules) if model._keep_in_fp32_modules_strict is not None and (dtype == torch.float16 or dtype == torch.bfloat16): keep_in_fp32_modules.extend(model._keep_in_fp32_modules_strict) keep_in_fp32_regex = None if keep_in_fp32_modules: keep_in_fp32_regex = re.compile('|'.join([f'((^|\\.){module}($|\\.))' for module in keep_in_fp32_modules])) if hf_quantizer is not None: hf_quantizer.preprocess_model(model=model, device_map=device_map, keep_in_fp32_modules=model._keep_in_fp32_modules, config=config, use_kernels=use_kernels) original_dtype = dtype if dtype is not None else torch.get_default_dtype() def _assign_original_dtype(module): for child in module.children(): if isinstance(child, PreTrainedModel): child.config._pre_quantization_dtype = original_dtype _assign_original_dtype(child) config._pre_quantization_dtype = original_dtype _assign_original_dtype(model) if _torch_distributed_available and device_mesh is not None: model = distribute_model(model, distributed_config, device_mesh, tp_size) if device_map is not None: device_map = _get_device_map(model, device_map, max_memory, hf_quantizer, dtype, keep_in_fp32_regex) if dtype_orig is not None: torch.set_default_dtype(dtype_orig) model, missing_keys, unexpected_keys, mismatched_keys, offload_index, error_msgs = cls._load_pretrained_model(model, state_dict, checkpoint_files, pretrained_model_name_or_path, ignore_mismatched_sizes=ignore_mismatched_sizes, 
sharded_metadata=sharded_metadata, device_map=device_map, disk_offload_folder=offload_folder, offload_state_dict=offload_state_dict, dtype=dtype, hf_quantizer=hf_quantizer, keep_in_fp32_regex=keep_in_fp32_regex, device_mesh=device_mesh, key_mapping=key_mapping, weights_only=weights_only) model.tie_weights() model.eval() if use_kernels: model.use_kernels = True if model.can_generate() and generation_config is not None: logger.info('The user-defined `generation_config` will be used to override the default generation config.') model.generation_config = model.generation_config.from_dict(generation_config.to_dict()) elif model.can_generate() and pretrained_model_name_or_path is not None: repo_loading_kwargs = {'cache_dir': cache_dir, 'force_download': force_download, 'proxies': proxies, 'local_files_only': local_files_only, 'token': token, 'revision': revision, 'subfolder': subfolder, **kwargs} try: model.generation_config = GenerationConfig.from_pretrained(pretrained_model_name_or_path, _from_auto=from_auto_class, _from_pipeline=from_pipeline, **repo_loading_kwargs) except OSError: logger.info('Generation config file not found, using a generation config created from the model config.') pass if hasattr(model, 'load_custom_generate'): try: custom_generate = model.load_custom_generate(pretrained_model_name_or_path, trust_remote_code=trust_remote_code, **repo_loading_kwargs) model.generate = functools.partial(custom_generate, model=model) except OSError: pass if device_map is not None and device_mesh is None: device_map_kwargs = {'device_map': device_map, 'offload_dir': offload_folder, 'offload_index': offload_index, 'offload_buffers': offload_buffers} if 'skip_keys' in inspect.signature(dispatch_model).parameters: device_map_kwargs['skip_keys'] = model._skip_keys_device_placement if 'force_hooks' in inspect.signature(dispatch_model).parameters and hf_quantizer is not None and (hf_quantizer.quantization_config.quant_method == QuantizationMethod.HQQ): device_map_kwargs['force_hooks'] = True if hf_quantizer is not None and hf_quantizer.quantization_config.quant_method == QuantizationMethod.FBGEMM_FP8 and isinstance(device_map, dict) and ('cpu' in device_map.values() or 'disk' in device_map.values()): device_map_kwargs['offload_buffers'] = True if not is_fsdp_enabled() and (not is_deepspeed_zero3_enabled()): dispatch_model(model, **device_map_kwargs) if hf_quantizer is not None: model.hf_quantizer = hf_quantizer hf_quantizer.postprocess_model(model, config=config) if _adapter_model_path is not None: adapter_kwargs['key_mapping'] = key_mapping model.load_adapter(_adapter_model_path, adapter_name=adapter_name, token=token, adapter_kwargs=adapter_kwargs) if output_loading_info: loading_info = {'missing_keys': missing_keys, 'unexpected_keys': unexpected_keys, 'mismatched_keys': mismatched_keys, 'error_msgs': error_msgs} return (model, loading_info) return model @staticmethod def _fix_state_dict_key_on_load(key: str) -> tuple[str, bool]: """Replace legacy parameter names with their modern equivalents. E.g. 
beta -> bias, gamma -> weight.""" if key.endswith('LayerNorm.beta'): return (key.replace('LayerNorm.beta', 'LayerNorm.bias'), True) if key.endswith('LayerNorm.gamma'): return (key.replace('LayerNorm.gamma', 'LayerNorm.weight'), True) if hasattr(nn.utils.parametrizations, 'weight_norm'): if key.endswith('weight_g'): return (key.replace('weight_g', 'parametrizations.weight.original0'), True) if key.endswith('weight_v'): return (key.replace('weight_v', 'parametrizations.weight.original1'), True) else: if key.endswith('parametrizations.weight.original0'): return (key.replace('parametrizations.weight.original0', 'weight_g'), True) if key.endswith('parametrizations.weight.original1'): return (key.replace('parametrizations.weight.original1', 'weight_v'), True) return (key, False) def _get_key_renaming_mapping(self, checkpoint_keys: list[str], key_mapping: Optional[dict[str, str]]=None, loading_base_model_from_task_state_dict: bool=False, loading_task_model_from_base_state_dict: bool=False): """ Compute a mapping between the serialized keys on disk `checkpoint_keys`, and the keys that the model that we are loading expects. This is the single entry point for key renaming that will be used during loading. Log if any parameters have been renamed. """ prefix = self.base_model_prefix _prefix = f'{prefix}.' renamed_keys = {} key_renaming_mapping = {} for key in checkpoint_keys: new_key, has_changed = self._fix_state_dict_key_on_load(key) if key_mapping is not None: for pattern, replacement in key_mapping.items(): new_key, n_replace = re.subn(pattern, replacement, new_key) if n_replace > 0: has_changed = True break if loading_task_model_from_base_state_dict: new_key = '.'.join([prefix, new_key]) elif loading_base_model_from_task_state_dict: if not new_key.startswith(_prefix): continue new_key = new_key[len(_prefix):] key_renaming_mapping[key] = new_key if has_changed: if key.endswith('LayerNorm.gamma'): renamed_keys['LayerNorm.gamma'] = (key, new_key) elif key.endswith('LayerNorm.beta'): renamed_keys['LayerNorm.beta'] = (key, new_key) if renamed_keys: warning_msg = f'A pretrained model of type `{self.__class__.__name__}` ' warning_msg += 'contains parameters that have been renamed internally (a few are listed below but more are present in the model):\n' for old_key, new_key in renamed_keys.values(): warning_msg += f'* `{old_key}` -> `{new_key}`\n' warning_msg += 'If you are using a model from the Hub, consider submitting a PR to adjust these weights and help future users.' logger.info_once(warning_msg) return key_renaming_mapping @staticmethod def _fix_state_dict_key_on_save(key) -> tuple[str, bool]: """ Similar to `_fix_state_dict_key_on_load` allows to define hook for state dict key renaming on model save. Do nothing by default, but can be overridden in particular models. """ return (key, False) def _fix_state_dict_keys_on_save(self, state_dict): """ Similar to `_fix_state_dict_keys_on_load` allows to define hook for state dict key renaming on model save. Apply `_fix_state_dict_key_on_save` to all keys in `state_dict`. 
""" return {self._fix_state_dict_key_on_save(key)[0]: value for key, value in state_dict.items()} @classmethod def _load_pretrained_model(cls, model: 'PreTrainedModel', state_dict: Optional[dict], checkpoint_files: Optional[list[str]], pretrained_model_name_or_path: Optional[str], ignore_mismatched_sizes: bool=False, sharded_metadata: Optional[dict]=None, device_map: Optional[dict]=None, disk_offload_folder: Optional[str]=None, offload_state_dict: Optional[bool]=None, dtype: Optional[torch.dtype]=None, hf_quantizer: Optional[HfQuantizer]=None, keep_in_fp32_regex: Optional[re.Pattern]=None, device_mesh: Optional['torch.distributed.device_mesh.DeviceMesh']=None, key_mapping: Optional[dict[str, str]]=None, weights_only: bool=True): is_quantized = hf_quantizer is not None is_hqq_or_quark = is_quantized and hf_quantizer.quantization_config.quant_method in {QuantizationMethod.HQQ, QuantizationMethod.QUARK} is_hqq_or_bnb = is_quantized and hf_quantizer.quantization_config.quant_method in {QuantizationMethod.HQQ, QuantizationMethod.BITS_AND_BYTES} if sharded_metadata is not None: original_checkpoint_keys = sharded_metadata['all_checkpoint_keys'] elif state_dict is not None: original_checkpoint_keys = list(state_dict.keys()) else: original_checkpoint_keys = list(load_state_dict(checkpoint_files[0], map_location='meta', weights_only=weights_only).keys()) prefix = model.base_model_prefix _prefix = f'{prefix}.' has_prefix_module = any((s.startswith(prefix) for s in original_checkpoint_keys)) if len(prefix) > 0 else False expects_prefix_module = hasattr(model, prefix) if len(prefix) > 0 else False loading_task_model_from_base_state_dict = not has_prefix_module and expects_prefix_module loading_base_model_from_task_state_dict = has_prefix_module and (not expects_prefix_module) key_renaming_mapping = model._get_key_renaming_mapping(original_checkpoint_keys, key_mapping, loading_base_model_from_task_state_dict, loading_task_model_from_base_state_dict) checkpoint_keys = list(key_renaming_mapping.values()) missing_keys, unexpected_keys = _find_missing_and_unexpected_keys(cls, model, original_checkpoint_keys, checkpoint_keys, loading_base_model_from_task_state_dict, hf_quantizer, device_map) mismatched_keys, mismatched_shapes = _find_mismatched_keys(model, state_dict, checkpoint_files, ignore_mismatched_sizes, key_renaming_mapping, is_quantized, weights_only) key_renaming_mapping = {k: v for k, v in key_renaming_mapping.items() if v not in mismatched_keys} checkpoint_keys = list(key_renaming_mapping.values()) model._move_missing_keys_from_meta_to_cpu(missing_keys + mismatched_keys, unexpected_keys, dtype, hf_quantizer) model._initialize_missing_keys(checkpoint_keys, ignore_mismatched_sizes, is_quantized) if keep_in_fp32_regex is not None: for name, param in model.named_parameters(): if keep_in_fp32_regex.search(name): param.data = param.data.to(torch.float32) model_to_load = model if loading_task_model_from_base_state_dict: model_to_load = getattr(model, prefix) key_renaming_mapping = {k: v[len(_prefix):] for k, v in key_renaming_mapping.items()} checkpoint_keys = list(key_renaming_mapping.values()) if device_map is not None: device_map = {k[len(_prefix):] if k.startswith(_prefix) else k: v for k, v in device_map.items()} task_specific_expected_keys = [s for s in model.state_dict() if not s.startswith(_prefix)] base_model_expected_keys = list(model_to_load.state_dict().keys()) if any((key in task_specific_expected_keys and key not in base_model_expected_keys for key in checkpoint_keys)): raise 
ValueError('The state dictionary of the model you are trying to load is corrupted. Are you sure it was properly saved?') reverse_key_renaming_mapping = {v: k for k, v in key_renaming_mapping.items()} is_offloaded_safetensors = False disk_offload_index = None disk_only_shard_files = [] if device_map is not None and 'disk' in device_map.values(): if offload_state_dict is None: offload_state_dict = True if disk_offload_folder is not None: os.makedirs(disk_offload_folder, exist_ok=True) is_offloaded_safetensors = checkpoint_files is not None and checkpoint_files[0].endswith('.safetensors') if disk_offload_folder is None and (not is_offloaded_safetensors): raise ValueError('The current `device_map` had weights offloaded to the disk. Please provide an `offload_folder` for them. Alternatively, make sure you have `safetensors` installed if the model you are using offers the weights in this format.') if is_offloaded_safetensors: param_device_map = expand_device_map(device_map, checkpoint_keys) str_dtype = str(dtype).replace('torch.', '') if dtype is not None else 'float32' if sharded_metadata is None: weight_map = dict.fromkeys(checkpoint_keys, checkpoint_files[0]) else: folder = os.path.sep.join(checkpoint_files[0].split(os.path.sep)[:-1]) weight_map = {key_renaming_mapping[k]: v for k, v in sharded_metadata['weight_map'].items() if k in key_renaming_mapping} weight_map = {k: os.path.join(folder, v) for k, v in weight_map.items()} disk_only_shard_files = get_disk_only_shard_files(device_map, weight_map) disk_offload_index = {name: {'safetensors_file': file, 'weight_name': reverse_key_renaming_mapping[name], 'dtype': str_dtype} for name, file in weight_map.items() if param_device_map[name] == 'disk'} else: disk_offload_index = {} cpu_offload_folder = None cpu_offload_index = None if offload_state_dict: cpu_offload_folder = tempfile.mkdtemp() cpu_offload_index = {} elif state_dict is not None: checkpoint_files = [''] expected_keys = list(model_to_load.state_dict().keys()) if hf_quantizer is not None: expected_keys = hf_quantizer.update_expected_keys(model_to_load, expected_keys, checkpoint_keys) if logger.level >= logging.WARNING: verify_tp_plan(expected_keys, getattr(model_to_load, '_tp_plan', None)) if device_map is not None and (not is_hqq_or_quark): expanded_device_map = expand_device_map(device_map, expected_keys) caching_allocator_warmup(model_to_load, expanded_device_map, hf_quantizer) args_list = [(shard_file, state_dict, disk_only_shard_files, is_hqq_or_bnb, is_quantized, device_map, hf_quantizer, key_renaming_mapping, weights_only, model_to_load, expected_keys, reverse_key_renaming_mapping, disk_offload_folder, disk_offload_index, cpu_offload_folder, cpu_offload_index, is_offloaded_safetensors, keep_in_fp32_regex, unexpected_keys, device_mesh) for shard_file in checkpoint_files] error_msgs = [] if os.environ.get('HF_ENABLE_PARALLEL_LOADING', '').upper() in ENV_VARS_TRUE_VALUES and (not is_deepspeed_zero3_enabled()): _error_msgs, disk_offload_index, cpu_offload_index = load_shard_files_with_threadpool(args_list) error_msgs += _error_msgs else: if len(args_list) > 1: args_list = logging.tqdm(args_list, desc='Loading checkpoint shards') for args in args_list: _error_msgs, disk_offload_index, cpu_offload_index = load_shard_file(args) error_msgs += _error_msgs if disk_offload_index is not None and len(disk_offload_index) > 0: if loading_task_model_from_base_state_dict: prefix = cls.base_model_prefix if not is_offloaded_safetensors: for weight_name in disk_offload_index: 
shutil.move(os.path.join(disk_offload_folder, f'{weight_name}.dat'), os.path.join(disk_offload_folder, f'{prefix}.{weight_name}.dat')) disk_offload_index = {f'{prefix}.{key}': value for key, value in disk_offload_index.items()} if not is_offloaded_safetensors: save_offload_index(disk_offload_index, disk_offload_folder) disk_offload_index = None if offload_state_dict: load_offloaded_weights(model_to_load, cpu_offload_index, cpu_offload_folder) shutil.rmtree(cpu_offload_folder) if hf_quantizer is not None: missing_keys = hf_quantizer.update_missing_keys_after_loading(model_to_load, missing_keys, prefix) if device_mesh is not None: tp_device = list(device_map.values())[0] for buffer in model.buffers(): if buffer.device != tp_device: buffer.data = buffer.to(tp_device) if loading_task_model_from_base_state_dict: parameters_to_initialize = {name: param for name, param in model.named_parameters() if not name.startswith(prefix)} for name, param in parameters_to_initialize.items(): if param.device.type == 'meta': continue to_contiguous, casting_dtype = _infer_parameter_dtype(model, name, param, keep_in_fp32_regex) shard_and_distribute_module(model, param.to(tp_device), param, name, casting_dtype, to_contiguous, device_mesh.get_local_rank(), device_mesh) if len(error_msgs) > 0: error_msg = '\n\t'.join(error_msgs) if 'size mismatch' in error_msg: error_msg += '\n\tYou may consider adding `ignore_mismatched_sizes=True` in the model `from_pretrained` method.' raise RuntimeError(f'Error(s) in loading state_dict for {model.__class__.__name__}:\n\t{error_msg}') if len(unexpected_keys) > 0: archs = [] if model.config.architectures is None else model.config.architectures warner = logger.warning if model.__class__.__name__ in archs else logger.info warner(f'Some weights of the model checkpoint at {pretrained_model_name_or_path} were not used when initializing {model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing {model.__class__.__name__} from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n- This IS NOT expected if you are initializing {model.__class__.__name__} from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).') else: logger.info(f'All model checkpoint weights were used when initializing {model.__class__.__name__}.\n') if len(missing_keys) > 0: logger.warning(f'Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at {pretrained_model_name_or_path} and are newly initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.') elif len(mismatched_keys) == 0: logger.info(f'All the weights of {model.__class__.__name__} were initialized from the model checkpoint at {pretrained_model_name_or_path}.\nIf your task is similar to the task the model of the checkpoint was trained on, you can already use {model.__class__.__name__} for predictions without further training.') if len(mismatched_keys) > 0: mismatched_warning = '\n'.join([f'- {key}: found shape {shape1} in the checkpoint and {shape2} in the model instantiated' for key, (shape1, shape2) in zip(mismatched_keys, mismatched_shapes)]) logger.warning(f'Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at {pretrained_model_name_or_path} and are newly initialized because the shapes did not match:\n{mismatched_warning}\nYou should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.') return (model, missing_keys, unexpected_keys, mismatched_keys, disk_offload_index, error_msgs) def retrieve_modules_from_names(self, names, add_prefix=False, remove_prefix=False): module_keys = {'.'.join(key.split('.')[:-1]) for key in names} module_keys = module_keys.union({'.'.join(key.split('.')[:-2]) for key in names if len(key) > 0 and key[-1].isdigit()}) retrieved_modules = [] for name, module in self.named_modules(): if remove_prefix: _prefix = f'{self.base_model_prefix}.' name = name[len(_prefix):] if name.startswith(_prefix) else name elif add_prefix: name = '.'.join([self.base_model_prefix, name]) if len(name) > 0 else self.base_model_prefix if name in module_keys: retrieved_modules.append(module) return retrieved_modules @classmethod def register_for_auto_class(cls, auto_class='AutoModel'): """ Register this class with a given auto class. This should only be used for custom models as the ones in the library are already mapped with an auto class. Args: auto_class (`str` or `type`, *optional*, defaults to `"AutoModel"`): The auto class to register this new model with. """ if not isinstance(auto_class, str): auto_class = auto_class.__name__ import transformers.models.auto as auto_module if not hasattr(auto_module, auto_class): raise ValueError(f'{auto_class} is not a valid auto class.') cls._auto_class = auto_class def to_bettertransformer(self) -> 'PreTrainedModel': """ Converts the model to use [PyTorch's native attention implementation](https://pytorch.org/docs/stable/generated/torch.nn.MultiheadAttention.html), integrated to Transformers through [Optimum library](https://huggingface.co/docs/optimum/bettertransformer/overview). Only a subset of all Transformers models are supported. PyTorch's attention fastpath allows to speed up inference through kernel fusions and the use of [nested tensors](https://pytorch.org/docs/stable/nested.html). 
Detailed benchmarks can be found in [this blog post](https://medium.com/pytorch/bettertransformer-out-of-the-box-performance-for-huggingface-transformers-3fbe27d50ab2). Returns: [`PreTrainedModel`]: The model converted to BetterTransformer. """ if not is_optimum_available(): raise ImportError('The package `optimum` is required to use Better Transformer.') from optimum.version import __version__ as optimum_version if version.parse(optimum_version) < version.parse('1.7.0'): raise ImportError(f'Please install optimum>=1.7.0 to use Better Transformer. The version {optimum_version} was found.') from optimum.bettertransformer import BetterTransformer return BetterTransformer.transform(self) def reverse_bettertransformer(self): """ Reverts the transformation from [`~PreTrainedModel.to_bettertransformer`] so that the original modeling is used, for example in order to save the model. Returns: [`PreTrainedModel`]: The model converted back to the original modeling. """ if not is_optimum_available(): raise ImportError('The package `optimum` is required to use Better Transformer.') from optimum.version import __version__ as optimum_version if version.parse(optimum_version) < version.parse('1.7.0'): raise ImportError(f'Please install optimum>=1.7.0 to use Better Transformer. The version {optimum_version} was found.') from optimum.bettertransformer import BetterTransformer return BetterTransformer.reverse(self) def warn_if_padding_and_no_attention_mask(self, input_ids, attention_mask): """ Shows a one-time warning if the input_ids appear to contain padding and no attention mask was given. """ if is_torch_fx_proxy(input_ids) or torch.jit.is_tracing() or is_torchdynamo_compiling(): return if attention_mask is not None or self.config.pad_token_id is None: return if self.config.pad_token_id in input_ids[:, [-1, 0]]: warn_string = 'We strongly recommend passing in an `attention_mask` since your input_ids may be padded. See https://huggingface.co/docs/transformers/troubleshooting#incorrect-output-when-padding-tokens-arent-masked.' if self.config.bos_token_id is not None and self.config.bos_token_id == self.config.pad_token_id or (self.config.eos_token_id is not None and self.config.eos_token_id == self.config.pad_token_id) or (self.config.sep_token_id is not None and self.config.sep_token_id == self.config.pad_token_id): warn_string += f'\nYou may ignore this warning if your `pad_token_id` ({self.config.pad_token_id}) is identical to the `bos_token_id` ({self.config.bos_token_id}), `eos_token_id` ({self.config.eos_token_id}), or the `sep_token_id` ({self.config.sep_token_id}), and your input is not padded.' logger.warning_once(warn_string) @property def supports_tp_plan(self): """ Returns whether the model has a tensor parallelism plan. """ if self._tp_plan is not None: return True if getattr(self.base_model, '_tp_plan', None) is not None: return True if self.config.base_model_tp_plan is not None: return True return False @property def tp_size(self): """ Returns the model's tensor parallelism degree. """ return self._tp_size @property def supports_pp_plan(self): if self._pp_plan is not None: return True if getattr(self.base_model, '_pp_plan', None) is not None: return True return False @property def loss_function(self): if hasattr(self, '_loss_function'): return self._loss_function loss_type = getattr(self, 'loss_type', None) if loss_type is None or loss_type not in LOSS_MAPPING: logger.warning_once(f'`loss_type={loss_type}` was set in the config but it is unrecognized. 
Using the default loss: `ForCausalLMLoss`.') loss_type = 'ForCausalLM' return LOSS_MAPPING[loss_type] @loss_function.setter def loss_function(self, value): self._loss_function = value def kernelize(self): if not is_kernels_available(): raise ValueError('Kernels are not available. To use kernels, please install kernels using `pip install kernels`') from kernels import Device, Mode, kernelize mode = Mode.INFERENCE if not self.training else Mode.TRAINING kernelize(self, device=Device(type=self.device.type), mode=mode) self._use_kernels = True @property def use_kernels(self) -> bool: return getattr(self, '_use_kernels', False) @use_kernels.setter def use_kernels(self, value: bool) -> None: if bool(value) and getattr(self, '_use_kernels', False): return if value: self.kernelize() else: if getattr(self, '_use_kernels', False): logger.warning_once("Disabling kernels at runtime is a no-op as there is no 'unkernelize' routine; keeping current kernels active.") self._use_kernels = False def get_compiled_call(self, compile_config: Optional[CompileConfig]) -> Callable: """Return a `torch.compile`'d version of `self.__call__`. This is useful to dynamically choose between non-compiled/compiled `forward` during inference, especially to switch between prefill (where we don't want to use compiled version to avoid recomputing the graph with new shapes) and iterative decoding (where we want the speed-ups of compiled version with static shapes).""" if 'llama4' in self.config.model_type: return self.__call__ compile_config = compile_config or CompileConfig() default_config = getattr(self.generation_config, 'compile_config', None) or CompileConfig() if not hasattr(self, '_compiled_call') or getattr(self, '_last_compile_config', default_config) != compile_config: self._last_compile_config = compile_config self._compiled_call = torch.compile(self.__call__, **compile_config.to_dict()) return self._compiled_call @classmethod def is_backend_compatible(cls): return cls._supports_attention_backend def _move_missing_keys_from_meta_to_cpu(self, missing_keys: list[str], unexpected_keys: list[str], dtype: Optional[torch.dtype], hf_quantizer: Optional[HfQuantizer]) -> 'PreTrainedModel': """Move the missing keys (keys that are part of the model parameters, but were NOT found in the loaded state dicts) back from meta device to cpu. """ is_quantized = hf_quantizer is not None if is_fsdp_enabled() and (not is_local_dist_rank_0()) and (not is_quantized): for key, param in self.named_parameters(): value = torch.empty_like(param, dtype=dtype, device='cpu') _load_parameter_into_model(self, key, value) return model_state_dict = self.state_dict() for key in missing_keys: param = model_state_dict[key] if param.device == torch.device('meta'): value = torch.empty_like(param, dtype=dtype, device='cpu') if not is_quantized or getattr(hf_quantizer, 'requires_parameters_quantization', False) or (not hf_quantizer.check_quantized_param(self, param_value=value, param_name=key, state_dict={})): _load_parameter_into_model(self, key, value) else: hf_quantizer.create_quantized_param(self, value, key, 'cpu', model_state_dict, unexpected_keys) def _initialize_missing_keys(self, loaded_keys: list[str], ignore_mismatched_sizes: bool, is_quantized: bool) -> 'PreTrainedModel': """Initialize the missing keys (keys that are part of the model parameters, but were NOT found in the loaded state dicts), according to `_initialize_weights`. 
Indeed, since the corresponding weights are missing from the state dict, they will not be replaced and need to be initialized correctly (i.e. weight initialization distribution). Also take care of setting the `_is_hf_initialized` flag for keys that are not missing. """ if not ignore_mismatched_sizes: not_initialized_submodules = set_initialized_submodules(self, loaded_keys) if hasattr(self.config.get_text_config(decoder=True), 'tie_word_embeddings') and self.config.get_text_config(decoder=True).tie_word_embeddings: output_embeddings = self.get_output_embeddings() if output_embeddings is not None: if not hasattr(output_embeddings, 'bias') or output_embeddings.bias is None: output_embeddings._is_hf_initialized = True else: not_initialized_submodules = dict(self.named_modules()) if is_deepspeed_zero3_enabled() and (not is_quantized): import deepspeed not_initialized_parameters = list(set(itertools.chain.from_iterable((submodule.parameters(recurse=False) for submodule in not_initialized_submodules.values())))) with deepspeed.zero.GatheredParameters(not_initialized_parameters, modifier_rank=0): self.initialize_weights() else: self.initialize_weights() def get_parameter_or_buffer(self, target: str): """ Return the parameter or buffer given by `target` if it exists, otherwise throw an error. This combines `get_parameter()` and `get_buffer()` in a single handy function. If the target is an `_extra_state` attribute, it will return the extra state provided by the module. Note that it only work if `target` is a leaf of the model. """ try: return self.get_parameter(target) except AttributeError: pass try: return self.get_buffer(target) except AttributeError: pass module, param_name = get_module_from_name(self, target) if param_name == '_extra_state' and getattr(module.__class__, 'get_extra_state', torch.nn.Module.get_extra_state) is not torch.nn.Module.get_extra_state: return module.get_extra_state() raise AttributeError(f'`{target}` is neither a parameter, buffer, nor extra state.') def train(self, mode: bool=True): out = super().train(mode) if self.use_kernels: self.kernelize() return out def eval(self): return self.train(False)
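A brief usage sketch of the loading-info path implemented above: with `output_loading_info=True`, `from_pretrained` returns the model together with the dictionary assembled at the end of the method.

```python
from transformers import BertModel

# Sketch: inspect what from_pretrained reported while loading a checkpoint.
model, loading_info = BertModel.from_pretrained(
    "google-bert/bert-base-uncased", output_loading_info=True
)
print(loading_info["missing_keys"])      # model parameters absent from the checkpoint (newly initialized)
print(loading_info["unexpected_keys"])   # checkpoint weights the model did not use
print(loading_info["mismatched_keys"])   # shape mismatches (kept only with ignore_mismatched_sizes=True)
print(loading_info["error_msgs"])        # low-level errors raised while loading state dict entries
```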
class PreTrainedModel(nn.Module, EmbeddingAccessMixin, ModuleUtilsMixin, PushToHubMixin, PeftAdapterMixin): ''' Base class for all models. [`PreTrainedModel`] takes care of storing the configuration of the models and handles methods for loading, downloading and saving models as well as a few methods common to all models to: - resize the input embeddings, - prune heads in the self-attention heads. Class attributes (overridden by derived classes): - **config_class** ([`PretrainedConfig`]) -- A subclass of [`PretrainedConfig`] to use as configuration class for this model architecture. - **base_model_prefix** (`str`) -- A string indicating the attribute associated to the base model in derived classes of the same architecture adding modules on top of the base model. - **is_parallelizable** (`bool`) -- A flag indicating whether this model supports model parallelization. - **main_input_name** (`str`) -- The name of the principal input to the model (often `input_ids` for NLP models, `pixel_values` for vision models and `input_values` for speech models). - **can_record_outputs** (dict): ''' @property @torch._dynamo.allow_in_graph def can_record_outputs(self) -> dict[str, OutputRecorder]: ''' Maps output names (e.g., "attentions", "hidden_states") to either: - A module class (e.g., `LlamaDecoderLayer`), using default index conventions: * index=0 for "hidden_states" * index=1 for "attentions" - Or an `OutputRecorder(...)` with `target_class`, optional `index`, and `layer_name`. Examples: These two are equivalent: ```python _can_record_outputs = { "attentions": LlamaAttention, "hidden_states": LlamaDecoderLayer } _can_record_outputs = { "attentions": OutputRecorder(LlamaAttention, index=1), "hidden_states": OutputRecorder(LlamaDecoderLayer, index=0) } ``` This means you can record outputs from the same class, by specifying a layer name. Before collecting outputs, we check that they come from this layer. If you have cross attention that come from `LlamaAttention` and self attention that also come from `LlamaAttention` but from `self_attn` you can do this: ```python class LlamaModel(PreTrainedModel): _can_record_outputs = { "attentions": OutputRecorder(LlamaAttention, index=1, layer-name="self_attn"), "cross_attentions": OutputRecorder(LlamaAttention, index=1, layer_name="cross_attn") } ``` ''' pass @property def dummy_inputs(self) -> dict[str, torch.Tensor]: ''' `dict[str, torch.Tensor]`: Dummy inputs to do a forward pass in the network. ''' pass def __init_subclass__(cls, **kwargs): pass def __init__(self, config: PretrainedConfig, *inputs, **kwargs): pass def post_init(self): ''' A method executed at the end of each Transformer model initialization, to execute code that needs the model's modules properly initialized (such as weight initialization). This is also used when the user is running distributed code. We add hooks to the modules here, according to the model's tp_plan! ''' pass @property def tp_plan(self) -> dict[str, str]: ''' The full tp plan for the model's modules ''' pass @property def pp_plan(self) -> dict[str, tuple[str, str]]: pass @tp_plan.setter def tp_plan(self) -> dict[str, str]: pass @pp_plan.setter def pp_plan(self) -> dict[str, tuple[str, str]]: pass def dequantize(self): ''' Potentially dequantize the model in case it has been quantized by a quantization method that support dequantization. 
''' pass def _backward_compatibility_gradient_checkpointing(self): pass def add_model_tags(self, tags: Union[list[str], str]) -> None: ''' Add custom tags into the model that gets pushed to the Hugging Face Hub. Will not overwrite existing tags in the model. Args: tags (`Union[list[str], str]`): The desired tags to inject in the model Examples: ```python from transformers import AutoModel model = AutoModel.from_pretrained("google-bert/bert-base-cased") model.add_model_tags(["custom", "custom-bert"]) # Push the model to your namespace with the name "my-custom-bert". model.push_to_hub("my-custom-bert") ``` ''' pass @classmethod @restore_default_dtype def _from_config(cls, config, **kwargs): ''' All context managers that the model should be initialized under go here. Args: dtype (`torch.dtype`, *optional*): Override the default `dtype` and load the model under this dtype. ''' pass @classmethod def _set_default_dtype(cls, dtype: torch.dtype) -> torch.dtype: ''' Change the default dtype and return the previous one. This is needed when wanting to instantiate the model under specific dtype. Args: dtype (`torch.dtype`): a floating dtype to set to. Returns: `torch.dtype`: the original `dtype` that can be used to restore `torch.set_default_dtype(dtype)` if it was modified. If it wasn't, returns `None`. Note `set_default_dtype` currently only works with floating-point types and asserts if for example, `torch.int64` is passed. So if a non-float `dtype` is passed this functions will throw an exception. ''' pass @property def base_model(self) -> nn.Module: ''' `torch.nn.Module`: The main body of the model. ''' pass @classmethod def can_generate(cls) -> bool: ''' Returns whether this model can generate sequences with `.generate()` from the `GenerationMixin`. Under the hood, on classes where this function returns True, some generation-specific changes are triggered: for instance, the model instance will have a populated `generation_config` attribute. Returns: `bool`: Whether this model can generate sequences with `.generate()`. ''' pass def _flash_attn_2_can_dispatch(self, is_init_check: bool=False) -> bool: ''' Check the availability of Flash Attention 2 for a given model. Args: is_init_check (`bool`, *optional*): Whether this check is performed early, i.e. at __init__ time, or later when the model and its weights are fully instantiated. This is needed as we also check the devices of the weights, and/or if the model uses BetterTransformer, which are only available later after __init__. This allows to raise proper exceptions early before instantiating the full models if we know that the model does not support the requested attention. ''' pass def _flash_attn_3_can_dispatch(self, is_init_check: bool=False) -> bool: ''' Check the availability of Flash Attention 3 for a given model. Args: is_init_check (`bool`, *optional*): Whether this check is performed early, i.e. at __init__ time, or later when the model and its weights are fully instantiated. This is needed as we also check the devices of the weights, and/or if the model uses BetterTransformer, which are only available later after __init__. This allows to raise proper exceptions early before instantiating the full models if we know that the model does not support the requested attention. ''' pass def _sdpa_can_dispatch(self, is_init_check: bool=False) -> bool: ''' Check the availability of SDPA for a given model. Args: is_init_check (`bool`, *optional*): Whether this check is performed early, i.e. 
at __init__ time, or later when the model and its weights are fully instantiated. This is needed as we also check the devices of the weights, and/or if the model uses BetterTransformer, which are only available later after __init__. This allows to raise proper exceptions early before instantiating the full models if we know that the model does not support the requested attention. ''' pass def _flex_attn_can_dispatch(self, is_init_check: bool=False) -> bool: ''' Check the availability of Flex Attention for a given model. Args: is_init_check (`bool`, *optional*): Whether this check is performed early, i.e. at __init__ time, or later when the model and its weights are fully instantiated. This is needed as we also check the devices of the weights, and/or if the model uses BetterTransformer, which are only available later after __init__. This allows to raise proper exceptions early before instantiating the full models if we know that the model does not support the requested attention. ''' pass def _check_and_adjust_attn_implementation(self, attn_implementation: Optional[str], is_init_check: bool=False) -> str: ''' Check that the `attn_implementation` exists and is supported by the models, and try to get the kernel from hub if it matches hf kernels pattern. Args: attn_implementation (`str` or `None`): The attention implementation to check for existence/validity. is_init_check (`bool`, *optional*): Whether this check is performed early, i.e. at __init__ time, or later when the model and its weights are fully instantiated. This is needed as we also check the devices of the weights, and/or if the model uses BetterTransformer, which are only available later after __init__. This allows to raise proper exceptions early before instantiating the full models if we know that the model does not support the requested attention. Returns: `str`: The final attention implementation to use, including potential fallbacks from sdpa to eager, or from None to sdpa (to potentially eager). ''' pass def get_correct_attn_implementation(self, requested_attention: Optional[str], is_init_check: bool=False) -> str: pass @classmethod def _can_set_attn_implementation(cls) -> bool: '''Detect whether the class supports setting its attention implementation dynamically. It is an ugly check based on opening the file, but avoids maintaining yet another property flag. ''' pass def set_attn_implementation(self, attn_implementation: Union[str, dict]): ''' Set the requested `attn_implementation` for this model. Args: attn_implementation (`str` or `dict`): The attention implementation to set for this model. It can be either a `str`, in which case it will be dispatched to all submodels if relevant, or a `dict` where keys are the sub_configs name, in which case each submodel will dispatch the corresponding value. ''' pass def enable_input_require_grads(self): ''' Enables the gradients for the input embeddings. This is useful for fine-tuning adapter weights while keeping the model weights fixed. ''' pass def make_inputs_require_grads(module, input, output): pass def disable_input_require_grads(self): ''' Removes the `_require_grads_hook`. ''' pass def get_decoder(self): ''' Best-effort lookup of the *decoder* module. Order of attempts (covers ~85 % of current usages): 1. `self.decoder` 2. `self.model` (many wrappers store the decoder here) 3. `self.model.get_decoder()` (nested wrappers) 4. fallback: raise for the few exotic models that need a bespoke rule ''' pass def set_decoder(self, decoder): ''' Symmetric setter. 
Mirrors the lookup logic used in `get_decoder`. ''' pass def _init_weights(self, module): ''' Initialize the weights. This is quite general on purpose, in the spirit of what we usually do. For more complex initialization scheme, it should be overridden by the derived `PreTrainedModel` class. In case a model adds an explicit `nn.Parameter`, this method should also be overridden in order to initialize it correctly. ''' pass def _initialize_weights(self, module): ''' Initialize the weights if they are not already initialized. ''' pass @torch.no_grad() def initialize_weights(self): ''' This is equivalent to calling `self.apply(self._initialize_weights)`, but correctly handles composite models. This function dynamically dispatches the correct `init_weights` function to the modules as we advance in the module graph along the recursion. It can handle an arbitrary number of sub-models. Without it, every composite model would have to recurse a second time on all sub-models explicitly in the outer-most `_init_weights`, which is extremely error prone and inefficient. Note that the `torch.no_grad()` decorator is very important as well, as most of our `_init_weights` do not use `torch.nn.init` functions (which are all no_grad by default), but simply do in-place ops such as `module.weight.data.zero_()`. ''' pass def smart_apply(self, fn): pass def tie_embeddings_and_encoder_decoder(self): ''' If set in the config, tie the weights between the input embeddings and the output embeddings, and the encoder and decoder. If the `torchscript` flag is set in the configuration, can't handle parameter sharing so we are cloning the weights instead. ''' pass def tie_weights(self): ''' Recursively (for all submodels) tie all the weights of the model. ''' pass @staticmethod def _tie_encoder_decoder_weights(encoder: nn.Module, decoder: nn.Module, base_model_prefix: str, base_encoder_name: str): pass def tie_encoder_to_decoder_recursively(decoder_pointer: nn.Module, encoder_pointer: nn.Module, module_name: str, base_encoder_name: str, uninitialized_encoder_weights: list[str], depth=0, total_decoder_name='', total_encoder_name=''): pass def _tie_or_clone_weights(self, output_embeddings, input_embeddings): '''Tie or clone module weights depending of whether we are using TorchScript or not''' pass def _get_no_split_modules(self, device_map: str): ''' Get the modules of the model that should not be spit when using device_map. We iterate through the modules to get the underlying `_no_split_modules`. Args: device_map (`str`): The device map value. Options are ["auto", "balanced", "balanced_low_0", "sequential"] Returns: `list[str]`: List of modules that should not be split ''' pass def resize_token_embeddings(self, new_num_tokens: Optional[int]=None, pad_to_multiple_of: Optional[int]=None, mean_resizing: bool=True) -> nn.Embedding: ''' Resizes input token embeddings matrix of the model if `new_num_tokens != config.vocab_size`. Takes care of tying weights embeddings afterwards if the model class has a `tie_weights()` method. Arguments: new_num_tokens (`int`, *optional*): The new number of tokens in the embedding matrix. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end. If not provided or `None`, just returns a pointer to the input tokens `torch.nn.Embedding` module of the model without doing anything. 
pad_to_multiple_of (`int`, *optional*): If set will pad the embedding matrix to a multiple of the provided value.If `new_num_tokens` is set to `None` will just pad the embedding to a multiple of `pad_to_multiple_of`. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128. For more details about this, or help on choosing the correct value for resizing, refer to this guide: https://docs.nvidia.com/deeplearning/performance/dl-performance-matrix-multiplication/index.html#requirements-tc mean_resizing (`bool`): Whether to initialize the added embeddings from a multivariate normal distribution that has old embeddings' mean and covariance or to initialize them with a normal distribution that has a mean of zero and std equals `config.initializer_range`. Setting `mean_resizing` to `True` is useful when increasing the size of the embeddings of causal language models, where the generated tokens' probabilities won't be affected by the added embeddings because initializing the new embeddings with the old embeddings' mean will reduce the kl-divergence between the next token probability before and after adding the new embeddings. Refer to this article for more information: https://nlp.stanford.edu/~johnhew/vocab-expansion.html Return: `torch.nn.Embedding`: Pointer to the input tokens Embeddings Module of the model. ''' pass def _resize_token_embeddings(self, new_num_tokens, pad_to_multiple_of=None, mean_resizing=True): pass def _get_resized_embeddings(self, old_embeddings: nn.Embedding, new_num_tokens: Optional[int]=None, pad_to_multiple_of: Optional[int]=None, mean_resizing: bool=True) -> nn.Embedding: ''' Build a resized Embedding Module from a provided token Embedding Module. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end Args: old_embeddings (`torch.nn.Embedding`): Old embeddings to be resized. new_num_tokens (`int`, *optional*): New number of tokens in the embedding matrix. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end. If not provided or `None`, just returns a pointer to the input tokens `torch.nn.Embedding` module of the model without doing anything. pad_to_multiple_of (`int`, *optional*): If set will pad the embedding matrix to a multiple of the provided value. If `new_num_tokens` is set to `None` will just pad the embedding to a multiple of `pad_to_multiple_of`. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128. For more details about this, or help on choosing the correct value for resizing, refer to this guide: https://docs.nvidia.com/deeplearning/performance/dl-performance-matrix-multiplication/index.html#requirements-tc mean_resizing (`bool`): Whether to initialize the added embeddings from a multivariate normal distribution that has old embeddings' mean and covariance or to initialize them with a normal distribution that has a mean of zero and std equals `config.initializer_range`. 
Setting `mean_resizing` to `True` is useful when increasing the size of the embeddings of causal language models, where the generated tokens' probabilities will not be affected by the added embeddings because initializing the new embeddings with the old embeddings' mean will reduce the kl-divergence between the next token probability before and after adding the new embeddings. Refer to this article for more information: https://nlp.stanford.edu/~johnhew/vocab-expansion.html Return: `torch.nn.Embedding`: Pointer to the resized Embedding Module or the old Embedding Module if `new_num_tokens` is `None` ''' pass def _get_resized_lm_head(self, old_lm_head: nn.Linear, new_num_tokens: Optional[int]=None, transposed: Optional[bool]=False, mean_resizing: bool=True) -> nn.Linear: ''' Build a resized Linear Module from a provided old Linear Module. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end Args: old_lm_head (`torch.nn.Linear`): Old lm head liner layer to be resized. new_num_tokens (`int`, *optional*): New number of tokens in the linear matrix. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end. If not provided or `None`, just returns a pointer to the input tokens `torch.nn.Linear` module of the model without doing anything. transposed (`bool`, *optional*, defaults to `False`): Whether `old_lm_head` is transposed or not. If True `old_lm_head.size()` is `lm_head_dim, vocab_size` else `vocab_size, lm_head_dim`. mean_resizing (`bool`): Whether to initialize the added embeddings from a multivariate normal distribution that has old embeddings' mean and covariance or to initialize them with a normal distribution that has a mean of zero and std equals `config.initializer_range`. Setting `mean_resizing` to `True` is useful when increasing the size of the embeddings of causal language models, where the generated tokens' probabilities will not be affected by the added embeddings because initializing the new embeddings with the old embeddings' mean will reduce the kl-divergence between the next token probability before and after adding the new embeddings. Refer to this article for more information: https://nlp.stanford.edu/~johnhew/vocab-expansion.html Return: `torch.nn.Linear`: Pointer to the resized Linear Module or the old Linear Module if `new_num_tokens` is `None` ''' pass def _init_added_embeddings_weights_with_mean(self, old_embeddings, new_embeddings, old_embedding_dim, old_num_tokens, added_num_tokens): pass def _init_added_lm_head_weights_with_mean(self, old_lm_head, new_lm_head, old_lm_head_dim, old_num_tokens, added_num_tokens, transposed=False): pass def _init_added_lm_head_bias_with_mean(self, old_lm_head, new_lm_head, added_num_tokens): pass def _copy_lm_head_original_to_resized(self, new_lm_head, old_lm_head, num_tokens_to_copy, transposed, has_new_lm_head_bias): pass def resize_position_embeddings(self, new_num_position_embeddings: int): pass def get_position_embeddings(self) -> Union[nn.Embedding, tuple[nn.Embedding]]: pass def init_weights(self): ''' If needed prunes and maybe initializes weights. If using a custom `PreTrainedModel`, you need to implement any initialization logic in `_init_weights`. ''' pass def prune_heads(self, heads_to_prune: dict[int, list[int]]): ''' Prunes heads of the base model. 
Arguments: heads_to_prune (`dict[int, list[int]]`): Dictionary with keys being selected layer indices (`int`) and associated values being the list of heads to prune in said layer (list of `int`). For instance {1: [0, 2], 2: [2, 3]} will prune heads 0 and 2 on layer 1 and heads 2 and 3 on layer 2. ''' pass def gradient_checkpointing_enable(self, gradient_checkpointing_kwargs=None): ''' Activates gradient checkpointing for the current model. We pass the `__call__` method of the modules instead of `forward` because `__call__` attaches all the hooks of the module. https://discuss.pytorch.org/t/any-different-between-model-input-and-model-forward-input/3690/2 Args: gradient_checkpointing_kwargs (dict, *optional*): Additional keyword arguments passed along to the `torch.utils.checkpoint.checkpoint` function. ''' pass def _set_gradient_checkpointing(self, enable: bool=True, gradient_checkpointing_func: Callable=checkpoint): pass def gradient_checkpointing_disable(self): ''' Deactivates gradient checkpointing for the current model. ''' pass @property def is_gradient_checkpointing(self) -> bool: ''' Whether gradient checkpointing is activated for this model or not. ''' pass def save_pretrained(self, save_directory: Union[str, os.PathLike], is_main_process: bool=True, state_dict: Optional[dict]=None, save_function: Callable=torch.save, push_to_hub: bool=False, max_shard_size: Union[int, str]='5GB', safe_serialization: bool=True, variant: Optional[str]=None, token: Optional[Union[str, bool]]=None, save_peft_format: bool=True, **kwargs): ''' Save a model and its configuration file to a directory, so that it can be re-loaded using the [`~PreTrainedModel.from_pretrained`] class method. Arguments: save_directory (`str` or `os.PathLike`): Directory to which to save. Will be created if it doesn't exist. is_main_process (`bool`, *optional*, defaults to `True`): Whether the process calling this is the main process or not. Useful when in distributed training like TPUs and need to call this function on all processes. In this case, set `is_main_process=True` only on the main process to avoid race conditions. state_dict (nested dictionary of `torch.Tensor`): The state dictionary of the model to save. Will default to `self.state_dict()`, but can be used to only save parts of the model or if special precautions need to be taken when recovering the state dictionary of a model (like when using model parallelism). save_function (`Callable`): The function to use to save the state dictionary. Useful on distributed training like TPUs when one need to replace `torch.save` by another method. push_to_hub (`bool`, *optional*, defaults to `False`): Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the repository you want to push to with `repo_id` (will default to the name of `save_directory` in your namespace). max_shard_size (`int` or `str`, *optional*, defaults to `"5GB"`): The maximum size for a checkpoint before being sharded. Checkpoints shard will then be each of size lower than this size. If expressed as a string, needs to be digits followed by a unit (like `"5MB"`). We default it to 5GB in order for models to be able to run easily on free-tier google colab instances without CPU OOM issues. <Tip warning={true}> If a single weight of the model is bigger than `max_shard_size`, it will be in its own checkpoint shard which will be bigger than `max_shard_size`. 
</Tip> safe_serialization (`bool`, *optional*, defaults to `True`): Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`). variant (`str`, *optional*): If specified, weights are saved in the format pytorch_model.<variant>.bin. token (`str` or `bool`, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, or not specified, will use the token generated when running `hf auth login` (stored in `~/.huggingface`). save_peft_format (`bool`, *optional*, defaults to `True`): For backward compatibility with PEFT library, in case adapter weights are attached to the model, all keys of the state dict of adapters needs to be prepended with `base_model.model`. Advanced users can disable this behaviours by setting `save_peft_format` to `False`. kwargs (`dict[str, Any]`, *optional*): Additional key word arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method. ''' pass @wraps(PushToHubMixin.push_to_hub) def push_to_hub(self, *args, **kwargs): pass def get_memory_footprint(self, return_buffers=True): ''' Get the memory footprint of a model. This will return the memory footprint of the current model in bytes. Useful to benchmark the memory footprint of the current model and design some tests. Solution inspired from the PyTorch discussions: https://discuss.pytorch.org/t/gpu-memory-that-model-uses/56822/2 Arguments: return_buffers (`bool`, *optional*, defaults to `True`): Whether to return the size of the buffer tensors in the computation of the memory footprint. Buffers are tensors that do not require gradients and not registered as parameters. E.g. mean and std in batch norm layers. Please see: https://discuss.pytorch.org/t/what-pytorch-means-by-buffers/120266/2 ''' pass @wraps(torch.nn.Module.cuda) def cuda(self, *args, **kwargs): pass @wraps(torch.nn.Module.to) def to(self, *args, **kwargs): pass def half(self, *args): pass def float(self, *args): pass @classmethod def get_init_context(cls, is_quantized: bool, _is_ds_init_called: bool): pass @classmethod @restore_default_dtype def from_pretrained(cls: type[SpecificPreTrainedModelType], pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], *model_args, config: Optional[Union[PretrainedConfig, str, os.PathLike]]=None, cache_dir: Optional[Union[str, os.PathLike]]=None, ignore_mismatched_sizes: bool=False, force_download: bool=False, local_files_only: bool=False, token: Optional[Union[str, bool]]=None, revision: str='main', use_safetensors: Optional[bool]=None, weights_only: bool=True, **kwargs) -> SpecificPreTrainedModelType: ''' Instantiate a pretrained pytorch model from a pre-trained model configuration. The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated). To train the model, you should first set it back in training mode with `model.train()`. The warning *Weights from XXX not initialized from pretrained model* means that the weights of XXX do not come pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning task. The warning *Weights from XXX not used in YYY* means that the layer XXX is not used by YYY, therefore those weights are discarded. Parameters: pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*): Can be either: - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co. 
- A path to a *directory* containing model weights saved using [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`. - `None` if you are both providing the configuration and state dictionary (resp. with keyword arguments `config` and `state_dict`). model_args (sequence of positional arguments, *optional*): All remaining positional arguments will be passed to the underlying model's `__init__` method. config (`Union[PretrainedConfig, str, os.PathLike]`, *optional*): Can be either: - an instance of a class derived from [`PretrainedConfig`], - a string or path valid as input to [`~PretrainedConfig.from_pretrained`]. Configuration for the model to use instead of an automatically loaded configuration. Configuration can be automatically loaded when: - The model is a model provided by the library (loaded with the *model id* string of a pretrained model). - The model was saved using [`~PreTrainedModel.save_pretrained`] and is reloaded by supplying the save directory. - The model is loaded by supplying a local directory as `pretrained_model_name_or_path` and a configuration JSON file named *config.json* is found in the directory. state_dict (`dict[str, torch.Tensor]`, *optional*): A state dictionary to use instead of a state dictionary loaded from saved weights file. This option can be used if you want to create a model from a pretrained configuration but load your own weights. In this case though, you should check if using [`~PreTrainedModel.save_pretrained`] and [`~PreTrainedModel.from_pretrained`] is not a simpler option. cache_dir (`Union[str, os.PathLike]`, *optional*): Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used. ignore_mismatched_sizes (`bool`, *optional*, defaults to `False`): Whether or not to raise an error if some of the weights from the checkpoint do not have the same size as the weights of the model (if for instance, you are instantiating a model with 10 labels from a checkpoint with 3 labels). force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. resume_download: Deprecated and ignored. All downloads are now resumed by default when possible. Will be removed in v5 of Transformers. proxies (`dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. output_loading_info(`bool`, *optional*, defaults to `False`): Whether ot not to also return a dictionary containing missing keys, unexpected keys and error messages. local_files_only(`bool`, *optional*, defaults to `False`): Whether or not to only look at local files (i.e., do not try to download the model). token (`str` or `bool`, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, or not specified, will use the token generated when running `hf auth login` (stored in `~/.huggingface`). revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier allowed by git. <Tip> To test a pull request you made on the Hub, you can pass `revision="refs/pr/<pr_number>"`. 
</Tip> attn_implementation (`str`, *optional*): The attention implementation to use in the model (if relevant). Can be any of `"eager"` (manual implementation of the attention), `"sdpa"` (using [`F.scaled_dot_product_attention`](https://pytorch.org/docs/master/generated/torch.nn.functional.scaled_dot_product_attention.html)), `"flash_attention_2"` (using [Dao-AILab/flash-attention](https://github.com/Dao-AILab/flash-attention)), or `"flash_attention_3"` (using [Dao-AILab/flash-attention/hopper](https://github.com/Dao-AILab/flash-attention/tree/main/hopper)). By default, if available, SDPA will be used for torch>=2.1.1. The default is otherwise the manual `"eager"` implementation. Accept HF kernel references in the form: <namespace>/<repo_name>[@<revision>][:<kernel_name>] - <namespace> and <repo_name> are any non-"/" and non-":" sequences. - "@<revision>" is optional (branch, tag, or commit-ish), e.g. "@main", "@v1.2.0", "@abc123". - ":<kernel_name>" is optional and selects a function inside the kernel repo. - Both options can appear together and in this order only: @revision first, then :kernel_name. - We intentionally allow a leading "<wrapper>|" prefix (e.g., "flash|...") because the code strips it before loading; '|' is not excluded in the character classes here. Examples that match: "org/model" "org/model@main" "org/model:custom_kernel" "org/model@v1.2.3:custom_kernel" > Parameters for big model inference dtype (`str` or `torch.dtype`, *optional*): Override the default `torch_dtype` and load the model under a specific `dtype`. The different options are: 1. `torch.float16` or `torch.bfloat16` or `torch.float`: load in a specified `dtype`, ignoring the model's `config.dtype` if one exists. If not specified - the model will get loaded in `torch.float` (fp32). 2. `"auto"` - A `dtype` or `torch_dtype` entry in the `config.json` file of the model will be attempted to be used. If this entry isn't found then next check the `dtype` of the first weight in the checkpoint that's of a floating point type and use that as `dtype`. This will load the model using the `dtype` it was saved in at the end of the training. It can't be used as an indicator of how the model was trained. Since it could be trained in one of half precision dtypes, but saved in fp32. 3. A string that is a valid `torch.dtype`. E.g. "float32" loads the model in `torch.float32`, "float16" loads in `torch.float16` etc. <Tip> For some models the `dtype` they were trained in is unknown - you may try to check the model's paper or reach out to the authors and ask them to add this information to the model's card and to insert the `dtype` or `torch_dtype` entry in `config.json` on the hub. </Tip> device_map (`str` or `dict[str, Union[int, str, torch.device]]` or `int` or `torch.device`, *optional*): A map that specifies where each submodule should go. It doesn't need to be refined to each parameter/buffer name, once a given module name is inside, every submodule of it will be sent to the same device. If we only pass the device (*e.g.*, `"cpu"`, `"cuda:1"`, `"mps"`, or a GPU ordinal rank like `1`) on which the model will be allocated, the device map will map the entire model to this device. Passing `device_map = 0` means put the whole model on GPU 0. To have Accelerate compute the most optimized `device_map` automatically, set `device_map="auto"`. For more information about each option see [designing a device map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map). 
max_memory (`Dict`, *optional*): A dictionary device identifier to maximum memory if using `device_map`. Will default to the maximum memory available for each GPU and the available CPU RAM if unset. tp_plan (`str`, *optional*): A torch tensor parallel plan, see [here](https://pytorch.org/tutorials/intermediate/TP_tutorial.html). Currently, it only accepts `tp_plan="auto"` to use predefined plan based on the model. Note that if you use it, you should launch your script accordingly with `torchrun [args] script.py`. This will be much faster than using a `device_map`, but has limitations. tp_size (`str`, *optional*): A torch tensor parallel degree. If not provided would default to world size. device_mesh (`torch.distributed.DeviceMesh`, *optional*): A torch device mesh. If not provided would default to world size. Used only for tensor parallel for now. If provided, it has to contain dimension named `"tp"` in case it's > 1 dimensional, this dimension will be used for tensor parallelism offload_folder (`str` or `os.PathLike`, *optional*): If the `device_map` contains any value `"disk"`, the folder where we will offload weights. offload_state_dict (`bool`, *optional*): If `True`, will temporarily offload the CPU state dict to the hard drive to avoid getting out of CPU RAM if the weight of the CPU state dict + the biggest shard of the checkpoint does not fit. Defaults to `True` when there is some disk offload. offload_buffers (`bool`, *optional*): Whether or not to offload the buffers with the model parameters. quantization_config (`Union[QuantizationConfigMixin,Dict]`, *optional*): A dictionary of configuration parameters or a QuantizationConfigMixin object for quantization (e.g bitsandbytes, gptq). There may be other quantization-related kwargs, including `load_in_4bit` and `load_in_8bit`, which are parsed by QuantizationConfigParser. Supported only for bitsandbytes quantizations and not preferred. consider inserting all such arguments into quantization_config instead. subfolder (`str`, *optional*, defaults to `""`): In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can specify the folder name here. variant (`str`, *optional*): If specified load weights from `variant` filename, *e.g.* pytorch_model.<variant>.bin. use_safetensors (`bool`, *optional*, defaults to `None`): Whether or not to use `safetensors` checkpoints. Defaults to `None`. If not specified and `safetensors` is not installed, it will be set to `False`. weights_only (`bool`, *optional*, defaults to `True`): Indicates whether unpickler should be restricted to loading only tensors, primitive types, dictionaries and any types added via torch.serialization.add_safe_globals(). When set to False, we can load wrapper tensor subclass weights. key_mapping (`dict[str, str], *optional*): A potential mapping of the weight names if using a model on the Hub which is compatible to a Transformers architecture, but was not converted accordingly. kwargs (remaining dictionary of keyword arguments, *optional*): Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., `output_attentions=True`). 
Behaves differently depending on whether a `config` is provided or automatically loaded: - If a configuration is provided with `config`, `**kwargs` will be directly passed to the underlying model's `__init__` method (we assume all relevant updates to the configuration have already been done) - If a configuration is not provided, `kwargs` will be first passed to the configuration class initialization function ([`~PretrainedConfig.from_pretrained`]). Each key of `kwargs` that corresponds to a configuration attribute will be used to override said attribute with the supplied `kwargs` value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model's `__init__` function. <Tip> Activate the special ["offline-mode"](https://huggingface.co/transformers/installation.html#offline-mode) to use this method in a firewalled environment. </Tip> Examples: ```python >>> from transformers import BertConfig, BertModel >>> # Download model and configuration from huggingface.co and cache. >>> model = BertModel.from_pretrained("google-bert/bert-base-uncased") >>> # Model was saved using *save_pretrained('./test/saved_model/')* (for example purposes, not runnable). >>> model = BertModel.from_pretrained("./test/saved_model/") >>> # Update configuration during loading. >>> model = BertModel.from_pretrained("google-bert/bert-base-uncased", output_attentions=True) >>> assert model.config.output_attentions == True ``` ''' pass def _assign_original_dtype(module): pass @staticmethod def _fix_state_dict_key_on_load(key: str) -> tuple[str, bool]: '''Replace legacy parameter names with their modern equivalents. E.g. beta -> bias, gamma -> weight.''' pass def _get_key_renaming_mapping(self, checkpoint_keys: list[str], key_mapping: Optional[dict[str, str]]=None, loading_base_model_from_task_state_dict: bool=False, loading_task_model_from_base_state_dict: bool=False): ''' Compute a mapping between the serialized keys on disk `checkpoint_keys`, and the keys that the model that we are loading expects. This is the single entry point for key renaming that will be used during loading. Log if any parameters have been renamed. ''' pass @staticmethod def _fix_state_dict_key_on_save(key) -> tuple[str, bool]: ''' Similar to `_fix_state_dict_key_on_load` allows to define hook for state dict key renaming on model save. Do nothing by default, but can be overridden in particular models. ''' pass def _fix_state_dict_keys_on_save(self, state_dict): ''' Similar to `_fix_state_dict_keys_on_load` allows to define hook for state dict key renaming on model save. Apply `_fix_state_dict_key_on_save` to all keys in `state_dict`. ''' pass @classmethod def _load_pretrained_model(cls, model: 'PreTrainedModel', state_dict: Optional[dict], checkpoint_files: Optional[list[str]], pretrained_model_name_or_path: Optional[str], ignore_mismatched_sizes: bool=False, sharded_metadata: Optional[dict]=None, device_map: Optional[dict]=None, disk_offload_folder: Optional[str]=None, offload_state_dict: Optional[bool]=None, dtype: Optional[torch.dtype]=None, hf_quantizer: Optional[HfQuantizer]=None, keep_in_fp32_regex: Optional[re.Pattern]=None, device_mesh: Optional['torch.distributed.device_mesh.DeviceMesh']=None, key_mapping: Optional[dict[str, str]]=None, weights_only: bool=True): pass def retrieve_modules_from_names(self, names, add_prefix=False, remove_prefix=False): pass @classmethod def register_for_auto_class(cls, auto_class='AutoModel'): ''' Register this class with a given auto class. 
This should only be used for custom models as the ones in the library are already mapped with an auto class. Args: auto_class (`str` or `type`, *optional*, defaults to `"AutoModel"`): The auto class to register this new model with. ''' pass def to_bettertransformer(self) -> 'PreTrainedModel': ''' Converts the model to use [PyTorch's native attention implementation](https://pytorch.org/docs/stable/generated/torch.nn.MultiheadAttention.html), integrated to Transformers through [Optimum library](https://huggingface.co/docs/optimum/bettertransformer/overview). Only a subset of all Transformers models are supported. PyTorch's attention fastpath allows to speed up inference through kernel fusions and the use of [nested tensors](https://pytorch.org/docs/stable/nested.html). Detailed benchmarks can be found in [this blog post](https://medium.com/pytorch/bettertransformer-out-of-the-box-performance-for-huggingface-transformers-3fbe27d50ab2). Returns: [`PreTrainedModel`]: The model converted to BetterTransformer. ''' pass def reverse_bettertransformer(self): ''' Reverts the transformation from [`~PreTrainedModel.to_bettertransformer`] so that the original modeling is used, for example in order to save the model. Returns: [`PreTrainedModel`]: The model converted back to the original modeling. ''' pass def warn_if_padding_and_no_attention_mask(self, input_ids, attention_mask): ''' Shows a one-time warning if the input_ids appear to contain padding and no attention mask was given. ''' pass @property def supports_tp_plan(self): ''' Returns whether the model has a tensor parallelism plan. ''' pass @property def tp_size(self): ''' Returns the model's tensor parallelism degree. ''' pass @property def supports_pp_plan(self): pass @property def loss_function(self): pass @loss_function.setter def loss_function(self): pass def kernelize(self): pass @property def use_kernels(self) -> bool: pass @use_kernels.setter def use_kernels(self) -> bool: pass def get_compiled_call(self, compile_config: Optional[CompileConfig]) -> Callable: '''Return a `torch.compile`'d version of `self.__call__`. This is useful to dynamically choose between non-compiled/compiled `forward` during inference, especially to switch between prefill (where we don't want to use compiled version to avoid recomputing the graph with new shapes) and iterative decoding (where we want the speed-ups of compiled version with static shapes).''' pass @classmethod def is_backend_compatible(cls): pass def _move_missing_keys_from_meta_to_cpu(self, missing_keys: list[str], unexpected_keys: list[str], dtype: Optional[torch.dtype], hf_quantizer: Optional[HfQuantizer]) -> 'PreTrainedModel': '''Move the missing keys (keys that are part of the model parameters, but were NOT found in the loaded state dicts) back from meta device to cpu. ''' pass def _initialize_missing_keys(self, loaded_keys: list[str], ignore_mismatched_sizes: bool, is_quantized: bool) -> 'PreTrainedModel': '''Initialize the missing keys (keys that are part of the model parameters, but were NOT found in the loaded state dicts), according to `_initialize_weights`. Indeed, since the corresponding weights are missing from the state dict, they will not be replaced and need to be initialized correctly (i.e. weight initialization distribution). Also take care of setting the `_is_hf_initialized` flag for keys that are not missing. ''' pass def get_parameter_or_buffer(self, target: str): ''' Return the parameter or buffer given by `target` if it exists, otherwise throw an error. 
This combines `get_parameter()` and `get_buffer()` in a single handy function. If the target is an `_extra_state` attribute, it will return the extra state provided by the module. Note that it only works if `target` is a leaf of the model. ''' pass def train(self, mode: bool=True): pass def eval(self): pass
125
54
56
6
37
12
8
0.35
5
30
6
31
51
13
67
129
3998
507
2590
572
2354
913
1476
423
1384
146
1
7
574
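The `from_pretrained` docstring in the record above specifies the string format accepted for Hub kernel references passed through `attn_implementation`: `<namespace>/<repo_name>[@<revision>][:<kernel_name>]`, optionally preceded by a `<wrapper>|` prefix that is stripped before loading. As a reading aid only, here is a minimal, hypothetical parser for that grammar; the regex and the `parse_kernel_ref` helper are illustrations, not the library's actual implementation, and the optional wrapper prefix is ignored.

```python
import re

# Illustrative pattern for <namespace>/<repo_name>[@<revision>][:<kernel_name>]
# as described in the from_pretrained docstring above (assumes no "<wrapper>|" prefix).
_KERNEL_REF = re.compile(
    r"^(?P<namespace>[^/:@]+)/(?P<repo_name>[^/:@]+)"   # org/model
    r"(?:@(?P<revision>[^:]+))?"                        # optional @branch, @tag, or @commit
    r"(?::(?P<kernel_name>.+))?$"                       # optional :function inside the kernel repo
)

def parse_kernel_ref(ref: str) -> dict:
    """Split a kernel reference into its documented parts (sketch, not the real parser)."""
    match = _KERNEL_REF.match(ref)
    if match is None:
        raise ValueError(f"not a kernel reference: {ref!r}")
    return {key: value for key, value in match.groupdict().items() if value is not None}

for example in ("org/model", "org/model@main", "org/model:custom_kernel", "org/model@v1.2.3:custom_kernel"):
    print(example, "->", parse_kernel_ref(example))
```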
488
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/albert/configuration_albert.py
transformers.models.albert.configuration_albert.AlbertConfig
from ...configuration_utils import PretrainedConfig class AlbertConfig(PretrainedConfig): """ This is the configuration class to store the configuration of a [`AlbertModel`] or a [`TFAlbertModel`]. It is used to instantiate an ALBERT model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the ALBERT [albert/albert-xxlarge-v2](https://huggingface.co/albert/albert-xxlarge-v2) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 30000): Vocabulary size of the ALBERT model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`AlbertModel`] or [`TFAlbertModel`]. embedding_size (`int`, *optional*, defaults to 128): Dimensionality of vocabulary embeddings. hidden_size (`int`, *optional*, defaults to 4096): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_hidden_groups (`int`, *optional*, defaults to 1): Number of groups for the hidden layers, parameters in the same group are shared. num_attention_heads (`int`, *optional*, defaults to 64): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 16384): The dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder. inner_group_num (`int`, *optional*, defaults to 1): The number of inner repetition of attention and ffn. hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu_new"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0): The dropout ratio for the attention probabilities. max_position_embeddings (`int`, *optional*, defaults to 512): The maximum sequence length that this model might ever be used with. Typically set this to something large (e.g., 512 or 1024 or 2048). type_vocab_size (`int`, *optional*, defaults to 2): The vocabulary size of the `token_type_ids` passed when calling [`AlbertModel`] or [`TFAlbertModel`]. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. classifier_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout ratio for attached classifiers. position_embedding_type (`str`, *optional*, defaults to `"absolute"`): Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to [Self-Attention with Relative Position Representations (Shaw et al.)](https://huggingface.co/papers/1803.02155). 
For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models with Better Relative Position Embeddings (Huang et al.)](https://huggingface.co/papers/2009.13658). pad_token_id (`int`, *optional*, defaults to 0): Padding token id. bos_token_id (`int`, *optional*, defaults to 2): Beginning of stream token id. eos_token_id (`int`, *optional*, defaults to 3): End of stream token id. Examples: ```python >>> from transformers import AlbertConfig, AlbertModel >>> # Initializing an ALBERT-xxlarge style configuration >>> albert_xxlarge_configuration = AlbertConfig() >>> # Initializing an ALBERT-base style configuration >>> albert_base_configuration = AlbertConfig( ... hidden_size=768, ... num_attention_heads=12, ... intermediate_size=3072, ... ) >>> # Initializing a model (with random weights) from the ALBERT-base style configuration >>> model = AlbertModel(albert_xxlarge_configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = 'albert' def __init__(self, vocab_size=30000, embedding_size=128, hidden_size=4096, num_hidden_layers=12, num_hidden_groups=1, num_attention_heads=64, intermediate_size=16384, inner_group_num=1, hidden_act='gelu_new', hidden_dropout_prob=0, attention_probs_dropout_prob=0, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, classifier_dropout_prob=0.1, position_embedding_type='absolute', pad_token_id=0, bos_token_id=2, eos_token_id=3, **kwargs): super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) self.vocab_size = vocab_size self.embedding_size = embedding_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_hidden_groups = num_hidden_groups self.num_attention_heads = num_attention_heads self.inner_group_num = inner_group_num self.hidden_act = hidden_act self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.classifier_dropout_prob = classifier_dropout_prob self.position_embedding_type = position_embedding_type
class AlbertConfig(PretrainedConfig): ''' This is the configuration class to store the configuration of a [`AlbertModel`] or a [`TFAlbertModel`]. It is used to instantiate an ALBERT model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the ALBERT [albert/albert-xxlarge-v2](https://huggingface.co/albert/albert-xxlarge-v2) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 30000): Vocabulary size of the ALBERT model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`AlbertModel`] or [`TFAlbertModel`]. embedding_size (`int`, *optional*, defaults to 128): Dimensionality of vocabulary embeddings. hidden_size (`int`, *optional*, defaults to 4096): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_hidden_groups (`int`, *optional*, defaults to 1): Number of groups for the hidden layers, parameters in the same group are shared. num_attention_heads (`int`, *optional*, defaults to 64): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 16384): The dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder. inner_group_num (`int`, *optional*, defaults to 1): The number of inner repetition of attention and ffn. hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu_new"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0): The dropout ratio for the attention probabilities. max_position_embeddings (`int`, *optional*, defaults to 512): The maximum sequence length that this model might ever be used with. Typically set this to something large (e.g., 512 or 1024 or 2048). type_vocab_size (`int`, *optional*, defaults to 2): The vocabulary size of the `token_type_ids` passed when calling [`AlbertModel`] or [`TFAlbertModel`]. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. classifier_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout ratio for attached classifiers. position_embedding_type (`str`, *optional*, defaults to `"absolute"`): Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to [Self-Attention with Relative Position Representations (Shaw et al.)](https://huggingface.co/papers/1803.02155). For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models with Better Relative Position Embeddings (Huang et al.)](https://huggingface.co/papers/2009.13658). 
pad_token_id (`int`, *optional*, defaults to 0): Padding token id. bos_token_id (`int`, *optional*, defaults to 2): Beginning of stream token id. eos_token_id (`int`, *optional*, defaults to 3): End of stream token id. Examples: ```python >>> from transformers import AlbertConfig, AlbertModel >>> # Initializing an ALBERT-xxlarge style configuration >>> albert_xxlarge_configuration = AlbertConfig() >>> # Initializing an ALBERT-base style configuration >>> albert_base_configuration = AlbertConfig( ... hidden_size=768, ... num_attention_heads=12, ... intermediate_size=3072, ... ) >>> # Initializing a model (with random weights) from the ALBERT-base style configuration >>> model = AlbertModel(albert_xxlarge_configuration) >>> # Accessing the model configuration >>> configuration = model.config ```''' def __init__(self, vocab_size=30000, embedding_size=128, hidden_size=4096, num_hidden_layers=12, num_hidden_groups=1, num_attention_heads=64, intermediate_size=16384, inner_group_num=1, hidden_act='gelu_new', hidden_dropout_prob=0, attention_probs_dropout_prob=0, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, classifier_dropout_prob=0.1, position_embedding_type='absolute', pad_token_id=0, bos_token_id=2, eos_token_id=3, **kwargs): pass
2
1
43
1
42
0
1
1.61
1
1
0
0
1
17
1
1
126
11
44
43
19
71
21
20
19
1
1
0
1
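The `AlbertConfig` record above already shows instantiation in its docstring. As a small complementary sketch (assuming only that `transformers` is installed), the snippet below round-trips a config through `save_pretrained`/`from_pretrained`, the persistence pattern inherited from `PretrainedConfig`; it runs offline.

```python
import tempfile

from transformers import AlbertConfig

config = AlbertConfig(hidden_size=768, num_attention_heads=12, intermediate_size=3072)

with tempfile.TemporaryDirectory() as tmp_dir:
    config.save_pretrained(tmp_dir)              # writes tmp_dir/config.json
    reloaded = AlbertConfig.from_pretrained(tmp_dir)

assert reloaded.hidden_size == 768
assert reloaded.model_type == "albert"
```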
489
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/albert/configuration_albert.py
transformers.models.albert.configuration_albert.AlbertOnnxConfig
from collections import OrderedDict from collections.abc import Mapping from ...onnx import OnnxConfig class AlbertOnnxConfig(OnnxConfig): @property def inputs(self) -> Mapping[str, Mapping[int, str]]: if self.task == 'multiple-choice': dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'} else: dynamic_axis = {0: 'batch', 1: 'sequence'} return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis)])
class AlbertOnnxConfig(OnnxConfig): @property def inputs(self) -> Mapping[str, Mapping[int, str]]: pass
3
0
12
0
12
0
2
0
1
3
0
0
1
0
1
1
14
0
14
4
11
0
6
3
4
2
1
1
2
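The entire behaviour of `AlbertOnnxConfig.inputs` in the record above is the task-dependent choice of dynamic axes. As a reading aid rather than an ONNX export recipe, this standalone sketch reproduces that branching so the two possible return values can be compared side by side.

```python
from collections import OrderedDict

def albert_onnx_inputs(task: str) -> "OrderedDict[str, dict[int, str]]":
    # Mirrors the branching in AlbertOnnxConfig.inputs shown in the record above.
    if task == "multiple-choice":
        dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
    else:
        dynamic_axis = {0: "batch", 1: "sequence"}
    return OrderedDict(
        [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
    )

print(albert_onnx_inputs("default"))
print(albert_onnx_inputs("multiple-choice"))
```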
490
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/albert/modeling_albert.py
transformers.models.albert.modeling_albert.AlbertAttention
from ...processing_utils import Unpack from .configuration_albert import AlbertConfig from torch import nn from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer import torch from typing import Callable, Optional, Union from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...utils import ModelOutput, TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging class AlbertAttention(nn.Module): def __init__(self, config: AlbertConfig): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and (not hasattr(config, 'embedding_size')): raise ValueError(f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads}') self.config = config self.num_attention_heads = config.num_attention_heads self.hidden_size = config.hidden_size self.attention_head_size = config.hidden_size // config.num_attention_heads self.all_head_size = self.num_attention_heads * self.attention_head_size self.scaling = self.attention_head_size ** (-0.5) self.attention_dropout = nn.Dropout(config.attention_probs_dropout_prob) self.output_dropout = nn.Dropout(config.hidden_dropout_prob) self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.pruned_heads = set() self.position_embedding_type = getattr(config, 'position_embedding_type', 'absolute') if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query': self.max_position_embeddings = config.max_position_embeddings self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) self.is_causal = False def prune_heads(self, heads: list[int]) -> None: if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices(heads, self.num_attention_heads, self.attention_head_size, self.pruned_heads) self.query = prune_linear_layer(self.query, index) self.key = prune_linear_layer(self.key, index) self.value = prune_linear_layer(self.value, index) self.dense = prune_linear_layer(self.dense, index, dim=1) self.num_attention_heads = self.num_attention_heads - len(heads) self.all_head_size = self.attention_head_size * self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> tuple[torch.Tensor, torch.Tensor]: input_shape = hidden_states.shape[:-1] hidden_shape = (*input_shape, -1, self.attention_head_size) query_layer = self.query(hidden_states).view(*hidden_shape).transpose(1, 2) key_layer = self.key(hidden_states).view(*hidden_shape).transpose(1, 2) value_layer = self.value(hidden_states).view(*hidden_shape).transpose(1, 2) attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != 'eager': if self.position_embedding_type != 'absolute': raise ValueError(f'You are using {self.config._attn_implementation} as attention type. However, non-absolute positional embeddings can not work with them. 
Please load the model with `attn_implementation="eager"`.') attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface(self, query_layer, key_layer, value_layer, attention_mask, dropout=0.0 if not self.training else self.attention_dropout.p, scaling=self.scaling, head_mask=head_mask, use_cache=False, **kwargs) attn_output = attn_output.reshape(*input_shape, -1).contiguous() attn_output = self.dense(attn_output) attn_output = self.output_dropout(attn_output) attn_output = self.LayerNorm(hidden_states + attn_output) return (attn_output, attn_weights)
class AlbertAttention(nn.Module): def __init__(self, config: AlbertConfig): pass def prune_heads(self, heads: list[int]) -> None: pass def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> tuple[torch.Tensor, torch.Tensor]: pass
4
0
26
4
20
2
3
0.12
1
7
1
1
4
15
4
14
110
19
82
48
71
10
70
42
65
7
1
2
13
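In the `AlbertAttention` record above, the query/key/value projections are reshaped to `(batch, num_heads, seq_len, head_dim)` before the attention kernel and merged back afterwards. The plain-PyTorch sketch below (arbitrary small sizes, not the module itself) traces just that reshape pattern.

```python
import torch

batch, seq_len, hidden_size, num_heads = 2, 5, 16, 4
head_dim = hidden_size // num_heads                      # mirrors attention_head_size above

hidden_states = torch.randn(batch, seq_len, hidden_size)
hidden_shape = (batch, seq_len, -1, head_dim)

# Same view/transpose pattern as the query/key/value projections in the record.
query = hidden_states.view(*hidden_shape).transpose(1, 2)
print(query.shape)                                       # (batch, num_heads, seq_len, head_dim) == (2, 4, 5, 4)

# After attention, outputs are merged back to (batch, seq_len, hidden_size) for the dense projection.
merged = query.transpose(1, 2).reshape(batch, seq_len, -1)
print(merged.shape)                                      # (2, 5, 16)
```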
491
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/albert/modeling_albert.py
transformers.models.albert.modeling_albert.AlbertEmbeddings
import torch from typing import Callable, Optional, Union from torch import nn from .configuration_albert import AlbertConfig class AlbertEmbeddings(nn.Module): """ Construct the embeddings from word, position and token_type embeddings. """ def __init__(self, config: AlbertConfig): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.embedding_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.embedding_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.embedding_size) self.LayerNorm = nn.LayerNorm(config.embedding_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.register_buffer('position_ids', torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False) self.position_embedding_type = getattr(config, 'position_embedding_type', 'absolute') self.register_buffer('token_type_ids', torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False) def forward(self, input_ids: Optional[torch.LongTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None) -> torch.Tensor: if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] batch_size, seq_length = input_shape if position_ids is None: position_ids = self.position_ids[:, :seq_length] if token_type_ids is None: if hasattr(self, 'token_type_ids'): buffered_token_type_ids = self.token_type_ids.expand(position_ids.shape[0], -1) buffered_token_type_ids = torch.gather(buffered_token_type_ids, dim=1, index=position_ids) token_type_ids = buffered_token_type_ids.expand(batch_size, seq_length) else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings if self.position_embedding_type == 'absolute': position_embeddings = self.position_embeddings(position_ids) embeddings += position_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings
class AlbertEmbeddings(nn.Module): ''' Construct the embeddings from word, position and token_type embeddings. ''' def __init__(self, config: AlbertConfig): pass def forward(self, input_ids: Optional[torch.LongTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None) -> torch.Tensor: pass
3
1
30
4
23
3
4
0.21
1
4
1
0
2
6
2
12
66
9
47
23
37
10
34
16
31
7
1
2
8
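The `AlbertEmbeddings` record above fills in `position_ids` and `token_type_ids` from registered buffers when they are not supplied. A minimal sketch of that behaviour follows, using a tiny config so it runs on CPU without downloads; note that the module lives on an internal import path that may move between library versions.

```python
import torch

from transformers.models.albert.configuration_albert import AlbertConfig
from transformers.models.albert.modeling_albert import AlbertEmbeddings

config = AlbertConfig(
    vocab_size=100, embedding_size=8, hidden_size=16,
    num_attention_heads=2, intermediate_size=32, max_position_embeddings=20,
)
embeddings = AlbertEmbeddings(config)

input_ids = torch.tensor([[5, 6, 7]])
output = embeddings(input_ids=input_ids)   # position_ids / token_type_ids default to the buffers
print(output.shape)                        # (batch, seq_len, embedding_size) == (1, 3, 8)
```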
492
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/albert/modeling_albert.py
transformers.models.albert.modeling_albert.AlbertForMaskedLM
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss import torch from ...utils.generic import can_return_tuple, check_model_inputs from typing import Callable, Optional, Union from ...utils import ModelOutput, TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging from ...processing_utils import Unpack @auto_docstring class AlbertForMaskedLM(AlbertPreTrainedModel): _tied_weights_keys = ['predictions.decoder.bias', 'predictions.decoder.weight'] def __init__(self, config): super().__init__(config) self.albert = AlbertModel(config, add_pooling_layer=False) self.predictions = AlbertMLMHead(config) self.post_init() def get_output_embeddings(self) -> nn.Linear: return self.predictions.decoder def set_output_embeddings(self, new_embeddings: nn.Linear) -> None: self.predictions.decoder = new_embeddings self.predictions.bias = new_embeddings.bias def get_input_embeddings(self) -> nn.Embedding: return self.albert.embeddings.word_embeddings @can_return_tuple @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[MaskedLMOutput, tuple]: """ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` Example: ```python >>> import torch >>> from transformers import AutoTokenizer, AlbertForMaskedLM >>> tokenizer = AutoTokenizer.from_pretrained("albert/albert-base-v2") >>> model = AlbertForMaskedLM.from_pretrained("albert/albert-base-v2") >>> # add mask_token >>> inputs = tokenizer("The capital of [MASK] is Paris.", return_tensors="pt") >>> with torch.no_grad(): ... logits = model(**inputs).logits >>> # retrieve index of [MASK] >>> mask_token_index = (inputs.input_ids == tokenizer.mask_token_id)[0].nonzero(as_tuple=True)[0] >>> predicted_token_id = logits[0, mask_token_index].argmax(axis=-1) >>> tokenizer.decode(predicted_token_id) 'france' ``` ```python >>> labels = tokenizer("The capital of France is Paris.", return_tensors="pt")["input_ids"] >>> labels = torch.where(inputs.input_ids == tokenizer.mask_token_id, labels, -100) >>> outputs = model(**inputs, labels=labels) >>> round(outputs.loss.item(), 2) 0.81 ``` """ outputs = self.albert(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, return_dict=True, **kwargs) sequence_outputs = outputs[0] prediction_scores = self.predictions(sequence_outputs) masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) return MaskedLMOutput(loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
@auto_docstring class AlbertForMaskedLM(AlbertPreTrainedModel): def __init__(self, config): pass def get_output_embeddings(self) -> nn.Linear: pass def set_output_embeddings(self, new_embeddings: nn.Linear) -> None: pass def get_input_embeddings(self) -> nn.Embedding: pass @can_return_tuple @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[MaskedLMOutput, tuple]: ''' labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` Example: ```python >>> import torch >>> from transformers import AutoTokenizer, AlbertForMaskedLM >>> tokenizer = AutoTokenizer.from_pretrained("albert/albert-base-v2") >>> model = AlbertForMaskedLM.from_pretrained("albert/albert-base-v2") >>> # add mask_token >>> inputs = tokenizer("The capital of [MASK] is Paris.", return_tensors="pt") >>> with torch.no_grad(): ... logits = model(**inputs).logits >>> # retrieve index of [MASK] >>> mask_token_index = (inputs.input_ids == tokenizer.mask_token_id)[0].nonzero(as_tuple=True)[0] >>> predicted_token_id = logits[0, mask_token_index].argmax(axis=-1) >>> tokenizer.decode(predicted_token_id) 'france' ``` ```python >>> labels = tokenizer("The capital of France is Paris.", return_tensors="pt")["input_ids"] >>> labels = torch.where(inputs.input_ids == tokenizer.mask_token_id, labels, -100) >>> outputs = model(**inputs, labels=labels) >>> round(outputs.loss.item(), 2) 0.81 ``` ''' pass
9
1
19
3
10
6
2
0.55
2
5
3
0
5
2
5
6
106
19
56
28
36
31
27
15
21
5
2
1
9
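The `AlbertForMaskedLM` record above already includes a complete fill-mask example in its docstring. The one detail worth spelling out is what `_tied_weights_keys`, `get_input_embeddings` and `get_output_embeddings` imply: with the default `tie_word_embeddings=True`, the MLM decoder shares its weight tensor with the input word embeddings. The small check below downloads the `albert/albert-base-v2` checkpoint referenced in the docstring; the result assumes tying has not been disabled.

```python
from transformers import AlbertForMaskedLM

model = AlbertForMaskedLM.from_pretrained("albert/albert-base-v2")

input_embeddings = model.get_input_embeddings()    # albert.embeddings.word_embeddings
output_embeddings = model.get_output_embeddings()  # predictions.decoder

# Tied weights point at the same storage, so no extra parameters are spent on the decoder matrix.
print(output_embeddings.weight.data_ptr() == input_embeddings.weight.data_ptr())  # expected: True
```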
493
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/albert/modeling_albert.py
transformers.models.albert.modeling_albert.AlbertForMultipleChoice
from torch import nn import torch from typing import Callable, Optional, Union from ...utils import ModelOutput, TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging from ...processing_utils import Unpack from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput from .configuration_albert import AlbertConfig from ...utils.generic import can_return_tuple, check_model_inputs from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss @auto_docstring class AlbertForMultipleChoice(AlbertPreTrainedModel): def __init__(self, config: AlbertConfig): super().__init__(config) self.albert = AlbertModel(config) self.dropout = nn.Dropout(config.classifier_dropout_prob) self.classifier = nn.Linear(config.hidden_size, 1) self.post_init() @can_return_tuple @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[AlbertForPreTrainingOutput, tuple]: """ input_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and [`PreTrainedTokenizer.encode`] for details. [What are input IDs?](../glossary#input-ids) token_type_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) position_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices-1]` where *num_choices* is the size of the second dimension of the input tensors. 
(see *input_ids* above) """ num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None inputs_embeds = inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None outputs = self.albert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, return_dict=True, **kwargs) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits: torch.Tensor = self.classifier(pooled_output) reshaped_logits = logits.view(-1, num_choices) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels) return MultipleChoiceModelOutput(loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
@auto_docstring class AlbertForMultipleChoice(AlbertPreTrainedModel): def __init__(self, config: AlbertConfig): pass @can_return_tuple @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[AlbertForPreTrainingOutput, tuple]: ''' input_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and [`PreTrainedTokenizer.encode`] for details. [What are input IDs?](../glossary#input-ids) token_type_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) position_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices-1]` where *num_choices* is the size of the second dimension of the input tensors. (see *input_ids* above) ''' pass
total_program_units: 6
total_doc_str: 1
AvgCountLine: 37
AvgCountLineBlank: 4
AvgCountLineCode: 29
AvgCountLineComment: 4
AvgCyclomatic: 6
CommentToCodeRatio: 0.11
CountClassBase: 1
CountClassCoupled: 7
CountClassCoupledModified: 4
CountClassDerived: 0
CountDeclInstanceMethod: 2
CountDeclInstanceVariable: 3
CountDeclMethod: 2
CountDeclMethodAll: 3
CountLine: 81
CountLineBlank: 9
CountLineCode: 65
CountLineCodeDecl: 27
CountLineCodeExe: 44
CountLineComment: 7
CountStmt: 28
CountStmtDecl: 14
CountStmtExe: 25
MaxCyclomatic: 11
MaxInheritanceTree: 2
MaxNesting: 1
SumCyclomatic: 12
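The forward pass above expects every tensor to carry an extra `num_choices` dimension before it is flattened internally. A minimal sketch of that input layout, assuming the `albert/albert-base-v2` checkpoint (whose multiple-choice head is freshly initialised, so the scores are for shape/flow illustration only) and a made-up prompt/choice pair:

```python
import torch
from transformers import AutoTokenizer, AlbertForMultipleChoice

tokenizer = AutoTokenizer.from_pretrained("albert/albert-base-v2")
model = AlbertForMultipleChoice.from_pretrained("albert/albert-base-v2")

prompt = "France is famous for its"
choices = ["cheese.", "volcanoes on the moon."]

# encode each (prompt, choice) pair, then add the batch dimension:
# every tensor ends up shaped (batch_size=1, num_choices=2, seq_len)
encoding = tokenizer([prompt] * len(choices), choices, return_tensors="pt", padding=True)
inputs = {k: v.unsqueeze(0) for k, v in encoding.items()}

labels = torch.tensor([0])  # index of the "correct" choice in this toy example
outputs = model(**inputs, labels=labels)
print(outputs.logits.shape)  # (1, 2): one score per choice
```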
id: 494
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/albert/modeling_albert.py
class_name: transformers.models.albert.modeling_albert.AlbertForPreTraining
from typing import Callable, Optional, Union from ...utils.generic import can_return_tuple, check_model_inputs from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...processing_utils import Unpack from torch import nn import torch from ...utils import ModelOutput, TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging from .configuration_albert import AlbertConfig @auto_docstring(custom_intro='\n Albert Model with two heads on top as done during the pretraining: a `masked language modeling` head and a\n `sentence order prediction (classification)` head.\n ') class AlbertForPreTraining(AlbertPreTrainedModel): _tied_weights_keys = ['predictions.decoder.bias', 'predictions.decoder.weight'] def __init__(self, config: AlbertConfig): super().__init__(config) self.albert = AlbertModel(config) self.predictions = AlbertMLMHead(config) self.sop_classifier = AlbertSOPHead(config) self.post_init() def get_output_embeddings(self) -> nn.Linear: return self.predictions.decoder def set_output_embeddings(self, new_embeddings: nn.Linear) -> None: self.predictions.decoder = new_embeddings def get_input_embeddings(self) -> nn.Embedding: return self.albert.embeddings.word_embeddings @can_return_tuple @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, sentence_order_label: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[AlbertForPreTrainingOutput, tuple]: """ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` sentence_order_label (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair (see `input_ids` docstring) Indices should be in `[0, 1]`. `0` indicates original order (sequence A, then sequence B), `1` indicates switched order (sequence B, then sequence A). 
Example: ```python >>> from transformers import AutoTokenizer, AlbertForPreTraining >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("albert/albert-base-v2") >>> model = AlbertForPreTraining.from_pretrained("albert/albert-base-v2") >>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) >>> # Batch size 1 >>> outputs = model(input_ids) >>> prediction_logits = outputs.prediction_logits >>> sop_logits = outputs.sop_logits ```""" outputs = self.albert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, return_dict=True, **kwargs) sequence_output, pooled_output = outputs[:2] prediction_scores = self.predictions(sequence_output) sop_scores = self.sop_classifier(pooled_output) total_loss = None if labels is not None and sentence_order_label is not None: loss_fct = CrossEntropyLoss() masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) sentence_order_loss = loss_fct(sop_scores.view(-1, 2), sentence_order_label.view(-1)) total_loss = masked_lm_loss + sentence_order_loss return AlbertForPreTrainingOutput(loss=total_loss, prediction_logits=prediction_scores, sop_logits=sop_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
@auto_docstring(custom_intro='\n Albert Model with two heads on top as done during the pretraining: a `masked language modeling` head and a\n `sentence order prediction (classification)` head.\n ') class AlbertForPreTraining(AlbertPreTrainedModel): def __init__(self, config: AlbertConfig): pass def get_output_embeddings(self) -> nn.Linear: pass def set_output_embeddings(self, new_embeddings: nn.Linear) -> None: pass def get_input_embeddings(self) -> nn.Embedding: pass @can_return_tuple @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, sentence_order_label: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[AlbertForPreTrainingOutput, tuple]: ''' labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` sentence_order_label (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair (see `input_ids` docstring) Indices should be in `[0, 1]`. `0` indicates original order (sequence A, then sequence B), `1` indicates switched order (sequence B, then sequence A). Example: ```python >>> from transformers import AutoTokenizer, AlbertForPreTraining >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("albert/albert-base-v2") >>> model = AlbertForPreTraining.from_pretrained("albert/albert-base-v2") >>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) >>> # Batch size 1 >>> outputs = model(input_ids) >>> prediction_logits = outputs.prediction_logits >>> sop_logits = outputs.sop_logits ```''' pass
total_program_units: 9
total_doc_str: 1
AvgCountLine: 19
AvgCountLineBlank: 3
AvgCountLineCode: 11
AvgCountLineComment: 5
AvgCyclomatic: 2
CommentToCodeRatio: 0.38
CountClassBase: 1
CountClassCoupled: 7
CountClassCoupledModified: 5
CountClassDerived: 0
CountDeclInstanceMethod: 5
CountDeclInstanceVariable: 3
CountDeclMethod: 5
CountDeclMethodAll: 6
CountLine: 103
CountLineBlank: 19
CountLineCode: 61
CountLineCodeDecl: 33
CountLineCodeExe: 40
CountLineComment: 23
CountStmt: 30
CountStmtDecl: 19
CountStmtExe: 24
MaxCyclomatic: 5
MaxInheritanceTree: 2
MaxNesting: 1
SumCyclomatic: 9
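As the forward pass above shows, the total pretraining loss is only computed when both `labels` and `sentence_order_label` are provided, in which case the masked-LM and sentence-order losses are summed. A minimal sketch, assuming the `albert/albert-base-v2` checkpoint; for brevity no tokens are actually masked here (a real pretraining loop would set non-masked label positions to `-100`):

```python
import torch
from transformers import AutoTokenizer, AlbertForPreTraining

tokenizer = AutoTokenizer.from_pretrained("albert/albert-base-v2")
model = AlbertForPreTraining.from_pretrained("albert/albert-base-v2")

inputs = tokenizer("The cat sat on the mat.", "It looked very pleased.", return_tensors="pt")

labels = inputs.input_ids.clone()         # MLM targets (unmasked, illustration only)
sentence_order_label = torch.tensor([0])  # 0 = sentences are in their original order

outputs = model(**inputs, labels=labels, sentence_order_label=sentence_order_label)
print(outputs.loss)  # masked-LM loss + sentence-order loss
print(outputs.prediction_logits.shape, outputs.sop_logits.shape)
```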
id: 495
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/albert/modeling_albert.py
class_name: transformers.models.albert.modeling_albert.AlbertForPreTrainingOutput
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging import torch from typing import Callable, Optional, Union from dataclasses import dataclass @dataclass @auto_docstring(custom_intro='\n Output type of [`AlbertForPreTraining`].\n ') class AlbertForPreTrainingOutput(ModelOutput): """ loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`): Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss. prediction_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). sop_logits (`torch.FloatTensor` of shape `(batch_size, 2)`): Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax). """ loss: Optional[torch.FloatTensor] = None prediction_logits: Optional[torch.FloatTensor] = None sop_logits: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor]] = None attentions: Optional[tuple[torch.FloatTensor]] = None
@dataclass @auto_docstring(custom_intro='\n Output type of [`AlbertForPreTraining`].\n ') class AlbertForPreTrainingOutput(ModelOutput): ''' loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`): Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss. prediction_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). sop_logits (`torch.FloatTensor` of shape `(batch_size, 2)`): Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax). ''' pass
total_program_units: 3
total_doc_str: 1
AvgCountLine: 0
AvgCountLineBlank: 0
AvgCountLineCode: 0
AvgCountLineComment: 0
AvgCyclomatic: 0
CommentToCodeRatio: 3.5
CountClassBase: 1
CountClassCoupled: 0
CountClassCoupledModified: 0
CountClassDerived: 0
CountDeclInstanceMethod: 0
CountDeclInstanceVariable: 0
CountDeclMethod: 0
CountDeclMethodAll: 0
CountLine: 31
CountLineBlank: 4
CountLineCode: 6
CountLineCodeDecl: 6
CountLineCodeExe: 5
CountLineComment: 21
CountStmt: 6
CountStmtDecl: 6
CountStmtExe: 5
MaxCyclomatic: 0
MaxInheritanceTree: 1
MaxNesting: 0
SumCyclomatic: 0
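Since the class above is a `ModelOutput` dataclass, it supports attribute access, dict-style access, and conversion to a plain tuple with `None` fields dropped. A small sketch with dummy tensors (not real model outputs):

```python
import torch
from transformers.models.albert.modeling_albert import AlbertForPreTrainingOutput

out = AlbertForPreTrainingOutput(
    loss=torch.tensor(1.23),
    prediction_logits=torch.zeros(1, 5, 30000),
    sop_logits=torch.zeros(1, 2),
)

print(out.loss)                 # attribute access
print(out["sop_logits"].shape)  # dict-style access
print(len(out.to_tuple()))      # 3: hidden_states and attentions are None and are skipped
```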
id: 496
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/albert/modeling_albert.py
class_name: transformers.models.albert.modeling_albert.AlbertForQuestionAnswering
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput from .configuration_albert import AlbertConfig from torch import nn from ...utils import ModelOutput, TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging from typing import Callable, Optional, Union from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss import torch from ...processing_utils import Unpack from ...utils.generic import can_return_tuple, check_model_inputs @auto_docstring class AlbertForQuestionAnswering(AlbertPreTrainedModel): def __init__(self, config: AlbertConfig): super().__init__(config) self.num_labels = config.num_labels self.albert = AlbertModel(config, add_pooling_layer=False) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) self.post_init() @can_return_tuple @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, start_positions: Optional[torch.LongTensor]=None, end_positions: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[AlbertForPreTrainingOutput, tuple]: outputs = self.albert(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, return_dict=True, **kwargs) sequence_output = outputs[0] logits: torch.Tensor = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 return QuestionAnsweringModelOutput(loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
@auto_docstring class AlbertForQuestionAnswering(AlbertPreTrainedModel): def __init__(self, config: AlbertConfig): pass @can_return_tuple @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, start_positions: Optional[torch.LongTensor]=None, end_positions: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[AlbertForPreTrainingOutput, tuple]: pass
total_program_units: 6
total_doc_str: 0
AvgCountLine: 41
AvgCountLineBlank: 5
AvgCountLineCode: 30
AvgCountLineComment: 7
AvgCyclomatic: 4
CommentToCodeRatio: 0.18
CountClassBase: 1
CountClassCoupled: 7
CountClassCoupledModified: 4
CountClassDerived: 0
CountDeclInstanceMethod: 2
CountDeclInstanceVariable: 3
CountDeclMethod: 2
CountDeclMethodAll: 3
CountLine: 94
CountLineBlank: 10
CountLineCode: 71
CountLineCodeDecl: 30
CountLineCodeExe: 45
CountLineComment: 13
CountStmt: 32
CountStmtDecl: 16
CountStmtExe: 29
MaxCyclomatic: 7
MaxInheritanceTree: 2
MaxNesting: 2
SumCyclomatic: 8
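The start/end logits produced above drive standard extractive QA: pick the most likely start and end tokens and decode the span between them. A sketch of that flow, using the base `albert/albert-base-v2` checkpoint purely for illustration (its QA head is freshly initialised, so the extracted span is not meaningful; a SQuAD-finetuned checkpoint would be used in practice):

```python
import torch
from transformers import AutoTokenizer, AlbertForQuestionAnswering

tokenizer = AutoTokenizer.from_pretrained("albert/albert-base-v2")
model = AlbertForQuestionAnswering.from_pretrained("albert/albert-base-v2")

question, context = "Where is the Eiffel Tower?", "The Eiffel Tower is in Paris."
inputs = tokenizer(question, context, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)

start = outputs.start_logits.argmax(dim=-1).item()  # most likely span start
end = outputs.end_logits.argmax(dim=-1).item()      # most likely span end
print(tokenizer.decode(inputs.input_ids[0, start : end + 1]))
```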
id: 497
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/albert/modeling_albert.py
class_name: transformers.models.albert.modeling_albert.AlbertForSequenceClassification
from .configuration_albert import AlbertConfig from ...utils.generic import can_return_tuple, check_model_inputs from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss import torch from ...utils import ModelOutput, TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging from typing import Callable, Optional, Union from ...processing_utils import Unpack from torch import nn @auto_docstring(custom_intro='\n Albert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled\n output) e.g. for GLUE tasks.\n ') class AlbertForSequenceClassification(AlbertPreTrainedModel): def __init__(self, config: AlbertConfig): super().__init__(config) self.num_labels = config.num_labels self.config = config self.albert = AlbertModel(config) self.dropout = nn.Dropout(config.classifier_dropout_prob) self.classifier = nn.Linear(config.hidden_size, self.config.num_labels) self.post_init() @can_return_tuple @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[SequenceClassifierOutput, tuple]: """ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ outputs = self.albert(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, return_dict=True, **kwargs) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = 'single_label_classification' else: self.config.problem_type = 'multi_label_classification' if self.config.problem_type == 'regression': loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == 'single_label_classification': loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == 'multi_label_classification': loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) return SequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
@auto_docstring(custom_intro='\n Albert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled\n output) e.g. for GLUE tasks.\n ') class AlbertForSequenceClassification(AlbertPreTrainedModel): def __init__(self, config: AlbertConfig): pass @can_return_tuple @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[SequenceClassifierOutput, tuple]: ''' labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). ''' pass
total_program_units: 6
total_doc_str: 1
AvgCountLine: 41
AvgCountLineBlank: 5
AvgCountLineCode: 33
AvgCountLineComment: 4
AvgCyclomatic: 7
CommentToCodeRatio: 0.09
CountClassBase: 1
CountClassCoupled: 6
CountClassCoupledModified: 3
CountClassDerived: 0
CountDeclInstanceMethod: 2
CountDeclInstanceVariable: 5
CountDeclMethod: 2
CountDeclMethodAll: 3
CountLine: 92
CountLineBlank: 10
CountLineCode: 75
CountLineCodeDecl: 27
CountLineCodeExe: 52
CountLineComment: 7
CountStmt: 35
CountStmtDecl: 14
CountStmtExe: 32
MaxCyclomatic: 12
MaxInheritanceTree: 2
MaxNesting: 3
SumCyclomatic: 13
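The most intricate part of the forward pass above is the `problem_type` routing: a regression, single-label, or multi-label loss is chosen from `num_labels` and the label dtype. A standalone sketch of the same rules, reproduced outside the model so they can be run and inspected in isolation (the helper name is made up):

```python
import torch
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

def classification_loss(logits, labels, num_labels, problem_type=None):
    # infer problem_type the same way the forward pass does when config.problem_type is None
    if problem_type is None:
        if num_labels == 1:
            problem_type = "regression"
        elif num_labels > 1 and labels.dtype in (torch.long, torch.int):
            problem_type = "single_label_classification"
        else:
            problem_type = "multi_label_classification"

    if problem_type == "regression":
        return MSELoss()(logits.squeeze(), labels.squeeze())
    if problem_type == "single_label_classification":
        return CrossEntropyLoss()(logits.view(-1, num_labels), labels.view(-1))
    return BCEWithLogitsLoss()(logits, labels)

print(classification_loss(torch.randn(4, 3), torch.tensor([0, 2, 1, 0]), num_labels=3))           # single-label
print(classification_loss(torch.randn(4, 1), torch.randn(4), num_labels=1))                       # regression
print(classification_loss(torch.randn(4, 3), torch.randint(0, 2, (4, 3)).float(), num_labels=3))  # multi-label
```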
id: 498
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/albert/modeling_albert.py
class_name: transformers.models.albert.modeling_albert.AlbertForTokenClassification
from .configuration_albert import AlbertConfig from ...utils import ModelOutput, TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging from ...processing_utils import Unpack from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...utils.generic import can_return_tuple, check_model_inputs from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput import torch from typing import Callable, Optional, Union from torch import nn @auto_docstring class AlbertForTokenClassification(AlbertPreTrainedModel): def __init__(self, config: AlbertConfig): super().__init__(config) self.num_labels = config.num_labels self.albert = AlbertModel(config, add_pooling_layer=False) classifier_dropout_prob = config.classifier_dropout_prob if config.classifier_dropout_prob is not None else config.hidden_dropout_prob self.dropout = nn.Dropout(classifier_dropout_prob) self.classifier = nn.Linear(config.hidden_size, self.config.num_labels) self.post_init() @can_return_tuple @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[TokenClassifierOutput, tuple]: """ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. """ outputs = self.albert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, return_dict=True, **kwargs) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) return TokenClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
@auto_docstring class AlbertForTokenClassification(AlbertPreTrainedModel): def __init__(self, config: AlbertConfig): pass @can_return_tuple @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[TokenClassifierOutput, tuple]: ''' labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. ''' pass
total_program_units: 6
total_doc_str: 1
AvgCountLine: 33
AvgCountLineBlank: 4
AvgCountLineCode: 27
AvgCountLineComment: 3
AvgCyclomatic: 4
CommentToCodeRatio: 0.08
CountClassBase: 1
CountClassCoupled: 5
CountClassCoupledModified: 3
CountClassDerived: 0
CountDeclInstanceMethod: 2
CountDeclInstanceVariable: 4
CountDeclMethod: 2
CountDeclMethodAll: 3
CountLine: 74
CountLineBlank: 9
CountLineCode: 60
CountLineCodeDecl: 27
CountLineCodeExe: 39
CountLineComment: 5
CountStmt: 23
CountStmtDecl: 14
CountStmtExe: 20
MaxCyclomatic: 5
MaxInheritanceTree: 2
MaxNesting: 1
SumCyclomatic: 7
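Token classification simply applies the classifier to every position of the sequence output, so predictions come back as one label id per token. A sketch of that flow, again using `albert/albert-base-v2` only to illustrate shapes (its token-classification head is randomly initialised, so the predicted ids are meaningless):

```python
import torch
from transformers import AutoTokenizer, AlbertForTokenClassification

tokenizer = AutoTokenizer.from_pretrained("albert/albert-base-v2")
model = AlbertForTokenClassification.from_pretrained("albert/albert-base-v2", num_labels=5)

inputs = tokenizer("Paris is the capital of France.", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # (1, seq_len, num_labels)

predictions = logits.argmax(dim=-1)[0]  # one label id per token
for token, label_id in zip(tokenizer.convert_ids_to_tokens(inputs.input_ids[0].tolist()), predictions):
    print(token, label_id.item())
```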
id: 499
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/albert/modeling_albert.py
class_name: transformers.models.albert.modeling_albert.AlbertLayer
from ...activations import ACT2FN from ...utils import ModelOutput, TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer import torch from torch import nn from ...processing_utils import Unpack from typing import Callable, Optional, Union from .configuration_albert import AlbertConfig class AlbertLayer(nn.Module): def __init__(self, config: AlbertConfig): super().__init__() self.config = config self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.full_layer_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.attention = AlbertAttention(config) self.ffn = nn.Linear(config.hidden_size, config.intermediate_size) self.ffn_output = nn.Linear(config.intermediate_size, config.hidden_size) self.activation = ACT2FN[config.hidden_act] self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> tuple[torch.Tensor, torch.Tensor]: attention_output, _ = self.attention(hidden_states, attention_mask, head_mask, **kwargs) ffn_output = apply_chunking_to_forward(self.ff_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output) hidden_states = self.full_layer_layer_norm(ffn_output + attention_output) return hidden_states def ff_chunk(self, attention_output: torch.Tensor) -> torch.Tensor: ffn_output = self.ffn(attention_output) ffn_output = self.activation(ffn_output) ffn_output = self.ffn_output(ffn_output) return ffn_output
class AlbertLayer(nn.Module): def __init__(self, config: AlbertConfig): pass def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> tuple[torch.Tensor, torch.Tensor]: pass def ff_chunk(self, attention_output: torch.Tensor) -> torch.Tensor: pass
total_program_units: 4
total_doc_str: 0
AvgCountLine: 12
AvgCountLineBlank: 1
AvgCountLineCode: 11
AvgCountLineComment: 0
AvgCyclomatic: 1
CommentToCodeRatio: 0.03
CountClassBase: 1
CountClassCoupled: 4
CountClassCoupledModified: 1
CountClassDerived: 0
CountDeclInstanceMethod: 3
CountDeclInstanceVariable: 9
CountDeclMethod: 3
CountDeclMethodAll: 13
CountLine: 39
CountLineBlank: 5
CountLineCode: 34
CountLineCodeDecl: 23
CountLineCodeExe: 23
CountLineComment: 1
CountStmt: 22
CountStmtDecl: 16
CountStmtExe: 18
MaxCyclomatic: 1
MaxInheritanceTree: 1
MaxNesting: 0
SumCyclomatic: 3
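`AlbertLayer.forward` above routes the feed-forward block through `apply_chunking_to_forward`, which splits the sequence dimension into chunks, runs `ff_chunk` on each chunk, and concatenates the results, trading a little speed for lower peak memory. A standalone sketch of that equivalence with made-up sizes:

```python
import torch
from torch import nn
from transformers.pytorch_utils import apply_chunking_to_forward

hidden_size, intermediate_size = 64, 256
ffn = nn.Linear(hidden_size, intermediate_size)
ffn_output = nn.Linear(intermediate_size, hidden_size)

def ff_chunk(attention_output):
    # same structure as AlbertLayer.ff_chunk: expand, activate, project back
    return ffn_output(nn.functional.gelu(ffn(attention_output)))

attention_output = torch.randn(2, 8, hidden_size)  # (batch, seq_len, hidden)

chunked = apply_chunking_to_forward(ff_chunk, 4, 1, attention_output)  # chunks of 4 along dim 1
unchunked = ff_chunk(attention_output)                                 # chunk_size 0 would skip chunking

print(torch.allclose(chunked, unchunked, atol=1e-6))  # True: identical result, smaller intermediate tensors
```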