| text (string, lengths 31–243k) | type (string, 1 distinct value) | start (int64, 36–275k) | end (int64, 286–280k) | depth (int64, 0–1) | filepath (string, lengths 85–188) | parent_class (string, 3 distinct values) | class_index (int64, 0–10.8k) |
|---|---|---|---|---|---|---|---|
class MobileViTV2InvertedResidual(nn.Module):
"""
Inverted residual block (MobileNetv2): https://arxiv.org/abs/1801.04381
"""
def __init__(
self, config: MobileViTV2Config, in_channels: int, out_channels: int, stride: int, dilation: int = 1
) -> None:
super().__init__()
expa... | class_definition | 4,985 | 6,510 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mobilevitv2/modeling_mobilevitv2.py | null | 6,200 |
class MobileViTV2MobileNetLayer(nn.Module):
def __init__(
self, config: MobileViTV2Config, in_channels: int, out_channels: int, stride: int = 1, num_stages: int = 1
) -> None:
super().__init__()
self.layer = nn.ModuleList()
for i in range(num_stages):
layer = MobileV... | class_definition | 6,628 | 7,389 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mobilevitv2/modeling_mobilevitv2.py | null | 6,201 |
class MobileViTV2LinearSelfAttention(nn.Module):
"""
This layer applies a self-attention with linear complexity, as described in MobileViTV2 paper:
https://arxiv.org/abs/2206.02680
Args:
config (`MobileVitv2Config`):
Model configuration object
embed_dim (`int`):
... | class_definition | 7,392 | 10,144 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mobilevitv2/modeling_mobilevitv2.py | null | 6,202 |
class MobileViTV2FFN(nn.Module):
def __init__(
self,
config: MobileViTV2Config,
embed_dim: int,
ffn_latent_dim: int,
ffn_dropout: float = 0.0,
) -> None:
super().__init__()
self.conv1 = MobileViTV2ConvLayer(
config=config,
in_channe... | class_definition | 10,147 | 11,373 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mobilevitv2/modeling_mobilevitv2.py | null | 6,203 |
class MobileViTV2TransformerLayer(nn.Module):
def __init__(
self,
config: MobileViTV2Config,
embed_dim: int,
ffn_latent_dim: int,
dropout: float = 0.0,
) -> None:
super().__init__()
self.layernorm_before = nn.GroupNorm(num_groups=1, num_channels=embed_dim,... | class_definition | 11,376 | 12,477 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mobilevitv2/modeling_mobilevitv2.py | null | 6,204 |
class MobileViTV2Transformer(nn.Module):
def __init__(self, config: MobileViTV2Config, n_layers: int, d_model: int) -> None:
super().__init__()
ffn_multiplier = config.ffn_multiplier
ffn_dims = [ffn_multiplier * d_model] * n_layers
# ensure that dims are multiple of 16
ffn... | class_definition | 12,480 | 13,323 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mobilevitv2/modeling_mobilevitv2.py | null | 6,205 |
class MobileViTV2Layer(nn.Module):
"""
MobileViTV2 layer: https://arxiv.org/abs/2206.02680
"""
def __init__(
self,
config: MobileViTV2Config,
in_channels: int,
out_channels: int,
attn_unit_dim: int,
n_attn_blocks: int = 2,
dilation: int = 1,
... | class_definition | 13,326 | 17,238 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mobilevitv2/modeling_mobilevitv2.py | null | 6,206 |
class MobileViTV2Encoder(nn.Module):
def __init__(self, config: MobileViTV2Config) -> None:
super().__init__()
self.config = config
self.layer = nn.ModuleList()
self.gradient_checkpointing = False
# segmentation architectures like DeepLab and PSPNet modify the strides
... | class_definition | 17,241 | 21,014 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mobilevitv2/modeling_mobilevitv2.py | null | 6,207 |
class MobileViTV2PreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = MobileViTV2Config
base_model_prefix = "mobilevitv2"
main_input_name = "pixel_values"
support... | class_definition | 21,156 | 22,186 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mobilevitv2/modeling_mobilevitv2.py | null | 6,208 |
class MobileViTV2Model(MobileViTV2PreTrainedModel):
def __init__(self, config: MobileViTV2Config, expand_output: bool = True):
super().__init__(config)
self.config = config
self.expand_output = expand_output
layer_0_dim = make_divisible(
clip(value=32 * config.width_mult... | class_definition | 23,595 | 26,852 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mobilevitv2/modeling_mobilevitv2.py | null | 6,209 |
class MobileViTV2ForImageClassification(MobileViTV2PreTrainedModel):
def __init__(self, config: MobileViTV2Config) -> None:
super().__init__(config)
self.num_labels = config.num_labels
self.mobilevitv2 = MobileViTV2Model(config)
out_channels = make_divisible(512 * config.width_mult... | class_definition | 27,064 | 30,514 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mobilevitv2/modeling_mobilevitv2.py | null | 6,210 |
class MobileViTV2ASPPPooling(nn.Module):
def __init__(self, config: MobileViTV2Config, in_channels: int, out_channels: int) -> None:
super().__init__()
self.global_pool = nn.AdaptiveAvgPool2d(output_size=1)
self.conv_1x1 = MobileViTV2ConvLayer(
config,
in_channels=i... | class_definition | 30,629 | 31,461 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mobilevitv2/modeling_mobilevitv2.py | null | 6,211 |
class MobileViTV2ASPP(nn.Module):
"""
ASPP module defined in DeepLab papers: https://arxiv.org/abs/1606.00915, https://arxiv.org/abs/1706.05587
"""
def __init__(self, config: MobileViTV2Config) -> None:
super().__init__()
encoder_out_channels = make_divisible(512 * config.width_multipl... | class_definition | 31,464 | 33,416 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mobilevitv2/modeling_mobilevitv2.py | null | 6,212 |
class MobileViTV2DeepLabV3(nn.Module):
"""
DeepLabv3 architecture: https://arxiv.org/abs/1706.05587
"""
def __init__(self, config: MobileViTV2Config) -> None:
super().__init__()
self.aspp = MobileViTV2ASPP(config)
self.dropout = nn.Dropout2d(config.classifier_dropout_prob)
... | class_definition | 33,529 | 34,366 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mobilevitv2/modeling_mobilevitv2.py | null | 6,213 |
class MobileViTV2ForSemanticSegmentation(MobileViTV2PreTrainedModel):
def __init__(self, config: MobileViTV2Config) -> None:
super().__init__(config)
self.num_labels = config.num_labels
self.mobilevitv2 = MobileViTV2Model(config, expand_output=False)
self.segmentation_head = MobileV... | class_definition | 34,529 | 38,204 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mobilevitv2/modeling_mobilevitv2.py | null | 6,214 |
class ModernBertConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a [`ModernBertModel`]. It is used to instantiate an ModernBert
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yi... | class_definition | 1,567 | 11,276 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/modernbert/configuration_modernbert.py | null | 6,215 |
class ApplyRotaryEmbUnpad(torch.autograd.Function):
@staticmethod
def forward(
ctx,
qkv,
cos,
sin,
cu_seqlens: Optional[torch.Tensor] = None,
max_seqlen: Optional[int] = None,
):
# (total_nnz, 3, nheads, headdim)
qkv = qkv.contiguous()
... | class_definition | 2,704 | 4,358 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/modernbert/modeling_modernbert.py | null | 6,216 |
class ModernBertUnpaddedRotaryEmbedding(RotaryEmbedding):
"""
The rotary position embeddings applied directly to unpadded sequences.
"""
def __init__(
self,
dim: int,
base: float = 10000.0,
max_seqlen: Optional[int] = None,
device: Optional[torch.device] = None,
... | class_definition | 5,301 | 7,149 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/modernbert/modeling_modernbert.py | null | 6,217 |
class ModernBertEmbeddings(nn.Module):
"""
Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.
"""
def __init__(self, config: ModernBertConfig):
super().__init__()
self.config = config
self.tok_embeddings = nn.Embedding(config.vocab_size, config.hidden_s... | class_definition | 7,152 | 8,345 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/modernbert/modeling_modernbert.py | null | 6,218 |
class ModernBertMLP(nn.Module):
"""Applies the GLU at the end of each ModernBERT layer.
Compared to the default BERT architecture, this block replaces :class:`~transformers.model.bert.modeling_bert.BertIntermediate`
and :class:`~transformers.model.bert.modeling_bert.SelfOutput` with a single module that ha... | class_definition | 8,348 | 9,300 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/modernbert/modeling_modernbert.py | null | 6,219 |
class ModernBertRotaryEmbedding(nn.Module):
def __init__(self, config: ModernBertConfig, dim: int, base: float, device: Optional[torch.device] = None):
super().__init__()
# BC: "rope_type" was originally "type"
if hasattr(config, "rope_scaling") and config.rope_scaling is not None:
... | class_definition | 9,303 | 12,569 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/modernbert/modeling_modernbert.py | null | 6,220 |
class ModernBertAttention(nn.Module):
"""Performs multi-headed self attention on a batch of unpadded sequences.
If Flash Attention 2 is installed, this module uses Flash Attention to improve throughput.
If Flash Attention 2 is not installed, the implementation will use PyTorch's SDPA kernel,
which requ... | class_definition | 18,468 | 21,868 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/modernbert/modeling_modernbert.py | null | 6,221 |
class ModernBertEncoderLayer(nn.Module):
def __init__(self, config: ModernBertConfig, layer_id: Optional[int] = None):
super().__init__()
self.config = config
if layer_id == 0:
self.attn_norm = nn.Identity()
else:
self.attn_norm = nn.LayerNorm(config.hidden_si... | class_definition | 21,871 | 23,733 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/modernbert/modeling_modernbert.py | null | 6,222 |
class ModernBertPreTrainedModel(PreTrainedModel):
config_class = ModernBertConfig
base_model_prefix = "model"
supports_gradient_checkpointing = True
_no_split_modules = ["ModernBertEmbeddings", "ModernBertEncoderLayer"]
_supports_flash_attn_2 = True
_supports_sdpa = True
_supports_flex_attn ... | class_definition | 24,775 | 30,045 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/modernbert/modeling_modernbert.py | null | 6,223 |
class ModernBertModel(ModernBertPreTrainedModel):
def __init__(self, config: ModernBertConfig):
super().__init__(config)
self.config = config
self.embeddings = ModernBertEmbeddings(config)
self.layers = nn.ModuleList(
[ModernBertEncoderLayer(config, layer_id) for layer_id... | class_definition | 36,786 | 44,528 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/modernbert/modeling_modernbert.py | null | 6,224 |
class ModernBertPredictionHead(nn.Module):
def __init__(self, config: ModernBertConfig):
super().__init__()
self.config = config
self.dense = nn.Linear(config.hidden_size, config.hidden_size, config.classifier_bias)
self.act = ACT2FN[config.classifier_activation]
self.norm = ... | class_definition | 44,531 | 45,058 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/modernbert/modeling_modernbert.py | null | 6,225 |
class ModernBertForMaskedLM(ModernBertPreTrainedModel):
_tied_weights_keys = ["decoder.weight"]
def __init__(self, config: ModernBertConfig):
super().__init__(config)
self.config = config
self.model = ModernBertModel(config)
self.head = ModernBertPredictionHead(config)
s... | class_definition | 45,216 | 50,560 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/modernbert/modeling_modernbert.py | null | 6,226 |
class ModernBertForSequenceClassification(ModernBertPreTrainedModel):
def __init__(self, config: ModernBertConfig):
super().__init__(config)
self.num_labels = config.num_labels
self.config = config
self.model = ModernBertModel(config)
self.head = ModernBertPredictionHead(con... | class_definition | 50,714 | 55,376 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/modernbert/modeling_modernbert.py | null | 6,227 |
class ModernBertForTokenClassification(ModernBertPreTrainedModel):
def __init__(self, config: ModernBertConfig):
super().__init__(config)
self.num_labels = config.num_labels
self.model = ModernBertModel(config)
self.head = ModernBertPredictionHead(config)
self.drop = torch.n... | class_definition | 55,552 | 58,712 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/modernbert/modeling_modernbert.py | null | 6,228 |
class ModernBertConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a [`ModernBertModel`]. It is used to instantiate an ModernBert
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yi... | class_definition | 1,955 | 11,664 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/modernbert/modular_modernbert.py | null | 6,229 |
class ApplyRotaryEmbUnpad(torch.autograd.Function):
@staticmethod
def forward(
ctx,
qkv,
cos,
sin,
cu_seqlens: Optional[torch.Tensor] = None,
max_seqlen: Optional[int] = None,
):
# (total_nnz, 3, nheads, headdim)
qkv = qkv.contiguous()
... | class_definition | 14,380 | 16,034 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/modernbert/modular_modernbert.py | null | 6,230 |
class ModernBertUnpaddedRotaryEmbedding(RotaryEmbedding):
"""
The rotary position embeddings applied directly to unpadded sequences.
"""
def __init__(
self,
dim: int,
base: float = 10000.0,
max_seqlen: Optional[int] = None,
device: Optional[torch.device] = None,
... | class_definition | 16,977 | 18,825 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/modernbert/modular_modernbert.py | null | 6,231 |
class ModernBertEmbeddings(nn.Module):
"""
Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.
"""
def __init__(self, config: ModernBertConfig):
super().__init__()
self.config = config
self.tok_embeddings = nn.Embedding(config.vocab_size, config.hidden_s... | class_definition | 18,828 | 20,021 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/modernbert/modular_modernbert.py | null | 6,232 |
class ModernBertMLP(nn.Module):
"""Applies the GLU at the end of each ModernBERT layer.
Compared to the default BERT architecture, this block replaces :class:`~transformers.model.bert.modeling_bert.BertIntermediate`
and :class:`~transformers.model.bert.modeling_bert.SelfOutput` with a single module that ha... | class_definition | 20,024 | 20,976 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/modernbert/modular_modernbert.py | null | 6,233 |
class ModernBertRotaryEmbedding(GemmaRotaryEmbedding):
def __init__(self, config: ModernBertConfig, dim: int, base: float, device: Optional[torch.device] = None):
super().__init__(self, config=config, device=device)
inv_freq, self.attention_scaling = self.rope_init_fn(None, device, dim=dim, base=bas... | class_definition | 20,979 | 21,301 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/modernbert/modular_modernbert.py | null | 6,234 |
class ModernBertAttention(nn.Module):
"""Performs multi-headed self attention on a batch of unpadded sequences.
If Flash Attention 2 is installed, this module uses Flash Attention to improve throughput.
If Flash Attention 2 is not installed, the implementation will use PyTorch's SDPA kernel,
which requ... | class_definition | 25,475 | 28,875 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/modernbert/modular_modernbert.py | null | 6,235 |
class ModernBertEncoderLayer(nn.Module):
def __init__(self, config: ModernBertConfig, layer_id: Optional[int] = None):
super().__init__()
self.config = config
if layer_id == 0:
self.attn_norm = nn.Identity()
else:
self.attn_norm = nn.LayerNorm(config.hidden_si... | class_definition | 28,878 | 30,740 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/modernbert/modular_modernbert.py | null | 6,236 |
class ModernBertPreTrainedModel(PreTrainedModel):
config_class = ModernBertConfig
base_model_prefix = "model"
supports_gradient_checkpointing = True
_no_split_modules = ["ModernBertEmbeddings", "ModernBertEncoderLayer"]
_supports_flash_attn_2 = True
_supports_sdpa = True
_supports_flex_attn ... | class_definition | 31,782 | 37,052 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/modernbert/modular_modernbert.py | null | 6,237 |
class ModernBertModel(ModernBertPreTrainedModel):
def __init__(self, config: ModernBertConfig):
super().__init__(config)
self.config = config
self.embeddings = ModernBertEmbeddings(config)
self.layers = nn.ModuleList(
[ModernBertEncoderLayer(config, layer_id) for layer_id... | class_definition | 41,080 | 48,822 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/modernbert/modular_modernbert.py | null | 6,238 |
class ModernBertPredictionHead(nn.Module):
def __init__(self, config: ModernBertConfig):
super().__init__()
self.config = config
self.dense = nn.Linear(config.hidden_size, config.hidden_size, config.classifier_bias)
self.act = ACT2FN[config.classifier_activation]
self.norm = ... | class_definition | 48,825 | 49,352 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/modernbert/modular_modernbert.py | null | 6,239 |
class ModernBertForMaskedLM(ModernBertPreTrainedModel):
_tied_weights_keys = ["decoder.weight"]
def __init__(self, config: ModernBertConfig):
super().__init__(config)
self.config = config
self.model = ModernBertModel(config)
self.head = ModernBertPredictionHead(config)
s... | class_definition | 49,510 | 54,854 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/modernbert/modular_modernbert.py | null | 6,240 |
class ModernBertForSequenceClassification(ModernBertPreTrainedModel):
def __init__(self, config: ModernBertConfig):
super().__init__(config)
self.num_labels = config.num_labels
self.config = config
self.model = ModernBertModel(config)
self.head = ModernBertPredictionHead(con... | class_definition | 55,008 | 59,670 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/modernbert/modular_modernbert.py | null | 6,241 |
class ModernBertForTokenClassification(ModernBertPreTrainedModel):
def __init__(self, config: ModernBertConfig):
super().__init__(config)
self.num_labels = config.num_labels
self.model = ModernBertModel(config)
self.head = ModernBertPredictionHead(config)
self.drop = torch.n... | class_definition | 59,846 | 63,006 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/modernbert/modular_modernbert.py | null | 6,242 |
class CodeGenAttention(nn.Module):
def __init__(self, config, layer_idx=None):
super().__init__()
max_positions = config.max_position_embeddings
self.attn_dropout = nn.Dropout(config.attn_pdrop)
self.resid_dropout = nn.Dropout(config.resid_pdrop)
self.layer_idx = layer_idx
... | class_definition | 2,550 | 9,439 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/codegen/modeling_codegen.py | null | 6,243 |
class CodeGenMLP(nn.Module):
def __init__(self, intermediate_size, config): # in MLP: intermediate_size= 4 * embed_dim
super().__init__()
embed_dim = config.n_embd
self.fc_in = nn.Linear(embed_dim, intermediate_size)
self.fc_out = nn.Linear(intermediate_size, embed_dim)
se... | class_definition | 9,522 | 10,258 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/codegen/modeling_codegen.py | null | 6,244 |
class CodeGenBlock(nn.Module):
# Ignore copy
def __init__(self, config, layer_idx=None):
super().__init__()
inner_dim = config.n_inner if config.n_inner is not None else 4 * config.n_embd
self.ln_1 = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
self.attn = CodeGenAt... | class_definition | 10,343 | 12,187 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/codegen/modeling_codegen.py | null | 6,245 |
class CodeGenPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = CodeGenConfig
base_model_prefix = "transformer"
supports_gradient_checkpointing = True
_no_split... | class_definition | 12,190 | 13,602 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/codegen/modeling_codegen.py | null | 6,246 |
class CodeGenModel(CodeGenPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.embed_dim = config.n_embd
self.vocab_size = config.vocab_size
self.wte = nn.Embedding(config.vocab_size, self.embed_dim)
self.drop = nn.Dropout(config.embd_pdrop)
se... | class_definition | 18,736 | 31,824 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/codegen/modeling_codegen.py | null | 6,247 |
class CodeGenForCausalLM(CodeGenPreTrainedModel, GenerationMixin):
_tied_weights_keys = ["lm_head.weight"]
def __init__(self, config):
super().__init__(config)
self.transformer = CodeGenModel(config)
self.lm_head = nn.Linear(config.n_embd, config.vocab_size)
# Initialize weight... | class_definition | 31,970 | 36,649 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/codegen/modeling_codegen.py | null | 6,248 |
class CodeGenConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a [`CodeGenModel`]. It is used to instantiate a
CodeGen model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a simi... | class_definition | 1,022 | 6,385 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/codegen/configuration_codegen.py | null | 6,249 |
class CodeGenOnnxConfig(OnnxConfigWithPast):
def __init__(
self,
config: PretrainedConfig,
task: str = "default",
patching_specs: List[PatchingSpec] = None,
use_past: bool = False,
):
super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_... | class_definition | 6,461 | 9,491 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/codegen/configuration_codegen.py | null | 6,250 |
class CodeGenTokenizer(PreTrainedTokenizer):
"""
Construct a CodeGen tokenizer. Based on byte-level Byte-Pair-Encoding.
This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will
be encoded differently whether it is at the beginning of the sentenc... | class_definition | 2,542 | 16,529 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/codegen/tokenization_codegen.py | null | 6,251 |
class CodeGenTokenizerFast(PreTrainedTokenizerFast):
"""
Construct a "fast" CodeGen tokenizer (backed by HuggingFace's *tokenizers* library). Based on byte-level
Byte-Pair-Encoding.
This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will
be... | class_definition | 1,316 | 10,928 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/codegen/tokenization_codegen_fast.py | null | 6,252 |
class DeiTFeatureExtractor(DeiTImageProcessor):
def __init__(self, *args, **kwargs) -> None:
warnings.warn(
"The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use DeiTImageProcessor instead.",
FutureWarning,
)... | class_definition | 809 | 1,171 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deit/feature_extraction_deit.py | null | 6,253 |
class DeiTEmbeddings(nn.Module):
"""
Construct the CLS token, distillation token, position and patch embeddings. Optionally, also the mask token.
"""
def __init__(self, config: DeiTConfig, use_mask_token: bool = False) -> None:
super().__init__()
self.cls_token = nn.Parameter(torch.zer... | class_definition | 1,868 | 5,789 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deit/modeling_deit.py | null | 6,254 |
class DeiTPatchEmbeddings(nn.Module):
"""
This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
`hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
Transformer.
"""
def __init__(self, config):
... | class_definition | 5,792 | 7,295 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deit/modeling_deit.py | null | 6,255 |
class DeiTSelfAttention(nn.Module):
def __init__(self, config: DeiTConfig) -> None:
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
raise ValueError(
f"The hidden size {config.hidden_size,} is not a mul... | class_definition | 7,381 | 10,223 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deit/modeling_deit.py | null | 6,256 |
class DeiTSdpaSelfAttention(DeiTSelfAttention):
def __init__(self, config: DeiTConfig) -> None:
super().__init__(config)
self.attention_probs_dropout_prob = config.attention_probs_dropout_prob
def forward(
self,
hidden_states: torch.FloatTensor,
head_mask: Optional[torch... | class_definition | 10,313 | 12,355 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deit/modeling_deit.py | null | 6,257 |
class DeiTSelfOutput(nn.Module):
"""
The residual connection is defined in DeiTLayer instead of here (as is the case with other models), due to the
layernorm applied before each block.
"""
def __init__(self, config: DeiTConfig) -> None:
super().__init__()
self.dense = nn.Linear(conf... | class_definition | 12,438 | 13,084 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deit/modeling_deit.py | null | 6,258 |
class DeiTAttention(nn.Module):
def __init__(self, config: DeiTConfig) -> None:
super().__init__()
self.attention = DeiTSelfAttention(config)
self.output = DeiTSelfOutput(config)
self.pruned_heads = set()
def prune_heads(self, heads: Set[int]) -> None:
if len(heads) == 0... | class_definition | 13,166 | 14,847 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deit/modeling_deit.py | null | 6,259 |
class DeiTSdpaAttention(DeiTAttention):
def __init__(self, config: DeiTConfig) -> None:
super().__init__(config)
self.attention = DeiTSdpaSelfAttention(config) | class_definition | 14,933 | 15,112 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deit/modeling_deit.py | null | 6,260 |
class DeiTIntermediate(nn.Module):
def __init__(self, config: DeiTConfig) -> None:
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
... | class_definition | 15,197 | 15,783 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deit/modeling_deit.py | null | 6,261 |
class DeiTOutput(nn.Module):
def __init__(self, config: DeiTConfig) -> None:
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.T... | class_definition | 15,862 | 16,391 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deit/modeling_deit.py | null | 6,262 |
class DeiTLayer(nn.Module):
"""This corresponds to the Block class in the timm implementation."""
def __init__(self, config: DeiTConfig) -> None:
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = DEIT_ATTENTION... | class_definition | 16,569 | 18,290 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deit/modeling_deit.py | null | 6,263 |
class DeiTEncoder(nn.Module):
def __init__(self, config: DeiTConfig) -> None:
super().__init__()
self.config = config
self.layer = nn.ModuleList([DeiTLayer(config) for _ in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
def forward(
self,
h... | class_definition | 18,370 | 20,294 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deit/modeling_deit.py | null | 6,264 |
class DeiTPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = DeiTConfig
base_model_prefix = "deit"
main_input_name = "pixel_values"
supports_gradient_checkpoint... | class_definition | 20,297 | 21,423 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deit/modeling_deit.py | null | 6,265 |
class DeiTModel(DeiTPreTrainedModel):
def __init__(self, config: DeiTConfig, add_pooling_layer: bool = True, use_mask_token: bool = False) -> None:
super().__init__(config)
self.config = config
self.embeddings = DeiTEmbeddings(config, use_mask_token=use_mask_token)
self.encoder = De... | class_definition | 23,474 | 27,682 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deit/modeling_deit.py | null | 6,266 |
class DeiTPooler(nn.Module):
def __init__(self, config: DeiTConfig):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state correspond... | class_definition | 27,761 | 28,302 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deit/modeling_deit.py | null | 6,267 |
class DeiTForMaskedImageModeling(DeiTPreTrainedModel):
def __init__(self, config: DeiTConfig) -> None:
super().__init__(config)
self.deit = DeiTModel(config, add_pooling_layer=False, use_mask_token=True)
self.decoder = nn.Sequential(
nn.Conv2d(
in_channels=confi... | class_definition | 28,711 | 33,311 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deit/modeling_deit.py | null | 6,268 |
class DeiTForImageClassification(DeiTPreTrainedModel):
def __init__(self, config: DeiTConfig) -> None:
super().__init__(config)
self.num_labels = config.num_labels
self.deit = DeiTModel(config, add_pooling_layer=False)
# Classifier head
self.classifier = nn.Linear(config.hi... | class_definition | 33,542 | 38,321 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deit/modeling_deit.py | null | 6,269 |
class DeiTForImageClassificationWithTeacherOutput(ModelOutput):
"""
Output type of [`DeiTForImageClassificationWithTeacher`].
Args:
logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
Prediction scores as the average of the cls_logits and distillation logits.
... | class_definition | 38,335 | 40,246 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deit/modeling_deit.py | null | 6,270 |
class DeiTForImageClassificationWithTeacher(DeiTPreTrainedModel):
def __init__(self, config: DeiTConfig) -> None:
super().__init__(config)
self.num_labels = config.num_labels
self.deit = DeiTModel(config, add_pooling_layer=False)
# Classifier heads
self.cls_classifier = (
... | class_definition | 40,704 | 43,212 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deit/modeling_deit.py | null | 6,271 |
class DeiTImageProcessor(BaseImageProcessor):
r"""
Constructs a DeiT image processor.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by
`do_resize` in `preprocess`.... | class_definition | 1,370 | 15,143 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deit/image_processing_deit.py | null | 6,272 |
class TFDeiTForImageClassificationWithTeacherOutput(ModelOutput):
"""
Output type of [`DeiTForImageClassificationWithTeacher`].
Args:
logits (`tf.Tensor` of shape `(batch_size, config.num_labels)`):
Prediction scores as the average of the cls_logits and distillation logits.
cls_... | class_definition | 1,897 | 3,708 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deit/modeling_tf_deit.py | null | 6,273 |
class TFDeiTEmbeddings(keras.layers.Layer):
"""
Construct the CLS token, distillation token, position and patch embeddings. Optionally, also the mask token.
"""
def __init__(self, config: DeiTConfig, use_mask_token: bool = False, **kwargs) -> None:
super().__init__(**kwargs)
self.config... | class_definition | 3,711 | 8,451 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deit/modeling_tf_deit.py | null | 6,274 |
class TFDeiTPatchEmbeddings(keras.layers.Layer):
"""
This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
`hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
Transformer.
"""
def __init__(self, ... | class_definition | 8,454 | 10,473 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deit/modeling_tf_deit.py | null | 6,275 |
class TFDeiTSelfAttention(keras.layers.Layer):
def __init__(self, config: DeiTConfig, **kwargs):
super().__init__(**kwargs)
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
f"The hidden size ({config.hidden_size}) is not a multiple of the number... | class_definition | 10,564 | 15,018 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deit/modeling_tf_deit.py | null | 6,276 |
class TFDeiTSelfOutput(keras.layers.Layer):
"""
The residual connection is defined in TFDeiTLayer instead of here (as is the case with other models), due to the
layernorm applied before each block.
"""
def __init__(self, config: DeiTConfig, **kwargs):
super().__init__(**kwargs)
sel... | class_definition | 15,106 | 16,240 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deit/modeling_tf_deit.py | null | 6,277 |
class TFDeiTAttention(keras.layers.Layer):
def __init__(self, config: DeiTConfig, **kwargs):
super().__init__(**kwargs)
self.self_attention = TFDeiTSelfAttention(config, name="attention")
self.dense_output = TFDeiTSelfOutput(config, name="output")
def prune_heads(self, heads):
... | class_definition | 16,327 | 17,725 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deit/modeling_tf_deit.py | null | 6,278 |
class TFDeiTIntermediate(keras.layers.Layer):
def __init__(self, config: DeiTConfig, **kwargs):
super().__init__(**kwargs)
self.dense = keras.layers.Dense(
units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
)
if is... | class_definition | 17,815 | 18,837 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deit/modeling_tf_deit.py | null | 6,279 |
class TFDeiTOutput(keras.layers.Layer):
def __init__(self, config: DeiTConfig, **kwargs):
super().__init__(**kwargs)
self.dense = keras.layers.Dense(
units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
)
self.dropout = ker... | class_definition | 18,921 | 19,935 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deit/modeling_tf_deit.py | null | 6,280 |
class TFDeiTLayer(keras.layers.Layer):
"""This corresponds to the Block class in the timm implementation."""
def __init__(self, config: DeiTConfig, **kwargs):
super().__init__(**kwargs)
self.attention = TFDeiTAttention(config, name="attention")
self.intermediate = TFDeiTIntermediate(co... | class_definition | 19,938 | 22,818 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deit/modeling_tf_deit.py | null | 6,281 |
class TFDeiTEncoder(keras.layers.Layer):
def __init__(self, config: DeiTConfig, **kwargs):
super().__init__(**kwargs)
self.layer = [TFDeiTLayer(config, name=f"layer_._{i}") for i in range(config.num_hidden_layers)]
def call(
self,
hidden_states: tf.Tensor,
head_mask: tf... | class_definition | 22,903 | 24,776 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deit/modeling_tf_deit.py | null | 6,282 |
class TFDeiTMainLayer(keras.layers.Layer):
config_class = DeiTConfig
def __init__(
self, config: DeiTConfig, add_pooling_layer: bool = True, use_mask_token: bool = False, **kwargs
) -> None:
super().__init__(**kwargs)
self.config = config
self.embeddings = TFDeiTEmbeddings(... | class_definition | 24,799 | 29,508 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deit/modeling_tf_deit.py | null | 6,283 |
class TFDeiTPreTrainedModel(TFPreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = DeiTConfig
base_model_prefix = "deit"
main_input_name = "pixel_values" | class_definition | 29,612 | 29,903 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deit/modeling_tf_deit.py | null | 6,284 |
class TFDeiTModel(TFDeiTPreTrainedModel):
def __init__(
self, config: DeiTConfig, add_pooling_layer: bool = True, use_mask_token: bool = False, **kwargs
) -> None:
super().__init__(config, **kwargs)
self.deit = TFDeiTMainLayer(
config, add_pooling_layer=add_pooling_layer, us... | class_definition | 31,950 | 33,757 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deit/modeling_tf_deit.py | null | 6,285 |
class TFDeiTPooler(keras.layers.Layer):
def __init__(self, config: DeiTConfig, **kwargs):
super().__init__(**kwargs)
self.dense = keras.layers.Dense(
units=config.hidden_size,
kernel_initializer=get_initializer(config.initializer_range),
activation="tanh",
... | class_definition | 33,841 | 34,810 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deit/modeling_tf_deit.py | null | 6,286 |
class TFDeitPixelShuffle(keras.layers.Layer):
"""TF layer implementation of torch.nn.PixelShuffle"""
def __init__(self, upscale_factor: int, **kwargs) -> None:
super().__init__(**kwargs)
if not isinstance(upscale_factor, int) or upscale_factor < 2:
raise ValueError(f"upscale_factor ... | class_definition | 34,813 | 36,260 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deit/modeling_tf_deit.py | null | 6,287 |
class TFDeitDecoder(keras.layers.Layer):
def __init__(self, config: DeiTConfig, **kwargs) -> None:
super().__init__(**kwargs)
self.conv2d = keras.layers.Conv2D(
filters=config.encoder_stride**2 * config.num_channels, kernel_size=1, name="0"
)
self.pixel_shuffle = TFDeitPi... | class_definition | 36,263 | 37,357 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deit/modeling_tf_deit.py | null | 6,288 |
class TFDeiTForMaskedImageModeling(TFDeiTPreTrainedModel):
def __init__(self, config: DeiTConfig) -> None:
super().__init__(config)
self.deit = TFDeiTMainLayer(config, add_pooling_layer=False, use_mask_token=True, name="deit")
self.decoder = TFDeitDecoder(config, name="decoder")
@unpac... | class_definition | 37,544 | 43,172 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deit/modeling_tf_deit.py | null | 6,289 |
class TFDeiTForImageClassification(TFDeiTPreTrainedModel, TFSequenceClassificationLoss):
def __init__(self, config: DeiTConfig):
super().__init__(config)
self.num_labels = config.num_labels
self.deit = TFDeiTMainLayer(config, add_pooling_layer=False, name="deit")
# Classifier head
... | class_definition | 43,403 | 47,706 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deit/modeling_tf_deit.py | null | 6,290 |
class TFDeiTForImageClassificationWithTeacher(TFDeiTPreTrainedModel):
def __init__(self, config: DeiTConfig) -> None:
super().__init__(config)
self.num_labels = config.num_labels
self.deit = TFDeiTMainLayer(config, add_pooling_layer=False, name="deit")
# Classifier heads
se... | class_definition | 48,166 | 51,568 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deit/modeling_tf_deit.py | null | 6,291 |
class DeiTConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a [`DeiTModel`]. It is used to instantiate an DeiT
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar conf... | class_definition | 939 | 5,294 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deit/configuration_deit.py | null | 6,292 |
class DeiTOnnxConfig(OnnxConfig):
torch_onnx_minimum_version = version.parse("1.11")
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
]
)
... | class_definition | 5,297 | 5,694 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/deit/configuration_deit.py | null | 6,293 |
class DPTConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a [`DPTModel`]. It is used to instantiate an DPT
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configu... | class_definition | 943 | 14,041 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/dpt/configuration_dpt.py | null | 6,294 |
class DPTFeatureExtractor(DPTImageProcessor):
def __init__(self, *args, **kwargs) -> None:
warnings.warn(
"The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use DPTImageProcessor instead.",
FutureWarning,
)
... | class_definition | 806 | 1,164 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/dpt/feature_extraction_dpt.py | null | 6,295 |
class DPTImageProcessor(BaseImageProcessor):
r"""
Constructs a DPT image processor.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image's (height, width) dimensions. Can be overidden by `do_resize` in `preprocess`.
size (`Dict[str, int]` *option... | class_definition | 3,043 | 24,382 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/dpt/image_processing_dpt.py | null | 6,296 |
class CenterPadding:
def __init__(self, multiple):
super().__init__()
self.multiple = multiple
def _get_pad(self, size):
new_size = math.ceil(size / self.multiple) * self.multiple
pad_size = new_size - size
pad_size_left = pad_size // 2
... | class_definition | 10,574 | 11,288 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/dpt/convert_dinov2_depth_to_hf.py | null | 6,297 |
class BaseModelOutputWithIntermediateActivations(ModelOutput):
"""
Base class for model's outputs that also contains intermediate activations that can be used at later stages. Useful
in the context of Vision models.:
Args:
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_l... | class_definition | 1,883 | 2,630 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/dpt/modeling_dpt.py | null | 6,298 |
class BaseModelOutputWithPoolingAndIntermediateActivations(ModelOutput):
"""
Base class for model's outputs that also contains a pooling of the last hidden states as well as intermediate
activations that can be used by the model at later stages.
Args:
last_hidden_state (`torch.FloatTensor` of s... | class_definition | 2,644 | 5,076 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/dpt/modeling_dpt.py | null | 6,299 |
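Each row above follows the schema in the header: `text` holds a (truncated) class definition, `type` is always `class_definition`, `start`/`end` are character offsets into the file named by `filepath`, `depth` marks nesting, `parent_class` is the enclosing class (or null), and `class_index` is a running id. Below is a minimal sketch of filtering rows with this schema, assuming the table has been exported to a JSON Lines file named `class_definitions.jsonl`; the filename and export format are assumptions for illustration, not part of the original dump.

```python
import json

# Load the exported rows (hypothetical file; one JSON object per line,
# with the same columns as the table header above).
rows = []
with open("class_definitions.jsonl", "r", encoding="utf-8") as f:
    for line in f:
        rows.append(json.loads(line))

# Keep only top-level class definitions from one modeling file,
# using the schema's type / depth / filepath columns.
deit_rows = [
    r
    for r in rows
    if r["type"] == "class_definition"
    and r["depth"] == 0
    and r["filepath"].endswith("modeling_deit.py")
]

# start/end are character offsets into the source file, so the length of
# each class definition can be derived without re-reading the file.
for r in deit_rows:
    print(r["class_index"], r["end"] - r["start"], "characters")
```

The same filtering could be done with pandas or the `datasets` library; the plain-json version is shown only to keep the sketch dependency-free.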