Tags: Feature Extraction · Transformers · Safetensors · Fairseq · French · pantagruel_uni · fill-mask · data2vec2 · JEPA · text · custom_code
flaubert committed · verified
Commit 23ab6c6 · 1 Parent(s): e8d86a4

Upload folder using huggingface_hub

Files changed (1):
1. modeling_pantagruel_uni.py (+6 −8)
modeling_pantagruel_uni.py CHANGED

@@ -23,6 +23,7 @@
 
 """ PantagruelUni model."""
 import math
+import logging
 import warnings
 from typing import Optional, Tuple, Dict, List, Callable, Any, Union
 from functools import partial
@@ -40,10 +41,7 @@ from transformers import PreTrainedModel
 from transformers.utils import (
     ModelOutput, TransformersKwargs, auto_docstring
 )
-from transformers.activations import ACT2FN, gelu
-from transformers.modeling_attn_mask_utils import (
-    _prepare_4d_attention_mask, _prepare_4d_attention_mask_for_sdpa
-)
+from transformers.activations import gelu
 from transformers.utils.generic import can_return_tuple
 from transformers.processing_utils import Unpack
 from transformers.modeling_outputs import (
@@ -55,7 +53,7 @@ from transformers.modeling_outputs import (
     CausalLMOutput,
     XVectorOutput,
 )
-from transformers.utils import auto_docstring, is_peft_available
+from transformers.utils import is_peft_available
 from .configuration_pantagruel_uni import (
     PantagruelUniConfig,
     PantagruelModalityConfig,
@@ -1769,7 +1767,7 @@ class PantagruelUniForMaskedLM(PantagruelUniPreTrainedModel):
         super().__init__(config)
 
         if config.is_decoder:
-            logger.warning(
+            logging.warning(
                 "If you want to use `PantagruelTextForMaskedLM` make sure `config.is_decoder=False` for "
                 "bi-directional self-attention."
             )
@@ -1845,10 +1843,10 @@ class PantagruelUniForSequenceClassification(PantagruelUniPreTrainedModel):
         self.pantagruel_uni = PantagruelUniModel(config, add_pooling_layer=False)
 
         if config.supported_modality == "TEXT":
-            logger.info("Initializing PantagruelUniForSequenceClassification for TEXT")
+            logging.info("Initializing PantagruelUniForSequenceClassification for TEXT")
             self.classifier = PantagruelTextClassificationHead(config)
         elif config.supported_modality == "AUDIO":
-            logger.info("Initializing PantagruelUniForSequenceClassification for AUDIO")
+            logging.info("Initializing PantagruelUniForSequenceClassification for AUDIO")
             num_layers = config.num_hidden_layers + 1  # transformer layers + input embeddings
             if config.modalities.audio.use_weighted_layer_sum:
                 self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers)
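The substantive fix is at the logging call sites: the old code apparently referenced a `logger` name with no matching definition in the shown imports, and the new code imports the stdlib `logging` module and calls it directly. For comparison, a minimal sketch of the module-level logger pattern that transformers model files conventionally use; the `logger.warning` message is the one from this diff, everything else is illustrative:

from transformers.utils import logging

# Per-module logger that respects the transformers verbosity settings,
# unlike direct calls on the stdlib root logger.
logger = logging.get_logger(__name__)

# With a named logger defined, the original `logger.warning(...)` call
# sites would have worked unchanged and can be filtered per module:
logger.warning(
    "If you want to use `PantagruelTextForMaskedLM` make sure `config.is_decoder=False` for "
    "bi-directional self-attention."
)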
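In the AUDIO branch, `layer_weights` is initialized uniformly over `num_hidden_layers + 1` hidden states (transformer layers plus the input embeddings). The commit does not show the forward pass, but comparable transformers models such as `Wav2Vec2ForSequenceClassification` softmax these weights and take a weighted sum over the stacked per-layer outputs; a sketch under that assumption, with illustrative shapes rather than this repo's config values:

import torch
import torch.nn as nn

# Illustrative sizes, not taken from the PantagruelUni config.
num_layers = 12 + 1                  # transformer layers + input embeddings
batch, seq_len, hidden = 2, 50, 768

layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers)  # uniform init, as in the diff

# One hidden-state tensor per layer, as returned with output_hidden_states=True.
hidden_states = tuple(torch.randn(batch, seq_len, hidden) for _ in range(num_layers))

stacked = torch.stack(hidden_states, dim=1)           # (batch, num_layers, seq_len, hidden)
norm_weights = torch.softmax(layer_weights, dim=-1)   # learned weights, normalized to sum to 1
pooled = (stacked * norm_weights.view(-1, 1, 1)).sum(dim=1)  # (batch, seq_len, hidden)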