Safetensors
custom_code
jonggwon-park committed on
Commit
2ba7893
·
1 Parent(s): 8250fed

auto model bug fix

Browse files
Files changed (3) hide show
  1. align_transformers.py +1 -1
  2. config.json +3 -3
  3. text_encoders.py +0 -3
align_transformers.py CHANGED
@@ -3,7 +3,7 @@ from torch import nn
3
  from transformers import PreTrainedModel
4
  from transformers.models.dinov2.modeling_dinov2 import Dinov2Encoder
5
 
6
- from .configuration import AlignTransformerConfig
7
 
8
 
9
  def build_align_transformer(config):
 
3
  from transformers import PreTrainedModel
4
  from transformers.models.dinov2.modeling_dinov2 import Dinov2Encoder
5
 
6
+ from .configuration_radzero import AlignTransformerConfig
7
 
8
 
9
  def build_align_transformer(config):
config.json CHANGED
@@ -44,7 +44,7 @@
44
  "max_length": 20,
45
  "min_length": 0,
46
  "mlp_ratio": 4,
47
- "model_type": "",
48
  "no_repeat_ngram_size": 0,
49
  "num_attention_heads": 12,
50
  "num_beam_groups": 1,
@@ -206,7 +206,7 @@
206
  "length_penalty": 1.0,
207
  "max_length": 20,
208
  "min_length": 0,
209
- "model_type": "",
210
  "no_repeat_ngram_size": 0,
211
  "num_beam_groups": 1,
212
  "num_beams": 1,
@@ -289,7 +289,7 @@
289
  "max_length": 20,
290
  "min_length": 0,
291
  "mlp_ratio": 4,
292
- "model_type": "",
293
  "no_repeat_ngram_size": 0,
294
  "num_attention_heads": 12,
295
  "num_beam_groups": 1,
 
44
  "max_length": 20,
45
  "min_length": 0,
46
  "mlp_ratio": 4,
47
+ "model_type": "align_transformer",
48
  "no_repeat_ngram_size": 0,
49
  "num_attention_heads": 12,
50
  "num_beam_groups": 1,
 
206
  "length_penalty": 1.0,
207
  "max_length": 20,
208
  "min_length": 0,
209
+ "model_type": "mpnet",
210
  "no_repeat_ngram_size": 0,
211
  "num_beam_groups": 1,
212
  "num_beams": 1,
 
289
  "max_length": 20,
290
  "min_length": 0,
291
  "mlp_ratio": 4,
292
+ "model_type": "dinov2",
293
  "no_repeat_ngram_size": 0,
294
  "num_attention_heads": 12,
295
  "num_beam_groups": 1,
text_encoders.py CHANGED
@@ -1,8 +1,5 @@
1
- import open_clip
2
  import torch
3
  from transformers import AutoModel
4
- from transformers.models.clip.modeling_clip import CLIPTextModel
5
- from transformers.models.siglip.modeling_siglip import SiglipTextModel
6
 
7
 
8
  def build_text_encoder(config):
 
 
1
  import torch
2
  from transformers import AutoModel
 
 
3
 
4
 
5
  def build_text_encoder(config):