shruthib committed on
Commit
fd80a76
·
verified ·
1 Parent(s): 33f99f8

Update transformers to 4.48.0 (#17)

Browse files

- Update transformers to 4.48.0 (db05f0330e8b132d3e8e098ff4d5c71f3dd6bdc9)
- Pin transformers to <4.49 (6b647f4a57c435178c7648e1733a9a688f40170d)

README.md CHANGED
@@ -84,13 +84,10 @@ pillow
84
  protobuf
85
  sentencepiece
86
  torch
87
- transformers
88
  ```
89
 
90
- Note: You may temporarily need to install transformers from source since MAIRA-2 requires `transformers>=4.46.0.dev0`. Due to an [incompatible commit](https://github.com/huggingface/transformers/commit/0f49deacbff3e57cde45222842c0db6375e4fa43) in transformers main, the current fix is to install a transformers version from or after commit [88d960937c81a32bfb63356a2e8ecf7999619681](https://github.com/huggingface/transformers/commit/88d960937c81a32bfb63356a2e8ecf7999619681) but before commit [0f49deacbff3e57cde45222842c0db6375e4fa43](https://github.com/huggingface/transformers/commit/0f49deacbff3e57cde45222842c0db6375e4fa43).
91
- ```
92
- pip install git+https://github.com/huggingface/transformers.git@88d960937c81a32bfb63356a2e8ecf7999619681
93
- ```
94
 
95
  First, initialise the model and put it in eval mode.
96
  ```python
 
84
  protobuf
85
  sentencepiece
86
  torch
87
+ transformers>=4.48.0,<4.49
88
  ```
89
 
90
+ Note: MAIRA-2 has last been tested with transformers v4.48.0.
 
 
 
91
 
92
  First, initialise the model and put it in eval mode.
93
  ```python
config.json CHANGED
@@ -12,171 +12,53 @@
12
  "image_seq_length": 576,
13
  "image_token_index": 32204,
14
  "model_type": "maira2",
 
15
  "pad_token_id": 0,
16
  "projector_hidden_act": "gelu",
17
  "projector_n_layers": 4,
18
  "text_config": {
19
  "_name_or_path": "lmsys/vicuna-7b-v1.5",
20
- "add_cross_attention": false,
21
  "architectures": [
22
  "LlamaForCausalLM"
23
  ],
24
- "attention_bias": false,
25
- "attention_dropout": 0.0,
26
- "bad_words_ids": null,
27
- "begin_suppress_tokens": null,
28
- "bos_token_id": 1,
29
- "chunk_size_feed_forward": 0,
30
- "cross_attention_hidden_size": null,
31
- "decoder_start_token_id": null,
32
- "diversity_penalty": 0.0,
33
- "do_sample": false,
34
- "early_stopping": false,
35
- "encoder_no_repeat_ngram_size": 0,
36
- "eos_token_id": 2,
37
- "exponential_decay_length_penalty": null,
38
- "finetuning_task": null,
39
- "forced_bos_token_id": null,
40
- "forced_eos_token_id": null,
41
- "head_dim": 128,
42
- "hidden_act": "silu",
43
- "hidden_size": 4096,
44
- "id2label": {
45
- "0": "LABEL_0",
46
- "1": "LABEL_1"
47
- },
48
- "initializer_range": 0.02,
49
- "intermediate_size": 11008,
50
- "is_decoder": false,
51
- "is_encoder_decoder": false,
52
- "label2id": {
53
- "LABEL_0": 0,
54
- "LABEL_1": 1
55
- },
56
- "length_penalty": 1.0,
57
- "max_length": 20,
58
  "max_position_embeddings": 4096,
59
- "min_length": 0,
60
- "mlp_bias": false,
61
  "model_type": "llama",
62
- "no_repeat_ngram_size": 0,
63
- "num_attention_heads": 32,
64
- "num_beam_groups": 1,
65
- "num_beams": 1,
66
- "num_hidden_layers": 32,
67
- "num_key_value_heads": 32,
68
- "num_return_sequences": 1,
69
- "output_attentions": false,
70
- "output_hidden_states": false,
71
- "output_scores": false,
72
  "pad_token_id": 0,
73
- "prefix": null,
74
- "pretraining_tp": 1,
75
- "problem_type": null,
76
- "pruned_heads": {},
77
- "remove_invalid_values": false,
78
- "repetition_penalty": 1.0,
79
- "return_dict": true,
80
- "return_dict_in_generate": false,
81
  "rms_norm_eps": 1e-05,
82
  "rope_scaling": {
83
  "factor": 1.5,
84
  "rope_type": "linear"
85
  },
86
- "rope_theta": 10000.0,
87
- "sep_token_id": null,
88
- "suppress_tokens": null,
89
- "task_specific_params": null,
90
- "temperature": 1.0,
91
- "tf_legacy_loss": false,
92
- "tie_encoder_decoder": false,
93
- "tie_word_embeddings": false,
94
- "tokenizer_class": null,
95
- "top_k": 50,
96
- "top_p": 1.0,
97
  "torch_dtype": "bfloat16",
98
- "torchscript": false,
99
- "typical_p": 1.0,
100
- "use_bfloat16": false,
101
- "use_cache": true,
102
  "vocab_size": 32207
103
  },
104
  "torch_dtype": "float32",
105
- "transformers_version": "4.46.0.dev0",
106
  "vision_config": {
107
- "_name_or_path": "",
108
- "add_cross_attention": false,
109
  "apply_layernorm": true,
110
  "architectures": [
111
  "Dinov2Model"
112
  ],
113
  "attention_probs_dropout_prob": 0.0,
114
- "bad_words_ids": null,
115
- "begin_suppress_tokens": null,
116
- "bos_token_id": null,
117
- "chunk_size_feed_forward": 0,
118
- "cross_attention_hidden_size": null,
119
- "decoder_start_token_id": null,
120
- "diversity_penalty": 0.0,
121
- "do_sample": false,
122
  "drop_path_rate": 0.0,
123
- "early_stopping": false,
124
- "encoder_no_repeat_ngram_size": 0,
125
- "eos_token_id": null,
126
- "exponential_decay_length_penalty": null,
127
- "finetuning_task": null,
128
- "forced_bos_token_id": null,
129
- "forced_eos_token_id": null,
130
  "hidden_act": "gelu",
131
  "hidden_dropout_prob": 0.0,
132
  "hidden_size": 768,
133
- "id2label": {
134
- "0": "LABEL_0",
135
- "1": "LABEL_1"
136
- },
137
  "image_size": 518,
138
- "initializer_range": 0.02,
139
- "is_decoder": false,
140
- "is_encoder_decoder": false,
141
- "label2id": {
142
- "LABEL_0": 0,
143
- "LABEL_1": 1
144
- },
145
  "layer_norm_eps": 1e-06,
146
  "layerscale_value": 1.0,
147
- "length_penalty": 1.0,
148
- "max_length": 20,
149
- "min_length": 0,
150
  "mlp_ratio": 4,
151
  "model_type": "dinov2",
152
- "no_repeat_ngram_size": 0,
153
  "num_attention_heads": 12,
154
- "num_beam_groups": 1,
155
- "num_beams": 1,
156
- "num_channels": 3,
157
  "num_hidden_layers": 12,
158
- "num_return_sequences": 1,
159
  "out_features": [
160
  "stage12"
161
  ],
162
  "out_indices": [
163
  12
164
  ],
165
- "output_attentions": false,
166
- "output_hidden_states": false,
167
- "output_scores": false,
168
- "pad_token_id": null,
169
- "patch_size": 14,
170
- "prefix": null,
171
- "problem_type": null,
172
- "pruned_heads": {},
173
  "qkv_bias": true,
174
- "remove_invalid_values": false,
175
- "repetition_penalty": 1.0,
176
  "reshape_hidden_states": false,
177
- "return_dict": true,
178
- "return_dict_in_generate": false,
179
- "sep_token_id": null,
180
  "stage_names": [
181
  "stem",
182
  "stage1",
@@ -192,19 +74,7 @@
192
  "stage11",
193
  "stage12"
194
  ],
195
- "suppress_tokens": null,
196
- "task_specific_params": null,
197
- "temperature": 1.0,
198
- "tf_legacy_loss": false,
199
- "tie_encoder_decoder": false,
200
- "tie_word_embeddings": true,
201
- "tokenizer_class": null,
202
- "top_k": 50,
203
- "top_p": 1.0,
204
  "torch_dtype": "float32",
205
- "torchscript": false,
206
- "typical_p": 1.0,
207
- "use_bfloat16": false,
208
  "use_swiglu_ffn": false
209
  },
210
  "vision_feature_layer": -1,
 
12
  "image_seq_length": 576,
13
  "image_token_index": 32204,
14
  "model_type": "maira2",
15
+ "multimodal_projector_bias": true,
16
  "pad_token_id": 0,
17
  "projector_hidden_act": "gelu",
18
  "projector_n_layers": 4,
19
  "text_config": {
20
  "_name_or_path": "lmsys/vicuna-7b-v1.5",
 
21
  "architectures": [
22
  "LlamaForCausalLM"
23
  ],
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
24
  "max_position_embeddings": 4096,
 
 
25
  "model_type": "llama",
 
 
 
 
 
 
 
 
 
 
26
  "pad_token_id": 0,
 
 
 
 
 
 
 
 
27
  "rms_norm_eps": 1e-05,
28
  "rope_scaling": {
29
  "factor": 1.5,
30
  "rope_type": "linear"
31
  },
 
 
 
 
 
 
 
 
 
 
 
32
  "torch_dtype": "bfloat16",
 
 
 
 
33
  "vocab_size": 32207
34
  },
35
  "torch_dtype": "float32",
36
+ "transformers_version": "4.48.0",
37
  "vision_config": {
 
 
38
  "apply_layernorm": true,
39
  "architectures": [
40
  "Dinov2Model"
41
  ],
42
  "attention_probs_dropout_prob": 0.0,
 
 
 
 
 
 
 
 
43
  "drop_path_rate": 0.0,
 
 
 
 
 
 
 
44
  "hidden_act": "gelu",
45
  "hidden_dropout_prob": 0.0,
46
  "hidden_size": 768,
 
 
 
 
47
  "image_size": 518,
 
 
 
 
 
 
 
48
  "layer_norm_eps": 1e-06,
49
  "layerscale_value": 1.0,
 
 
 
50
  "mlp_ratio": 4,
51
  "model_type": "dinov2",
 
52
  "num_attention_heads": 12,
 
 
 
53
  "num_hidden_layers": 12,
 
54
  "out_features": [
55
  "stage12"
56
  ],
57
  "out_indices": [
58
  12
59
  ],
 
 
 
 
 
 
 
 
60
  "qkv_bias": true,
 
 
61
  "reshape_hidden_states": false,
 
 
 
62
  "stage_names": [
63
  "stem",
64
  "stage1",
 
74
  "stage11",
75
  "stage12"
76
  ],
 
 
 
 
 
 
 
 
 
77
  "torch_dtype": "float32",
 
 
 
78
  "use_swiglu_ffn": false
79
  },
80
  "vision_feature_layer": -1,
generation_config.json CHANGED
@@ -5,5 +5,5 @@
5
  "max_length": 4096,
6
  "max_new_tokens": 450,
7
  "pad_token_id": 0,
8
- "transformers_version": "4.46.0.dev0"
9
  }
 
5
  "max_length": 4096,
6
  "max_new_tokens": 450,
7
  "pad_token_id": 0,
8
+ "transformers_version": "4.48.0"
9
  }
processing_maira2.py CHANGED
@@ -40,6 +40,7 @@ class Maira2Processor(LlavaProcessor):
40
  "patch_size",
41
  "vision_feature_select_strategy",
42
  "image_token",
 
43
  "phrase_start_token",
44
  "phrase_end_token",
45
  "box_start_token",
@@ -55,6 +56,7 @@ class Maira2Processor(LlavaProcessor):
55
  vision_feature_select_strategy: str | None = None,
56
  chat_template: str | None = None,
57
  image_token: str = "<image>",
 
58
  phrase_start_token: str = "<obj>",
59
  phrase_end_token: str = "</obj>",
60
  box_start_token: str = "<box>",
@@ -69,6 +71,7 @@ class Maira2Processor(LlavaProcessor):
69
  vision_feature_select_strategy=vision_feature_select_strategy,
70
  chat_template=chat_template,
71
  image_token=image_token,
 
72
  **kwargs,
73
  )
74
 
 
40
  "patch_size",
41
  "vision_feature_select_strategy",
42
  "image_token",
43
+ "num_additional_image_tokens",
44
  "phrase_start_token",
45
  "phrase_end_token",
46
  "box_start_token",
 
56
  vision_feature_select_strategy: str | None = None,
57
  chat_template: str | None = None,
58
  image_token: str = "<image>",
59
+ num_additional_image_tokens: int = 1,
60
  phrase_start_token: str = "<obj>",
61
  phrase_end_token: str = "</obj>",
62
  box_start_token: str = "<box>",
 
71
  vision_feature_select_strategy=vision_feature_select_strategy,
72
  chat_template=chat_template,
73
  image_token=image_token,
74
+ num_additional_image_tokens=num_additional_image_tokens,
75
  **kwargs,
76
  )
77
 
processor_config.json CHANGED
@@ -5,6 +5,7 @@
5
  "box_end_token": "</box>",
6
  "box_start_token": "<box>",
7
  "image_token": "<image>",
 
8
  "num_box_coord_bins": 100,
9
  "patch_size": 14,
10
  "phrase_end_token": "</obj>",
 
5
  "box_end_token": "</box>",
6
  "box_start_token": "<box>",
7
  "image_token": "<image>",
8
+ "num_additional_image_tokens": 1,
9
  "num_box_coord_bins": 100,
10
  "patch_size": 14,
11
  "phrase_end_token": "</obj>",
tokenizer_config.json CHANGED
@@ -1688,6 +1688,7 @@
1688
  "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}You are an expert radiology assistant tasked with interpreting a chest X-ray study. {% for message in messages %}{% if message[\"role\"] == \"user\" %}USER: {% else %}ASSISTANT: {% endif %}{% for item in message[\"content\"] %}{% if item[\"type\"] == \"text\" %}{{ item[\"text\"] }}{% elif item[\"type\"] == \"image\" %}<image>{% endif %}{% endfor %}{% if message[\"role\"] == \"user\" %} {% else %}{{eos_token}}{% endif %}{% endfor %}{% if add_generation_prompt %}ASSISTANT: {% endif %}",
1689
  "clean_up_tokenization_spaces": false,
1690
  "eos_token": "</s>",
 
1691
  "legacy": false,
1692
  "model_max_length": 4096,
1693
  "pad_token": "<unk>",
 
1688
  "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}You are an expert radiology assistant tasked with interpreting a chest X-ray study. {% for message in messages %}{% if message[\"role\"] == \"user\" %}USER: {% else %}ASSISTANT: {% endif %}{% for item in message[\"content\"] %}{% if item[\"type\"] == \"text\" %}{{ item[\"text\"] }}{% elif item[\"type\"] == \"image\" %}<image>{% endif %}{% endfor %}{% if message[\"role\"] == \"user\" %} {% else %}{{eos_token}}{% endif %}{% endfor %}{% if add_generation_prompt %}ASSISTANT: {% endif %}",
1689
  "clean_up_tokenization_spaces": false,
1690
  "eos_token": "</s>",
1691
+ "extra_special_tokens": {},
1692
  "legacy": false,
1693
  "model_max_length": 4096,
1694
  "pad_token": "<unk>",