Upload nanoVLM using push_to_hub
Files changed:
- README.md (+1 -1)
- config.json (+2 -2)
- model.safetensors (+1 -1)
README.md CHANGED
@@ -23,5 +23,5 @@ Follow the install instructions and run the following code:
 ```python
 from models.vision_language_model import VisionLanguageModel
 
-model = VisionLanguageModel.from_pretrained("6cyu/
+model = VisionLanguageModel.from_pretrained("6cyu/nanoVLM_scienceqa")
 ```
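Note: the README change completes the `from_pretrained` call with the repository id `6cyu/nanoVLM_scienceqa`. A minimal usage sketch follows, assuming the nanoVLM repository is on the Python path and that `VisionLanguageModel` behaves as a standard `torch.nn.Module`; the device placement and eval-mode calls are illustrative assumptions, not part of the committed README.

```python
import torch
from models.vision_language_model import VisionLanguageModel

# Load the checkpoint referenced in the updated README line.
model = VisionLanguageModel.from_pretrained("6cyu/nanoVLM_scienceqa")

# Assumed standard nn.Module usage for illustration only.
device = "cuda" if torch.cuda.is_available() else "cpu"
model = model.to(device).eval()
```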
config.json CHANGED
@@ -30,8 +30,8 @@
 "lm_chat_template": "{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
 "mp_pixel_shuffle_factor": 4,
 "mp_image_token_length": 64,
-"max_img_size":
-"resize_to_max_side_len":
+"max_img_size": 1024,
+"resize_to_max_side_len": false,
 "vlm_extra_tokens": {
 "image_token": "<|image|>",
 "global_image_token": "<|global_image|>",
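Note: the two keys set here plausibly govern image preprocessing, with `max_img_size` capping the longest image side at 1024 pixels and `resize_to_max_side_len` (here `false`) toggling a forced resize to that length. The sketch below is a hypothetical illustration of such semantics using Pillow; the helper name `resize_by_config` and its exact behavior are assumptions, not nanoVLM's actual preprocessing code.

```python
from PIL import Image

def resize_by_config(img: Image.Image, max_img_size: int = 1024,
                     resize_to_max_side_len: bool = False) -> Image.Image:
    """Hypothetical reading of the two config keys added in this commit."""
    w, h = img.size
    longest = max(w, h)
    if not resize_to_max_side_len and longest <= max_img_size:
        return img  # under the cap and no forced resize: keep the image as-is
    scale = max_img_size / longest
    return img.resize((round(w * scale), round(h * scale)))
```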
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:a68f8e312204b42191c33194b6581c317fb09d118e23e5fc1b41d8993a2cd861
 size 1840504504
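Note: model.safetensors is tracked as a git-lfs pointer, so the diff only updates the pointer metadata: the sha256 object id changes while the size stays at 1840504504 bytes. A quick way to confirm that a locally downloaded weight file matches the new pointer is shown below; the local filename "model.safetensors" is an assumed download path for illustration.

```python
import hashlib

# Expected digest and size taken from the updated git-lfs pointer.
EXPECTED_SHA256 = "a68f8e312204b42191c33194b6581c317fb09d118e23e5fc1b41d8993a2cd861"
EXPECTED_SIZE = 1840504504

def verify_safetensors(path: str) -> bool:
    """Hash the file in 1 MiB chunks and compare digest and size to the pointer."""
    h = hashlib.sha256()
    size = 0
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
            size += len(chunk)
    return h.hexdigest() == EXPECTED_SHA256 and size == EXPECTED_SIZE

print(verify_safetensors("model.safetensors"))
```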