Upload folder using huggingface_hub
- only_0.5ce_posix_100_really_fixed/adapter_20/README.md +9 -0
- only_0.5ce_posix_100_really_fixed/adapter_20/adapter_config.json +24 -0
- only_0.5ce_posix_100_really_fixed/adapter_20/adapter_model.bin +3 -0
- only_0.5ce_posix_100_really_fixed/adapter_40/README.md +9 -0
- only_0.5ce_posix_100_really_fixed/adapter_40/adapter_config.json +24 -0
- only_0.5ce_posix_100_really_fixed/adapter_40/adapter_model.bin +3 -0
- only_0.5ce_posix_100_really_fixed/adapter_60/README.md +9 -0
- only_0.5ce_posix_100_really_fixed/adapter_60/adapter_config.json +24 -0
- only_0.5ce_posix_100_really_fixed/adapter_60/adapter_model.bin +3 -0
- only_0.5ce_posix_100_really_fixed/adapter_80/README.md +9 -0
- only_0.5ce_posix_100_really_fixed/adapter_80/adapter_config.json +24 -0
- only_0.5ce_posix_100_really_fixed/adapter_80/adapter_model.bin +3 -0
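The commit title is the default message that `huggingface_hub`'s `upload_folder` writes. A minimal sketch of the call that would produce a commit like this one; `repo_id` is a placeholder, since the target repository is not shown here:

```python
# Sketch of the upload that likely produced this commit (assumed, based on
# the default commit message of upload_folder). repo_id is illustrative.
from huggingface_hub import HfApi

api = HfApi()
api.upload_folder(
    folder_path="only_0.5ce_posix_100_really_fixed",      # local checkpoint folder
    path_in_repo="only_0.5ce_posix_100_really_fixed",     # keep the same layout in the repo
    repo_id="<user>/<repo>",                              # placeholder
    commit_message="Upload folder using huggingface_hub", # the default message
)
```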
only_0.5ce_posix_100_really_fixed/adapter_20/README.md
ADDED
@@ -0,0 +1,9 @@
+---
+library_name: peft
+---
+## Training procedure
+
+### Framework versions
+
+
+- PEFT 0.4.0
only_0.5ce_posix_100_really_fixed/adapter_20/adapter_config.json
ADDED
@@ -0,0 +1,24 @@
+{
+    "auto_mapping": {
+        "base_model_class": "LlavaMistralForCausalLM",
+        "parent_library": "llava.model.language_model.llava_mistral"
+    },
+    "base_model_name_or_path": "/share/ssddata/sarimhashmi/posix/llava_med/llava-med",
+    "bias": "none",
+    "fan_in_fan_out": false,
+    "inference_mode": true,
+    "init_lora_weights": true,
+    "layers_pattern": null,
+    "layers_to_transform": null,
+    "lora_alpha": 32,
+    "lora_dropout": 0.05,
+    "modules_to_save": null,
+    "peft_type": "LORA",
+    "r": 16,
+    "revision": null,
+    "target_modules": [
+        "q_proj",
+        "v_proj"
+    ],
+    "task_type": null
+}
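The config above describes a LoRA adapter (r=16, alpha=32, dropout 0.05, targeting `q_proj`/`v_proj`) over a LLaVA-Med Mistral base model. A minimal sketch of loading it with PEFT 0.4.0, assuming the `llava` package named in `auto_mapping` is installed and the base weights are available at the path recorded in the config:

```python
# Sketch only: the llava import path and base-model path are taken verbatim
# from adapter_config.json; availability of both is an assumption.
from peft import PeftModel
from llava.model.language_model.llava_mistral import LlavaMistralForCausalLM

base = LlavaMistralForCausalLM.from_pretrained(
    "/share/ssddata/sarimhashmi/posix/llava_med/llava-med"  # base_model_name_or_path
)
model = PeftModel.from_pretrained(
    base,
    "only_0.5ce_posix_100_really_fixed/adapter_20",  # folder with adapter_config.json + adapter_model.bin
)
model.eval()  # matches "inference_mode": true
```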
only_0.5ce_posix_100_really_fixed/adapter_20/adapter_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:aa339fa54099822fb278e4702db7ad8c9f099c34665ecf75f5a500200ba37ca2
+size 33640010
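The `.bin` entries are Git LFS pointer files: they record the blob's SHA-256 and byte size rather than the weights themselves. A minimal standard-library sketch for checking a downloaded `adapter_model.bin` against the pointer above:

```python
# Verify a downloaded LFS object against its pointer (oid + size above).
import hashlib
from pathlib import Path

path = Path("only_0.5ce_posix_100_really_fixed/adapter_20/adapter_model.bin")
expected_oid = "aa339fa54099822fb278e4702db7ad8c9f099c34665ecf75f5a500200ba37ca2"
expected_size = 33640010

data = path.read_bytes()
assert len(data) == expected_size, "size mismatch"
assert hashlib.sha256(data).hexdigest() == expected_oid, "sha256 mismatch"
print("adapter_model.bin matches its LFS pointer")
```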
only_0.5ce_posix_100_really_fixed/adapter_40/README.md
ADDED
@@ -0,0 +1,9 @@
+---
+library_name: peft
+---
+## Training procedure
+
+### Framework versions
+
+
+- PEFT 0.4.0
only_0.5ce_posix_100_really_fixed/adapter_40/adapter_config.json
ADDED
@@ -0,0 +1,24 @@
+{
+    "auto_mapping": {
+        "base_model_class": "LlavaMistralForCausalLM",
+        "parent_library": "llava.model.language_model.llava_mistral"
+    },
+    "base_model_name_or_path": "/share/ssddata/sarimhashmi/posix/llava_med/llava-med",
+    "bias": "none",
+    "fan_in_fan_out": false,
+    "inference_mode": true,
+    "init_lora_weights": true,
+    "layers_pattern": null,
+    "layers_to_transform": null,
+    "lora_alpha": 32,
+    "lora_dropout": 0.05,
+    "modules_to_save": null,
+    "peft_type": "LORA",
+    "r": 16,
+    "revision": null,
+    "target_modules": [
+        "q_proj",
+        "v_proj"
+    ],
+    "task_type": null
+}
only_0.5ce_posix_100_really_fixed/adapter_40/adapter_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6d30eb612cfadad20e83a0db3b105bb6067e2615cdf16010a7a5383c139190fb
+size 33640010
only_0.5ce_posix_100_really_fixed/adapter_60/README.md
ADDED
@@ -0,0 +1,9 @@
+---
+library_name: peft
+---
+## Training procedure
+
+### Framework versions
+
+
+- PEFT 0.4.0
only_0.5ce_posix_100_really_fixed/adapter_60/adapter_config.json
ADDED
@@ -0,0 +1,24 @@
+{
+    "auto_mapping": {
+        "base_model_class": "LlavaMistralForCausalLM",
+        "parent_library": "llava.model.language_model.llava_mistral"
+    },
+    "base_model_name_or_path": "/share/ssddata/sarimhashmi/posix/llava_med/llava-med",
+    "bias": "none",
+    "fan_in_fan_out": false,
+    "inference_mode": true,
+    "init_lora_weights": true,
+    "layers_pattern": null,
+    "layers_to_transform": null,
+    "lora_alpha": 32,
+    "lora_dropout": 0.05,
+    "modules_to_save": null,
+    "peft_type": "LORA",
+    "r": 16,
+    "revision": null,
+    "target_modules": [
+        "q_proj",
+        "v_proj"
+    ],
+    "task_type": null
+}
only_0.5ce_posix_100_really_fixed/adapter_60/adapter_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4f4901a66936bd581d3f3131cc5318433ec301b31329b567eddbb65166a5f5ac
+size 33640010
only_0.5ce_posix_100_really_fixed/adapter_80/README.md
ADDED
@@ -0,0 +1,9 @@
+---
+library_name: peft
+---
+## Training procedure
+
+### Framework versions
+
+
+- PEFT 0.4.0
only_0.5ce_posix_100_really_fixed/adapter_80/adapter_config.json
ADDED
@@ -0,0 +1,24 @@
+{
+    "auto_mapping": {
+        "base_model_class": "LlavaMistralForCausalLM",
+        "parent_library": "llava.model.language_model.llava_mistral"
+    },
+    "base_model_name_or_path": "/share/ssddata/sarimhashmi/posix/llava_med/llava-med",
+    "bias": "none",
+    "fan_in_fan_out": false,
+    "inference_mode": true,
+    "init_lora_weights": true,
+    "layers_pattern": null,
+    "layers_to_transform": null,
+    "lora_alpha": 32,
+    "lora_dropout": 0.05,
+    "modules_to_save": null,
+    "peft_type": "LORA",
+    "r": 16,
+    "revision": null,
+    "target_modules": [
+        "q_proj",
+        "v_proj"
+    ],
+    "task_type": null
+}
only_0.5ce_posix_100_really_fixed/adapter_80/adapter_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6b0d3ddd772b9801a6c78bd4793e360b8acb468f2bd867ebdcccdcf9d970b560
+size 33640010