Upload LoRA adapters and tokenizer after fine-tuning
Files changed:
- README.md (+1, -0)
- adapter_config.json (+6, -2)
- adapter_model.safetensors (+1, -1)
README.md CHANGED

@@ -207,4 +207,5 @@ Carbon emissions can be estimated using the [Machine Learning Impact calculator]
 [More Information Needed]
 ### Framework versions
 
+- PEFT 0.17.0
 - PEFT 0.16.0
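The README change is just the model card's "Framework versions" list picking up PEFT 0.17.0 (the 0.16.0 entry from the previous upload stays below it). Before loading the adapter it can be worth confirming the local PEFT release is at least as new; a minimal sketch, assuming `packaging` is installed alongside `peft`:

```python
# Sketch: check the installed PEFT release against the model card's
# "Framework versions" entry (0.17.0 after this commit).
from importlib.metadata import version
from packaging.version import Version

installed = Version(version("peft"))
print(installed, "OK" if installed >= Version("0.17.0") else "older than the model card")
```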
adapter_config.json CHANGED

@@ -1,6 +1,9 @@
 {
   "alpha_pattern": {},
-  "auto_mapping": null,
+  "auto_mapping": {
+    "base_model_class": "Gemma3nForConditionalGeneration",
+    "parent_library": "transformers.models.gemma3n.modeling_gemma3n"
+  },
   "base_model_name_or_path": "unsloth/gemma-3n-e4b-it-unsloth-bnb-4bit",
   "bias": "none",
   "corda_config": null,
@@ -25,7 +28,8 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": "(?:.*?(?:language|text).*?(?:self_attn|attention|attn|mlp|feed_forward|ffn|dense).*?(?:q_proj|k_proj|v_proj|o_proj|gate_proj|up_proj|down_proj|correction_coefs|prediction_coefs|modality_router|linear_left|linear_right|per_layer_input_gate|per_layer_projection|0|1|2|ffw_layer_1|ffw_layer_2|pos_proj|post|linear_start|linear_end|embedding_projection).*?)|(?:\\bmodel\\.layers\\.[\\d]{1,}\\.(?:self_attn|attention|attn|mlp|feed_forward|ffn|dense)\\.(?:(?:q_proj|k_proj|v_proj|o_proj|gate_proj|up_proj|down_proj|correction_coefs|prediction_coefs|modality_router|linear_left|linear_right|per_layer_input_gate|per_layer_projection|0|1|2|ffw_layer_1|ffw_layer_2|pos_proj|post|linear_start|linear_end|embedding_projection)))",
-  "task_type": …,
+  "target_parameters": null,
+  "task_type": null,
 "trainable_token_indices": null,
   "use_dora": false,
   "use_qalora": false,
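Two things stand out in the new config: `task_type` is now null, with the base model class recorded under the new `auto_mapping` block instead, and `target_modules` is a single regular expression rather than a list of module names (PEFT matches a string-valued `target_modules` against each module path with `re.fullmatch`). Below is a minimal sketch of inspecting both, assuming the config above is saved locally as `adapter_config.json` and a `transformers` build recent enough to ship Gemma 3n; the module names tried are illustrative, not taken from this repo:

```python
import importlib
import json
import re

# Read the adapter config exactly as PEFT 0.17.0 saved it.
with open("adapter_config.json") as f:
    config = json.load(f)

# With "task_type": null, PEFT's AutoPeftModel falls back to "auto_mapping":
# import parent_library, then look up base_model_class by name.
auto = config["auto_mapping"]
base_cls = getattr(
    importlib.import_module(auto["parent_library"]),  # transformers.models.gemma3n.modeling_gemma3n
    auto["base_model_class"],                         # Gemma3nForConditionalGeneration
)
print("base model class:", base_cls.__name__)

# The target_modules regex decides which modules receive LoRA weights.
# json.load has already turned the "\\b" / "\\d" escapes into regex \b / \d.
pattern = re.compile(config["target_modules"])
for name in [
    "model.layers.0.self_attn.q_proj",            # second alternative: model.layers.N...
    "model.language_model.layers.3.mlp.up_proj",  # first alternative: contains "language"
    "model.vision_tower.blocks.0.attn.qkv",       # no "language"/"text": skipped
]:
    print(f"{name}: {'LoRA' if pattern.fullmatch(name) else 'skipped'}")
```

The two alternatives cover both naming schemes: any path through the language/text tower that ends in a known projection, and flat `model.layers.N.*` paths, while leaving vision and audio modules untouched.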
adapter_model.safetensors CHANGED

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:…
+oid sha256:c854bf35b67bcb829053eb11fae4c6dd965384da382ece2a0f1d83858ed7a74b
 size 76913008
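`adapter_model.safetensors` is stored through Git LFS, so the diff only touches the pointer file: a new `oid sha256:` content hash with an unchanged `size` of 76913008 bytes, i.e. retrained adapter weights of the same byte count. After downloading the actual file, the pointer hash doubles as an integrity check; a minimal sketch, with the local path assumed:

```python
import hashlib

# Expected digest, taken from the new LFS pointer in this commit.
EXPECTED = "c854bf35b67bcb829053eb11fae4c6dd965384da382ece2a0f1d83858ed7a74b"

# Hash the downloaded weights in 1 MiB chunks to keep memory use flat.
sha = hashlib.sha256()
with open("adapter_model.safetensors", "rb") as f:  # assumed local path
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha.update(chunk)

assert sha.hexdigest() == EXPECTED, "file does not match the LFS pointer"
print("sha256 OK")
```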