Update README.md
Browse files
README.md
CHANGED
|
@@ -1,15 +1,17 @@
|
|
| 1 |
-
|
| 2 |
---
|
| 3 |
license: mit
|
| 4 |
tags:
|
| 5 |
-
|
| 6 |
-
|
| 7 |
-
|
| 8 |
-
|
| 9 |
-
|
|
|
|
| 10 |
language: en
|
| 11 |
library_name: transformers
|
| 12 |
base_model: microsoft/phi-2
|
|
|
|
|
|
|
| 13 |
---
|
| 14 |
|
| 15 |
# Phi-2 QLoRA Fine-Tuned Model
|
|
@@ -153,5 +155,4 @@ pip install torch transformers peft datasets huggingface_hub python-dotenv
|
|
| 153 |
* `r=8`, `lora_alpha=16`, `lora_dropout=0.05`
|
| 154 |
* `target_modules=["q_proj","v_proj"]` (adjust for different base models)
|
| 155 |
* Learning rate: `2e-4`
|
| 156 |
-
* Batch si
|
| 157 |
-
|
|
|
|
|
|
|
| 1 |
---
|
| 2 |
license: mit
|
| 3 |
tags:
|
| 4 |
+
- causal-lm
|
| 5 |
+
- instruction-following
|
| 6 |
+
- LoRA
|
| 7 |
+
- QLoRA
|
| 8 |
+
- sentiment-analysis
|
| 9 |
+
- quantized
|
| 10 |
language: en
|
| 11 |
library_name: transformers
|
| 12 |
base_model: microsoft/phi-2
|
| 13 |
+
metrics:
|
| 14 |
+
- accuracy
|
| 15 |
---
|
| 16 |
|
| 17 |
# Phi-2 QLoRA Fine-Tuned Model
|
|
|
|
| 155 |
* `r=8`, `lora_alpha=16`, `lora_dropout=0.05`
|
| 156 |
* `target_modules=["q_proj","v_proj"]` (adjust for different base models)
|
| 157 |
* Learning rate: `2e-4`
|
| 158 |
+
* Batch si
|
|
|