TLF-7B-LLM-01: Upload LoRA adapters (2026-03-29)
Files changed:
- README.md: +2 -7
- adapter_config.json: +3 -3
README.md

```diff
@@ -10,11 +10,6 @@ tags:
 - hindi
 - sft
 - lora
-- TransLiteral
-- Kannada
-- Indic
-- Oriya
-- Punjabi
 ---
 # TLF-7B-LLM-01

```
````diff
@@ -64,7 +59,7 @@ To achieve the intended structured output, use the following prompt format:

 ```python
 import mlx_lm
-model, tokenizer = mlx_lm.load("
+model, tokenizer = mlx_lm.load("AssignArc/TLF-7B-LLM-01")

 prompt = "Provide a comprehensive morphological breakdown for: 'Abacus'"
 # Use Sarvam/Llama template logic here
````
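
For reference, the snippet this hunk edits can be rounded out into a runnable call roughly as follows. This is a minimal sketch: only the two `mlx_lm` lines and the prompt come from the README; the chat-template handling and the `max_tokens` budget are assumptions.

```python
# Sketch of the MLX path after this change (assumes mlx-lm is installed).
import mlx_lm

model, tokenizer = mlx_lm.load("AssignArc/TLF-7B-LLM-01")

prompt = "Provide a comprehensive morphological breakdown for: 'Abacus'"

# One plausible reading of "Use Sarvam/Llama template logic here":
# apply the tokenizer's own chat template before generating (an assumption).
if tokenizer.chat_template is not None:
    prompt = tokenizer.apply_chat_template(
        [{"role": "user", "content": prompt}],
        tokenize=False,
        add_generation_prompt=True,
    )

print(mlx_lm.generate(model, tokenizer, prompt=prompt, max_tokens=256))
```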
```diff
@@ -79,7 +74,7 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
 from peft import PeftModel

 base_model = AutoModelForCausalLM.from_pretrained("sarvamai/sarvam-1")
-model = PeftModel.from_pretrained(base_model, "
+model = PeftModel.from_pretrained(base_model, "AssignArc/TLF-7B-LLM-01")
 tokenizer = AutoTokenizer.from_pretrained("sarvamai/sarvam-1")

 inputs = tokenizer(prompt, return_tensors="pt")
```
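
The Transformers path in this hunk stops at tokenization; completing it through generation looks roughly like the sketch below. The greedy decoding and the `max_new_tokens` value are assumptions, not part of the README.

```python
# Sketch of the Transformers + PEFT path after this change.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_model = AutoModelForCausalLM.from_pretrained("sarvamai/sarvam-1")
model = PeftModel.from_pretrained(base_model, "AssignArc/TLF-7B-LLM-01")
tokenizer = AutoTokenizer.from_pretrained("sarvamai/sarvam-1")

prompt = "Provide a comprehensive morphological breakdown for: 'Abacus'"
inputs = tokenizer(prompt, return_tensors="pt")

# Token budget and greedy decoding are assumptions.
with torch.no_grad():
    output_ids = model.generate(**inputs, max_new_tokens=256)

print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```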
adapter_config.json

```diff
@@ -1,8 +1,8 @@
 {
-  "adapter_path": "
+  "adapter_path": ".",
   "batch_size": 1,
-  "config": "
-  "data": "
+  "config": "./mlx_config.yaml",
+  "data": "./data",
   "fine_tune_type": "lora",
   "grad_accumulation_steps": 64,
   "grad_checkpoint": true,
```
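
These keys mirror the arguments of mlx-lm's LoRA trainer, and switching the paths to repo-relative values means the adapters can be attached straight from a checkout of this repo. A minimal sketch of consuming them, assuming mlx-lm and a local clone of AssignArc/TLF-7B-LLM-01 (the clone step is not shown):

```python
# Sketch: attach the uploaded adapters to the base model via mlx-lm.
# Run from inside a local clone of AssignArc/TLF-7B-LLM-01, so that
# adapter_path="." matches the "adapter_path" value in adapter_config.json.
import mlx_lm

model, tokenizer = mlx_lm.load(
    "sarvamai/sarvam-1",  # base model named in the README snippets
    adapter_path=".",
)

print(mlx_lm.generate(
    model, tokenizer,
    prompt="Provide a comprehensive morphological breakdown for: 'Abacus'",
    max_tokens=256,
))
```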