Upload train.py with huggingface_hub
train.py CHANGED

@@ -19,10 +19,9 @@ from transformers import (
     AutoModelForCausalLM,
     AutoTokenizer,
     BitsAndBytesConfig,
-    TrainingArguments,
 )
 from peft import LoraConfig
-from trl import SFTTrainer
+from trl import SFTTrainer, SFTConfig
 
 load_dotenv()
 
@@ -102,8 +101,8 @@ def main():
             output_texts.append(text)
         return output_texts
 
-    # Training Arguments
-    training_args = TrainingArguments(
+    # Training Arguments (SFTConfig for TRL 0.27+)
+    training_args = SFTConfig(
         output_dir=OUTPUT_DIR,
         per_device_train_batch_size=1,
         gradient_accumulation_steps=8,
@@ -130,7 +129,7 @@ def main():
         train_dataset=dataset,
         peft_config=peft_config,
         formatting_func=formatting_prompts_func,
-        tokenizer=tokenizer,
+        processing_class=tokenizer,  # renamed from 'tokenizer' in TRL 0.27+
         args=training_args,
     )
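In short, the commit swaps transformers.TrainingArguments for TRL's SFTConfig and renames the SFTTrainer keyword tokenizer to processing_class. For context, below is a minimal sketch of how the migrated pieces fit together; the model name, output directory, dataset, LoRA settings, and formatting-function body are illustrative placeholders, not the actual values from train.py.

# Sketch only: MODEL_NAME, OUTPUT_DIR, the dataset, and the LoRA settings
# below are placeholders standing in for what train.py actually defines.
from datasets import load_dataset
from peft import LoraConfig
from transformers import AutoModelForCausalLM, AutoTokenizer
from trl import SFTTrainer, SFTConfig

MODEL_NAME = "meta-llama/Llama-2-7b-hf"  # placeholder
OUTPUT_DIR = "./sft-output"              # placeholder

tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
dataset = load_dataset("json", data_files="train.jsonl", split="train")  # placeholder

def formatting_prompts_func(examples):
    # Batched formatting: returns one training string per example,
    # mirroring the output_texts pattern in train.py.
    output_texts = []
    for prompt, completion in zip(examples["prompt"], examples["completion"]):
        output_texts.append(f"{prompt}\n{completion}")
    return output_texts

peft_config = LoraConfig(r=16, lora_alpha=32, task_type="CAUSAL_LM")  # placeholder values

# SFTConfig subclasses transformers.TrainingArguments, so the existing
# argument names (output_dir, batch size, ...) carry over unchanged.
training_args = SFTConfig(
    output_dir=OUTPUT_DIR,
    per_device_train_batch_size=1,
    gradient_accumulation_steps=8,
)

trainer = SFTTrainer(
    model=model,
    train_dataset=dataset,
    peft_config=peft_config,
    formatting_func=formatting_prompts_func,
    processing_class=tokenizer,  # the 'tokenizer=' keyword was renamed
    args=training_args,
)
trainer.train()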