Nutnell committed on
Commit
4146ecf
·
verified ·
1 Parent(s): a89712a

Update fine_tune.py

Browse files
Files changed (1) hide show
  1. fine_tune.py +3 -3
fine_tune.py CHANGED
@@ -10,7 +10,7 @@ from transformers import (
10
  from peft import LoraConfig, PeftModel
11
  from trl import SFTTrainer
12
  from fastapi import FastAPI
13
- from pydantic import BaseModel # 1. ADD THIS IMPORT
14
  import uvicorn
15
 
16
  # --- Configuration ---
@@ -69,7 +69,7 @@ if not os.path.exists(os.path.join(output_dir, 'adapter_config.json')):
69
  group_by_length=True,
70
  lr_scheduler_type="linear",
71
  push_to_hub=True,
72
- hub_model_id="Nutnell/DirectEd-AI",
73
  )
74
 
75
  # Initialize Trainer
@@ -77,7 +77,7 @@ if not os.path.exists(os.path.join(output_dir, 'adapter_config.json')):
77
  model=model,
78
  train_dataset=dataset,
79
  peft_config=peft_config,
80
- dataset_text_field="text", # Ensure your dataset has a 'text' column
81
  args=training_arguments,
82
  )
83
 
 
10
  from peft import LoraConfig, PeftModel
11
  from trl import SFTTrainer
12
  from fastapi import FastAPI
13
+ from pydantic import BaseModel
14
  import uvicorn
15
 
16
  # --- Configuration ---
 
69
  group_by_length=True,
70
  lr_scheduler_type="linear",
71
  push_to_hub=True,
72
+ hub_model_id="Nutnell/direct-ed-finetune-job",
73
  )
74
 
75
  # Initialize Trainer
 
77
  model=model,
78
  train_dataset=dataset,
79
  peft_config=peft_config,
80
+ dataset_text_field="text",
81
  args=training_arguments,
82
  )
83