Hajime MATSUMOTO committed on
Commit
a6b7407
·
1 Parent(s): 8cdd59c

Fix SFTTrainer: use tokenizer instead of processing_class

Browse files
Files changed (1) hide show
  1. train.py +1 -1
train.py CHANGED
@@ -328,7 +328,7 @@ def main():
328
  eval_dataset=dataset["test"],
329
  args=training_args,
330
  peft_config=lora_config,
331
- processing_class=tokenizer,
332
  max_seq_length=2048, # 7Bなので少し長く
333
  packing=True,
334
  dataset_text_field="text",
 
328
  eval_dataset=dataset["test"],
329
  args=training_args,
330
  peft_config=lora_config,
331
+ tokenizer=tokenizer,
332
  max_seq_length=2048, # 7Bなので少し長く
333
  packing=True,
334
  dataset_text_field="text",