yagnik12's picture
Update train.py
b7b9e5f verified
raw
history blame
1.11 kB
from datasets import load_dataset
from transformers import AutoTokenizer, AutoModelForSequenceClassification, Trainer, TrainingArguments
import os
# Checkpoint used for both the tokenizer and the classifier being fine-tuned.
model_name = "distilbert-base-uncased"
tokenizer = AutoTokenizer.from_pretrained(model_name)

# Pull the BiScope dataset from the Hugging Face Hub (downloads on first run).
dataset = load_dataset("HanxiGuo/BiScope_Data")
def tokenize(batch):
    """Tokenize a batch of examples, padding/truncating `text` to 256 tokens."""
    return tokenizer(
        batch["text"],
        max_length=256,
        padding="max_length",
        truncation=True,
    )
# Classification head with two labels (presumably human vs. AI-generated text
# — confirm against the dataset card).
model = AutoModelForSequenceClassification.from_pretrained(model_name, num_labels=2)

# Tokenize every split; batched mode processes many examples per call.
tokenized = dataset.map(tokenize, batched=True)
# Run configuration: one epoch, per-epoch eval/checkpointing, and automatic
# upload of checkpoints to the Hub.
# NOTE(review): in newer transformers releases `evaluation_strategy` was
# renamed to `eval_strategy` — confirm against the pinned version.
training_args = TrainingArguments(
    output_dir="./results",
    num_train_epochs=1,
    per_device_train_batch_size=16,
    per_device_eval_batch_size=16,
    # Evaluate and save a checkpoint at the end of every epoch.
    evaluation_strategy="epoch",
    save_strategy="epoch",
    # Publish to a model repo (not a Space); auth token comes from the
    # environment and will be None if HF_TOKEN is unset.
    push_to_hub=True,
    hub_model_id="yagnik12/AI_Text_Detecter_HanxiGuo_BiScope-Data",  # ✅ model repo, not Space
    hub_token=os.getenv("HF_TOKEN"),
)
# Assemble the Trainer, fine-tune, then push the final model to the Hub.
trainer = Trainer(
    args=training_args,
    model=model,
    train_dataset=tokenized["train"],
    # NOTE(review): assumes the dataset ships a "test" split — verify.
    eval_dataset=tokenized["test"],
    tokenizer=tokenizer,
)
trainer.train()
trainer.push_to_hub()