# Fine-tune bert-base-uncased as a binary classifier on a web-accessibility
# dataset, then push the resulting model to the Hugging Face Hub.
from datasets import load_dataset
from transformers import BertTokenizer, BertForSequenceClassification, Trainer, TrainingArguments

# Load the dataset from the Hugging Face Hub.
dataset = load_dataset("ilyada/web_accessibility_dataset")
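
# Optional sanity check (an assumption, not in the original script): print the
# dataset to confirm it exposes the "text" and "label" columns that the steps
# below rely on.
print(dataset)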

# Load the pretrained tokenizer and a BERT model with a two-label classification head.
model_name = "bert-base-uncased"
tokenizer = BertTokenizer.from_pretrained(model_name)
model = BertForSequenceClassification.from_pretrained(model_name, num_labels=2)

# Tokenize the text column, padding and truncating to the model's maximum length.
def tokenize_function(examples):
    return tokenizer(examples["text"], padding="max_length", truncation=True)

tokenized_datasets = dataset.map(tokenize_function, batched=True)
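
# Optional sketch: verify that tokenization added input_ids and attention_mask
# to each record. Illustrative only; safe to remove.
print(tokenized_datasets["train"][0].keys())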

# Hold out 20% of the data for evaluation.
train_test_split = tokenized_datasets["train"].train_test_split(test_size=0.2)
train_dataset = train_test_split["train"]
test_dataset = train_test_split["test"]

# Training configuration. push_to_hub=True requires being logged in to the Hub
# (e.g. via `huggingface-cli login`); hub_strategy="end" uploads a single time
# once training finishes.
training_args = TrainingArguments(
    output_dir="./results",
    evaluation_strategy="epoch",  # evaluate at the end of every epoch
    learning_rate=2e-5,
    per_device_train_batch_size=8,
    per_device_eval_batch_size=8,
    num_train_epochs=3,
    weight_decay=0.01,
    push_to_hub=True,  # enables pushing the model to the Hugging Face Hub
    hub_model_id="ilyada/web_accessibility_model",  # target repo on the Hub
    hub_strategy="end",
)
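
# Optional addition (not in the original script): a metrics callback so that
# evaluation reports accuracy instead of only the loss. A minimal sketch that
# assumes labels live in the standard "label" column; wire it in by passing
# compute_metrics=compute_metrics to the Trainer below.
import numpy as np

def compute_metrics(eval_pred):
    logits, labels = eval_pred
    predictions = np.argmax(logits, axis=-1)  # index of the higher-scoring class
    return {"accuracy": (predictions == labels).mean()}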

trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=train_dataset,
    eval_dataset=test_dataset,
    tokenizer=tokenizer,  # also saves and pushes the tokenizer with the model
)

# Fine-tune the model.
trainer.train()

# Evaluate on the held-out split and print the results.
results = trainer.evaluate()
print(results)

# Upload the final model (and tokenizer) to the Hub repo configured above.
trainer.push_to_hub()
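
# Usage sketch (illustrative, not part of the original script): once the push
# completes, the fine-tuned model can be loaded back from the Hub by repo id.
# The example input below is hypothetical.
from transformers import pipeline

classifier = pipeline("text-classification", model="ilyada/web_accessibility_model")
print(classifier("This page's images are missing alt text."))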