Dataset used to train: stanfordnlp/sst2
How to use VityaVitalich/bert-tiny-sst2 with Transformers:
```python
# Use a pipeline as a high-level helper
from transformers import pipeline

pipe = pipeline("text-classification", model="VityaVitalich/bert-tiny-sst2")
```
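A quick smoke test of the pipeline might look like this (the example sentences are arbitrary and not from the card; exact label names and scores depend on the checkpoint's `id2label` mapping):

```python
# Each result is a dict with a "label" and a confidence "score"
results = pipe([
    "a gorgeous, witty, seductive movie.",
    "the plot is nothing but boilerplate clichés.",
])
for r in results:
    print(r["label"], round(r["score"], 4))
```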
```python
# Load model directly
from transformers import AutoTokenizer, AutoModelForSequenceClassification

tokenizer = AutoTokenizer.from_pretrained("VityaVitalich/bert-tiny-sst2")
model = AutoModelForSequenceClassification.from_pretrained("VityaVitalich/bert-tiny-sst2")
```
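When loading the model directly, inference is a tokenize, forward, softmax sequence. A minimal sketch (the input sentence is an arbitrary example):

```python
import torch

inputs = tokenizer("a gorgeous, witty, seductive movie.", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# Convert logits to class probabilities and look up the label name
probs = torch.softmax(logits, dim=-1)
pred_id = int(probs.argmax(dim=-1))
print(model.config.id2label[pred_id], float(probs[0, pred_id]))
```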
This model is a fine-tuned version of M-FAC/bert-tiny-finetuned-sst2 on the sst2 dataset. It achieves the per-epoch results reported in the table below on the evaluation set. The model was fine-tuned with the following script:
```python
from transformers import (
    BertTokenizer,
    BertForSequenceClassification,
    TrainingArguments,
    Trainer,
    DataCollatorWithPadding,
)
import datasets
import numpy as np

model = BertForSequenceClassification.from_pretrained('VityaVitalich/bert-tiny-sst2')
tokenizer = BertTokenizer.from_pretrained('VityaVitalich/bert-tiny-sst2')

def create_data(tokenizer):
    # SST-2 train/validation splits; the `idx` column is not needed for training
    train_set = datasets.load_dataset('sst2', split='train').remove_columns(['idx'])
    val_set = datasets.load_dataset('sst2', split='validation').remove_columns(['idx'])

    def tokenize_func(examples):
        return tokenizer(examples["sentence"], max_length=128, padding='max_length', truncation=True)

    encoded_dataset_train = train_set.map(tokenize_func, batched=True)
    encoded_dataset_test = val_set.map(tokenize_func, batched=True)
    # With padding='max_length' above, the collator's dynamic padding is a no-op;
    # it still guarantees valid batches if max-length padding is ever dropped.
    data_collator = DataCollatorWithPadding(tokenizer)
    return encoded_dataset_train, encoded_dataset_test, data_collator

encoded_dataset_train, encoded_dataset_test, data_collator = create_data(tokenizer)

def compute_metrics(eval_pred):
    # Minimal accuracy metric, matching the Accuracy column in the table below
    logits, labels = eval_pred
    predictions = np.argmax(logits, axis=-1)
    return {"accuracy": (predictions == labels).mean()}

training_args = TrainingArguments(
    output_dir='./results',
    learning_rate=3e-5,
    per_device_train_batch_size=128,
    per_device_eval_batch_size=128,
    load_best_model_at_end=True,
    num_train_epochs=5,
    weight_decay=0.1,
    fp16=True,
    fp16_full_eval=True,
    evaluation_strategy="epoch",
    seed=42,
    save_strategy="epoch",
    save_total_limit=5,
    logging_strategy="epoch",
    report_to="all",
)

trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=encoded_dataset_train,
    eval_dataset=encoded_dataset_test,
    data_collator=data_collator,
    compute_metrics=compute_metrics,
)

trainer.evaluate(encoded_dataset_test)
```
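As written, the script only evaluates the published checkpoint. To actually reproduce the fine-tuning run, a `trainer.train()` call would precede the evaluation; a minimal sketch:

```python
# Fine-tune, then evaluate; because load_best_model_at_end=True,
# the checkpoint with the lowest validation loss is restored first.
trainer.train()
metrics = trainer.evaluate(encoded_dataset_test)
print(metrics)
```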
The hyperparameters used during training are those in the `TrainingArguments` above (learning rate 3e-5, train/eval batch size 128, 5 epochs, weight decay 0.1, fp16, seed 42). Training produced the following results:
| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|---|---|---|---|---|
| 0.2313 | 1.0 | 527 | 0.4771 | 0.8280 |
| 0.2057 | 2.0 | 1054 | 0.4937 | 0.8257 |
| 0.1949 | 3.0 | 1581 | 0.5121 | 0.8177 |
| 0.1904 | 4.0 | 2108 | 0.5100 | 0.8200 |
| 0.1879 | 5.0 | 2635 | 0.5137 | 0.8211 |
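Since `load_best_model_at_end=True` with the Trainer's default selection metric (validation loss), the epoch-1 checkpoint (validation loss 0.4771, accuracy 0.8280) is the one restored at the end of training; the per-epoch numbers above suggest mild overfitting after the first epoch.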
Base model: M-FAC/bert-tiny-finetuned-sst2