|
|
|
|
|
|
|
|
|
|
# Ensure an up-to-date transformers/datasets/accelerate stack is installed.
!pip uninstall -y transformers
!pip install -U transformers datasets accelerate
!pip show transformers | grep Version
# In Colab, the runtime may need to be restarted after reinstalling transformers.
|
|
|
|
|
import os

# Disable Weights & Biases logging so Trainer does not prompt for an API key.
os.environ["WANDB_DISABLED"] = "true"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import pandas as pd

# Create a tiny labeled sentiment dataset (1 = positive, 0 = negative) and save it as CSV.
data = {
    "text": [
        "I love this movie, it was fantastic!",
        "This product is terrible and useless.",
        "What a great experience, I will come again!",
        "I hate this item, waste of money.",
        "Absolutely amazing service and food.",
        "Worst app I have ever used.",
        "The phone works perfectly and fast.",
        "It broke after two days, horrible!",
        "Very happy with my purchase.",
        "Not worth the price at all."
    ],
    "label": [1, 0, 1, 0, 1, 0, 1, 0, 1, 0]
}

df = pd.DataFrame(data)
df.to_csv("sentiment_data.csv", index=False)
print("✅ Sample data created:\n")
print(df.head())
|
|
|
|
|
|
|
|
|
|
|
|
|
|
from datasets import load_dataset

# Load the CSV with the datasets library and split it into train/test sets.
dataset = load_dataset("csv", data_files="sentiment_data.csv")
dataset = dataset["train"].train_test_split(test_size=0.3, seed=42)

train_dataset = dataset["train"]
test_dataset = dataset["test"]

print("\n🔹 Train samples:", len(train_dataset))
print("🔹 Test samples:", len(test_dataset))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
from transformers import AutoTokenizer

model_name = "bert-base-uncased"
tokenizer = AutoTokenizer.from_pretrained(model_name)

# Tokenize each text, padding/truncating to a fixed length of 64 tokens.
def preprocess_function(examples):
    return tokenizer(
        examples["text"],
        padding="max_length",
        truncation=True,
        max_length=64,
    )

train_tokenized = train_dataset.map(preprocess_function, batched=True)
test_tokenized = test_dataset.map(preprocess_function, batched=True)
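
As a quick optional sanity check (only a sketch of what the mapped dataset holds), one tokenized training example can be printed to confirm that input_ids, attention_mask, and the original label column are all present:

# Inspect one tokenized training example (column names, first few token ids, and the label).
sample = train_tokenized[0]
print(sample.keys())
print(sample["input_ids"][:10], sample["label"])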
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import torch
from transformers import AutoModelForSequenceClassification, TrainingArguments, Trainer

# Load BERT with a fresh 2-class classification head.
# (A warning about newly initialized weights is expected: the head is trained from scratch.)
model = AutoModelForSequenceClassification.from_pretrained(model_name, num_labels=2)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
from sklearn.metrics import accuracy_score, f1_score

# Compute accuracy and F1 from the raw logits returned at evaluation time.
def compute_metrics(eval_pred):
    logits, labels = eval_pred
    preds = torch.argmax(torch.tensor(logits), dim=1)
    acc = accuracy_score(labels, preds)
    f1 = f1_score(labels, preds)
    return {"accuracy": acc, "f1": f1}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
training_args = TrainingArguments(
    output_dir="./results",
    learning_rate=2e-5,
    per_device_train_batch_size=4,
    per_device_eval_batch_size=4,
    num_train_epochs=3,
    weight_decay=0.01,
    eval_strategy="epoch",   # named "evaluation_strategy" in older transformers releases
    save_strategy="epoch",
    logging_dir="./logs",
    logging_steps=10
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=train_tokenized,
    eval_dataset=test_tokenized,
    tokenizer=tokenizer,
    compute_metrics=compute_metrics
)

trainer.train()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
eval_results = trainer.evaluate()
print("\n📊 Evaluation results:", eval_results)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
text_samples = [
    "I really love this product!",
    "This is the worst movie ever."
]

# Tokenize the samples, run them through the fine-tuned model, and pick the highest-scoring class.
# Inputs are moved to the model's device (GPU if training ran there).
model.eval()
inputs = tokenizer(text_samples, padding=True, truncation=True, max_length=64, return_tensors="pt")
inputs = {k: v.to(model.device) for k, v in inputs.items()}
with torch.no_grad():
    outputs = model(**inputs)
preds = torch.argmax(outputs.logits, dim=1)

for text, label in zip(text_samples, preds):
    print(f"\n🗣️ {text}")
    print("➡️ Prediction:", "Positive (1)" if label == 1 else "Negative (0)")
|
|
|