noranisa committed on
Commit
f57e115
·
verified ·
1 Parent(s): 61fe0b8

Create training/train_model.py

Browse files
Files changed (1) hide show
  1. training/train_model.py +61 -0
training/train_model.py ADDED
@@ -0,0 +1,61 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from datasets import load_dataset
2
+ from transformers import AutoTokenizer, AutoModelForSequenceClassification, Trainer, TrainingArguments
3
+ import numpy as np
4
+ from sklearn.metrics import accuracy_score, precision_recall_fscore_support
5
+
6
+ model_name = "indobenchmark/indobert-base-p1"
7
+
8
+ dataset = load_dataset("csv", data_files="data/eval_dataset.csv")
9
+
10
+ label_map = {"negative":0, "neutral":1, "positive":2}
11
+
12
+ def encode_label(example):
13
+ example["label"] = label_map[example["label"]]
14
+ return example
15
+
16
+ dataset = dataset.map(encode_label)
17
+
18
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
19
+
20
+ def preprocess(example):
21
+ return tokenizer(example["text"], truncation=True, padding=True)
22
+
23
+ dataset = dataset.map(preprocess)
24
+
25
+ model = AutoModelForSequenceClassification.from_pretrained(model_name, num_labels=3)
26
+
27
+ def compute_metrics(eval_pred):
28
+ logits, labels = eval_pred
29
+ preds = np.argmax(logits, axis=1)
30
+
31
+ precision, recall, f1, _ = precision_recall_fscore_support(labels, preds, average='weighted')
32
+ acc = accuracy_score(labels, preds)
33
+
34
+ return {
35
+ "accuracy": acc,
36
+ "f1": f1,
37
+ "precision": precision,
38
+ "recall": recall
39
+ }
40
+
41
+ training_args = TrainingArguments(
42
+ output_dir="./model",
43
+ learning_rate=2e-5,
44
+ per_device_train_batch_size=8,
45
+ num_train_epochs=3,
46
+ evaluation_strategy="epoch"
47
+ )
48
+
49
+ trainer = Trainer(
50
+ model=model,
51
+ args=training_args,
52
+ train_dataset=dataset["train"],
53
+ eval_dataset=dataset["train"],
54
+ compute_metrics=compute_metrics
55
+ )
56
+
57
+ trainer.train()
58
+
59
+ # 🔥 simpan model
60
+ trainer.save_model("model/final_model")
61
+ tokenizer.save_pretrained("model/final_model")