upload file
- README.md +58 -0
- added_tokens.json +3 -0
- bpe.codes +0 -0
- config.json +41 -0
- model.safetensors +3 -0
- special_tokens_map.json +9 -0
- tokenizer_config.json +55 -0
- vocab.txt +0 -0
README.md
ADDED
@@ -0,0 +1,58 @@
---
license: apache-2.0
tags:
- text-classification
- sentiment-analysis
- vietnamese
- vsfc
- phobert
language:
- vi
datasets:
- uit-vsfc
model-index:
- name: VSFC Sentiment Classifier (PhoBERT)
  results:
  - task:
      type: text-classification
      name: Sentiment Analysis
    dataset:
      name: UIT-VSFC
      type: uit-vsfc
    metrics:
    - type: accuracy
      value: 85.3
    - type: f1
      value: 84.7
---

# VSFC Sentiment Classifier using PhoBERT

This model is fine-tuned from [`vinai/phobert-base`](https://huggingface.co/vinai/phobert-base) on the UIT-VSFC dataset for Vietnamese sentiment analysis.

## 🧠 Model Details

- **Model type**: Transformer (BERT-based)
- **Base model**: [`vinai/phobert-base`](https://huggingface.co/vinai/phobert-base)
- **Fine-tuned task**: Sentence-level sentiment classification
- **Target labels**: Positive, Negative, Neutral
- **Tokenizer**: SentencePiece BPE

## 📚 Training Data

- **Dataset**: [UIT-VSFC](https://drive.google.com/drive/folders/1xclbjHHK58zk2X6iqbvMPS2rcy9y9E0X)
- **Language**: Vietnamese
- **License**: Academic use
- Students' feedback is a valuable resource for interdisciplinary research that combines sentiment analysis with education.

## 🚀 How to Use

```python
from transformers import AutoTokenizer, AutoModelForSequenceClassification

tokenizer = AutoTokenizer.from_pretrained("tmt3103/VSFC-sentiment-classify-phoBERT")
model = AutoModelForSequenceClassification.from_pretrained("tmt3103/VSFC-sentiment-classify-phoBERT")

inputs = tokenizer("Giảng viên thân thiện dễ thương", return_tensors="pt")  # "The lecturer is friendly and likable"
outputs = model(**inputs)
predicted_class = outputs.logits.argmax(dim=-1).item()
```
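To turn the raw logits into a readable prediction, here is a minimal follow-up sketch, continuing from the variables in the snippet above. Note that this checkpoint ships generic label names (`LABEL_0`..`LABEL_3` in config.json), so remap them if you know the label order used during fine-tuning.

```python
import torch

# Convert logits to probabilities and look up the (generic) label name from the
# model config; assumes `model`, `outputs`, and `predicted_class` from the
# README snippet above.
probs = torch.softmax(outputs.logits, dim=-1)
label = model.config.id2label[predicted_class]
print(label, round(probs[0, predicted_class].item(), 4))
```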
added_tokens.json
ADDED
@@ -0,0 +1,3 @@
```json
{
  "<mask>": 64000
}
```
bpe.codes
ADDED
The diff for this file is too large to render. See raw diff.
config.json
ADDED
@@ -0,0 +1,41 @@
```json
{
  "architectures": [
    "RobertaForSequenceClassification"
  ],
  "attention_probs_dropout_prob": 0.1,
  "bos_token_id": 0,
  "classifier_dropout": null,
  "eos_token_id": 2,
  "gradient_checkpointing": false,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "hidden_size": 768,
  "id2label": {
    "0": "LABEL_0",
    "1": "LABEL_1",
    "2": "LABEL_2",
    "3": "LABEL_3"
  },
  "initializer_range": 0.02,
  "intermediate_size": 3072,
  "label2id": {
    "LABEL_0": 0,
    "LABEL_1": 1,
    "LABEL_2": 2,
    "LABEL_3": 3
  },
  "layer_norm_eps": 1e-05,
  "max_position_embeddings": 258,
  "model_type": "roberta",
  "num_attention_heads": 12,
  "num_hidden_layers": 12,
  "pad_token_id": 1,
  "position_embedding_type": "absolute",
  "problem_type": "single_label_classification",
  "tokenizer_class": "PhobertTokenizer",
  "torch_dtype": "float32",
  "transformers_version": "4.51.3",
  "type_vocab_size": 1,
  "use_cache": true,
  "vocab_size": 64001
}
```
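The fields above can be sanity-checked after download by loading the config directly; a minimal sketch, assuming the repo id from the README's usage example:

```python
from transformers import AutoConfig

# Inspect the fields shipped in config.json above.
config = AutoConfig.from_pretrained("tmt3103/VSFC-sentiment-classify-phoBERT")

print(config.model_type)               # "roberta" (PhoBERT is a RoBERTa variant)
print(config.num_labels)               # 4 -> LABEL_0 .. LABEL_3
print(config.max_position_embeddings)  # 258, so keep tokenized inputs short
print(config.vocab_size)               # 64001 (ids 0..63999 plus <mask> at 64000, per added_tokens.json)
```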
model.safetensors
ADDED
@@ -0,0 +1,3 @@
```text
version https://git-lfs.github.com/spec/v1
oid sha256:5cb4d0f31150e4bf6488b0f8eb28c0f272d624b1a08190dba31f2b4e977980ae
size 540029536
```
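The pointer above means the actual weights live in Git LFS; the `oid` is the SHA-256 of the full file contents. A minimal integrity check, assuming `model.safetensors` has been downloaded locally:

```python
import hashlib

# Hash the downloaded file in chunks and compare against the LFS oid above.
sha = hashlib.sha256()
with open("model.safetensors", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha.update(chunk)
print(sha.hexdigest() == "5cb4d0f31150e4bf6488b0f8eb28c0f272d624b1a08190dba31f2b4e977980ae")
```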
special_tokens_map.json
ADDED
@@ -0,0 +1,9 @@
```json
{
  "bos_token": "<s>",
  "cls_token": "<s>",
  "eos_token": "</s>",
  "mask_token": "<mask>",
  "pad_token": "<pad>",
  "sep_token": "</s>",
  "unk_token": "<unk>"
}
```
tokenizer_config.json
ADDED
@@ -0,0 +1,55 @@
```json
{
  "added_tokens_decoder": {
    "0": {
      "content": "<s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "1": {
      "content": "<pad>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "2": {
      "content": "</s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "3": {
      "content": "<unk>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "64000": {
      "content": "<mask>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "bos_token": "<s>",
  "clean_up_tokenization_spaces": false,
  "cls_token": "<s>",
  "eos_token": "</s>",
  "extra_special_tokens": {},
  "mask_token": "<mask>",
  "model_max_length": 1000000000000000019884624838656,
  "pad_token": "<pad>",
  "sep_token": "</s>",
  "tokenizer_class": "PhobertTokenizer",
  "unk_token": "<unk>"
}
```
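A quick check that the loaded tokenizer wires up the special tokens as declared above; a minimal sketch, assuming the repo id from the README's usage example:

```python
from transformers import AutoTokenizer

# Loads the PhobertTokenizer defined by this config (backed by bpe.codes and vocab.txt).
tokenizer = AutoTokenizer.from_pretrained("tmt3103/VSFC-sentiment-classify-phoBERT")

# These ids should match added_tokens_decoder above:
# <s>=0, <pad>=1, </s>=2, <unk>=3, <mask>=64000
print(tokenizer.convert_tokens_to_ids(["<s>", "<pad>", "</s>", "<unk>", "<mask>"]))

# Encoded sequences are wrapped in <s> ... </s>, i.e. ids 0 and 2.
enc = tokenizer("Giảng viên thân thiện dễ thương")
print(enc["input_ids"][0], enc["input_ids"][-1])
```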
vocab.txt
ADDED
The diff for this file is too large to render. See raw diff.