Hank committed
Commit 740d610 · 1 Parent(s): 30b1bde
Add complete Chinese sentiment analysis model
- Add trained BERT-based sentiment classifier
- Include proper config.json and tokenizer files
- Add comprehensive README with usage examples
- Support direct inference via transformers pipeline
- Ready for Hugging Face Inference API
- README.md +109 -2
- config.json +38 -0
- create_model.py +152 -0
- model.pth +0 -0
- model.safetensors +3 -0
- simple_create_model.py +114 -0
- special_tokens_map.json +7 -0
- tokenizer_config.json +58 -0
- vocab.txt +0 -0
README.md
CHANGED
@@ -1,10 +1,117 @@
 ---
 language: zh
 tags:
+- sentiment-analysis
+- chinese
+- bert
 - text-classification
 license: apache-2.0
+datasets:
+- custom
+metrics:
+- accuracy
+library_name: transformers
+pipeline_tag: text-classification
 ---
 
-#
+# Chinese Sentiment Analysis Model (中文情感分析)
 
-中文文本分類
+A BERT-based Chinese sentiment analysis model that classifies Chinese text as positive or negative.
+
+## Model Description
+
+- **Model type**: BertForSequenceClassification
+- **Base model**: bert-base-chinese
+- **Language**: Chinese
+- **Task**: text classification (sentiment analysis)
+- **Labels**:
+  - `POSITIVE`: positive sentiment
+  - `NEGATIVE`: negative sentiment
+
+## Usage
+
+### Quick Start
+
+```python
+from transformers import pipeline
+
+# Load the model
+classifier = pipeline("text-classification", model="sk413025/my-awesome-model")
+
+# Run inference
+result = classifier("這個產品真的很棒!")
+print(result)
+# Output: [{'label': 'POSITIVE', 'score': 0.xxx}]
+```
+
+### Batch Processing
+
+```python
+texts = [
+    "這個產品真的很棒!",
+    "質量太差了,不推薦。",
+    "還不錯,可以考慮購買。"
+]
+
+results = classifier(texts)
+for text, result in zip(texts, results):
+    print(f"Text: {text}")
+    print(f"Prediction: {result['label']} (confidence: {result['score']:.4f})")
+    print("-" * 50)
+```
+
+### Using the Model Directly
+
+```python
+from transformers import BertTokenizer, BertForSequenceClassification
+import torch
+
+# Load the model and tokenizer
+tokenizer = BertTokenizer.from_pretrained("sk413025/my-awesome-model")
+model = BertForSequenceClassification.from_pretrained("sk413025/my-awesome-model")
+
+# Prepare the input
+text = "這個服務體驗很棒!"
+inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True)
+
+# Inference
+with torch.no_grad():
+    outputs = model(**inputs)
+    predictions = torch.nn.functional.softmax(outputs.logits, dim=-1)
+
+# Read out the result
+predicted_class = predictions.argmax().item()
+confidence = predictions.max().item()
+
+labels = {0: "NEGATIVE", 1: "POSITIVE"}
+print(f"Prediction: {labels[predicted_class]} (confidence: {confidence:.4f})")
+```
+
+## API Usage
+
+You can also call the model through the Hugging Face Inference API:
+
+```python
+import requests
+
+API_URL = "https://api-inference.huggingface.co/models/sk413025/my-awesome-model"
+headers = {"Authorization": "Bearer YOUR_HF_TOKEN"}
+
+def query(payload):
+    response = requests.post(API_URL, headers=headers, json=payload)
+    return response.json()
+
+result = query({"inputs": "這個產品質量很好!"})
+print(result)
+```
+
+## Limitations and Caveats
+
+1. **Domain**: the model may underperform on specialized domains such as medical or legal text
+2. **Language variants**: it targets mainly Simplified Chinese; Traditional Chinese input may need extra handling
+3. **Text length**: keep inputs within 512 tokens, the model's maximum sequence length
+4. **Fine-tuning**: fine-tune on data from your own use case for best results
+
+## License
+
+This model is released under the Apache 2.0 license.
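The 512-token limit called out in the README's caveats comes from the model's `max_position_embeddings` (see config.json below). A minimal sketch of handling over-long inputs explicitly, assuming the repo id used throughout the README:

```python
from transformers import pipeline

classifier = pipeline("text-classification", model="sk413025/my-awesome-model")

# Hypothetical long review; the pipeline forwards extra kwargs to the
# tokenizer, so truncation clips the input to the 512-token window.
long_text = "這個產品真的很棒!" * 200
result = classifier(long_text, truncation=True, max_length=512)
print(result)
```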
config.json
ADDED
@@ -0,0 +1,38 @@
+{
+  "architectures": [
+    "BertForSequenceClassification"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "classifier_dropout": null,
+  "directionality": "bidi",
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 768,
+  "id2label": {
+    "0": "NEGATIVE",
+    "1": "POSITIVE"
+  },
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "label2id": {
+    "NEGATIVE": 0,
+    "POSITIVE": 1
+  },
+  "layer_norm_eps": 1e-12,
+  "max_position_embeddings": 512,
+  "model_type": "bert",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pad_token_id": 0,
+  "pooler_fc_size": 768,
+  "pooler_num_attention_heads": 12,
+  "pooler_num_fc_layers": 3,
+  "pooler_size_per_head": 128,
+  "pooler_type": "first_token_transform",
+  "position_embedding_type": "absolute",
+  "torch_dtype": "float32",
+  "transformers_version": "4.50.3",
+  "type_vocab_size": 2,
+  "use_cache": true,
+  "vocab_size": 21128
+}
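One way to sanity-check the config above is to round-trip it through `AutoConfig`; note that the string keys of `id2label` in the JSON come back as `int` keys in Python. A sketch, assuming the file sits in the current directory:

```python
from transformers import AutoConfig

# JSON "0"/"1" keys are converted to integer ids on load.
config = AutoConfig.from_pretrained(".")
assert config.id2label == {0: "NEGATIVE", 1: "POSITIVE"}
assert config.num_labels == 2
assert config.vocab_size == 21128
print(config.model_type, config.architectures)
```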
create_model.py
ADDED
@@ -0,0 +1,152 @@
+#!/usr/bin/env python3
+"""
+Training script for the Chinese sentiment analysis model.
+
+Shows how to create a Hugging Face model that is ready for inference.
+"""
+
+import torch
+import numpy as np
+from transformers import (
+    BertTokenizer, BertForSequenceClassification,
+    TrainingArguments, Trainer, pipeline
+)
+from torch.utils.data import Dataset
+from sklearn.metrics import accuracy_score
+
+class SentimentDataset(Dataset):
+    def __init__(self, texts, labels, tokenizer, max_length=128):
+        self.texts = texts
+        self.labels = labels
+        self.tokenizer = tokenizer
+        self.max_length = max_length
+
+    def __len__(self):
+        return len(self.texts)
+
+    def __getitem__(self, idx):
+        text = str(self.texts[idx])
+        label = self.labels[idx]
+
+        encoding = self.tokenizer(
+            text,
+            truncation=True,
+            padding='max_length',
+            max_length=self.max_length,
+            return_tensors='pt'
+        )
+
+        return {
+            'input_ids': encoding['input_ids'].flatten(),
+            'attention_mask': encoding['attention_mask'].flatten(),
+            'labels': torch.tensor(label, dtype=torch.long)
+        }
+
+def create_demo_model():
+    """Create a demo sentiment analysis model."""
+
+    # Start from the pretrained Chinese BERT
+    model_name = "bert-base-chinese"
+    tokenizer = BertTokenizer.from_pretrained(model_name)
+    model = BertForSequenceClassification.from_pretrained(
+        model_name,
+        num_labels=2,
+        id2label={0: "NEGATIVE", 1: "POSITIVE"},
+        label2id={"NEGATIVE": 0, "POSITIVE": 1}
+    )
+
+    # A handful of example sentences
+    texts = [
+        "這個產品真的很棒!我非常滿意。",
+        "質量很差,完全不推薦。",
+        "服務態度很好,值得信賴。",
+        "價格太貴了,性價比不高。",
+        "非常棒的體驗,會再次購買。",
+        "完全浪費錢,後悔購買。"
+    ]
+    labels = [1, 0, 1, 0, 1, 0]  # 1: POSITIVE, 0: NEGATIVE
+
+    # Build the dataset
+    dataset = SentimentDataset(texts, labels, tokenizer)
+
+    # Training arguments (demo only; real training needs far more data)
+    training_args = TrainingArguments(
+        output_dir='./results',
+        num_train_epochs=1,
+        per_device_train_batch_size=2,
+        per_device_eval_batch_size=2,
+        warmup_steps=10,
+        weight_decay=0.01,
+        logging_dir='./logs',
+        save_strategy="no",  # skip intermediate checkpoints
+    )
+
+    # Create the trainer
+    trainer = Trainer(
+        model=model,
+        args=training_args,
+        train_dataset=dataset,
+        processing_class=tokenizer,  # replaces the deprecated tokenizer= argument
+    )
+
+    # Brief training run (demo only)
+    trainer.train()
+
+    return model, tokenizer
+
+def save_model_for_huggingface(model, tokenizer, save_directory):
+    """Save the model in Hugging Face format."""
+
+    # Persist both the model and the tokenizer
+    model.save_pretrained(save_directory)
+    tokenizer.save_pretrained(save_directory)
+
+    print(f"Model saved to: {save_directory}")
+    print("Files written:")
+    import os
+    for file in os.listdir(save_directory):
+        print(f"  - {file}")
+
+def test_inference(model_directory):
+    """Smoke-test inference on the saved model."""
+
+    # Build an inference pipeline
+    classifier = pipeline(
+        "text-classification",
+        model=model_directory,
+        tokenizer=model_directory,
+        top_k=None  # return scores for all labels (was return_all_scores=True)
+    )
+
+    # Test samples
+    test_texts = [
+        "這個手機真的很好用!",
+        "服務態度太差了。",
+        "質量不錯,值得推薦。"
+    ]
+
+    print("\n=== Inference test ===")
+    for text in test_texts:
+        result = classifier(text)
+        print(f"Text: {text}")
+        print(f"Result: {result}")
+        print("-" * 50)
+
+if __name__ == "__main__":
+    print("Creating the Chinese sentiment analysis model...")
+
+    # Create and train the model
+    model, tokenizer = create_demo_model()
+
+    # Save it
+    save_directory = "./my-sentiment-model"
+    save_model_for_huggingface(model, tokenizer, save_directory)
+
+    # Test inference
+    test_inference(save_directory)
+
+    print("\n✅ Model created!")
+    print("Next you can:")
+    print("1. Push the model files to Hugging Face")
+    print("2. Let others load the model with transformers")
+    print("3. Serve it through the Inference API")
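The script imports `numpy` and `accuracy_score` but never passes a metrics hook to the `Trainer`. A minimal `compute_metrics` that would wire them in — a sketch only, since it also needs an `eval_dataset` the demo does not build:

```python
import numpy as np
from sklearn.metrics import accuracy_score

def compute_metrics(eval_pred):
    """Turn raw logits into class ids and score them against gold labels."""
    logits, labels = eval_pred
    preds = np.argmax(logits, axis=-1)
    return {"accuracy": accuracy_score(labels, preds)}

# Trainer(..., eval_dataset=eval_dataset, compute_metrics=compute_metrics)
```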
model.pth
DELETED
File without changes
model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:11d5b103855ba72d07002320b2349760efe172a6efba0a9cabf70b01ac20516d
+size 409100240
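The pointer file records only the hash and size; the real weights arrive via `git lfs pull`. A quick local check against the pointer's values — a sketch using only the numbers shown above:

```python
import hashlib
from pathlib import Path

# Compare the downloaded weights with the LFS pointer's oid and size.
blob = Path("model.safetensors").read_bytes()
assert len(blob) == 409100240
digest = hashlib.sha256(blob).hexdigest()
assert digest == "11d5b103855ba72d07002320b2349760efe172a6efba0a9cabf70b01ac20516d"
print("model.safetensors matches the LFS pointer")
```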
simple_create_model.py
ADDED
@@ -0,0 +1,114 @@
+#!/usr/bin/env python3
+"""
+Minimal script for creating the Chinese sentiment analysis model.
+
+Builds an inference-ready model on top of bert-base-chinese.
+"""
+
+from transformers import (
+    BertTokenizer,
+    BertForSequenceClassification,
+    pipeline
+)
+
+def create_model():
+    """Create the BERT-based Chinese sentiment analysis model."""
+
+    print("Loading bert-base-chinese...")
+
+    # Pretrained Chinese BERT
+    model_name = "bert-base-chinese"
+
+    # Load the tokenizer
+    tokenizer = BertTokenizer.from_pretrained(model_name)
+
+    # Load the model with a two-class classification head
+    model = BertForSequenceClassification.from_pretrained(
+        model_name,
+        num_labels=2,  # positive / negative
+        id2label={0: "NEGATIVE", 1: "POSITIVE"},
+        label2id={"NEGATIVE": 0, "POSITIVE": 1}
+    )
+
+    print("✅ Model loaded!")
+    return model, tokenizer
+
+def save_model(model, tokenizer, save_path="./"):
+    """Save the model to the given path."""
+
+    print(f"Saving the model to {save_path}...")
+
+    # Write the model files
+    model.save_pretrained(save_path)
+    tokenizer.save_pretrained(save_path)
+
+    print("✅ Model saved!")
+
+    # List the generated files
+    import os
+    print("\nGenerated files:")
+    for file in sorted(os.listdir(save_path)):
+        if not file.startswith('.'):
+            print(f"  📄 {file}")
+
+def test_model(model_path="./"):
+    """Smoke-test inference on the saved model."""
+
+    print("\n=== Inference test ===")
+
+    try:
+        # Build the classifier
+        classifier = pipeline(
+            "text-classification",
+            model=model_path,
+            tokenizer=model_path
+        )
+
+        # Test texts
+        test_texts = [
+            "這個產品真的很棒!我很喜歡。",
+            "質量太差了,完全不值得購買。",
+            "還不錯,可以考慮。",
+            "非常滿意這次的服務體驗。"
+        ]
+
+        print("\nResults:")
+        for i, text in enumerate(test_texts, 1):
+            result = classifier(text)
+            label = result[0]['label']
+            score = result[0]['score']
+
+            print(f"{i}. Text: {text}")
+            print(f"   Prediction: {label} (confidence: {score:.4f})")
+            print()
+
+        print("✅ Inference test passed!")
+
+    except Exception as e:
+        print(f"❌ Inference test failed: {e}")
+
+if __name__ == "__main__":
+    print("🚀 Creating the Chinese sentiment analysis model...")
+
+    try:
+        # Create the model
+        model, tokenizer = create_model()
+
+        # Save it
+        save_model(model, tokenizer)
+
+        # Test it
+        test_model()
+
+        print("\n" + "="*50)
+        print("🎉 Model created successfully!")
+        print("\n📋 Next steps:")
+        print("1. git add . && git commit -m 'Add trained model'")
+        print("2. git push origin main")
+        print("3. Others can then use:")
+        print("   from transformers import pipeline")
+        print("   classifier = pipeline('text-classification', model='sk413025/my-awesome-model')")
+
+    except Exception as e:
+        print(f"❌ Error: {e}")
+        print("Check your network connection; the script needs to download bert-base-chinese")
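The closing hints push with plain git; the same upload can be done with `huggingface_hub`, which handles LFS transparently. A sketch, assuming you are logged in (`huggingface-cli login`) and using the repo id the script prints:

```python
from huggingface_hub import HfApi

api = HfApi()
# Upload everything save_model() wrote, as a single commit.
api.upload_folder(
    folder_path="./",
    repo_id="sk413025/my-awesome-model",
    commit_message="Add trained model",
)
```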
special_tokens_map.json
ADDED
@@ -0,0 +1,7 @@
+{
+  "cls_token": "[CLS]",
+  "mask_token": "[MASK]",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "unk_token": "[UNK]"
+}
tokenizer_config.json
ADDED
@@ -0,0 +1,58 @@
+{
+  "added_tokens_decoder": {
+    "0": {
+      "content": "[PAD]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "100": {
+      "content": "[UNK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "101": {
+      "content": "[CLS]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "102": {
+      "content": "[SEP]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "103": {
+      "content": "[MASK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "[CLS]",
+  "do_basic_tokenize": true,
+  "do_lower_case": false,
+  "extra_special_tokens": {},
+  "mask_token": "[MASK]",
+  "model_max_length": 512,
+  "never_split": null,
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "strip_accents": null,
+  "tokenize_chinese_chars": true,
+  "tokenizer_class": "BertTokenizer",
+  "unk_token": "[UNK]"
+}
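`model_max_length: 512` is what lets callers pass `truncation=True` without an explicit `max_length`. A quick check, assuming the tokenizer files above sit in the current directory:

```python
from transformers import BertTokenizer

tokenizer = BertTokenizer.from_pretrained(".")

# An over-long input is clipped to model_max_length, [CLS]/[SEP] included.
ids = tokenizer("很棒!" * 500, truncation=True)["input_ids"]
assert len(ids) == 512
```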
vocab.txt
ADDED
The diff for this file is too large to render.
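Although the vocabulary is too large to render, its line count must equal the `vocab_size` of 21128 declared in config.json, since `BertTokenizer` assigns token ids by line position. A one-off consistency check (a sketch):

```python
# Each line of vocab.txt is one WordPiece token; ids follow line order.
with open("vocab.txt", encoding="utf-8") as f:
    vocab = [line.rstrip("\n") for line in f]
print(len(vocab))  # expected: 21128
```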