| | |
| |
|
| | import torch |
| | from modeling_tinytransformer import TinyTransformerModel |
| | from tokenization_tinytransformer import TinyTokenizer |
| | from configuration_tinytransformer import TinyTransformerConfig |
| |
|
# Directory containing the saved config/weights for the tiny sentiment model.
model_path = "./tiny-sentiment-model"
# Prefer GPU when available; everything below is moved to this device.
device = "cuda" if torch.cuda.is_available() else "cpu"

# Tokenizer is constructed directly (no from_pretrained) — presumably it
# carries its own fixed vocab; verify against TinyTokenizer's definition.
print("加载 tokenizer(手动)...")
tokenizer = TinyTokenizer()

# Config must be loaded first so it can be passed explicitly to the model.
print("加载 config(手动)...")
config = TinyTransformerConfig.from_pretrained(model_path)

print("加载模型(手动)...")
model = TinyTransformerModel.from_pretrained(model_path, config=config)
model.to(device)
# Inference mode: disables dropout / switches batch-norm style layers to eval.
model.eval()

print("模型加载完成!设备:", device)
| |
|
| | |
def predict(text: str, max_length: int = 64):
    """Classify the sentiment of one text, print the result, and return it.

    Relies on the module-level ``tokenizer``, ``model`` and ``device``
    prepared at import time.

    Args:
        text: Input sentence to classify.
        max_length: Truncation length in tokens (default 64, matching the
            original hard-coded value).

    Returns:
        A ``(pred_label, confidence)`` tuple: ``pred_label`` is the argmax
        class index (0 = negative, 1 = positive per ``label_map``) and
        ``confidence`` is its softmax probability.
    """
    inputs = tokenizer(
        text,
        padding=True,
        truncation=True,
        max_length=max_length,
        return_tensors="pt",
    )
    # Move every input tensor onto the same device as the model.
    inputs = {k: v.to(device) for k, v in inputs.items()}

    with torch.no_grad():  # inference only — skip autograd bookkeeping
        outputs = model(**inputs)

    # The custom model may return a plain dict or an output object with
    # a .logits attribute; handle both.
    logits = outputs["logits"] if isinstance(outputs, dict) else outputs.logits
    probs = torch.softmax(logits, dim=-1)[0]

    pred_label = torch.argmax(probs).item()
    confidence = probs[pred_label].item()

    label_map = {0: "负面", 1: "正面"}

    print(f"文本: {text}")
    print(f"预测: {label_map[pred_label]} (置信度: {confidence:.4f})")
    print(f"负面概率: {probs[0]:.4f} | 正面概率: {probs[1]:.4f}")
    print("-" * 50)

    # Return the result so callers can use it programmatically instead of
    # only reading stdout (previously this function returned None).
    return pred_label, confidence
| |
|
| | |
if __name__ == "__main__":
    # Smoke-test the pipeline on a handful of sample reviews.
    samples = (
        "质量超级好,强烈推荐",
        "服务差劲,东西还贵",
        "一般般,没什么惊喜",
        "老板人好,下次还来",
    )
    for sample in samples:
        predict(sample)