#!/usr/bin/env python3
"""
古文翻译模型最简调用示例
模型: Helsinki-NLP/opus-mt-zh-en 微调模型
路径: output/translation_model/checkpoint-198000
"""

import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Path to the fine-tuned checkpoint
MODEL_PATH = "output/translation_model/checkpoint-198000"

# Load the model and tokenizer once at module scope; reloading them inside
# translate() would dominate the runtime when translating several sentences.
tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_PATH)

# Run on GPU when available, otherwise fall back to CPU
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = model.to(device)
model.eval()


def translate(ancient_text: str) -> str:
    """
    Translate Classical Chinese into modern Chinese (minimal version).

    Args:
        ancient_text: the Classical Chinese input text

    Returns:
        the modern Chinese translation
    """
    
    # Tokenize the input, truncating to the model's 128-token limit
    inputs = tokenizer(
        ancient_text,
        max_length=128,
        padding=True,
        truncation=True,
        return_tensors="pt"
    )
    
    input_ids = inputs["input_ids"].to(device)
    attention_mask = inputs["attention_mask"].to(device)
    
    # Generate the translation with beam search
    with torch.no_grad():
        outputs = model.generate(
            input_ids=input_ids,
            attention_mask=attention_mask,
            max_length=128,
            num_beams=4,
            early_stopping=True,
        )
    
    # Decode the best beam, skipping special tokens
    result = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return result
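

# For more than a handful of sentences, batching amortizes the per-call
# generate() overhead. A minimal sketch, not part of the original script:
# `translate_batch` is a hypothetical helper that reuses the tokenizer,
# model, and device loaded above.
def translate_batch(ancient_texts: list[str]) -> list[str]:
    """Translate a batch of Classical Chinese sentences in one call."""
    inputs = tokenizer(
        ancient_texts,
        max_length=128,
        padding=True,
        truncation=True,
        return_tensors="pt",
    ).to(device)
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_length=128,
            num_beams=4,
            early_stopping=True,
        )
    return tokenizer.batch_decode(outputs, skip_special_tokens=True)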


if __name__ == "__main__":
    # Demo inputs: three well-known lines from the Analects
    test_texts = [
        "学而时习之,不亦说乎?",
        "温故而知新,可以为师矣。",
        "己所不欲,勿施于人。",
    ]
    
    print("=" * 50)
    print("古文翻译演示")
    print("=" * 50)
    
    for text in test_texts:
        result = translate(text)
        print(f"\n古文: {text}")
        print(f"现代文: {result}")