"""
Usage example for Interactive Refinement with Qwen3-14B.
"""
| from transformers import AutoTokenizer, AutoModelForCausalLM | |
| import json | |
| import torch | |
class InteractiveRefinementInference:
    """Inference wrapper for an Interactive Refinement Qwen3-14B model.

    Loads the tokenizer, the causal LM (fp16, automatic device placement),
    and the refinement configuration stored alongside the model weights.
    """

    def __init__(self, model_name_or_path):
        """Load tokenizer, model, and refinement config.

        Args:
            model_name_or_path: Directory containing the model files plus a
                ``refinement_config.json``.
                NOTE(review): the f-string path below only resolves for a
                local directory; a pure HF Hub id would need
                ``hf_hub_download`` — confirm intended usage.

        Raises:
            FileNotFoundError: if ``refinement_config.json`` is absent.
        """
        self.tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
        self.model = AutoModelForCausalLM.from_pretrained(
            model_name_or_path,
            torch_dtype=torch.float16,
            device_map="auto",
        )
        # Load the refinement settings shipped with the model weights.
        # encoding pinned so the JSON decodes identically on every platform.
        with open(f"{model_name_or_path}/refinement_config.json", "r",
                  encoding="utf-8") as f:
            self.config = json.load(f)

    def generate(self, question, rounds=None):
        """Generate an answer via iterative refinement (stub).

        Args:
            question: The user question to answer.
            rounds: Number of refinement rounds; defaults to the value in
                ``refinement_config.json``. An explicit ``rounds=0`` is now
                respected (the old ``rounds or default`` treated 0 as unset).

        Returns:
            None — the refinement loop itself is not implemented in this
            snippet (see the original code).
        """
        if rounds is None:
            rounds = self.config["refinement_rounds"]
        # Interactive Refinement implementation (see original code for details).
        pass
# Usage example
if __name__ == "__main__":
    engine = InteractiveRefinementInference(
        "your-username/interactive-refinement-qwen3-14b"
    )
    print(engine.generate("あなたの質問"))