"""
OPT モデル実装
Meta OPT-125Mの実装を提供する
"""
from typing import List, Tuple
import torch
from transformers import OPTForCausalLM, GPT2Tokenizer
from .base import BaseLanguageModel, ModelConfig

# OPT-125M configuration
OPT_125M_CONFIG = ModelConfig(
    name="OPT-125M",
    model_id="facebook/opt-125m",
    embedding_dim=768,
    vocab_size=50272,
)


class OPTModel(BaseLanguageModel):
    """
    OPT model implementation.

    Wraps Meta's OPT and implements the BaseLanguageModel interface.
    """

    # Scale factor for the noise added to the output logits
    LOGITS_NOISE_SCALE = 10.0

    def load(self) -> None:
        """Load the model and tokenizer."""
        if self._is_loaded:
            return
        try:
            self._model = OPTForCausalLM.from_pretrained(self._config.model_id)
            # OPT ships its own tokenizer, but a GPT-2-compatible one also works
            # (note: the vocabularies differ in size, 50257 for GPT-2 vs 50272 for OPT)
            self._tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
            self._model.eval()
            self._is_loaded = True
        except Exception as e:
            raise RuntimeError(
                f"Failed to load model {self._config.model_id}: {e}"
            ) from e

    def forward_with_noise(
        self, noise: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Run a forward pass using the noise tensor as the input embeddings."""
        if not self._is_loaded:
            raise RuntimeError("Model not loaded. Call load() first.")
        with torch.no_grad():
            outputs = self._model(inputs_embeds=noise)
            logits = outputs.logits
            # Corrupt the logits with Gaussian noise scaled to their standard deviation
            logits_noise = (
                torch.randn_like(logits) * logits.std() * self.LOGITS_NOISE_SCALE
            )
            corrupted_logits = logits + logits_noise
        return logits, corrupted_logits

    def decode_indices(self, indices: List[int]) -> List[str]:
        """Decode token indices into their string representations."""
        if not self._is_loaded:
            raise RuntimeError("Model not loaded. Call load() first.")
        return [self._tokenizer.decode([i]) for i in indices]
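

if __name__ == "__main__":
    # Usage sketch (illustrative only): this assumes BaseLanguageModel takes the
    # config in its constructor and stores it as self._config, as the code above
    # implies. Adjust to the actual base-class signature if it differs.
    model = OPTModel(OPT_125M_CONFIG)
    model.load()

    # Feed a random embedding sequence (batch=1, seq_len=8) as input embeddings.
    noise = torch.randn(1, 8, OPT_125M_CONFIG.embedding_dim)
    logits, corrupted_logits = model.forward_with_noise(noise)

    # Decode the top-5 predictions at the final position, clean vs. corrupted.
    clean_top = logits[0, -1].topk(5).indices.tolist()
    noisy_top = corrupted_logits[0, -1].topk(5).indices.tolist()
    print("clean:", model.decode_indices(clean_top))
    print("corrupted:", model.decode_indices(noisy_top))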