import torch
import torch.nn.functional as F
from evo_decoder import EvoDecoder
from transformers import GPT2Tokenizer

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# GPT-2 BPE tokenizer; GPT-2 has no pad token, so reuse EOS for padding.
tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
tokenizer.pad_token = tokenizer.eos_token

model = EvoDecoder(
    vocab_size=tokenizer.vocab_size,
    d_model=256,
    nhead=4,
    num_layers=3,
    dim_feedforward=512,
).to(device)

# Restore the trained weights and switch to inference mode.
model.load_state_dict(torch.load("evo_decoder.pt", map_location=device))
model.eval()

@torch.no_grad()
def generate_response(prompt, max_length=128, temperature=1.0, external_context=""):
    """Sample up to max_length new tokens for a prompt, optionally with context."""
    if external_context:
        full_prompt = f"Context: {external_context}\nQuestion: {prompt}\nAnswer:"
    else:
        full_prompt = f"Question: {prompt}\nAnswer:"

    input_ids = tokenizer.encode(full_prompt, return_tensors="pt").to(device)
    prompt_len = input_ids.shape[1]

    for _ in range(max_length):
        # Rescale the last position's logits by temperature, then sample.
        logits = model(input_ids)
        logits = logits[:, -1, :] / temperature
        probs = F.softmax(logits, dim=-1)
        next_token = torch.multinomial(probs, num_samples=1)
        input_ids = torch.cat((input_ids, next_token), dim=1)

        # Stop as soon as the model emits end-of-sequence.
        if next_token.item() == tokenizer.eos_token_id:
            break

    # Decode only the newly generated tokens. Slicing the decoded string by
    # len(full_prompt) is fragile because decode(encode(x)) does not always
    # round-trip character-for-character.
    generated_ids = input_ids[0, prompt_len:]
    return tokenizer.decode(generated_ids, skip_special_tokens=True).strip()
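
# A common refinement of the sampling step above is top-k filtering: keep only
# the k most likely tokens before sampling. This helper is a sketch, not part
# of the original script; the name sample_top_k and the k=50 default are
# illustrative. It drops in for the softmax/multinomial lines inside
# generate_response.
def sample_top_k(logits, k=50, temperature=1.0):
    # logits: [batch, vocab_size] for the last position.
    logits = logits / temperature
    top_values, top_indices = torch.topk(logits, k, dim=-1)
    probs = F.softmax(top_values, dim=-1)
    choice = torch.multinomial(probs, num_samples=1)  # index within the top-k slice
    return top_indices.gather(-1, choice)             # map back to vocabulary ids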
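
# Example usage (a minimal sketch; the question string is illustrative and
# assumes the evo_decoder.pt checkpoint above loaded successfully):
if __name__ == "__main__":
    answer = generate_response(
        "What is the capital of France?",
        max_length=64,
        temperature=0.8,
    )
    print(answer)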