import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
import gradio as gr

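# Run on GPU when available, otherwise fall back to CPU.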
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
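
# Two small GPT-2-family checkpoints. Both share the GPT-2 vocabulary,
# so their next-token logits line up token-for-token and can be mixed element-wise.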
model_name_a = "distilgpt2"
model_name_b = "sshleifer/tiny-gpt2"
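
# The models share a vocabulary, so a single tokenizer serves both.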
tokenizer = AutoTokenizer.from_pretrained(model_name_a)

model_a = AutoModelForCausalLM.from_pretrained(model_name_a).to(device)
model_b = AutoModelForCausalLM.from_pretrained(model_name_b).to(device)
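
# Inference only: switch off dropout and other training-time behavior.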
model_a.eval()
model_b.eval()


def blend_generate(prompt, wa, wb):
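    """Sample one continuation token from a weighted blend of both models' logits."""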
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)

    with torch.no_grad():
        output_a = model_a(input_ids)
        output_b = model_b(input_ids)
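
    # Keep only the logits for the next-token position.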
    logits_a = output_a.logits[:, -1, :]
    logits_b = output_b.logits[:, -1, :]
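
    # Weighted sum of the two logit vectors; the sliders set each model's
    # contribution. (If both weights are 0, the softmax below degenerates
    # to a uniform distribution over the vocabulary.)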
    blended_logits = wa * logits_a + wb * logits_b

    probs = torch.softmax(blended_logits, dim=-1)
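
    # Sample a single token id from the blended distribution.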
    token = torch.multinomial(probs, 1)
    next_token_id = token.item()

    next_token = tokenizer.decode([next_token_id])
    return prompt + next_token
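

# A minimal sketch (not part of the original demo): the same logit blending
# applied autoregressively to sample several tokens instead of one.
# `blend_generate_n` and `max_new_tokens` are assumed names for illustration.
def blend_generate_n(prompt, wa, wb, max_new_tokens=20):
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)
    for _ in range(max_new_tokens):
        with torch.no_grad():
            logits_a = model_a(input_ids).logits[:, -1, :]
            logits_b = model_b(input_ids).logits[:, -1, :]
        probs = torch.softmax(wa * logits_a + wb * logits_b, dim=-1)
        next_id = torch.multinomial(probs, 1)
        # Append the sampled token and feed the extended sequence back in.
        input_ids = torch.cat([input_ids, next_id], dim=-1)
    return tokenizer.decode(input_ids[0])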
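

# Minimal Gradio UI: a prompt box, one weight slider per model, and an output box.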
with gr.Blocks() as demo:
    prompt_input = gr.Textbox(label="Prompt", lines=2)
    weight_a = gr.Slider(0, 1, value=0.5, label="Weight model A")
    weight_b = gr.Slider(0, 1, value=0.5, label="Weight model B")
    output_text = gr.Textbox(label="Output")

    btn = gr.Button("Generate")
    btn.click(blend_generate, inputs=[prompt_input, weight_a, weight_b], outputs=output_text)

demo.launch()