import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the tokenizer and model weights from the Hugging Face Hub.
model_name = "EleutherAI/pythia-1.4B-deduped"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Inference runs on CPU; move the model there explicitly.
device = torch.device("cpu")
model.to(device)


def generate(prompt):
    # Tokenize the prompt and move the tensors to the model's device.
    inputs = tokenizer(prompt, return_tensors="pt").to(device)
    # Sample up to 200 new tokens with moderate randomness.
    outputs = model.generate(
        **inputs,
        max_new_tokens=200,
        do_sample=True,
        temperature=0.7,
    )
    # Decode the full sequence (prompt plus continuation) back to text.
    text = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return text


# Expose the generate function through a simple Gradio UI.
iface = gr.Interface(
    fn=generate,
    inputs=gr.Textbox(lines=5, placeholder="Enter a prompt…"),
    outputs=gr.Textbox(),
    title="OASST-J-3B API",
)

# Bind to all interfaces so the app is reachable from outside the host machine.
iface.launch(server_name="0.0.0.0", server_port=7860)
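
# A minimal client sketch for calling the running app, assuming the
# `gradio_client` package is installed and the server above is already up.
# Run it from a separate process; "/predict" is the endpoint name Gradio
# assigns to a single-function Interface.
#
#   from gradio_client import Client
#   client = Client("http://localhost:7860")
#   print(client.predict("Hello, world!", api_name="/predict"))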