import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the tokenizer and model once at startup. Falling back to CPU when no
# GPU is present keeps the demo runnable on machines without CUDA.
device = "cuda" if torch.cuda.is_available() else "cpu"
model_name = "AutoArk-AI/GPA"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name).to(device)


def generate_text(input_text):
    # Tokenize the prompt and move the tensors to the model's device.
    inputs = tokenizer(input_text, return_tensors="pt").to(device)
    # max_new_tokens bounds only the generated continuation; max_length would
    # also count the prompt toward the 50-token budget.
    outputs = model.generate(**inputs, max_new_tokens=50)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
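
# Note: for a causal LM, generate() returns the full sequence, so the decoded
# text above is the prompt followed by up to 50 newly generated tokens.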


interface = gr.Interface(
    fn=generate_text,
    inputs=gr.Textbox(lines=5, placeholder="Enter your text..."),
    outputs="text",
    title="AutoArk-AI/GPA Model Demo",
    description="Enter text and the model will generate a reply.",
)


interface.launch()
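# launch() serves the demo locally; launch(share=True) would additionally
# create a temporary public Gradio link, should you want to share it.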