import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
# Hub identifier of the model to load — CyberAgent's "OpenCALM" 1B causal LM
# (presumably Japanese-language, matching the Japanese UI below — confirm on the Hub).
# Weights are downloaded on first run.
model_name = "cyberagent/open-calm-1b"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
# Module-level text-generation pipeline shared by chat() below.
pipe = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    max_new_tokens=100,  # cap on newly generated tokens per reply
    do_sample=True,      # stochastic sampling — replies vary between calls
    temperature=0.7
)
def chat(history, user_input):
    """Generate a model reply to *user_input* and append the exchange to *history*.

    Args:
        history: list of (user_message, bot_reply) tuples backing the Chatbot.
        user_input: text submitted from the Textbox.

    Returns:
        (updated history, "") — the empty string clears the input Textbox.
    """
    # Guard: don't send a blank prompt to the model on an empty submission.
    if not user_input:
        return history, ""
    generated = pipe(user_input)[0]["generated_text"]
    # HF text-generation pipelines return prompt + continuation by default,
    # so strip the echoed prompt: otherwise every bot reply starts by
    # repeating the user's own message.
    if generated.startswith(user_input):
        response = generated[len(user_input):].lstrip()
    else:
        response = generated
    history.append((user_input, response))
    return history, ""
# Minimal chat UI: a heading, a conversation log, and a single input box.
with gr.Blocks() as demo:
    gr.Markdown("## 日本語文章の要約・質問チャット(超軽量版)")
    conversation = gr.Chatbot(height=500)
    prompt_box = gr.Textbox(label="文章を入力してください")
    # Pressing Enter routes (history, text) through chat() and writes the
    # results back to the chat log and the (now cleared) input box.
    prompt_box.submit(chat, [conversation, prompt_box], [conversation, prompt_box])
demo.launch()