Hugging Face Spaces (status: Sleeping)
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

# Lightweight (~1B-parameter) Japanese causal language model.
model_name = "cyberagent/open-calm-1b"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Shared text-generation pipeline: sampled decoding (temperature 0.7),
# capped at 100 newly generated tokens per turn.
pipe = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    max_new_tokens=100,
    do_sample=True,
    temperature=0.7,
)
def chat(history, user_input, generator=None):
    """Append one (user, bot) turn to the chat history.

    Parameters
    ----------
    history : list
        Gradio chatbot state: list of (user_message, bot_message) tuples.
        Mutated in place and also returned.
    user_input : str
        The text submitted by the user; used verbatim as the LM prompt.
    generator : callable, optional
        Text-generation callable with the HF pipeline interface
        (``generator(prompt) -> [{"generated_text": ...}]``).  Defaults to
        the module-level ``pipe``; the parameter exists so the logic can be
        exercised without loading the model.

    Returns
    -------
    tuple
        ``(history, "")`` — the updated history and an empty string that
        clears the input textbox.
    """
    gen = pipe if generator is None else generator
    raw = gen(user_input)[0]["generated_text"]
    # Bug fix: HF "text-generation" pipelines return the prompt followed by
    # the continuation, so the bot used to echo the user's message back.
    # removeprefix is a safe no-op if the prompt is not echoed.
    response = raw.removeprefix(user_input).lstrip()
    history.append((user_input, response))
    return history, ""
# UI: a chat window plus a textbox; submitting the textbox runs `chat`,
# which updates the history and clears the box.
with gr.Blocks() as demo:
    gr.Markdown("## 日本語文章の要約・質問チャット(超軽量版)")
    chatbot = gr.Chatbot(height=500)
    text_input = gr.Textbox(label="文章を入力してください")
    text_input.submit(chat, [chatbot, text_input], [chatbot, text_input])

demo.launch()