| | import gradio as gr |
| | from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline |
| | from peft import PeftModel |
| |
|
| | |
# Base seq2seq checkpoint the fine-tuned adapter was trained on top of.
BASE_MODEL = "facebook/blenderbot-400M-distill"

# Pull the tokenizer and the frozen base weights from the Hugging Face Hub.
tokenizer = AutoTokenizer.from_pretrained(BASE_MODEL)
base_model = AutoModelForSeq2SeqLM.from_pretrained(BASE_MODEL)
| |
|
| | |
# Hub repo holding the PEFT adapter weights; wrapping the base model with
# PeftModel applies the fine-tuned deltas on top of the frozen base.
ADAPTER_REPO = "abinashnp/bayedger-chatbot"
model = PeftModel.from_pretrained(base_model, ADAPTER_REPO)
| |
|
| | |
# Text2text generation pipeline around the adapted model.
# device_map="auto" lets accelerate place weights on GPU when one is
# available, falling back to CPU otherwise.
chatbot = pipeline(
    "text2text-generation",
    tokenizer=tokenizer,
    model=model,
    device_map="auto",
)
| |
|
def respond(query):
    """Generate an answer for *query* with the fine-tuned chatbot.

    The user text is wrapped in the "question: ... answer:" template that
    the adapter was presumably trained with (TODO confirm against the
    fine-tuning data format).

    Args:
        query: Raw user question as a string.

    Returns:
        The generated answer text from the pipeline.
    """
    out = chatbot(
        f"question: {query} answer:",
        max_new_tokens=150,
        # BUG FIX: temperature/top_p are silently ignored unless sampling is
        # enabled — with num_beams=1 and no do_sample the original call was
        # plain greedy decoding despite setting sampling parameters.
        do_sample=True,
        temperature=1.0,
        top_p=0.9,
        repetition_penalty=1.1,
        num_beams=1,
    )[0]["generated_text"]
    return out
| |
|
# ---------------------------------------------------------------- UI ----
with gr.Blocks() as demo:
    gr.Markdown("# 🤖 Bayedger FAQ Chatbot")
    txt = gr.Textbox(label="Ask me anything")
    out = gr.Textbox(label="Answer")
    # Pressing Enter in the question box routes its value through respond()
    # and renders the result in the answer box.
    txt.submit(respond, inputs=txt, outputs=out)

demo.launch()
| |
|