# Hugging Face Space app: Gradio customer-support chatbot (TinyLlama + QLoRA).
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
import gradio as gr
# Path to the locally saved QLoRA-fine-tuned TinyLlama checkpoint.
model_path = "./tinyllama-qlora-support-bot-faq"
# Load tokenizer and causal-LM weights from the checkpoint directory.
tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModelForCausalLM.from_pretrained(model_path)
# Shared text-generation pipeline used by generate_response().
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
def generate_response(instruction):
    """Generate a support-bot reply for a single user question.

    Parameters
    ----------
    instruction : str
        The customer's support question.

    Returns
    -------
    str
        The model's sampled response (temperature 0.7, up to 100 new
        tokens) with the prompt scaffold stripped and whitespace trimmed.
        Output is non-deterministic because ``do_sample=True``.
    """
    prompt = f"### Instruction:\n{instruction}\n\n### Response:\n"
    output = pipe(prompt, max_new_tokens=100, do_sample=True, temperature=0.7)
    text = output[0]['generated_text']
    # The pipeline echoes the prompt at the start of generated_text.
    # Slice off that leading prefix only: str.replace(prompt, "") would
    # also delete any later re-emission of the scaffold inside the
    # model's own generation, silently mangling the reply.
    if text.startswith(prompt):
        text = text[len(prompt):]
    return text.strip()
# Assemble the Gradio UI, then start the web server (blocks the process).
demo = gr.Interface(
    fn=generate_response,
    inputs=gr.Textbox(lines=3, placeholder="Ask your customer support question here..."),
    outputs=gr.Textbox(lines=6),
    title="🛠️ Customer Support Chatbot (TinyLlama + QLoRA)",
    description="Ask any support question. Model trained on MakTek/Customer_support_faqs_dataset using TinyLlama 1.1B.",
)
demo.launch()