Spaces:
Sleeping
Sleeping
File size: 1,193 Bytes
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
# Load the model and tokenizer from Hugging Face's model hub with trust_remote_code=True
# Load the model and its matching tokenizer from the Hugging Face hub,
# then wire them into a text-generation pipeline.
# BUG FIX: the tokenizer must come from the SAME checkpoint as the model —
# the original code paired gpt-neo with the Phi-3 tokenizer, whose vocabulary
# differs, so the model received meaningless token ids.
model = AutoModelForCausalLM.from_pretrained(
    "EleutherAI/gpt-neo-1.3B",
    trust_remote_code=True,  # allow custom modeling code shipped with the repo
    attn_implementation="eager",  # avoid SDPA/flash paths for broad compatibility
)
tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neo-1.3B")
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
def chatbot_response(user_input):
    """Generate a single-turn reply to *user_input* with the text-generation pipeline.

    A fresh message list is built on every call, so no conversation history
    is carried between requests.

    Args:
        user_input: The user's question as plain text.

    Returns:
        The generated reply text (full text of the first pipeline candidate,
        prompt excluded via return_full_text=False).
    """
    messages = [{"role": "user", "content": user_input}]
    # do_sample=False selects deterministic greedy decoding; the original
    # also passed temperature=0.0, which only applies when sampling and
    # makes transformers emit a warning — it is dropped here.
    output = pipe(
        messages,
        max_new_tokens=500,
        return_full_text=False,
        do_sample=False,
    )
    return output[0]["generated_text"]
# Build the web UI around chatbot_response.
# FIX: the legacy string theme "huggingface" is no longer accepted by current
# Gradio (themes are gr.themes.* objects since 4.x), so it is removed; the
# default theme is used instead.
iface = gr.Interface(
    fn=chatbot_response,
    inputs=gr.components.Textbox(lines=2, placeholder="Type your question here..."),
    outputs=gr.components.Text(label="Response"),
    title="Smart Chatbot",
    description="This is a smart chatbot that can answer your questions. Just type your question below and get an instant response.",
)

if __name__ == "__main__":
    iface.launch()
|