Hugging Face Spaces — Space status: Sleeping
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

# Single checkpoint used for BOTH the model and the tokenizer.  The original
# code paired the gpt-neo model with a Phi-3 tokenizer; mismatched
# vocabularies make the model decode garbage, so both must come from the
# same checkpoint.
MODEL_ID = "EleutherAI/gpt-neo-1.3B"

# NOTE(review): trust_remote_code=True executes arbitrary Python from the
# model repository — only keep it for checkpoints you trust.  gpt-neo uses
# standard Transformers modeling code, so it is not strictly required here.
model = AutoModelForCausalLM.from_pretrained(
    MODEL_ID,
    trust_remote_code=True,
    attn_implementation="eager",  # avoid SDPA/flash-attention requirements on CPU Spaces
)
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)

# Text-generation pipeline shared by the Gradio callback below.
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
def chatbot_response(user_input: str) -> str:
    """Generate a reply for a single user message.

    Each call builds a fresh one-turn conversation, so the bot is
    stateless across requests.

    Args:
        user_input: Raw text typed by the user.

    Returns:
        The model's generated reply (newly generated text only, because
        ``return_full_text=False``).
    """
    messages = [{"role": "user", "content": user_input}]
    # do_sample=False selects deterministic greedy decoding; passing
    # temperature=0.0 alongside it (as the original did) is contradictory
    # and makes transformers emit a warning, so it is omitted here.
    output = pipe(messages, max_new_tokens=500, return_full_text=False, do_sample=False)
    return output[0]["generated_text"]
# Gradio UI: a single text box in, a single text box out.
# NOTE(review): the original passed theme="huggingface", which was removed
# in Gradio 4.x (string themes now name gr.themes presets) and raises at
# construction time — it is dropped so the app falls back to the default theme.
iface = gr.Interface(
    fn=chatbot_response,
    inputs=gr.Textbox(lines=2, placeholder="Type your question here..."),
    outputs=gr.Textbox(label="Response"),
    title="Smart Chatbot",
    description=(
        "This is a smart chatbot that can answer your questions. "
        "Just type your question below and get an instant response."
    ),
)

if __name__ == "__main__":
    # launch() blocks and serves the app; guarded so importing this module
    # (e.g. for tests) does not start the server.
    iface.launch()