```python
import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load a pre-trained model and tokenizer from Hugging Face
model_name = "gpt2"  # other causal LMs such as "gpt2-medium" or "EleutherAI/gpt-neo-125M" work too
model = AutoModelForCausalLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)

# Title of the app
st.title("LLM Chatbot")

# User input for the chatbot
user_input = st.text_input("You: ", "")

# Generate a response with the model
def generate_response(prompt):
    inputs = tokenizer(prompt, return_tensors="pt")
    outputs = model.generate(
        inputs.input_ids,
        attention_mask=inputs.attention_mask,
        max_length=150,
        num_return_sequences=1,
        pad_token_id=tokenizer.eos_token_id,  # GPT-2 has no pad token; reuse EOS to avoid a warning
    )
    full_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
    # model.generate echoes the prompt, so strip it to keep only the new text
    return full_text[len(prompt):].strip()

if user_input:
    # Generate and display only the bot's response
    response = generate_response(user_input)
    st.write(f"Bot: {response}")
```