# Streamlit chatbot demo: a minimal chat UI around a Hugging Face seq2seq model.
# Imports grouped at the top (PEP 8); they were previously scattered
# between Streamlit UI calls.
import streamlit as st
import torch  # noqa: F401 -- kept: transformers uses it as its tensor backend
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# set_page_config must be the FIRST Streamlit command executed in the script.
st.set_page_config(page_title="LangChain Demo", page_icon=":robot:")
st.header("Hey, I'm your Chat GPT")
class SystemMessage:
    """A system-role chat message holding a single text payload.

    Mirrors the LangChain message interface used by the rest of the script.
    """

    def __init__(self, content):
        # The raw message text, stored verbatim.
        self.content = content

    def __repr__(self):
        # Without this, rendering the object (e.g. via st.write) shows an
        # opaque "<__main__.SystemMessage object at 0x...>" string.
        return f"{type(self).__name__}(content={self.content!r})"
# Any free seq2seq checkpoint from the Hugging Face Hub works here
# (e.g. "t5-small", "facebook/bart-base").
model_name = "t5-small"


@st.cache_resource
def _load_model_and_tokenizer(name: str):
    """Load the tokenizer and model once and reuse them across reruns.

    Streamlit re-executes the whole script on every user interaction;
    without caching, the model weights were re-loaded on each rerun.
    """
    tok = AutoTokenizer.from_pretrained(name)
    mdl = AutoModelForSeq2SeqLM.from_pretrained(name)
    return tok, mdl


# Module-level globals preserved for the rest of the script.
tokenizer, model = _load_model_and_tokenizer(model_name)
system_message = SystemMessage("Welcome to my Chatbot! Ask me anything.")
# Bug fix: show the greeting TEXT, not the wrapper object's repr.
st.write(system_message.content)

# Seed the conversation history exactly once per browser session;
# st.session_state survives Streamlit's per-interaction script reruns.
if "sessionMessages" not in st.session_state:
    st.session_state.sessionMessages = [
        SystemMessage(content="You are a helpful assistant.")
    ]
class HumanMessage:
    """A user-role chat message holding a single text payload."""

    def __init__(self, content):
        # The raw message text, stored verbatim.
        self.content = content

    def __repr__(self):
        # Readable form for debugging / Streamlit rendering.
        return f"{type(self).__name__}(content={self.content!r})"
class AIMessage:
    """An assistant-role chat message holding a single text payload."""

    def __init__(self, content):
        # The raw message text, stored verbatim.
        self.content = content

    def __repr__(self):
        # Readable form for debugging / Streamlit rendering.
        return f"{type(self).__name__}(content={self.content!r})"
def load_answer(question):
    """Generate a model reply to *question* and log both turns in history.

    Appends a HumanMessage and the resulting AIMessage to
    st.session_state.sessionMessages, then returns the decoded reply text.
    """
    # Record the user's turn in the running transcript.
    st.session_state.sessionMessages.append(HumanMessage(content=question))

    # NOTE(review): only the latest question is fed to the model here —
    # the stored history is not used as generation context; confirm this
    # is intentional before extending the app.
    encoded = tokenizer(question, return_tensors="pt")
    generated = model.generate(**encoded)
    response_text = tokenizer.decode(generated[0], skip_special_tokens=True)

    # Record the assistant's turn before handing the text back to the UI.
    st.session_state.sessionMessages.append(AIMessage(content=response_text))
    return response_text
def get_text():
    """Render the chat text input and return its current value.

    Bug fix: the widget key must be a string; the original passed the
    *builtin* `input` function object as the key (`key=input`).
    """
    return st.text_input("You: ", key="input")
user_input = get_text()
submit = st.button('Generate')

# Only query the model when the button is pressed AND there is text to
# answer — previously an empty prompt was sent straight to generation.
if submit and user_input:
    response = load_answer(user_input)
    st.subheader("Answer:")
    # Bug fix: st.write takes no `key` argument; the stray `key=1` is removed.
    st.write(response)