Spaces:
Sleeping
Sleeping
File size: 1,414 Bytes
2ebd42d bf7c01a 5789561 0789e3d bf7c01a 0789e3d bf7c01a 77899fe bf7c01a 1025314 bf7c01a 0789e3d bf7c01a 1025314 bf7c01a 1025314 bf7c01a 1025314 0789e3d 1025314 0789e3d bf7c01a 1025314 0789e3d bf7c01a 0789e3d bf7c01a 1025314 0789e3d bf7c01a 0789e3d | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 |
import streamlit as st
from langchain.schema import AIMessage, HumanMessage, SystemMessage
# Set up Streamlit page
st.set_page_config(page_title="LangChain Demo", page_icon=":robot:")
st.header("DIBYAJYOTI'S PERSONAL GPT ASSISTANT")

# Initialize the conversation history once per browser session.
# st.session_state persists across Streamlit reruns, so the system prompt
# is only seeded on the first run.
if "sessionMessages" not in st.session_state:
    st.session_state.sessionMessages = [
        SystemMessage(content="You are a helpful assistant.")
    ]

# Use HuggingFace GPT-2 model (Free and Open-Source)
# NOTE(review): `HuggingFaceLLM` is never imported anywhere in this file and is
# not a name exported by langchain — as written this line raises NameError.
# The intended class is presumably `langchain_community.llms.HuggingFacePipeline`
# (e.g. HuggingFacePipeline.from_model_id(model_id="gpt2",
# task="text-generation")) — confirm and add the matching import at the top.
chat = HuggingFaceLLM(model="gpt2")
# Function to get user input
def get_text():
    """Render the chat text box and return the user's current input string.

    Returns an empty string when nothing has been typed (Streamlit's
    text_input default).
    """
    input_text = st.text_input("You: ")
    return input_text
# Function to process the chat and load an answer
def load_answer(question):
    """Append *question* to the session history, query the model, and return its reply.

    Side effects: appends one HumanMessage (the question) and one AIMessage
    (the model's reply) to st.session_state.sessionMessages, so the full
    conversation is sent to the model on every call.
    """
    st.session_state.sessionMessages.append(HumanMessage(content=question))

    # Get the response from the assistant.
    # NOTE(review): assumes chat.invoke(...) returns a message object exposing
    # .content (the chat-model interface); a plain LLM returns a bare string —
    # confirm against the actual model class used above.
    assistant_answer = chat.invoke(st.session_state.sessionMessages)

    # Record the assistant's response in the session history.
    st.session_state.sessionMessages.append(AIMessage(content=assistant_answer.content))
    return assistant_answer.content
# Streamlit UI: input box + submit button; only query the model on click.
user_input = get_text()
submit = st.button('CLICK HERE TO GET YOUR RESPONSE')

if submit:
    if user_input:
        response = load_answer(user_input)
        st.subheader("Answer:")
        st.write(response)
    else:
        # Button pressed with an empty text box — prompt instead of querying.
        st.warning("Please enter a question.")
|