import streamlit as st
from transformers import pipeline
from langchain_community.llms import HuggingFacePipeline
# Page title.
st.title("🤖 Hugging Face Chatbot with LangChain")

# Conversation history lives in Streamlit session state so it survives
# reruns. Each entry is a {"role": ..., "content": ...} dict.
if "sessionMessages" not in st.session_state:
    st.session_state.sessionMessages = []
# Build the HF pipeline once and cache it across Streamlit reruns.
# Without @st.cache_resource the 7B model would be reloaded from disk on
# every user interaction, which makes the app unusable.
@st.cache_resource
def _build_chat():
    """Create the text-generation pipeline and wrap it for LangChain."""
    pipe = pipeline(
        "text-generation",
        model="tiiuae/falcon-7b-instruct",  # or any Hugging Face causal-LM
        torch_dtype="float32",              # force safe float32 precision
        device_map="auto",                  # auto-detect GPU/CPU
    )
    return HuggingFacePipeline(pipeline=pipe)


# Wrap into LangChain (module-level name kept for the rest of the script).
chat = _build_chat()
# Function to handle answers
def load_answer(question):
    """Record *question*, query the model, and return the assistant reply.

    Bug fix: ``HuggingFacePipeline`` is a plain-text LLM — ``invoke()``
    expects a single prompt string, not the list of chat-message dicts
    that was previously passed. The running history is therefore rendered
    into a role-tagged transcript before calling the model.
    """
    st.session_state.sessionMessages.append(
        {"role": "user", "content": question}
    )
    # Flatten history to "Role: content" lines, ending with an assistant
    # cue so the model continues the conversation from there.
    prompt = "\n".join(
        f"{m['role'].capitalize()}: {m['content']}"
        for m in st.session_state.sessionMessages
    ) + "\nAssistant:"
    assistant_answer = chat.invoke(prompt)
    st.session_state.sessionMessages.append(
        {"role": "assistant", "content": assistant_answer}
    )
    return assistant_answer
# Streamlit input: answer immediately when the user submits a question.
user_input = st.text_input("Ask me anything:")
if user_input:
    response = load_answer(user_input)
    st.write(f"**Assistant:** {response}")

# Display the full chat history below the latest answer.
if st.session_state.sessionMessages:
    st.subheader("Chat History")
    for msg in st.session_state.sessionMessages:
        st.write(f"**{msg['role'].capitalize()}:** {msg['content']}")