# Streamlit chatbot app (originally hosted on Hugging Face Spaces)
import streamlit as st
from transformers import pipeline
from langchain_community.llms import HuggingFacePipeline

# Title
st.title("🤖 Hugging Face Chatbot with LangChain")

# Session state for conversation history (list of {"role", "content"} dicts)
if "sessionMessages" not in st.session_state:
    st.session_state.sessionMessages = []


@st.cache_resource
def _build_chat_llm():
    """Build the HF text-generation pipeline once and wrap it for LangChain.

    Streamlit re-executes the entire script on every user interaction;
    without ``st.cache_resource`` the 7B model would be reloaded on each
    rerun. The cached wrapper is shared across reruns and sessions.
    """
    pipe = pipeline(
        "text-generation",
        model="tiiuae/falcon-7b-instruct",  # or any Hugging Face model
        torch_dtype="float32",  # force safe float32 precision
        device_map="auto",  # auto-detect GPU/CPU
    )
    # Wrap into LangChain (LLM interface: string prompt in, string out)
    return HuggingFacePipeline(pipeline=pipe)


chat = _build_chat_llm()
| # Function to handle answers | |
def load_answer(question):
    """Record *question*, query the model with the full history, and return the reply.

    ``HuggingFacePipeline`` implements LangChain's plain LLM interface
    (string prompt in, string completion out), so the message-dict history
    must be flattened into a single prompt string — passing the raw list of
    dicts to ``invoke`` is not supported by the LLM interface.
    """
    st.session_state.sessionMessages.append({"role": "user", "content": question})
    # Render the history as "Role: content" lines and cue the model to answer.
    prompt = "\n".join(
        f"{m['role'].capitalize()}: {m['content']}"
        for m in st.session_state.sessionMessages
    ) + "\nAssistant:"
    assistant_answer = chat.invoke(prompt)
    st.session_state.sessionMessages.append(
        {"role": "assistant", "content": assistant_answer}
    )
    return assistant_answer
# Streamlit input: reruns of the script re-enter here on each submission.
if user_input := st.text_input("Ask me anything:"):
    st.write(f"**Assistant:** {load_answer(user_input)}")

# Display chat history accumulated in session state, oldest first.
history = st.session_state.sessionMessages
if history:
    st.subheader("Chat History")
    for entry in history:
        st.write(f"**{entry['role'].capitalize()}:** {entry['content']}")