yonkoyonks committed on
Commit
53c4857
·
verified ·
1 Parent(s): 041c2ee

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +34 -28
app.py CHANGED
@@ -1,41 +1,47 @@
1
import streamlit as st
from transformers import pipeline
from langchain_community.llms import HuggingFacePipeline

# Title
st.title("🤖 Hugging Face Chatbot with LangChain")

# Session state for conversation history
if "sessionMessages" not in st.session_state:
    st.session_state.sessionMessages = []


@st.cache_resource
def _build_pipeline():
    """Build the Hugging Face text-generation pipeline once per process.

    Streamlit re-executes the entire script on every user interaction;
    without caching, the 7B model would be reloaded from disk on each
    submit, which is prohibitively slow and memory-hungry.
    """
    return pipeline(
        "text-generation",
        model="tiiuae/falcon-7b-instruct",  # or any Hugging Face model
        torch_dtype="float32",              # force safe float32 precision
        device_map="auto",                  # auto-detect GPU/CPU
    )


# Build HF pipeline in float32 (cached across reruns) and wrap into LangChain
pipe = _build_pipeline()
chat = HuggingFacePipeline(pipeline=pipe)
23
# Function to handle answers
def load_answer(question):
    """Record *question* in the session history, query the model on the
    full history, record the reply, and return it.

    NOTE(review): ``HuggingFacePipeline`` is a plain LLM wrapper; passing
    the raw list of role/content dicts relies on the installed LangChain
    version coercing it into a prompt string — confirm this works with the
    pinned langchain version.
    """
    st.session_state.sessionMessages.append({"role": "user", "content": question})
    assistant_answer = chat.invoke(st.session_state.sessionMessages)
    st.session_state.sessionMessages.append({"role": "assistant", "content": assistant_answer})
    return assistant_answer
29
 
30
# --- Input box: ask the model and show its reply ---
user_input = st.text_input("Ask me anything:")
if user_input:
    response = load_answer(user_input)
    st.write(f"**Assistant:** {response}")

# --- Conversation transcript ---
history = st.session_state.sessionMessages
if history:
    st.subheader("Chat History")
    for entry in history:
        role, text = entry["role"], entry["content"]
        st.write(f"**{role.capitalize()}:** {text}")
 
1
import streamlit as st

from langchain.schema import AIMessage, HumanMessage, SystemMessage
from langchain_community.llms import HuggingFacePipeline
from transformers import pipeline
5
 
6
# Cache the model so Streamlit doesn't reload every time
@st.cache_resource
def load_model():
    """Create the text-generation pipeline once and reuse it across reruns.

    NOTE(review): verify the model id "tiiuae/falcon-1b-instruct" exists on
    the Hub — tiiuae publishes falcon-rw-1b and falcon-7b-instruct; this
    exact id may 404 at load time.
    NOTE(review): trust_remote_code=True executes code shipped inside the
    model repo; keep only for repositories you trust.
    """
    model_kwargs = {
        "model": "tiiuae/falcon-1b-instruct",
        "torch_dtype": "float32",  # keep it stable
        "device_map": "auto",      # use GPU if available
        "trust_remote_code": True,
    }
    return pipeline("text-generation", **model_kwargs)


# Load the pipeline once and expose it through LangChain's LLM interface
pipe = load_model()
chat = HuggingFacePipeline(pipeline=pipe)
20
 
21
# --- Session state: seed a new conversation with the system prompt ---
if "sessionMessages" not in st.session_state:
    seed = SystemMessage(content="You are a helpful assistant.")
    st.session_state.sessionMessages = [seed]
 
 
 
 
 
 
26
 
27
def load_answer(user_input):
    """Send *user_input* to the model and return the assistant's reply.

    Appends the user turn to the session history, invokes the LLM on the
    full history, and records the reply. The reply is stored as an
    AIMessage — the previous code stored it as a SystemMessage, which
    mislabels every assistant turn as a system prompt and corrupts the
    role structure of the conversation.
    """
    st.session_state.sessionMessages.append(HumanMessage(content=user_input))
    assistant_answer = chat.invoke(st.session_state.sessionMessages)
    # Assistant replies are AI turns, not system prompts.
    st.session_state.sessionMessages.append(AIMessage(content=assistant_answer))
    return assistant_answer
32
 
33
# --- Streamlit UI ---
st.title("Falcon-1B Chatbot 🦅")

user_input = st.text_input("You:", key="input")

# text_input returns "" until the user submits, and "" is falsy,
# so the model is only queried after actual input.
if user_input:
    spinner = st.spinner("Thinking...")
    with spinner:
        response = load_answer(user_input)
    st.write(f"**Assistant:** {response}")
42
 
43
# Reset the conversation back to just the system prompt.
if st.button("Clear Chat"):
    st.session_state.sessionMessages = [
        SystemMessage(content="You are a helpful assistant.")
    ]
    # st.experimental_rerun() is deprecated and removed in current
    # Streamlit; st.rerun() (Streamlit >= 1.27) is the supported call.
    st.rerun()