pratikshahp commited on
Commit
7908dfb
·
verified ·
1 Parent(s): da008a8

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +18 -22
app.py CHANGED
@@ -1,40 +1,36 @@
1
  import streamlit as st
 
 
2
  from langchain.chains import ConversationChain
3
  from langchain_community.chat_message_histories import StreamlitChatMessageHistory
4
  from langchain.memory import ConversationBufferMemory
5
- import os
6
- from dotenv import load_dotenv
7
- from langchain_huggingface import HuggingFaceEndpoint
8
- load_dotenv()
9
-
10
- # Set your HF_TOKEN
11
- HF_TOKEN = os.getenv("HF_TOKEN")
12
 
 
 
 
13
 
14
- # Initialize the HuggingFace inference endpoint
15
- llm = HuggingFaceEndpoint(
16
- repo_id="mistralai/Mistral-7B-Instruct-v0.3",
17
- huggingfacehub_api_token=HF_TOKEN.strip(),
18
  temperature=0.7,
19
- task="text-generation",
20
- max_new_tokens=100
21
  )
22
 
23
- # Initialize Streamlit app
24
  st.set_page_config(page_title="LangChain Chatbot with Memory")
25
- st.title("🤖 LangChain Chatbot with Memory")
26
 
27
- # Initialize chat message history
28
  history = StreamlitChatMessageHistory(key="chat_messages")
29
-
30
- # Display chat history
31
  for msg in history.messages:
32
  if msg.type == "human":
33
  st.chat_message("user").write(msg.content)
34
  else:
35
  st.chat_message("assistant").write(msg.content)
36
 
37
- # Initialize memory with chat history
38
  memory = ConversationBufferMemory(chat_memory=history, return_messages=True)
39
 
40
  # Create conversation chain
@@ -42,14 +38,14 @@ conversation = ConversationChain(llm=llm, memory=memory)
42
 
43
  # Chat input
44
  if prompt := st.chat_input("Say something..."):
45
- # Add user message to history
46
  history.add_user_message(prompt)
47
  st.chat_message("user").write(prompt)
48
 
49
- # Generate AI response with word limit instruction
50
  limited_prompt = f"{prompt}\n\nPlease respond in no more than 50 words."
51
  response = conversation.predict(input=limited_prompt)
52
 
53
- # Add AI message to history
54
  history.add_ai_message(response)
55
  st.chat_message("assistant").write(response)
 
1
  import streamlit as st
2
+ import os
3
+ from dotenv import load_dotenv
4
  from langchain.chains import ConversationChain
5
  from langchain_community.chat_message_histories import StreamlitChatMessageHistory
6
  from langchain.memory import ConversationBufferMemory
7
+ from langchain.chat_models import ChatOpenAI
 
 
 
 
 
 
8
 
9
+ # Load OpenAI API key
10
+ load_dotenv()
11
+ OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
12
 
13
+ # Initialize OpenAI Chat Model (e.g., gpt-3.5-turbo)
14
+ llm = ChatOpenAI(
15
+ openai_api_key=OPENAI_API_KEY,
16
+ model_name="gpt-4o-mini",
17
  temperature=0.7,
18
+ max_tokens=50,
 
19
  )
20
 
21
+ # Streamlit UI
22
  st.set_page_config(page_title="LangChain Chatbot with Memory")
23
+ st.title("🤖 LangChain Chatbot with Memory (OpenAI)")
24
 
25
+ # Message history
26
  history = StreamlitChatMessageHistory(key="chat_messages")
 
 
27
  for msg in history.messages:
28
  if msg.type == "human":
29
  st.chat_message("user").write(msg.content)
30
  else:
31
  st.chat_message("assistant").write(msg.content)
32
 
33
+ # Memory setup
34
  memory = ConversationBufferMemory(chat_memory=history, return_messages=True)
35
 
36
  # Create conversation chain
 
38
 
39
  # Chat input
40
  if prompt := st.chat_input("Say something..."):
41
+ # Display and store user message
42
  history.add_user_message(prompt)
43
  st.chat_message("user").write(prompt)
44
 
45
+ # Add 50-word limit prompt
46
  limited_prompt = f"{prompt}\n\nPlease respond in no more than 50 words."
47
  response = conversation.predict(input=limited_prompt)
48
 
49
+ # Display and store assistant message
50
  history.add_ai_message(response)
51
  st.chat_message("assistant").write(response)