Atreyu4EVR committed on
Commit
ae4609c
·
verified ·
1 Parent(s): 556c9ee

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +20 -15
app.py CHANGED
@@ -57,8 +57,8 @@ def setup_rag_pipeline():
57
  return retriever
58
 
59
  def reset_conversation():
60
- st.session_state.conversation = []
61
- st.session_state.messages = []
62
 
63
  def main():
64
  st.header('Multi-Models with RAG')
@@ -73,9 +73,9 @@ def main():
73
  st.session_state.prev_option = selected_model
74
 
75
  if st.session_state.prev_option != selected_model:
76
- st.session_state.messages = []
 
77
  st.session_state.prev_option = selected_model
78
- reset_conversation()
79
 
80
  st.markdown(f'_powered_ by ***:violet[{selected_model}]***')
81
 
@@ -84,11 +84,13 @@ def main():
84
  st.sidebar.markdown("*Generated content may be inaccurate or false.*")
85
 
86
  # Initialize chat history
87
- if "messages" not in st.session_state:
88
- st.session_state.messages = []
 
 
89
 
90
  # Display chat messages from history on app rerun
91
- for message in st.session_state.messages:
92
  with st.chat_message(message["role"]):
93
  st.markdown(message["content"])
94
 
@@ -103,27 +105,28 @@ def process_user_input(client, prompt, selected_model, temperature, retriever):
103
  # Display user message
104
  with st.chat_message("user"):
105
  st.markdown(prompt)
 
106
 
107
  # Retrieve relevant documents
108
  relevant_docs = retriever.get_relevant_documents(prompt)
109
  context = "\n".join([doc.page_content for doc in relevant_docs])
110
 
111
- # Prepare messages with context
112
- messages = [
113
  {"role": "system", "content": f"You are an AI assistant. Use the following context to answer the user's question: {context}"},
 
114
  {"role": "user", "content": prompt}
115
  ]
116
- st.session_state.messages.extend(messages)
 
 
117
 
118
  # Generate and display assistant response
119
  with st.chat_message("assistant"):
120
  try:
121
  stream = client.chat.completions.create(
122
  model=model_links[selected_model],
123
- messages=[
124
- {"role": m["role"], "content": m["content"]}
125
- for m in st.session_state.messages
126
- ],
127
  temperature=temperature,
128
  stream=True,
129
  max_tokens=MAX_TOKENS,
@@ -133,7 +136,9 @@ def process_user_input(client, prompt, selected_model, temperature, retriever):
133
  handle_error(e)
134
  return
135
 
136
- st.session_state.messages.append({"role": "assistant", "content": response})
 
 
137
 
138
  def handle_error(error):
139
  response = """😵‍💫 Looks like someone unplugged something!
 
57
  return retriever
58
 
59
  def reset_conversation():
60
+ st.session_state.visible_messages = []
61
+ st.session_state.full_context = []
62
 
63
  def main():
64
  st.header('Multi-Models with RAG')
 
73
  st.session_state.prev_option = selected_model
74
 
75
  if st.session_state.prev_option != selected_model:
76
+ st.session_state.visible_messages = []
77
+ st.session_state.full_context = []
78
  st.session_state.prev_option = selected_model
 
79
 
80
  st.markdown(f'_powered_ by ***:violet[{selected_model}]***')
81
 
 
84
  st.sidebar.markdown("*Generated content may be inaccurate or false.*")
85
 
86
  # Initialize chat history
87
+ if "visible_messages" not in st.session_state:
88
+ st.session_state.visible_messages = []
89
+ if "full_context" not in st.session_state:
90
+ st.session_state.full_context = []
91
 
92
  # Display chat messages from history on app rerun
93
+ for message in st.session_state.visible_messages:
94
  with st.chat_message(message["role"]):
95
  st.markdown(message["content"])
96
 
 
105
  # Display user message
106
  with st.chat_message("user"):
107
  st.markdown(prompt)
108
+ st.session_state.visible_messages.append({"role": "user", "content": prompt})
109
 
110
  # Retrieve relevant documents
111
  relevant_docs = retriever.get_relevant_documents(prompt)
112
  context = "\n".join([doc.page_content for doc in relevant_docs])
113
 
114
+ # Prepare full context with system message and retrieved context
115
+ full_context = [
116
  {"role": "system", "content": f"You are an AI assistant. Use the following context to answer the user's question: {context}"},
117
+ *st.session_state.full_context,
118
  {"role": "user", "content": prompt}
119
  ]
120
+
121
+ # Update full context in session state
122
+ st.session_state.full_context = full_context
123
 
124
  # Generate and display assistant response
125
  with st.chat_message("assistant"):
126
  try:
127
  stream = client.chat.completions.create(
128
  model=model_links[selected_model],
129
+ messages=full_context,
 
 
 
130
  temperature=temperature,
131
  stream=True,
132
  max_tokens=MAX_TOKENS,
 
136
  handle_error(e)
137
  return
138
 
139
+ # Update visible messages and full context
140
+ st.session_state.visible_messages.append({"role": "assistant", "content": response})
141
+ st.session_state.full_context.append({"role": "assistant", "content": response})
142
 
143
  def handle_error(error):
144
  response = """😵‍💫 Looks like someone unplugged something!