Update app.py
app.py CHANGED
@@ -40,29 +40,7 @@ def Bot(Questions):
     llama3 = Together(model="meta-llama/Llama-3-70b-chat-hf", max_tokens=250)
     Generated_chat = LLMChain(llm=llama3, prompt=prompt)
 
-
-        response = Generated_chat.invoke({
-            "text": text,
-            "Questions": Questions
-        })
-
-        response_text = response['text']
-
-        response_text = response_text.replace("assistant", "")
-
-        # Post-processing to handle repeated words and ensure completeness
-        words = response_text.split()
-        seen = set()
-        filtered_words = [word for word in words if word.lower() not in seen and not seen.add(word.lower())]
-        response_text = ' '.join(filtered_words)
-        response_text = response_text.strip()  # Ensuring no extra spaces at the ends
-        if not response_text.endswith('.'):
-            response_text += '.'
-
-        return response_text
-    except Exception as e:
-        return f"Error in generating response: {e}"
-
+
 def ChatBot(Questions):
     greetings = ["hi", "hello", "hey", "greetings", "what's up", "howdy"]
     # Check if the input question is a greeting
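For reference, the post-processing removed in this hunk leaned on a compact set-side-effect idiom for order-preserving, case-insensitive word deduplication. A minimal standalone sketch of that idiom (the `dedupe_words` name is illustrative, not from the app):

# set.add() returns None, so the `and not seen.add(...)` clause records each
# new word while keeping the filter condition truthy for first occurrences.
def dedupe_words(text: str) -> str:
    seen = set()
    return ' '.join(
        word for word in text.split()
        if word.lower() not in seen and not seen.add(word.lower())
    )

print(dedupe_words("the model the Model replied replied"))  # -> "the model replied"

Note that this drops every later occurrence of a word, legitimate repetition included, which makes it a fairly blunt cleanup step.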
@@ -106,4 +84,25 @@ if prompt := st.chat_input():
 
     with st.chat_message("assistant"):
         message_placeholder = st.empty()
-        full_response = ""
+        full_response = ""
+
+        try:
+            _chat_history = st.session_state.langchain_messages[1:40]
+            _chat_history_transform = list(
+                chunked([msg.content for msg in _chat_history], n=2)
+            )
+
+            response = rag_chain.stream(
+                {"question": prompt, "chat_history": _chat_history_transform}
+            )
+
+            for res in response:
+                full_response += res or ""
+                message_placeholder.markdown(full_response + "|")
+            message_placeholder.markdown(full_response)
+
+            msgs.add_user_message(prompt)
+            msgs.add_ai_message(full_response)
+
+        except Exception as e:
+            st.error(f"An error occurred. {e}")
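The new streaming path pairs the stored history into two-message turns before handing it to `rag_chain.stream`. A small sketch of that transformation under stated assumptions: `chunked` is assumed to be `more_itertools.chunked` (the diff does not show the import), and `st.session_state.langchain_messages` is assumed to alternate human and AI messages after an initial greeting, which would explain the `[1:40]` slice.

# Sketch only: pairs consecutive messages into (human, ai) turns, the shape
# history-aware LangChain chains typically expect for `chat_history`.
from more_itertools import chunked

history = [
    "What is RAG?",                        # human
    "Retrieval-augmented generation ...",  # ai
    "Which vector store does it use?",     # human
    "That depends on the app setup ...",   # ai
]

pairs = list(chunked(history, n=2))
print(pairs)
# [['What is RAG?', 'Retrieval-augmented generation ...'],
#  ['Which vector store does it use?', 'That depends on the app setup ...']]

The render loop that follows in the diff is the usual Streamlit streaming pattern: append each chunk to `full_response`, redraw the placeholder with a trailing "|" as a cursor, then overwrite the placeholder with the final text once the stream ends.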