Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -2,6 +2,7 @@ import os
|
|
| 2 |
from dotenv import load_dotenv
|
| 3 |
import streamlit as st
|
| 4 |
import openai
|
|
|
|
| 5 |
|
| 6 |
# Load environment variables
|
| 7 |
load_dotenv()
|
|
@@ -20,30 +21,60 @@ if "messages" not in st.session_state:
|
|
| 20 |
for msg in st.session_state["messages"]:
|
| 21 |
st.chat_message(msg["role"]).write(msg["content"])
|
| 22 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 23 |
# Input from user
|
| 24 |
if prompt := st.chat_input():
|
| 25 |
# Append user message to session state
|
| 26 |
st.session_state["messages"].append({"role": "user", "content": prompt})
|
| 27 |
st.chat_message("user").write(prompt)
|
| 28 |
-
|
| 29 |
try:
|
| 30 |
-
#
|
| 31 |
-
|
| 32 |
-
|
| 33 |
-
|
| 34 |
-
|
| 35 |
-
|
| 36 |
-
|
| 37 |
-
|
| 38 |
-
|
| 39 |
-
|
| 40 |
-
|
| 41 |
-
|
| 42 |
-
|
| 43 |
-
|
| 44 |
-
|
| 45 |
-
|
| 46 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 47 |
|
| 48 |
except Exception as e:
|
| 49 |
st.error(f"An error occurred: {e}")
|
|
|
|
# Third-party dependencies for the chat app.
from dotenv import load_dotenv
import streamlit as st
import openai
# NOTE(review): TavilySearchResults is normally provided by
# langchain_community.tools.tavily_search; the `tavily` package itself
# exposes TavilyClient — confirm this import actually resolves.
from tavily import TavilySearchResults

# Load environment variables (e.g. OPENAI_API_KEY, TAVILY_API_KEY) from .env
load_dotenv()
|
|
|
|
# Replay the stored conversation so it persists across Streamlit reruns.
for entry in st.session_state["messages"]:
    st.chat_message(entry["role"]).write(entry["content"])

# Web-search tool used for search-style prompts.
# NOTE(review): topic/days look like Tavily news-search parameters —
# confirm they are accepted by the installed tool version.
tavily_tool = TavilySearchResults(
    max_results=7,
    search_depth="advanced",
    topic="news",
    days=1,
    include_answer=True,
    include_raw_content=True,
    exclude_domains=['example.com'],
)
|
| 35 |
def _format_tavily_results(results):
    """Render Tavily search results as a markdown summary.

    Args:
        results: iterable of result dicts; each is expected to carry a
            'content' value plus 'title' and/or 'url' keys (Tavily result
            shape — TODO confirm against the installed tool version).

    Returns:
        A markdown string listing the results, or a fallback message when
        *results* is empty/None.
    """
    if not results:
        return "I couldn't find any relevant results."
    formatted_results = "\n\n".join(
        # .get() guards against missing keys: Tavily result dicts are not
        # guaranteed to include 'title' (LangChain's tool returns
        # 'url'/'content'), so hard indexing could raise KeyError.
        f"**{i + 1}.** {result.get('title', result.get('url', 'Result'))}\n"
        f"{result.get('content', '')}"
        for i, result in enumerate(results)
    )
    return f"Here are the top results I found:\n\n{formatted_results}"


# Input from user
if prompt := st.chat_input():
    # Append the user message to session state so it survives Streamlit reruns.
    st.session_state["messages"].append({"role": "user", "content": prompt})
    st.chat_message("user").write(prompt)

    try:
        # Crude routing heuristic: plain substring match, so e.g. "finding"
        # also triggers a web search. Kept as-is to preserve behavior.
        if "search for" in prompt.lower() or "find" in prompt.lower():
            # Search-style prompt: answer from the Tavily web-search tool.
            tavily_response = tavily_tool.run(prompt)
            tavily_message = _format_tavily_results(tavily_response)

            # Record and display the tool-based assistant reply.
            st.session_state["messages"].append(
                {"role": "assistant", "content": tavily_message}
            )
            st.chat_message("assistant").write(tavily_message)
        else:
            # General query: ask GPT-4o with the full conversation history.
            response = openai.chat.completions.create(
                model="gpt-4o",
                messages=[
                    {"role": "system", "content": "You are a helpful assistant."},
                    *st.session_state["messages"],
                ],
                max_tokens=150,
                temperature=0.7,
            )
            gpt_message = response.choices[0].message.content.strip()

            # Record and display the model's assistant reply.
            st.session_state["messages"].append(
                {"role": "assistant", "content": gpt_message}
            )
            st.chat_message("assistant").write(gpt_message)
    except Exception as e:
        # Top-level UI boundary: surface any failure in the app instead of
        # crashing the Streamlit script run.
        st.error(f"An error occurred: {e}")
|