import os
from langchain import hub
from langchain.agents import AgentExecutor, create_openai_tools_agent
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_openai import ChatOpenAI
import streamlit as st
# --- Configuration -----------------------------------------------------------
# Securely set API keys (e.g., in a .streamlit/secrets.toml file or
# environment variables); both must be present before anything else runs.
OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
TAVILY_API_KEY = os.getenv('TAVILY_API_KEY')
if not OPENAI_API_KEY or not TAVILY_API_KEY:
    st.error("API keys for OpenAI and Tavily are not set. Please configure them properly.")
    st.stop()  # halt the script; nothing below can work without the keys

# Create the chat model instance.
# NOTE: the model configured here is gpt-4o (the original comment claimed
# GPT-3.5, which did not match the code).
llm = ChatOpenAI(model_name="gpt-4o", temperature=0.7, max_tokens=4096)

# Use Tavily as the tool for web searching (single best result per query).
tools = [TavilySearchResults(max_results=1)]

# Retrieve the agent prompt from LangChain Hub; abort cleanly if unreachable.
try:
    prompt = hub.pull("hwchase17/openai-tools-agent")
except Exception as e:  # broad catch is intentional: top-level UI boundary
    st.error(f"Failed to load prompt from hub: {e}")
    st.stop()

# Construct the OpenAI tools agent and its executor.
agent = create_openai_tools_agent(llm, tools, prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)

# --- Streamlit UI ------------------------------------------------------------
st.title("ChatGPT ")
user_input = st.text_input("What do you want to know?")

# Process input and display the agent's answer.
if user_input:
    try:
        response = agent_executor.invoke({"input": user_input})
        # .get() guards against a missing "output" key in the agent result.
        st.write(response.get("output", "No output received. Please try again."))
    except Exception as e:  # surface the error to the user instead of crashing
        st.error(f"Error processing your request: {e}")
|