Spaces:
Sleeping
Sleeping
import os

import streamlit as st
from langchain import hub
from langchain.agents import AgentExecutor, create_openai_tools_agent
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_openai import ChatOpenAI

# Securely set API keys (e.g., in a .streamlit/secrets.toml file or environment variables)
OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
TAVILY_API_KEY = os.getenv('TAVILY_API_KEY')
if not OPENAI_API_KEY or not TAVILY_API_KEY:
    st.error("API keys for OpenAI and Tavily are not set. Please configure them properly.")
    st.stop()


@st.cache_resource
def _build_agent_executor() -> AgentExecutor:
    """Construct the tools agent once per server process.

    Streamlit re-executes the whole script on every user interaction;
    caching here avoids re-creating the model and, in particular,
    re-pulling the prompt from LangChain Hub (a network call) each rerun.

    Returns:
        AgentExecutor wired with a gpt-4o chat model and a Tavily
        web-search tool.
    """
    # Create the chat model instance (gpt-4o, not GPT-3.5)
    llm = ChatOpenAI(model_name="gpt-4o", temperature=0.7, max_tokens=4096)
    # Use Tavily as the tool for web searching
    tools = [TavilySearchResults(max_results=1)]
    # Retrieve the agent prompt from LangChain Hub (network call)
    prompt = hub.pull("hwchase17/openai-tools-agent")
    # Construct the OpenAI tools agent and its executor
    agent = create_openai_tools_agent(llm, tools, prompt)
    return AgentExecutor(agent=agent, tools=tools, verbose=True)


# Build (or fetch the cached) agent; surface initialization failures in the UI
try:
    agent_executor = _build_agent_executor()
except Exception as e:
    st.error(f"Failed to load prompt from hub: {e}")
    st.stop()

# Build the Streamlit web app
st.title("ChatGPT ")
user_input = st.text_input("What do you want to know?")

# Process input and display response
if user_input:
    try:
        response = agent_executor.invoke({"input": user_input})
        # AgentExecutor.invoke returns a dict; "output" holds the final answer
        st.write(response.get("output", "No output received. Please try again."))
    except Exception as e:
        st.error(f"Error processing your request: {e}")