Spaces:
Build error
Build error
T-K-O-H
committed on
Commit
·
600c235
1
Parent(s):
c9027fd
Update code to use environment variables and improve error handling
Browse files
- app.py +3 -75
- requirements.txt +2 -1
app.py
CHANGED
|
@@ -1,88 +1,16 @@
|
|
| 1 |
-
import os
|
| 2 |
-
import chainlit as cl
|
| 3 |
-
from agent_graph import agent_node
|
| 4 |
-
from dotenv import load_dotenv # Import dotenv
|
| 5 |
-
from typing import List, Dict
|
| 6 |
import gradio as gr
|
| 7 |
from test_agent import process_query
|
| 8 |
import logging
|
|
|
|
|
|
|
| 9 |
|
| 10 |
-
# Load environment variables
|
| 11 |
load_dotenv()
|
| 12 |
|
| 13 |
-
# Ensure your OpenAI API key is set up in environment variables
|
| 14 |
-
openai_api_key = os.getenv("OPENAI_API_KEY")
|
| 15 |
-
|
| 16 |
-
if not openai_api_key:
|
| 17 |
-
raise ValueError("OpenAI API key is missing in the .env file")
|
| 18 |
-
|
| 19 |
-
# Store chat history
|
| 20 |
-
chat_histories: Dict[str, List[Dict[str, str]]] = {}
|
| 21 |
-
|
| 22 |
# Set up logging
|
| 23 |
logging.basicConfig(level=logging.INFO)
|
| 24 |
logger = logging.getLogger(__name__)
|
| 25 |
|
| 26 |
-
@cl.on_chat_start
|
| 27 |
-
async def start_chat():
|
| 28 |
-
# Initialize empty chat history for this session
|
| 29 |
-
chat_histories[cl.user_session.get("id")] = []
|
| 30 |
-
|
| 31 |
-
welcome_message = """👋 Welcome to the Stock Price Calculator!
|
| 32 |
-
|
| 33 |
-
I can help you with:
|
| 34 |
-
• Getting real-time stock prices
|
| 35 |
-
• Calculating how many shares you can buy
|
| 36 |
-
|
| 37 |
-
Try these examples:
|
| 38 |
-
• Type 'AAPL' to get Apple's stock price
|
| 39 |
-
• Ask 'How many MSFT shares can I buy with $10000?'
|
| 40 |
-
|
| 41 |
-
What would you like to know?"""
|
| 42 |
-
|
| 43 |
-
await cl.Message(content=welcome_message).send()
|
| 44 |
-
|
| 45 |
-
@cl.on_message
|
| 46 |
-
async def handle_message(message: cl.Message):
|
| 47 |
-
try:
|
| 48 |
-
# Get chat history for this session
|
| 49 |
-
session_id = cl.user_session.get("id")
|
| 50 |
-
history = chat_histories.get(session_id, [])
|
| 51 |
-
|
| 52 |
-
# Add current message to history
|
| 53 |
-
history.append({"role": "user", "content": message.content})
|
| 54 |
-
|
| 55 |
-
# Create state dictionary with history
|
| 56 |
-
state = {
|
| 57 |
-
"input": message.content,
|
| 58 |
-
"chat_history": history
|
| 59 |
-
}
|
| 60 |
-
|
| 61 |
-
# Process the message with agent_node
|
| 62 |
-
response = agent_node(state)
|
| 63 |
-
|
| 64 |
-
# Send the response back to the user
|
| 65 |
-
if isinstance(response, dict) and "output" in response:
|
| 66 |
-
# Add response to history
|
| 67 |
-
history.append({"role": "assistant", "content": response["output"]})
|
| 68 |
-
await cl.Message(content=response["output"]).send()
|
| 69 |
-
else:
|
| 70 |
-
await cl.Message(content="Received an invalid response format from the agent.").send()
|
| 71 |
-
|
| 72 |
-
# Update history in storage
|
| 73 |
-
chat_histories[session_id] = history
|
| 74 |
-
|
| 75 |
-
except Exception as e:
|
| 76 |
-
print(f"Error occurred: {e}")
|
| 77 |
-
await cl.Message(content="Sorry, something went wrong while processing your request.").send()
|
| 78 |
-
|
| 79 |
-
@cl.on_chat_end
|
| 80 |
-
async def end_chat():
|
| 81 |
-
# Clean up chat history when session ends
|
| 82 |
-
session_id = cl.user_session.get("id")
|
| 83 |
-
if session_id in chat_histories:
|
| 84 |
-
del chat_histories[session_id]
|
| 85 |
-
|
| 86 |
def respond(message, history):
|
| 87 |
"""Process the user's message and return the response."""
|
| 88 |
try:
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
import gradio as gr
|
| 2 |
from test_agent import process_query
|
| 3 |
import logging
|
| 4 |
+
import os
|
| 5 |
+
from dotenv import load_dotenv
|
| 6 |
|
| 7 |
+
# Load environment variables
|
| 8 |
load_dotenv()
|
| 9 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 10 |
# Set up logging
|
| 11 |
logging.basicConfig(level=logging.INFO)
|
| 12 |
logger = logging.getLogger(__name__)
|
| 13 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 14 |
def respond(message, history):
|
| 15 |
"""Process the user's message and return the response."""
|
| 16 |
try:
|
requirements.txt
CHANGED
|
@@ -2,4 +2,5 @@ langchain==0.0.267
|
|
| 2 |
openai==0.28.0
|
| 3 |
python-dotenv==1.0.0
|
| 4 |
yfinance==0.2.18
|
| 5 |
-
gradio==3.50.2
|
|
|
|
|
|
| 2 |
openai==0.28.0
|
| 3 |
python-dotenv==1.0.0
|
| 4 |
yfinance==0.2.18
|
| 5 |
+
gradio==3.50.2
|
| 6 |
+
requests==2.31.0
|