# EmailAgent / app.py
import streamlit as st
import os
from groq import Groq
from dotenv import load_dotenv
import logging
import json
# Configure logging
logging.basicConfig(
filename='app.log',
level=logging.DEBUG,
format='%(asctime)s - %(levelname)s - %(message)s'
)
# Emit a startup message to confirm logging is wired up
logging.debug("Logging configured; writing DEBUG output to app.log.")
# Load environment variables
load_dotenv()
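# Number of Reflexion self-critique passes the model is asked to perform per query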
reflection_cycles = 2
# Read the Groq API key and initialize the client
GROQ_API_KEY = os.environ.get("GROQ_API_KEY")
if not GROQ_API_KEY:
    raise ValueError("GROQ_API_KEY is not set. Please check your environment variables or .env file.")
client = Groq(api_key=GROQ_API_KEY)
# Define the Reflexion system prompt template
SYSTEM_PROMPT_TEMPLATE = (
"You are an advanced AI agent leveraging the Reflexion framework to iteratively improve ideas and responses through up to {} cycles of reflection. "
"Your goal is to provide the most meaningful, relevant, and impactful results while autonomously managing the process. Follow the structured workflow below:\n\n"
"Instructions:\n\n"
"Output the entire response in the following JSON structure:\n"
"{{\n"
" \"initial_response\": \"<Provide the initial response here as a string>\",\n"
" \"reflection_cycles\": [\n"
" {{\n"
" \"cycle\": {{cycle}},\n"
" \"alignment\": \"{{Reflection on alignment}}\",\n"
" \"feasibility\": \"{{Reflection on feasibility}}\",\n"
" \"depth\": \"{{Reflection on depth}}\",\n"
" \"impact\": \"{{Reflection on impact}}\",\n"
" \"refined_response\": \"{{Refined response after this reflection cycle}}\"\n"
" }}\n"
" ],\n"
" \"final_output\": \"{{Final, polished response}}\"\n"
"}}\n\n"
"Initial Response:\n"
"Begin with the following input and provide a well-considered, thoughtful initial answer:\n\n"
"{}\n\n"
"Reflection Cycles (Up to {}):\n"
"After each response, perform a critical reflection, considering the following:\n"
"- Alignment: Does the answer align with the user's intent?\n"
"- Feasibility: Are the ideas or solutions practical and actionable?\n"
"- Depth: Are there gaps, ambiguities, or missed perspectives?\n"
"- Impact: How meaningful and beneficial is the response to the user?\n"
"Use the feedback from this reflection to refine the response and document it in the JSON structure.\n\n"
"Final Output:\n"
"Provide a final, polished response as the \"final_output\" field in the JSON. The response should be thoughtful, comprehensive, and fully address the user's query.\n"
)
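# Note: doubled braces ({{ }}) become literal braces after str.format(), so the
# JSON skeleton above reaches the model intact; the three bare {} slots are
# filled in generate_response() with (reflection_cycles, user_input, reflection_cycles).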
# Initialize Streamlit app
st.title("Reflexion AI Agent")
# Initialize session state
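# Streamlit re-runs this script top-to-bottom on every interaction, so the chat
# history must live in st.session_state to survive reruns.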
if "messages" not in st.session_state:
st.session_state.messages = []
# Placeholder hook for condensing overly long inputs before prompting
# (currently a pass-through and not yet called anywhere)
def summarize_input(user_input):
    return user_input
# Function to generate responses using the Groq API
def generate_response(user_input, reflection_memory):
try:
        # Rough character-count bookkeeping for context management; these are
        # logged for debugging only and are a coarse proxy for token usage
        combined_length = len(SYSTEM_PROMPT_TEMPLATE) + len(user_input)
        reflection_memory_content = [msg["content"] for msg in reflection_memory[-3:]]
        reflection_memory_length = len(" ".join(reflection_memory_content))
        context_limit = 8192  # the model's context window (tokens); currently informational only
logging.debug(f"Combined length of system prompt and user input: {combined_length}")
logging.debug(f"Reflection memory length: {reflection_memory_length}")
# Format the SYSTEM_PROMPT with actual user input and reflection cycles
formatted_prompt = SYSTEM_PROMPT_TEMPLATE.format(reflection_cycles, user_input, reflection_cycles)
# Send request to Groq API
chat_completion = client.chat.completions.create(
model="llama3-8b-8192",
messages=[
{"role": "system", "content": formatted_prompt},
{"role": "user", "content": user_input},
{"role": "assistant", "content": " ".join(reflection_memory_content)}
],
max_tokens=2048,
temperature=0.7,
top_p=0.9,
)
logging.debug(f"Full API Response: {chat_completion}")
        # Ensure choices exist in the response
        if not chat_completion.choices:
            raise ValueError("Invalid response format: No choices found.")
content = chat_completion.choices[0].message.content
        if not content:
            logging.warning("Received empty content in API response.")
            # The caller's `if response:` check skips rendering on None
            return None
# Parse the JSON output
try:
            # The model may wrap the JSON in prose; trim everything outside
            # the outermost braces before parsing
content = content.strip()
if not content.startswith('{'):
start_idx = content.find('{')
if start_idx != -1:
content = content[start_idx:]
if not content.endswith('}'):
end_idx = content.rfind('}')
if end_idx != -1:
content = content[:end_idx + 1]
# Parse the JSON content
parsed_json = json.loads(content)
logging.debug(f"Parsed JSON Response: {parsed_json}")
return parsed_json
except json.JSONDecodeError as e:
logging.error(f"JSON parsing error: {e}\nContent: {content}")
# Return fallback response with raw content
return {
"initial_response": "Error parsing response",
"reflection_cycles": [],
"final_output": content,
}
except Exception as e:
logging.error(f"Error generating response: {e}", exc_info=True)
return {
"initial_response": "Error occurred",
"reflection_cycles": [],
"final_output": f"An error occurred: {str(e)}",
}
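# Illustrative shape of a successful return value from generate_response():
# {"initial_response": "...",
#  "reflection_cycles": [{"cycle": 1, "alignment": "...", "feasibility": "...",
#                         "depth": "...", "impact": "...", "refined_response": "..."}],
#  "final_output": "..."}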
# Display chat messages from history on app rerun
for message in st.session_state.messages:
with st.chat_message(message["role"]):
st.markdown(message["content"])
# Accept user input
user_input = st.chat_input("You: ")
# st.chat_input returns the submitted text once, or None until the user submits
if user_input:
# Display user message in chat message container
st.chat_message("user").markdown(user_input)
# Add user message to chat history
st.session_state.messages.append({"role": "user", "content": user_input})
# Generate and display assistant response with a loading spinner
with st.spinner("Generating response..."):
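        # Pass the recent chat history as reflection memory so earlier
        # refinements can inform the next answer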
response = generate_response(user_input, st.session_state.messages)
    # Display refined responses and the final answer
    if response:
        try:
            # Use a local name here: rebinding the module-level reflection_cycles
            # (an int used to format the system prompt) to a list would break
            # every subsequent generate_response() call
            cycles = response.get("reflection_cycles", [])
            if cycles:
                st.markdown("### Refined Responses")
                for cycle in cycles:
                    refined_response = cycle.get("refined_response")
                    if refined_response:
                        st.chat_message("assistant").markdown(refined_response)
                        # Add to session state
                        st.session_state.messages.append({"role": "assistant", "content": refined_response})
                    else:
                        logging.warning("Refined response missing in cycle.")
            else:
                st.warning("No reflection cycles found in the response.")
            # Always show the final answer; on a JSON parsing fallback this is
            # where the raw model output ends up
            final_output = response.get("final_output")
            if final_output:
                st.chat_message("assistant").markdown(final_output)
                st.session_state.messages.append({"role": "assistant", "content": final_output})
        except Exception as e:
            logging.error(f"Error processing response: {e}")
            st.error("Failed to process the response.")
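# To run locally (assuming a .env file containing GROQ_API_KEY):
#   pip install streamlit groq python-dotenv
#   streamlit run app.py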