abhlash committed
Commit 42da68f · 1 Parent(s): 407eb33

updated app.py with basic functionality

Files changed (3)
  1. .gitignore +17 -0
  2. app.py +135 -0
  3. requirements.txt +4 -0
.gitignore ADDED
@@ -0,0 +1,17 @@
+ # Ignore Python bytecode files
+ __pycache__/
+ *.py[cod]
+
+ # Ignore environment variables file
+ .env
+
+ # Ignore log files
+ *.log
+
+ # Ignore virtual environment directories
+ venv/
+ env/
+
+ # Ignore system files
+ .DS_Store
+ Thumbs.db
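Since .env is ignored here and app.py (below) loads GROQ_API_KEY from it via python-dotenv, a minimal local .env would presumably contain just that one line; the value shown is a placeholder:

GROQ_API_KEY=your_groq_api_key_here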
app.py ADDED
@@ -0,0 +1,135 @@
+ import streamlit as st
+ import os
+ from groq import Groq
+ from dotenv import load_dotenv
+ import logging
+
+ # Configure logging
+ logging.basicConfig(
+     filename='app.log',
+     level=logging.DEBUG,
+     format='%(asctime)s - %(levelname)s - %(message)s'
+ )
+
+ # Test logging
+ logging.debug("Logging is configured correctly and this is a test message.")
+
+ # Load environment variables
+ load_dotenv()
+ reflection_cycles = 2
+
+ # Define the Groq API key and initialize the client
+ GROQ_API_KEY = os.environ.get("GROQ_API_KEY")
+ if not GROQ_API_KEY:
+     raise ValueError("API Key is not set. Please check your environment variables or .env file.")
+
+ client = Groq(api_key=GROQ_API_KEY)
+
+ # Define the Reflexion system prompt with formatted user variables
+ SYSTEM_PROMPT = (
+     "You are an advanced AI agent leveraging the Reflexion framework to iteratively improve ideas and responses through up to {reflection_cycles} cycles of reflection. "
+     "Your goal is to provide the most meaningful, relevant, and impactful results while autonomously managing the process. Follow the structured workflow below:\n\n"
+     "Instructions:\n\n"
+     "Initial Response:\n"
+     "Begin with the {user_input} and provide a well-considered, thoughtful initial answer. Ensure clarity, relevance, and creativity in addressing the query.\n\n"
+     "Reflection Cycles (Up to {reflection_cycles}):\n"
+     "After each response, perform a critical reflection, considering the following:\n"
+     "Alignment: Does the answer align with the user's intent?\n"
+     "Feasibility: Are the ideas or solutions practical and actionable?\n"
+     "Depth: Are there gaps, ambiguities, or missed perspectives?\n"
+     "Impact: How meaningful and beneficial is the response to the user?\n"
+     "Use the feedback from this reflection to refine the response, document insights in a temporary memory buffer, and iterate the process.\n\n"
+     "Continue refining and evolving the response for up to {reflection_cycles} cycles or until you reach a well-optimized conclusion.\n\n"
+     "Episodic Memory Storage:\n"
+     "Maintain a temporary memory buffer to track reflections and refinements. Use this to avoid redundant steps and ensure improvements are based on accumulated insights.\n\n"
+     "Final Output:\n"
+     "Deliver a final, polished answer that is thoughtful, comprehensive, and fully addresses the user's query.\n"
+     "Provide a summary of the ideation journey, highlighting key outcomes and how the response evolved over the iterations."
+ ).format(reflection_cycles=reflection_cycles, user_input="{user_input}")  # "{user_input}" is kept as a literal placeholder; the actual query is sent as a separate user message
+
+ # Initialize Streamlit app
+ st.title("Reflexion AI Agent")
+
+ # Initialize session state
+ if "messages" not in st.session_state:
+     st.session_state.messages = []
+
+ # Function to summarize user input if necessary
+ def summarize_input(user_input):
+     return user_input  # Placeholder: currently returns the input unchanged
+
+ # Function to generate responses using the Groq API
+ def generate_response(user_input, reflection_memory):
+     try:
+         combined_length = len(SYSTEM_PROMPT) + len(user_input)
+         reflection_memory_content = [msg["content"] for msg in reflection_memory[-3:]]
+         reflection_memory_length = len(" ".join(reflection_memory_content))
+         context_limit = 8192
+
+         logging.debug(f"Combined length of system prompt and user input: {combined_length}")
+         logging.debug(f"Reflection memory length: {reflection_memory_length}")
+
+         if combined_length + reflection_memory_length > context_limit:
+             user_input = summarize_input(user_input)
+             combined_length = len(SYSTEM_PROMPT) + len(user_input)
+             logging.debug(f"Summarized input length: {len(user_input)}")
+             logging.debug(f"New combined length: {combined_length + reflection_memory_length}")
+
+         chat_completion = client.chat.completions.create(
+             model="llama3-8b-8192",
+             messages=[
+                 {"role": "system", "content": SYSTEM_PROMPT},
+                 {"role": "user", "content": user_input},
+                 {"role": "assistant", "content": " ".join(reflection_memory_content)}
+             ],
+             max_tokens=1024,
+             temperature=0.6,
+             top_p=0.9,
+         )
+
+         logging.debug(f"Full API Response: {chat_completion}")
+
+         if chat_completion.choices and len(chat_completion.choices) > 0:
+             content = chat_completion.choices[0].message.content
+             if content:
+                 # Extract the "Final Output" section if it exists, otherwise use the entire content
+                 final_output_start = content.find("Final Output:")
+                 if final_output_start != -1:
+                     final_output = content[final_output_start:].strip()
+                 else:
+                     final_output = content.strip()
+                 return final_output
+             else:
+                 logging.warning("Received empty content in API response.")
+                 return None
+         else:
+             raise ValueError("Invalid response format: No choices found.")
+
+     except Exception as e:
+         logging.error(f"Error generating response: {e}", exc_info=True)
+         st.error(f"Error generating response: {e}")
+         return None
+
+ # Display chat messages from history on app rerun
+ for message in st.session_state.messages:
+     with st.chat_message(message["role"]):
+         st.markdown(message["content"])
+
+ # Accept user input
+ if user_input := st.chat_input("You: "):
+     # Display user message in chat message container
+     st.chat_message("user").markdown(user_input)
+     # Add user message to chat history
+     st.session_state.messages.append({"role": "user", "content": user_input})
+
+     # Generate and display assistant response
+     last_valid_response = None
+     for _ in range(reflection_cycles):  # each pass re-queries the model; only the last valid response is kept
+         bot_response = generate_response(user_input, st.session_state.messages)
+         if bot_response:
+             last_valid_response = bot_response
+
+     # Display only the final output
+     if last_valid_response:
+         st.chat_message("assistant").markdown(last_valid_response)
+         st.session_state.messages.append({"role": "assistant", "content": last_valid_response})
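Note that summarize_input above is a pass-through stub: it is invoked when the prompt would exceed the context limit, but it does not yet shorten anything. A minimal sketch of what it might later do, using simple character-based truncation rather than real summarization (hypothetical, not part of this commit; max_chars is an assumed budget):

# Hypothetical replacement for the summarize_input stub (not in this commit).
def summarize_input(user_input, max_chars=4000):
    # Pass short inputs through unchanged.
    if len(user_input) <= max_chars:
        return user_input
    # Keep the head of the message and mark the cut so the model knows text was dropped.
    return user_input[:max_chars] + " ...[truncated]"

Since the second parameter has a default, this would keep the existing single-argument call site working.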
requirements.txt ADDED
@@ -0,0 +1,4 @@
+ streamlit
+ requests
+ python-dotenv
+ groq
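Assuming a .env file with GROQ_API_KEY is in place (see above), the standard Streamlit workflow should be enough to run the app locally:

pip install -r requirements.txt
streamlit run app.py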