abhlash committed on
Commit 7ac50fc · 1 Parent(s): 999d8ef

updated app.py

Files changed (1)
  1. app.py +61 -78
app.py CHANGED
@@ -4,6 +4,7 @@ from groq import Groq
 from dotenv import load_dotenv
 import logging
 import json
+import re
 
 # Configure logging
 logging.basicConfig(
@@ -28,7 +29,7 @@ client = Groq(api_key=GROQ_API_KEY)
 
 # Define the Reflexion system prompt template
 SYSTEM_PROMPT_TEMPLATE = (
-    "You are an advanced AI agent leveraging the Reflexion framework to iteratively improve ideas and responses through up to {} cycles of reflection. "
+    "You are an advanced AI agent leveraging the Reflexion framework to iteratively improve ideas and responses through up to {reflection_cycles} cycles of reflection. "
     "Your goal is to provide the most meaningful, relevant, and impactful results while autonomously managing the process. Follow the structured workflow below:\n\n"
     "Instructions:\n\n"
     "Output the entire response in the following JSON structure:\n"
@@ -36,20 +37,20 @@ SYSTEM_PROMPT_TEMPLATE = (
     " \"initial_response\": \"<Provide the initial response here as a string>\",\n"
     " \"reflection_cycles\": [\n"
     " {{\n"
-    " \"cycle\": {{cycle}},\n"
-    " \"alignment\": \"{{Reflection on alignment}}\",\n"
-    " \"feasibility\": \"{{Reflection on feasibility}}\",\n"
-    " \"depth\": \"{{Reflection on depth}}\",\n"
-    " \"impact\": \"{{Reflection on impact}}\",\n"
-    " \"refined_response\": \"{{Refined response after this reflection cycle}}\"\n"
+    " \"cycle\": <Cycle number>,\n"
+    " \"alignment\": \"<Reflection on alignment>\",\n"
+    " \"feasibility\": \"<Reflection on feasibility>\",\n"
+    " \"depth\": \"<Reflection on depth>\",\n"
+    " \"impact\": \"<Reflection on impact>\",\n"
+    " \"refined_response\": \"<Refined response after this reflection cycle>\"\n"
     " }}\n"
     " ],\n"
-    " \"final_output\": \"{{Final, polished response}}\"\n"
+    " \"final_output\": \"<Final, polished response>\"\n"
     "}}\n\n"
     "Initial Response:\n"
     "Begin with the following input and provide a well-considered, thoughtful initial answer:\n\n"
-    "{}\n\n"
-    "Reflection Cycles (Up to {}):\n"
+    "{user_input}\n\n"
+    "Reflection Cycles (Up to {reflection_cycles}):\n"
     "After each response, perform a critical reflection, considering the following:\n"
     "- Alignment: Does the answer align with the user's intent?\n"
     "- Feasibility: Are the ideas or solutions practical and actionable?\n"
@@ -57,44 +58,45 @@ SYSTEM_PROMPT_TEMPLATE = (
     "- Impact: How meaningful and beneficial is the response to the user?\n"
     "Use the feedback from this reflection to refine the response and document it in the JSON structure.\n\n"
     "Final Output:\n"
-    "Provide a final, polished response as the \"final_output\" field in the JSON. The response should be thoughtful, comprehensive, and fully address the user's query.\n"
+    "Provide a final, polished response as the \"final_output\" field in the JSON. The response should be thoughtful, comprehensive, and fully address the user's query.\n\n"
+    "Previous Context:\n{history_context}\n"
 )
 
 # Initialize Streamlit app
-st.title("Reflexion AI Agent")
+st.title("Reflexion AI Chatbot")
 
 # Initialize session state
 if "messages" not in st.session_state:
     st.session_state.messages = []
+if "refined_history" not in st.session_state:
+    st.session_state.refined_history = []
 
-# Function to summarize user input if necessary
-def summarize_input(user_input):
-    return user_input
+def sanitize_json(json_str):
+    json_str = re.sub(r'[\x00-\x1F\x7F]', '', json_str)  # Remove control characters
+    return json_str
 
 # Function to generate responses using the Groq API
-# Function to generate responses using the Groq API
-# Function to generate responses using the Groq API
-def generate_response(user_input, reflection_memory):
+def generate_response(user_input, refined_history):
     try:
-        # Prepare lengths for context management
-        combined_length = len(SYSTEM_PROMPT_TEMPLATE) + len(user_input)
-        reflection_memory_content = [msg["content"] for msg in reflection_memory[-3:]]
-        reflection_memory_length = len(" ".join(reflection_memory_content))
-        context_limit = 8192
-
-        logging.debug(f"Combined length of system prompt and user input: {combined_length}")
-        logging.debug(f"Reflection memory length: {reflection_memory_length}")
+        # Limit the number of historical responses
+        MAX_HISTORY = 5
+        history_context = " ".join(refined_history[-MAX_HISTORY:])
+
+        # Format the system prompt with history and current input
+        formatted_prompt = SYSTEM_PROMPT_TEMPLATE.format(
+            reflection_cycles=reflection_cycles,
+            user_input=user_input,
+            history_context=history_context
+        )
 
-        # Format the SYSTEM_PROMPT with actual user input and reflection cycles
-        formatted_prompt = SYSTEM_PROMPT_TEMPLATE.format(reflection_cycles, user_input, reflection_cycles)
+        logging.debug(f"Formatted Prompt Sent: {formatted_prompt}")
 
-        # Send request to Groq API
+        # Send API request
        chat_completion = client.chat.completions.create(
             model="llama3-8b-8192",
             messages=[
                 {"role": "system", "content": formatted_prompt},
                 {"role": "user", "content": user_input},
-                {"role": "assistant", "content": " ".join(reflection_memory_content)}
             ],
             max_tokens=2048,
             temperature=0.7,
@@ -103,41 +105,31 @@ def generate_response(user_input, reflection_memory):
 
         logging.debug(f"Full API Response: {chat_completion}")
 
-        # Ensure choices exist in the response
+        # Ensure choices exist
         if not chat_completion.choices or len(chat_completion.choices) == 0:
-            raise ValueError("Invalid response format: No choices found.")
+            raise ValueError("No valid choices found in response.")
 
         content = chat_completion.choices[0].message.content
         if not content:
             logging.warning("Received empty content in API response.")
             return None
 
-        # Parse the JSON output
-        try:
-            # Clean and preprocess the content
-            content = content.strip()
-            if not content.startswith('{'):
-                start_idx = content.find('{')
-                if start_idx != -1:
-                    content = content[start_idx:]
-            if not content.endswith('}'):
-                end_idx = content.rfind('}')
-                if end_idx != -1:
-                    content = content[:end_idx + 1]
-
-            # Parse the JSON content
-            parsed_json = json.loads(content)
-            logging.debug(f"Parsed JSON Response: {parsed_json}")
-            return parsed_json
-        except json.JSONDecodeError as e:
-            logging.error(f"JSON parsing error: {e}\nContent: {content}")
-
-            # Return fallback response with raw content
-            return {
-                "initial_response": "Error parsing response",
-                "reflection_cycles": [],
-                "final_output": content,
-            }
+        # Parse JSON response
+        content = content.strip()
+        if not content.startswith('{'):
+            start_idx = content.find('{')
+            if start_idx != -1:
+                content = content[start_idx:]
+        if not content.endswith('}'):
+            end_idx = content.rfind('}')
+            if end_idx != -1:
+                content = content[:end_idx + 1]
+
+        # Sanitize JSON before parsing
+        cleaned_json = sanitize_json(content)
+        parsed_json = json.loads(cleaned_json)
+        logging.debug(f"Parsed JSON Response: {parsed_json}")
+        return parsed_json
 
     except Exception as e:
         logging.error(f"Error generating response: {e}", exc_info=True)
@@ -147,7 +139,6 @@ def generate_response(user_input, reflection_memory):
             "final_output": f"An error occurred: {str(e)}",
         }
 
-
 # Display chat messages from history on app rerun
 for message in st.session_state.messages:
     with st.chat_message(message["role"]):
@@ -160,29 +151,21 @@ user_input = st.chat_input("You: ")
 if user_input:
     # Display user message in chat message container
     st.chat_message("user").markdown(user_input)
-    # Add user message to chat history
     st.session_state.messages.append({"role": "user", "content": user_input})
 
-    # Generate and display assistant response with a loading spinner
+    # Generate and display assistant response
     with st.spinner("Generating response..."):
-        response = generate_response(user_input, st.session_state.messages)
-        # Display refined responses dynamically
+        response = generate_response(user_input, st.session_state.refined_history)
     if response:
        try:
-            reflection_cycles = response.get("reflection_cycles", [])
-
-            if reflection_cycles:
-                st.markdown("### Refined Responses")
-                for cycle in reflection_cycles:
-                    refined_response = cycle.get("refined_response", None)
-                    if refined_response:
-                        st.chat_message("assistant").markdown(refined_response)
-                        # Add to session state
-                        st.session_state.messages.append({"role": "assistant", "content": refined_response})
-                    else:
-                        logging.warning("Refined response missing in cycle.")
-            else:
-                st.warning("No reflection cycles found in the response.")
+            refined_response = response["final_output"]
+
+            # Add the refined response to history
+            st.session_state.refined_history.append(refined_response)
+
+            # Display the refined response
+            st.chat_message("assistant").markdown(refined_response)
+            st.session_state.messages.append({"role": "assistant", "content": refined_response})
        except Exception as e:
-            logging.error(f"Error processing response: {e}")
+            logging.error(f"Error parsing response: {e}")
            st.error("Failed to process the response.")
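For reference, the new parsing path in this commit trims the model output to its outermost braces and strips control characters before handing it to json.loads. The snippet below is a minimal standalone sketch of that recovery step, not part of the commit: only sanitize_json mirrors the diff, while extract_json is a hypothetical wrapper and the sample payload is invented for illustration.

import json
import re

def sanitize_json(json_str):
    # Remove ASCII control characters that json.loads rejects inside strings
    return re.sub(r'[\x00-\x1F\x7F]', '', json_str)

def extract_json(content):
    # Keep only the outermost {...} span, discarding any prose the model
    # wrapped around its JSON answer, then sanitize and parse it.
    content = content.strip()
    start, end = content.find('{'), content.rfind('}')
    if start == -1 or end == -1:
        raise ValueError("No JSON object found in model output.")
    return json.loads(sanitize_json(content[start:end + 1]))

# Invented example: prose plus a stray control character around the JSON
raw = 'Sure, here is the JSON:\n{"final_output": "Hello\x07world"}\nHope that helps!'
print(extract_json(raw))  # {'final_output': 'Helloworld'}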