abhlash committed on
Commit
5fac443
·
1 Parent(s): 0acb20f

updated with react framework

Browse files
Files changed (1) hide show
  1. app.py +119 -104
app.py CHANGED
@@ -8,13 +8,12 @@ import re
8
 
9
  # Configure logging
10
  logging.basicConfig(
11
- filename='app.log',
12
  level=logging.DEBUG,
13
- format='%(asctime)s - %(levelname)s - %(message)s'
14
  )
15
 
16
- # Test logging
17
- logging.debug("Logging is configured correctly and this is a test message.")
18
 
19
  # Load environment variables
20
  load_dotenv()
@@ -27,79 +26,69 @@ if not GROQ_API_KEY:
27
 
28
  client = Groq(api_key=GROQ_API_KEY)
29
 
30
- # Define the Reflexion system prompt template
31
- SYSTEM_PROMPT_TEMPLATE = (
32
- "You are an advanced AI agent leveraging the Reflexion framework to iteratively improve ideas and responses through up to {reflection_cycles} cycles of reflection. "
33
- "Your goal is to provide the most meaningful, relevant, and impactful results while autonomously managing the process. Follow the structured workflow below:\n\n"
34
- "CRITICAL: Your entire response MUST be a valid JSON object. Follow these strict formatting rules:\n"
35
- "1. Use double quotes (\") for all JSON keys and string values\n"
36
- "2. Escape any double quotes within string values with a backslash (\\\")\n"
37
- "3. Avoid using any unescaped special characters or line breaks within string values\n"
38
- "4. Ensure all JSON keys and values are properly enclosed and comma-separated\n\n"
39
- "Required JSON Structure:\n"
40
- "{{\n"
41
- " \"initial_response\": \"<Provide the initial response here as a string>\",\n"
42
- " \"reflection_cycles\": [\n"
43
- " {{\n"
44
- " \"cycle\": <Cycle number>,\n"
45
- " \"alignment\": \"<Reflection on alignment>\",\n"
46
- " \"feasibility\": \"<Reflection on feasibility>\",\n"
47
- " \"depth\": \"<Reflection on depth>\",\n"
48
- " \"impact\": \"<Reflection on impact>\",\n"
49
- " \"refined_response\": \"<Refined response after this reflection cycle>\"\n"
50
- " }}\n"
51
- " ],\n"
52
- " \"final_output\": \"<Final, polished response>\"\n"
53
- "}}\n\n"
54
- "Initial Response:\n"
55
- "Begin with the following input and provide a well-considered, thoughtful initial answer:\n\n"
56
- "{user_input}\n\n"
57
- "Reflection Cycles (Up to {reflection_cycles}):\n"
58
- "After each response, perform a critical reflection, considering the following:\n"
59
- "- Alignment: Does the answer align with the user's intent?\n"
60
- "- Feasibility: Are the ideas or solutions practical and actionable?\n"
61
- "- Depth: Are there gaps, ambiguities, or missed perspectives?\n"
62
- "- Impact: How meaningful and beneficial is the response to the user?\n"
63
- "Use the feedback from this reflection to refine the response and document it in the JSON structure.\n\n"
64
- "Final Output:\n"
65
- "Provide a final, polished response as the \"final_output\" field in the JSON. The response should be thoughtful, comprehensive, and fully address the user's query.\n\n"
66
- "Previous Context:\n{history_context}\n\n"
67
- "REMINDER: Verify that your response is valid JSON before completing. Do not include any text outside of the JSON structure."
68
- )
69
 
70
  # Initialize Streamlit app
71
- st.title("Reflexion AI Chatbot")
72
 
73
  # Initialize session state
74
  if "messages" not in st.session_state:
75
  st.session_state.messages = []
76
- if "refined_history" not in st.session_state:
77
- st.session_state.refined_history = []
 
78
 
79
  def sanitize_json(json_str):
80
- json_str = re.sub(r'[\x00-\x1F\x7F]', '', json_str) # Remove control characters
81
  return json_str
82
 
83
- # Function to generate responses using the Groq API
84
- def generate_response(user_input, refined_history):
 
 
85
  try:
86
- # Filter out None values and ensure all items are strings
87
- valid_history = [str(item) for item in refined_history if item is not None]
88
-
89
- # Limit the number of historical responses
90
  MAX_HISTORY = 5
91
- history_context = " ".join(valid_history[-MAX_HISTORY:])
 
92
 
93
- # Format the system prompt with history and current input
94
- formatted_prompt = SYSTEM_PROMPT_TEMPLATE.format(
95
- reflection_cycles=reflection_cycles,
96
- user_input=user_input,
97
- history_context=history_context
98
- )
99
 
100
- logging.debug(f"Formatted Prompt Sent: {formatted_prompt}")
 
 
 
 
 
101
 
102
- # Send API request
103
  chat_completion = client.chat.completions.create(
104
  model="llama3-8b-8192",
105
  messages=[
@@ -111,69 +100,95 @@ def generate_response(user_input, refined_history):
111
  top_p=0.9,
112
  )
113
 
114
- logging.debug(f"Full API Response: {chat_completion}")
115
-
116
- # Ensure choices exist
117
- if not chat_completion.choices or len(chat_completion.choices) == 0:
118
- raise ValueError("No valid choices found in response.")
119
 
 
120
  content = chat_completion.choices[0].message.content
121
  if not content:
122
  logging.warning("Received empty content in API response.")
123
  return None
124
 
125
- # Parse JSON response
126
- content = content.strip()
127
- if not content.startswith('{'):
128
- start_idx = content.find('{')
129
- if start_idx != -1:
130
- content = content[start_idx:]
131
- if not content.endswith('}'):
132
- end_idx = content.rfind('}')
133
- if end_idx != -1:
134
- content = content[:end_idx + 1]
135
-
136
- # Sanitize JSON before parsing
137
- cleaned_json = sanitize_json(content)
138
- parsed_json = json.loads(cleaned_json)
139
- logging.debug(f"Parsed JSON Response: {parsed_json}")
140
- return parsed_json
 
 
 
 
 
 
 
 
 
 
 
141
 
142
  except Exception as e:
143
- logging.error(f"Error generating response: {e}", exc_info=True)
144
  return {
145
- "initial_response": "Error occurred",
146
- "reflection_cycles": [],
147
- "final_output": f"An error occurred: {str(e)}",
148
  }
149
 
150
- # Display chat messages from history on app rerun
 
 
151
  for message in st.session_state.messages:
152
  with st.chat_message(message["role"]):
153
  st.markdown(message["content"])
154
 
155
  # Accept user input
156
- user_input = st.chat_input("You: ")
157
 
158
- # Check if user input is submitted via Enter
159
  if user_input:
160
- # Display user message in chat message container
161
  st.chat_message("user").markdown(user_input)
162
  st.session_state.messages.append({"role": "user", "content": user_input})
163
 
164
- # Generate and display assistant response
165
- with st.spinner("Generating response..."):
166
- response = generate_response(user_input, st.session_state.refined_history)
 
167
  if response:
168
  try:
169
- refined_response = response.get("final_output", "")
170
- if refined_response: # Only append non-empty responses
171
- # Add the refined response to history
172
- st.session_state.refined_history.append(refined_response)
173
-
174
- # Display the refined response
175
- st.chat_message("assistant").markdown(refined_response)
176
- st.session_state.messages.append({"role": "assistant", "content": refined_response})
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
177
  except Exception as e:
178
- logging.error(f"Error parsing response: {e}")
179
- st.error("Failed to process the response.")
 
# Configure logging: write DEBUG-and-above records to app.log, one
# timestamped "time - LEVEL - message" line per record.
logging.basicConfig(
    filename="app.log",
    level=logging.DEBUG,
    format="%(asctime)s - %(levelname)s - %(message)s",
)

# Startup probe record: a missing/empty app.log after launch signals a
# logging-configuration problem.
logging.debug("Logging is configured correctly.")


# Load environment variables from a local .env file (the GROQ_API_KEY
# checked further down is expected to come from here).
load_dotenv()
 
# Groq API client authenticated with the key loaded from the environment.
client = Groq(api_key=GROQ_API_KEY)

# Define the ReAct system prompt template.
# {history_context} and {user_input} are filled via str.format_map();
# the doubled braces {{ }} render as literal braces in the JSON example
# shown to the model.
# NOTE(review): downstream parsing appears to use **Thought/Action/
# Observation** markdown regexes rather than json.loads, so the JSON
# instruction here may not match what is actually parsed — confirm.
SYSTEM_PROMPT_TEMPLATE = """
You are an advanced AI agent using the ReAct (Reasoning + Action) framework to solve complex tasks. Follow these steps iteratively:
1. Generate a "Thought" based on the current input or observations.
2. Decide on an "Action" (e.g., search, calculation, etc.) to take.
3. Return an "Observation" after the action to guide the next step.
Continue this loop until the task is solved or no further actions are needed. Return the result in this JSON format:

{{
    "thoughts": [
        {{
            "thought": "<Reasoning step>",
            "action": "<Action taken>",
            "observation": "<Result of the action>"
        }}
    ],
    "final_result": "<Final answer or solution>"
}}

Previous Context:
{history_context}

Input:
{user_input}
"""
 
 
 
 
 
 
 
 
 
 
 
 
 
# Initialize Streamlit app
st.title("ReAct AI Chatbot")

# Initialize session state: the chat transcript and the ReAct context
# history both start as empty lists on first run of the session.
for _state_key in ("messages", "react_history"):
    if _state_key not in st.session_state:
        st.session_state[_state_key] = []
 
def sanitize_json(json_str):
    """Return *json_str* with ASCII control characters removed.

    Strips the C0 control range (0x00-0x1F) and DEL (0x7F), which would
    otherwise produce invalid content inside a JSON string.
    """
    return re.sub(r"[\x00-\x1F\x7F]", "", json_str)
 
70
+ def generate_react_response(user_input, react_history):
71
+ """
72
+ Generate a ReAct-based response for the given input.
73
+ """
74
  try:
75
+ # Combine history context
 
 
 
76
  MAX_HISTORY = 5
77
+ history_context = "\n".join(react_history[-MAX_HISTORY:])
78
+ logging.debug(f"History Context: {history_context}")
79
 
80
+ # Ensure the user_input is sanitized
81
+ user_input = sanitize_json(user_input)
82
+ logging.debug(f"Sanitized User Input: {user_input}")
 
 
 
83
 
84
+ # Format the system prompt
85
+ formatted_prompt = SYSTEM_PROMPT_TEMPLATE.format_map({
86
+ "user_input": user_input,
87
+ "history_context": history_context or "No context available."
88
+ })
89
+ logging.debug(f"Formatted Prompt: {formatted_prompt}")
90
 
91
+ # Send the request to the Groq API
92
  chat_completion = client.chat.completions.create(
93
  model="llama3-8b-8192",
94
  messages=[
 
100
  top_p=0.9,
101
  )
102
 
103
+ logging.debug(f"Raw API Response: {chat_completion}")
 
 
 
 
104
 
105
+ # Extract content from the response
106
  content = chat_completion.choices[0].message.content
107
  if not content:
108
  logging.warning("Received empty content in API response.")
109
  return None
110
 
111
+ # Updated regex patterns to capture full content including recipe details
112
+ thought_match = re.search(r'\*\*Thought:?\*\*:?\s*"?(.*?)(?="?\s*\*\*Action|\n\n|$)', content, re.DOTALL | re.IGNORECASE)
113
+ action_match = re.search(r'\*\*Action:?\*\*:?\s*"?(.*?)(?="?\s*\*\*Observation|\n\n|$)', content, re.DOTALL | re.IGNORECASE)
114
+ observation_match = re.search(r'\*\*Observation:?\*\*:?\s*"?(.*?)(?=\n\n\*\*Thought|\Z)', content, re.DOTALL | re.IGNORECASE)
115
+
116
+ # Extract and clean the matches
117
+ thought = thought_match.group(1).strip(' "') if thought_match else "No thought provided"
118
+ action = action_match.group(1).strip(' "') if action_match else "No action provided"
119
+ observation = observation_match.group(1).strip(' "') if observation_match else "No observation provided"
120
+
121
+ # Check if observation contains a recipe (indicated by "Ingredients:" or "Instructions:")
122
+ if "Ingredients:" in observation or "Instructions:" in observation:
123
+ final_result = observation # Use the full recipe text as the final result
124
+ else:
125
+ final_result = observation if observation != "No observation provided" else "Ready to provide assistance once preferences are specified."
126
+
127
+ parsed_response = {
128
+ "thoughts": [{
129
+ "thought": thought,
130
+ "action": action,
131
+ "observation": observation
132
+ }],
133
+ "final_result": final_result
134
+ }
135
+
136
+ logging.debug(f"Parsed Response: {parsed_response}")
137
+ return parsed_response
138
 
139
  except Exception as e:
140
+ logging.error(f"Error generating ReAct response: {e}", exc_info=True)
141
  return {
142
+ "thoughts": [],
143
+ "final_result": f"An error occurred: {str(e)}",
 
144
  }
# Replay the stored conversation so the transcript survives Streamlit reruns.
for msg in st.session_state.messages:
    with st.chat_message(msg["role"]):
        st.markdown(msg["content"])

# Accept user input
user_input = st.chat_input("Enter your query:")

if user_input:
    # Echo the user's message and record it in the transcript.
    st.chat_message("user").markdown(user_input)
    st.session_state.messages.append({"role": "user", "content": user_input})

    # Generate ReAct-based response
    with st.spinner("Thinking..."):
        response = generate_react_response(user_input, st.session_state.react_history)

    if response:
        try:
            # Render each reasoning step (thought / action / observation).
            for step in response.get("thoughts", []):
                step_text = (
                    f"**Thought:** {step.get('thought', 'No thought provided.')}\n\n"
                    f"**Action:** {step.get('action', 'No action taken.')}\n\n"
                    f"**Observation:** {step.get('observation', 'No observation.')}"
                )
                st.chat_message("assistant").markdown(step_text)
                st.session_state.messages.append(
                    {"role": "assistant", "content": step_text}
                )

            # Show the final answer and record it in the transcript.
            final_result = response.get("final_result", "No final result.")
            final_text = f"**Final Result:** {final_result}"
            st.chat_message("assistant").markdown(final_text)
            st.session_state.messages.append({"role": "assistant", "content": final_text})

            # Remember this exchange so later prompts get it as context.
            st.session_state.react_history.append(f"User: {user_input}\nAI: {final_result}")

        except Exception as e:
            logging.error(f"Error processing ReAct response: {e}")
            st.error("Failed to process the ReAct response.")