abhlash committed on
Commit
0acb20f
·
1 Parent(s): 7ac50fc

updated the system prompt

Browse files
Files changed (1) hide show
  1. app.py +20 -12
app.py CHANGED
@@ -31,8 +31,12 @@ client = Groq(api_key=GROQ_API_KEY)
31
  SYSTEM_PROMPT_TEMPLATE = (
32
  "You are an advanced AI agent leveraging the Reflexion framework to iteratively improve ideas and responses through up to {reflection_cycles} cycles of reflection. "
33
  "Your goal is to provide the most meaningful, relevant, and impactful results while autonomously managing the process. Follow the structured workflow below:\n\n"
34
- "Instructions:\n\n"
35
- "Output the entire response in the following JSON structure:\n"
 
 
 
 
36
  "{{\n"
37
  " \"initial_response\": \"<Provide the initial response here as a string>\",\n"
38
  " \"reflection_cycles\": [\n"
@@ -59,7 +63,8 @@ SYSTEM_PROMPT_TEMPLATE = (
59
  "Use the feedback from this reflection to refine the response and document it in the JSON structure.\n\n"
60
  "Final Output:\n"
61
  "Provide a final, polished response as the \"final_output\" field in the JSON. The response should be thoughtful, comprehensive, and fully address the user's query.\n\n"
62
- "Previous Context:\n{history_context}\n"
 
63
  )
64
 
65
  # Initialize Streamlit app
@@ -78,9 +83,12 @@ def sanitize_json(json_str):
78
  # Function to generate responses using the Groq API
79
  def generate_response(user_input, refined_history):
80
  try:
 
 
 
81
  # Limit the number of historical responses
82
  MAX_HISTORY = 5
83
- history_context = " ".join(refined_history[-MAX_HISTORY:])
84
 
85
  # Format the system prompt with history and current input
86
  formatted_prompt = SYSTEM_PROMPT_TEMPLATE.format(
@@ -158,14 +166,14 @@ if user_input:
158
  response = generate_response(user_input, st.session_state.refined_history)
159
  if response:
160
  try:
161
- refined_response = response["final_output"]
162
-
163
- # Add the refined response to history
164
- st.session_state.refined_history.append(refined_response)
165
-
166
- # Display the refined response
167
- st.chat_message("assistant").markdown(refined_response)
168
- st.session_state.messages.append({"role": "assistant", "content": refined_response})
169
  except Exception as e:
170
  logging.error(f"Error parsing response: {e}")
171
  st.error("Failed to process the response.")
 
31
  SYSTEM_PROMPT_TEMPLATE = (
32
  "You are an advanced AI agent leveraging the Reflexion framework to iteratively improve ideas and responses through up to {reflection_cycles} cycles of reflection. "
33
  "Your goal is to provide the most meaningful, relevant, and impactful results while autonomously managing the process. Follow the structured workflow below:\n\n"
34
+ "CRITICAL: Your entire response MUST be a valid JSON object. Follow these strict formatting rules:\n"
35
+ "1. Use double quotes (\") for all JSON keys and string values\n"
36
+ "2. Escape any double quotes within string values with a backslash (\\\")\n"
37
+ "3. Avoid using any unescaped special characters or line breaks within string values\n"
38
+ "4. Ensure all JSON keys and values are properly enclosed and comma-separated\n\n"
39
+ "Required JSON Structure:\n"
40
  "{{\n"
41
  " \"initial_response\": \"<Provide the initial response here as a string>\",\n"
42
  " \"reflection_cycles\": [\n"
 
63
  "Use the feedback from this reflection to refine the response and document it in the JSON structure.\n\n"
64
  "Final Output:\n"
65
  "Provide a final, polished response as the \"final_output\" field in the JSON. The response should be thoughtful, comprehensive, and fully address the user's query.\n\n"
66
+ "Previous Context:\n{history_context}\n\n"
67
+ "REMINDER: Verify that your response is valid JSON before completing. Do not include any text outside of the JSON structure."
68
  )
69
 
70
  # Initialize Streamlit app
 
83
  # Function to generate responses using the Groq API
84
  def generate_response(user_input, refined_history):
85
  try:
86
+ # Filter out None values and ensure all items are strings
87
+ valid_history = [str(item) for item in refined_history if item is not None]
88
+
89
  # Limit the number of historical responses
90
  MAX_HISTORY = 5
91
+ history_context = " ".join(valid_history[-MAX_HISTORY:])
92
 
93
  # Format the system prompt with history and current input
94
  formatted_prompt = SYSTEM_PROMPT_TEMPLATE.format(
 
166
  response = generate_response(user_input, st.session_state.refined_history)
167
  if response:
168
  try:
169
+ refined_response = response.get("final_output", "")
170
+ if refined_response: # Only append non-empty responses
171
+ # Add the refined response to history
172
+ st.session_state.refined_history.append(refined_response)
173
+
174
+ # Display the refined response
175
+ st.chat_message("assistant").markdown(refined_response)
176
+ st.session_state.messages.append({"role": "assistant", "content": refined_response})
177
  except Exception as e:
178
  logging.error(f"Error parsing response: {e}")
179
  st.error("Failed to process the response.")