Rajan Sharma committed on
Commit
a990f93
·
verified ·
1 Parent(s): dfddecf

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +12 -16
app.py CHANGED
@@ -9,9 +9,9 @@ from typing import List, Dict, Any
9
  import gradio as gr
10
  import pandas as pd
11
  from datetime import datetime
 
12
 
13
  # --- BACKEND IMPORTS ---
14
- import regex as re2
15
  from langchain_cohere import ChatCohere
16
 
17
  # --- LOCAL MODULE IMPORTS ---
@@ -39,6 +39,7 @@ def _sanitize_text(s: str) -> str:
39
 
40
  def _create_python_script(user_scenario: str, schema_context: str) -> str:
41
  """Uses an LLM to act as an "AI Coder", writing a complete Python script."""
 
42
  prompt_for_coder = f"""
43
  You are an expert Python data scientist. Your sole job is to write a single, complete, and executable Python script to answer the user's request.
44
  You have access to a list of pandas dataframes loaded into a variable named `dfs`.
@@ -48,9 +49,10 @@ You have access to a list of pandas dataframes loaded into a variable named `dfs
48
  --- END SCHEMA ---
49
 
50
  CRITICAL RULES FOR YOUR SCRIPT:
51
- 1. **HANDLE DATA TYPES:** Before performing any mathematical operations, you MUST explicitly convert string values (e.g., '5.5%') to numeric types (`float` or `int`).
52
  2. **CHECK COLUMN NAMES:** You MUST use the exact, case-sensitive column names provided in the DATA SCHEMA. A `KeyError` will cause a failure.
53
- 3. **PRINT FINDINGS:** Use the `print()` function at each step to output your results as a formatted report.
 
54
 
55
  --- USER'S SCENARIO ---
56
  {user_scenario}
@@ -82,7 +84,7 @@ def ping_cohere() -> str:
82
  # --- THE CORE ANALYSIS ENGINE ---
83
 
84
  def handle(user_msg: str, files: list) -> str:
85
- """This is the powerful backend engine that supports both modes."""
86
  try:
87
  safe_in, blocked_in, reason_in = safety_filter(user_msg, mode="input")
88
  if blocked_in: return refusal_reply(reason_in)
@@ -90,7 +92,6 @@ def handle(user_msg: str, files: list) -> str:
90
  file_paths: List[str] = [getattr(f, "name", None) or f for f in (files or [])]
91
 
92
  if file_paths:
93
- # --- MODE 1: DATA ANALYST (files are present) ---
94
  dataframes = []
95
  schema_parts = []
96
  for i, p in enumerate(file_paths):
@@ -111,12 +112,12 @@ def handle(user_msg: str, files: list) -> str:
111
  try:
112
  with redirect_stdout(output_buffer):
113
  exec(analysis_script, execution_namespace)
 
114
  result = output_buffer.getvalue()
115
- return _sanitize_text(result or "(The script ran but produced no output.)")
116
  except Exception as e:
117
  return f"An error occurred executing the script: {e}\n\nGenerated Script:\n```python\n{analysis_script}\n```"
118
  else:
119
- # --- MODE 2: CONVERSATIONAL AI (no files are present) ---
120
  prompt = f"{GENERAL_CONVERSATION_PROMPT}\n\nUser: {safe_in}\nAssistant:"
121
  return _sanitize_text(cohere_chat(prompt) or "How can I help further?")
122
 
@@ -129,7 +130,7 @@ def handle(user_msg: str, files: list) -> str:
129
  PRIVACY_POLICY_TEXT = load_markdown_text("privacy_policy.md")
130
  TERMS_OF_SERVICE_TEXT = load_markdown_text("terms_of_service.md")
131
 
132
- # ---------------- THE PROFESSIONAL UI WITH DUAL-MODE SUPPORT ----------------
133
  with gr.Blocks(theme="soft", css="style.css") as demo:
134
  assessment_history = gr.State([])
135
 
@@ -147,12 +148,11 @@ with gr.Blocks(theme="soft", css="style.css") as demo:
147
  with gr.Row(variant="panel"):
148
  with gr.Column(scale=1):
149
  gr.Markdown("## New Assessment")
150
- gr.Markdown("<p style='font-size:0.9rem; color: #6C757D;'>Upload CSV files for data analysis, or just enter a prompt to chat with the AI.</p>") # UX Improvement
151
-
152
  files_input = gr.Files(label="Upload Data Files (.csv)", file_count="multiple", type="filepath", file_types=[".csv"])
153
  prompt_input = gr.Textbox(label="Prompt", placeholder="Paste your scenario or question here.", lines=15)
154
  with gr.Row():
155
- send_btn = gr.Button("▶️ Send / Run Analysis", variant="primary", scale=2) # UX Improvement
156
  clear_btn = gr.Button("🗑️ Clear")
157
  ping_btn = gr.Button("Ping Cohere")
158
  ping_out = gr.Markdown()
@@ -171,14 +171,13 @@ with gr.Blocks(theme="soft", css="style.css") as demo:
171
  terms_link = gr.Button("Terms of Service", variant="link")
172
 
173
  def run_analysis_wrapper(prompt, files, chat_history_list, history_state_list):
174
- # --- THE LOGIC FIX IS HERE ---
175
  if not prompt:
176
  gr.Warning("Please enter a prompt.")
177
  yield chat_history_list, history_state_list, gr.update()
178
  return
179
 
180
  chat_with_user_msg = _append_msg(chat_history_list, "user", prompt)
181
- thinking_message = _append_msg(chat_with_user_msg, "assistant", "```\n🧠 Thinking... Please wait.\n```")
182
  yield thinking_message, history_state_list, gr.update()
183
 
184
  ai_response_text = handle(prompt, files)
@@ -186,7 +185,6 @@ with gr.Blocks(theme="soft", css="style.css") as demo:
186
  final_chat = _append_msg(chat_with_user_msg, "assistant", ai_response_text)
187
  timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
188
 
189
- # Only save to history if it was a data analysis session
190
  if files:
191
  file_names = [os.path.basename(f.name if hasattr(f, 'name') else f) for f in files]
192
  new_assessment = {"id": timestamp, "prompt": prompt, "files": file_names, "response": ai_response_text}
@@ -194,10 +192,8 @@ with gr.Blocks(theme="soft", css="style.css") as demo:
194
  history_labels = [f"{item['id']} - {item['prompt'][:40]}..." for item in updated_history]
195
  yield final_chat, updated_history, gr.update(choices=history_labels)
196
  else:
197
- # For simple chat, just update the chat window
198
  yield final_chat, history_state_list, gr.update()
199
 
200
-
201
  def view_history(selection, history_state_list):
202
  if not selection or not history_state_list: return ""
203
  selected_id = selection.split(" - ")
 
9
  import gradio as gr
10
  import pandas as pd
11
  from datetime import datetime
12
+ import regex as re2
13
 
14
  # --- BACKEND IMPORTS ---
 
15
  from langchain_cohere import ChatCohere
16
 
17
  # --- LOCAL MODULE IMPORTS ---
 
39
 
40
  def _create_python_script(user_scenario: str, schema_context: str) -> str:
41
  """Uses an LLM to act as an "AI Coder", writing a complete Python script."""
42
+ # --- THE FINAL PROMPT FIX IS HERE ---
43
  prompt_for_coder = f"""
44
  You are an expert Python data scientist. Your sole job is to write a single, complete, and executable Python script to answer the user's request.
45
  You have access to a list of pandas dataframes loaded into a variable named `dfs`.
 
49
  --- END SCHEMA ---
50
 
51
  CRITICAL RULES FOR YOUR SCRIPT:
52
+ 1. **ROBUST STRING CLEANING:** Before converting a string to a number (e.g., with `.astype(float)`), you MUST first remove ALL non-numeric characters that are not a digit or a decimal point. This includes characters like `$`, `%`, `~`, and commas. Use `.str.replace()` with a regular expression like `r'[^0-9.-]'` to do this safely. Failure to do this will cause a fatal `ValueError`.
53
  2. **CHECK COLUMN NAMES:** You MUST use the exact, case-sensitive column names provided in the DATA SCHEMA. A `KeyError` will cause a failure.
54
+ 3. **USE THE DATAFRAMES:** Your script MUST use the `dfs` list to access the data.
55
+ 4. **PRINT FINDINGS:** Use the `print()` function at each step to output your results as a formatted report.
56
 
57
  --- USER'S SCENARIO ---
58
  {user_scenario}
 
84
  # --- THE CORE ANALYSIS ENGINE ---
85
 
86
  def handle(user_msg: str, files: list) -> str:
87
+ """This is the powerful backend engine using the "Coder" pattern."""
88
  try:
89
  safe_in, blocked_in, reason_in = safety_filter(user_msg, mode="input")
90
  if blocked_in: return refusal_reply(reason_in)
 
92
  file_paths: List[str] = [getattr(f, "name", None) or f for f in (files or [])]
93
 
94
  if file_paths:
 
95
  dataframes = []
96
  schema_parts = []
97
  for i, p in enumerate(file_paths):
 
112
  try:
113
  with redirect_stdout(output_buffer):
114
  exec(analysis_script, execution_namespace)
115
+
116
  result = output_buffer.getvalue()
117
+ return _sanitize_text(result or "(The analysis script ran but produced no output.)")
118
  except Exception as e:
119
  return f"An error occurred executing the script: {e}\n\nGenerated Script:\n```python\n{analysis_script}\n```"
120
  else:
 
121
  prompt = f"{GENERAL_CONVERSATION_PROMPT}\n\nUser: {safe_in}\nAssistant:"
122
  return _sanitize_text(cohere_chat(prompt) or "How can I help further?")
123
 
 
130
  PRIVACY_POLICY_TEXT = load_markdown_text("privacy_policy.md")
131
  TERMS_OF_SERVICE_TEXT = load_markdown_text("terms_of_service.md")
132
 
133
+ # ---------------- THE PROFESSIONAL UI WITH INTEGRATED LEGAL DOCS ----------------
134
  with gr.Blocks(theme="soft", css="style.css") as demo:
135
  assessment_history = gr.State([])
136
 
 
148
  with gr.Row(variant="panel"):
149
  with gr.Column(scale=1):
150
  gr.Markdown("## New Assessment")
151
+ gr.Markdown("<p style='font-size:0.9rem; color: #6C757D;'>Upload CSV files for data analysis, or just enter a prompt to chat with the AI.</p>")
 
152
  files_input = gr.Files(label="Upload Data Files (.csv)", file_count="multiple", type="filepath", file_types=[".csv"])
153
  prompt_input = gr.Textbox(label="Prompt", placeholder="Paste your scenario or question here.", lines=15)
154
  with gr.Row():
155
+ send_btn = gr.Button("▶️ Send / Run Analysis", variant="primary", scale=2)
156
  clear_btn = gr.Button("🗑️ Clear")
157
  ping_btn = gr.Button("Ping Cohere")
158
  ping_out = gr.Markdown()
 
171
  terms_link = gr.Button("Terms of Service", variant="link")
172
 
173
  def run_analysis_wrapper(prompt, files, chat_history_list, history_state_list):
 
174
  if not prompt:
175
  gr.Warning("Please enter a prompt.")
176
  yield chat_history_list, history_state_list, gr.update()
177
  return
178
 
179
  chat_with_user_msg = _append_msg(chat_history_list, "user", prompt)
180
+ thinking_message = _append_msg(chat_with_user_msg, "assistant", "```\n🧠 Generating and executing analysis script... This may take a moment.\n```")
181
  yield thinking_message, history_state_list, gr.update()
182
 
183
  ai_response_text = handle(prompt, files)
 
185
  final_chat = _append_msg(chat_with_user_msg, "assistant", ai_response_text)
186
  timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
187
 
 
188
  if files:
189
  file_names = [os.path.basename(f.name if hasattr(f, 'name') else f) for f in files]
190
  new_assessment = {"id": timestamp, "prompt": prompt, "files": file_names, "response": ai_response_text}
 
192
  history_labels = [f"{item['id']} - {item['prompt'][:40]}..." for item in updated_history]
193
  yield final_chat, updated_history, gr.update(choices=history_labels)
194
  else:
 
195
  yield final_chat, history_state_list, gr.update()
196
 
 
197
  def view_history(selection, history_state_list):
198
  if not selection or not history_state_list: return ""
199
  selected_id = selection.split(" - ")