Rajan Sharma committed on
Commit
dfddecf
·
verified ·
1 Parent(s): e4e9017

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +32 -27
app.py CHANGED
@@ -10,10 +10,8 @@ import gradio as gr
10
  import pandas as pd
11
  from datetime import datetime
12
 
13
- # --- THE FINAL FIX IS HERE: Re-introducing the missing import ---
14
- import regex as re2
15
-
16
  # --- BACKEND IMPORTS ---
 
17
  from langchain_cohere import ChatCohere
18
 
19
  # --- LOCAL MODULE IMPORTS ---
@@ -37,7 +35,6 @@ def load_markdown_text(filepath: str) -> str:
37
 
38
  def _sanitize_text(s: str) -> str:
39
  if not isinstance(s, str): return s
40
- # This now works because 're2' is defined from the import above
41
  return re2.sub(r'[\p{C}--[\n\t]]+', '', s)
42
 
43
  def _create_python_script(user_scenario: str, schema_context: str) -> str:
@@ -51,11 +48,9 @@ You have access to a list of pandas dataframes loaded into a variable named `dfs
51
  --- END SCHEMA ---
52
 
53
  CRITICAL RULES FOR YOUR SCRIPT:
54
- 1. **HANDLE DATA TYPES:** Before performing any mathematical operations (like addition or division), you MUST explicitly convert string values (e.g., '5.5%', '$100') to numeric types (`float` or `int`). Failure to do this will cause a fatal `TypeError`.
55
  2. **CHECK COLUMN NAMES:** You MUST use the exact, case-sensitive column names provided in the DATA SCHEMA. A `KeyError` will cause a failure.
56
- 3. **USE THE DATAFRAMES:** Your script MUST use the `dfs` list to access the data.
57
- 4. **PRINT FINDINGS:** Use the `print()` function at each step to output your results as a formatted report.
58
- 5. **NO PLACEHOLDERS:** Do not use placeholder data.
59
 
60
  --- USER'S SCENARIO ---
61
  {user_scenario}
@@ -87,7 +82,7 @@ def ping_cohere() -> str:
87
  # --- THE CORE ANALYSIS ENGINE ---
88
 
89
  def handle(user_msg: str, files: list) -> str:
90
- """This is the powerful backend engine using the "Coder" pattern."""
91
  try:
92
  safe_in, blocked_in, reason_in = safety_filter(user_msg, mode="input")
93
  if blocked_in: return refusal_reply(reason_in)
@@ -95,16 +90,15 @@ def handle(user_msg: str, files: list) -> str:
95
  file_paths: List[str] = [getattr(f, "name", None) or f for f in (files or [])]
96
 
97
  if file_paths:
 
98
  dataframes = []
99
  schema_parts = []
100
  for i, p in enumerate(file_paths):
101
  if p.endswith('.csv'):
102
- try:
103
- df = pd.read_csv(p)
104
- except UnicodeDecodeError:
105
- df = pd.read_csv(p, encoding='latin1')
106
  dataframes.append(df)
107
- schema_parts.append(f"DataFrame `dfs[{i}]` (from file `{os.path.basename(p)}`):\n{df.head().to_markdown()}\n")
108
 
109
  if not dataframes: return "Please upload at least one CSV file."
110
 
@@ -117,12 +111,12 @@ def handle(user_msg: str, files: list) -> str:
117
  try:
118
  with redirect_stdout(output_buffer):
119
  exec(analysis_script, execution_namespace)
120
-
121
  result = output_buffer.getvalue()
122
- return _sanitize_text(result or "(The analysis script ran but produced no output.)")
123
  except Exception as e:
124
  return f"An error occurred executing the script: {e}\n\nGenerated Script:\n```python\n{analysis_script}\n```"
125
  else:
 
126
  prompt = f"{GENERAL_CONVERSATION_PROMPT}\n\nUser: {safe_in}\nAssistant:"
127
  return _sanitize_text(cohere_chat(prompt) or "How can I help further?")
128
 
@@ -135,7 +129,7 @@ def handle(user_msg: str, files: list) -> str:
135
  PRIVACY_POLICY_TEXT = load_markdown_text("privacy_policy.md")
136
  TERMS_OF_SERVICE_TEXT = load_markdown_text("terms_of_service.md")
137
 
138
- # ---------------- THE PROFESSIONAL UI WITH INTEGRATED LEGAL DOCS ----------------
139
  with gr.Blocks(theme="soft", css="style.css") as demo:
140
  assessment_history = gr.State([])
141
 
@@ -153,13 +147,16 @@ with gr.Blocks(theme="soft", css="style.css") as demo:
153
  with gr.Row(variant="panel"):
154
  with gr.Column(scale=1):
155
  gr.Markdown("## New Assessment")
 
 
156
  files_input = gr.Files(label="Upload Data Files (.csv)", file_count="multiple", type="filepath", file_types=[".csv"])
157
- prompt_input = gr.Textbox(label="Prompt", placeholder="Paste your scenario here.", lines=15)
158
  with gr.Row():
159
- send_btn = gr.Button("▶️ Run Analysis", variant="primary", scale=2)
160
  clear_btn = gr.Button("🗑️ Clear")
161
  ping_btn = gr.Button("Ping Cohere")
162
  ping_out = gr.Markdown()
 
163
  with gr.Column(scale=2):
164
  with gr.Tabs():
165
  with gr.TabItem("Current Assessment", id=0):
@@ -174,24 +171,32 @@ with gr.Blocks(theme="soft", css="style.css") as demo:
174
  terms_link = gr.Button("Terms of Service", variant="link")
175
 
176
  def run_analysis_wrapper(prompt, files, chat_history_list, history_state_list):
177
- if not prompt or not files:
178
- gr.Warning("Please provide both a prompt and at least one data file.")
 
179
  yield chat_history_list, history_state_list, gr.update()
180
  return
181
 
182
  chat_with_user_msg = _append_msg(chat_history_list, "user", prompt)
183
- thinking_message = _append_msg(chat_with_user_msg, "assistant", "```\n🧠 Generating and executing analysis script... This may take a moment.\n```")
184
  yield thinking_message, history_state_list, gr.update()
185
 
186
  ai_response_text = handle(prompt, files)
187
 
188
  final_chat = _append_msg(chat_with_user_msg, "assistant", ai_response_text)
189
  timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
190
- file_names = [os.path.basename(f.name if hasattr(f, 'name') else f) for f in files]
191
- new_assessment = {"id": timestamp, "prompt": prompt, "files": file_names, "response": ai_response_text}
192
- updated_history = history_state_list + [new_assessment]
193
- history_labels = [f"{item['id']} - {item['prompt'][:40]}..." for item in updated_history]
194
- yield final_chat, updated_history, gr.update(choices=history_labels)
 
 
 
 
 
 
 
195
 
196
  def view_history(selection, history_state_list):
197
  if not selection or not history_state_list: return ""
 
10
  import pandas as pd
11
  from datetime import datetime
12
 
 
 
 
13
  # --- BACKEND IMPORTS ---
14
+ import regex as re2
15
  from langchain_cohere import ChatCohere
16
 
17
  # --- LOCAL MODULE IMPORTS ---
 
35
 
36
def _sanitize_text(s: str) -> str:
    """Strip Unicode control/format characters from *s*, keeping newlines and tabs.

    Non-string inputs are passed through untouched so callers can pipe
    arbitrary values (e.g. None) without a guard of their own.
    """
    if not isinstance(s, str):
        return s
    # \p{C} is the Unicode "Other" category; the set difference keeps \n and \t.
    # NOTE(review): set subtraction (--) is a `regex`-module extension — verify
    # the module's version-1 behaviour is in effect for this pattern.
    cleaned = re2.sub(r'[\p{C}--[\n\t]]+', '', s)
    return cleaned
39
 
40
  def _create_python_script(user_scenario: str, schema_context: str) -> str:
 
48
  --- END SCHEMA ---
49
 
50
  CRITICAL RULES FOR YOUR SCRIPT:
51
+ 1. **HANDLE DATA TYPES:** Before performing any mathematical operations, you MUST explicitly convert string values (e.g., '5.5%') to numeric types (`float` or `int`).
52
  2. **CHECK COLUMN NAMES:** You MUST use the exact, case-sensitive column names provided in the DATA SCHEMA. A `KeyError` will cause a failure.
53
+ 3. **PRINT FINDINGS:** Use the `print()` function at each step to output your results as a formatted report.
 
 
54
 
55
  --- USER'S SCENARIO ---
56
  {user_scenario}
 
82
  # --- THE CORE ANALYSIS ENGINE ---
83
 
84
  def handle(user_msg: str, files: list) -> str:
85
+ """This is the powerful backend engine that supports both modes."""
86
  try:
87
  safe_in, blocked_in, reason_in = safety_filter(user_msg, mode="input")
88
  if blocked_in: return refusal_reply(reason_in)
 
90
  file_paths: List[str] = [getattr(f, "name", None) or f for f in (files or [])]
91
 
92
  if file_paths:
93
+ # --- MODE 1: DATA ANALYST (files are present) ---
94
  dataframes = []
95
  schema_parts = []
96
  for i, p in enumerate(file_paths):
97
  if p.endswith('.csv'):
98
+ try: df = pd.read_csv(p)
99
+ except UnicodeDecodeError: df = pd.read_csv(p, encoding='latin1')
 
 
100
  dataframes.append(df)
101
+ schema_parts.append(f"DataFrame `dfs[{i}]` (from `{os.path.basename(p)}`):\n{df.head().to_markdown()}\n")
102
 
103
  if not dataframes: return "Please upload at least one CSV file."
104
 
 
111
  try:
112
  with redirect_stdout(output_buffer):
113
  exec(analysis_script, execution_namespace)
 
114
  result = output_buffer.getvalue()
115
+ return _sanitize_text(result or "(The script ran but produced no output.)")
116
  except Exception as e:
117
  return f"An error occurred executing the script: {e}\n\nGenerated Script:\n```python\n{analysis_script}\n```"
118
  else:
119
+ # --- MODE 2: CONVERSATIONAL AI (no files are present) ---
120
  prompt = f"{GENERAL_CONVERSATION_PROMPT}\n\nUser: {safe_in}\nAssistant:"
121
  return _sanitize_text(cohere_chat(prompt) or "How can I help further?")
122
 
 
129
  PRIVACY_POLICY_TEXT = load_markdown_text("privacy_policy.md")
130
  TERMS_OF_SERVICE_TEXT = load_markdown_text("terms_of_service.md")
131
 
132
+ # ---------------- THE PROFESSIONAL UI WITH DUAL-MODE SUPPORT ----------------
133
  with gr.Blocks(theme="soft", css="style.css") as demo:
134
  assessment_history = gr.State([])
135
 
 
147
  with gr.Row(variant="panel"):
148
  with gr.Column(scale=1):
149
  gr.Markdown("## New Assessment")
150
+ gr.Markdown("<p style='font-size:0.9rem; color: #6C757D;'>Upload CSV files for data analysis, or just enter a prompt to chat with the AI.</p>") # UX Improvement
151
+
152
  files_input = gr.Files(label="Upload Data Files (.csv)", file_count="multiple", type="filepath", file_types=[".csv"])
153
+ prompt_input = gr.Textbox(label="Prompt", placeholder="Paste your scenario or question here.", lines=15)
154
  with gr.Row():
155
+ send_btn = gr.Button("▶️ Send / Run Analysis", variant="primary", scale=2) # UX Improvement
156
  clear_btn = gr.Button("🗑️ Clear")
157
  ping_btn = gr.Button("Ping Cohere")
158
  ping_out = gr.Markdown()
159
+
160
  with gr.Column(scale=2):
161
  with gr.Tabs():
162
  with gr.TabItem("Current Assessment", id=0):
 
171
  terms_link = gr.Button("Terms of Service", variant="link")
172
 
173
def run_analysis_wrapper(prompt, files, chat_history_list, history_state_list):
    """Drive one chat turn as a Gradio generator.

    Yields (chat messages, assessment history, history-dropdown update)
    three ways: unchanged on an empty prompt, once with a progress
    placeholder while the backend runs, and once with the final reply.
    Only file-backed runs are archived into the assessment history.
    """
    # Guard: a prompt is always required (files alone are not enough).
    if not prompt:
        gr.Warning("Please enter a prompt.")
        yield chat_history_list, history_state_list, gr.update()
        return

    chat_with_user_msg = _append_msg(chat_history_list, "user", prompt)
    placeholder = _append_msg(chat_with_user_msg, "assistant", "```\n🧠 Thinking... Please wait.\n```")
    # Show the user's message plus a progress note before the slow call.
    yield placeholder, history_state_list, gr.update()

    reply = handle(prompt, files)
    final_chat = _append_msg(chat_with_user_msg, "assistant", reply)
    stamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")

    if not files:
        # Plain conversation: nothing to archive, just refresh the chat window.
        yield final_chat, history_state_list, gr.update()
        return

    # Data-analysis session: record it and refresh the history selector.
    names = [os.path.basename(f.name if hasattr(f, 'name') else f) for f in files]
    record = {"id": stamp, "prompt": prompt, "files": names, "response": reply}
    archive = history_state_list + [record]
    labels = [f"{entry['id']} - {entry['prompt'][:40]}..." for entry in archive]
    yield final_chat, archive, gr.update(choices=labels)
200
 
201
  def view_history(selection, history_state_list):
202
  if not selection or not history_state_list: return ""