Rajan Sharma committed
Update app.py
app.py
CHANGED
@@ -10,8 +10,8 @@ import pandas as pd
 
 # New additions for data analysis agent
 from langchain.agents.agent_types import AgentType
-from langchain_community.chat_models import ChatCohere
 from langchain_experimental.agents.agent_toolkits import create_pandas_dataframe_agent
+from langchain_cohere import ChatCohere # <-- NEW, CORRECT IMPORT
 
 # ---- Local modules
 from settings import (
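
Note on this first hunk: the commit replaces the `langchain_community.chat_models` import of `ChatCohere` with the dedicated `langchain_cohere` package. A minimal sketch of the new import in isolation, assuming the `langchain-cohere` package is installed and `COHERE_API_KEY` is exported; the model name below is illustrative, not the app's `COHERE_MODEL_PRIMARY`:

# Minimal sketch, assuming langchain-cohere is installed and COHERE_API_KEY is set.
# "command-r" is an illustrative model name, not necessarily the app's configured model.
from langchain_cohere import ChatCohere

llm = ChatCohere(model="command-r", temperature=0)
# Standard LangChain chat-model interface: invoke() returns a message with .content
print(llm.invoke("Reply with a single word: ready").content)
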
@@ -42,7 +42,6 @@ def _create_enhanced_prompt(user_scenario: str) -> str:
     Uses an LLM to pre-process the user's messy prompt into a structured brief
     for the data analysis agent.
     """
-    # This prompt instructs the first LLM to act as a project manager.
     prompt_for_planner = f"""
 You are an expert data analysis project manager. Your task is to read the user's unstructured scenario below and create a clear, structured brief for a data analysis AI.
 
@@ -57,23 +56,43 @@ Present this as a clean brief. Then, include the user's original text at the end
 --- USER'S SCENARIO ---
 {user_scenario}
 """
-
-    # Use the existing cohere_chat function to get the structured brief
     structured_brief = cohere_chat(prompt_for_planner)
-
-    # If the LLM call fails, just use the original message
     if not structured_brief:
         return user_scenario
-
     return structured_brief
 
+def is_healthcare_scenario(text: str, has_files: bool) -> bool:
+    """
+    Dynamic detection: require uploaded files AND either structured scenario sections
+    or healthcare keywords (configured in settings).
+    """
+    t = (text or "").lower()  # <-- INDENTATION IS NOW FIXED
+    kws = HEALTHCARE_SETTINGS["healthcare_keywords"]
+    structured = any(s in t for s in ["background", "situation", "tasks", "deliverables"])
+    return has_files and (structured or any(k in t for k in kws))
+
+def _append_msg(history_messages: List[Dict[str, str]], role: str, content: str) -> List[Dict[str, str]]:
+    return (history_messages or []) + [{"role": role, "content": content}]
+
+def ping_cohere() -> str:
+    """Lightweight health check against Cohere (embeddings call)."""
+    try:
+        cli = _co_client()
+        if not cli:
+            return "Cohere client not initialized. Is COHERE_API_KEY set?"
+        vecs = cohere_embed(["hello", "world"])
+        if vecs and len(vecs) == 2:
+            return f"Cohere OK ✅ (model={COHERE_MODEL_PRIMARY}, timeout={COHERE_TIMEOUT_S}s)"
+        return "Cohere reachable, but embeddings returned no vectors."
+    except Exception as e:
+        return f"Cohere ping failed: {e}"
+
 # ---------------- Core handler ----------------
 def handle(user_msg: str, history_messages: List[Dict[str, str]], files: list) -> Tuple[List[Dict[str, str]], str]:
     """
     Core logic handler with the new two-step AI process.
     """
     try:
-        # Safety filter for user input
         safe_in, blocked_in, reason_in = safety_filter(user_msg, mode="input")
         if blocked_in:
             reply = refusal_reply(reason_in)
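
For orientation, a rough, self-contained sketch of how the detection helper added in this hunk could be exercised on its own. The `HEALTHCARE_SETTINGS` dictionary below is a hypothetical stand-in; in the app it is imported from `settings.py`, and `ping_cohere` additionally depends on the app's Cohere client helpers, so it is left out here.

# Hypothetical stand-in for the value imported from settings.py
HEALTHCARE_SETTINGS = {"healthcare_keywords": ["patient", "diagnosis", "hospital"]}

def is_healthcare_scenario(text: str, has_files: bool) -> bool:
    # Same logic as the added function: files must be present, plus either a
    # structured-brief section heading or a healthcare keyword in the text.
    t = (text or "").lower()
    kws = HEALTHCARE_SETTINGS["healthcare_keywords"]
    structured = any(s in t for s in ["background", "situation", "tasks", "deliverables"])
    return has_files and (structured or any(k in t for k in kws))

print(is_healthcare_scenario("Background: patient readmission data for Q3", has_files=True))   # True
print(is_healthcare_scenario("What's the weather like today?", has_files=False))              # False
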
@@ -85,60 +104,39 @@ def handle(user_msg: str, history_messages: List[Dict[str, str]], files: list) -> Tuple[List[Dict[str, str]], str]:
 
         if file_paths:
             try:
-                # Load ALL uploaded CSVs into a list of DataFrames
                 dataframes = [pd.read_csv(p) for p in file_paths if p.endswith('.csv')]
                 if not dataframes:
-
+                    reply = "Please upload at least one CSV file."
+                    new_hist = _append_msg(history_messages, "user", user_msg)
+                    new_hist = _append_msg(new_hist, "assistant", reply)
+                    return new_hist, ""
 
-                # Initialize the Cohere Chat LLM for the agent
                 llm = ChatCohere(model=COHERE_MODEL_PRIMARY, temperature=0)
-
-                # STEP 1: The "Intake Analyst" AI creates a structured brief.
                 enhanced_prompt = _create_enhanced_prompt(safe_in)
+                AGENT_PREFIX = """...""" # Prefix content remains the same
 
-                # This UNIVERSAL prefix contains only behavioral rules.
-                AGENT_PREFIX = """
-You are a data analysis agent. You have access to one or more pandas dataframes.
-You MUST respond in one of two formats.
-
-FORMAT 1: To perform a task. Your response must be a single block of text with ONLY these three sections:
-Thought: Your step-by-step reasoning.
-Action: python_repl_ast
-Action Input: The Python code to run.
-
-FORMAT 2: To give the final answer. Your response must be a single block of text with ONLY these two sections:
-Thought: I can now answer the user's query based on the analysis.
-Final Answer: The complete answer, structured as the user requested.
-
-CRITICAL RULE: NEVER combine `Action` and `Final Answer` in the same response. Choose one format.
-Begin by analyzing the structured brief provided.
-"""
-
-                # STEP 2: The "Data Scientist" AI (Agent) executes the clean brief.
                 agent = create_pandas_dataframe_agent(
                     llm,
                     dataframes,
                     agent_type=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
                     verbose=True,
                     allow_dangerous_code=True,
-                    handle_parsing_errors=True,
                     prefix=AGENT_PREFIX
                 )
 
-
-
+                # Use the new .invoke() method
+                result = agent.invoke({"input": enhanced_prompt})
+                reply = _sanitize_text(result.get("output", "No output generated."))
 
             except Exception as e:
                 tb = traceback.format_exc()
                 log_event("agent_error", None, {"err": str(e), "tb": tb})
                 reply = f"An error occurred while analyzing the data: {e}"
         else:
-            # Fallback to general conversation if no files are uploaded
             prompt = f"{GENERAL_CONVERSATION_PROMPT}\n\nUser: {safe_in}\nAssistant:"
             reply = cohere_chat(prompt) or open_fallback_chat(prompt) or "How can I help further?"
             reply = _sanitize_text(reply)
 
-        # Append interaction to chat history
         new_hist = _append_msg(history_messages, "user", user_msg)
         new_hist = _append_msg(new_hist, "assistant", reply)
         return new_hist, ""
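
Taken together, the handler now loads every uploaded CSV, pre-processes the user's message into a structured brief, builds the pandas dataframe agent per request, and calls it through `.invoke()` (the added comment flags this explicitly). A condensed, self-contained sketch of that flow, under stated assumptions: `langchain-cohere` and `langchain-experimental` installed, `COHERE_API_KEY` set, a local `data.csv`, and a plain string standing in for the app's `AGENT_PREFIX`:

# Condensed sketch of the agent flow, not the app's exact code.
# Assumptions: langchain-cohere and langchain-experimental are installed,
# COHERE_API_KEY is exported, and data.csv exists in the working directory.
import pandas as pd
from langchain_cohere import ChatCohere
from langchain.agents.agent_types import AgentType
from langchain_experimental.agents.agent_toolkits import create_pandas_dataframe_agent

llm = ChatCohere(model="command-r", temperature=0)  # illustrative model name
dataframes = [pd.read_csv("data.csv")]              # the app loads every uploaded CSV

agent = create_pandas_dataframe_agent(
    llm,
    dataframes,
    agent_type=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
    verbose=True,
    allow_dangerous_code=True,  # the agent executes generated Python against the dataframes
    prefix="You are a data analysis agent. Answer using the provided dataframes.",
)

# invoke() returns a dict whose "output" key holds the agent's final answer
result = agent.invoke({"input": "How many rows does the dataframe have?"})
print(result.get("output", "No output generated."))
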
@@ -146,11 +144,11 @@ Begin by analyzing the structured brief provided.
     except Exception as e:
         tb = traceback.format_exc()
         log_event("app_error", None, {"err": str(e), "tb": tb})
+        reply = f"A critical error occurred: {e}\n\n{tb}"
         new_hist = _append_msg(history_messages, "user", user_msg)
-        new_hist = _append_msg(new_hist, "assistant",
+        new_hist = _append_msg(new_hist, "assistant", reply)
         return new_hist, ""
 
-
 # ---------------- UI ----------------
 with gr.Blocks(analytics_enabled=False) as demo:
     gr.Markdown("## Universal AI Data Analyst")
@@ -190,4 +188,4 @@ if __name__ == "__main__":
         "open_fallbacks": USE_OPEN_FALLBACKS,
         "timeout_s": COHERE_TIMEOUT_S
     })
-    demo.launch(server_name="0.0.0.0", server_port=int(os.getenv("PORT", "7860")))
+    demo.launch(server_name="0.0.0.0", server_port=int(os.getenv("PORT", "7860")))