File size: 9,055 Bytes
b8bcf55
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
"""
Dynamic prompt generation for small context window models.
Loads only relevant tools based on user intent to reduce token usage.
"""

import re
from typing import Dict, List, Optional, Set

# Intent categories and their keywords.
# detect_intent() lowercases the user query and scans it for these keywords;
# a hit on any keyword in a category's list tags the query with that category.
INTENT_KEYWORDS: Dict[str, List[str]] = {
    "data_quality": ["clean", "missing", "outlier", "quality", "duplicates", "null", "na", "impute"],
    "visualization": ["plot", "chart", "graph", "visualize", "dashboard", "scatter", "histogram", "heatmap"],
    "feature_engineering": ["feature", "encode", "transform", "scale", "normalize", "binning", "interaction"],
    "model_training": ["train", "model", "predict", "classify", "regression", "forecast", "xgboost", "accuracy"],
    "eda": ["profile", "describe", "summary", "statistics", "distribution", "correlation", "eda"],
    "time_series": ["time", "date", "datetime", "temporal", "trend", "seasonality", "forecast"],
    "optimization": ["tune", "optimize", "hyperparameter", "improve", "best parameters"],
    "code_execution": ["execute", "run code", "calculate", "custom", "python"],
}

# Tool categories mapping: intent category -> tool names exposed for it.
# A tool may appear under several categories (e.g. "create_time_features" is
# listed under both "feature_engineering" and "time_series"); deduplication
# happens in get_relevant_tools(), which collects them into a set.
TOOL_CATEGORIES: Dict[str, List[str]] = {
    "data_quality": [
        "detect_data_quality_issues",
        "clean_missing_values",
        "handle_outliers",
        "detect_and_remove_duplicates",
        "force_numeric_conversion",
    ],
    "visualization": [
        "generate_interactive_scatter",
        "generate_interactive_histogram",
        "generate_interactive_correlation_heatmap",
        "generate_interactive_box_plots",
        "generate_interactive_time_series",
        "generate_plotly_dashboard",
        "generate_all_plots",
        "generate_data_quality_plots",
        "generate_eda_plots",
    ],
    "feature_engineering": [
        "encode_categorical",
        "perform_feature_scaling",
        "create_time_features",
        "create_ratio_features",
        "create_statistical_features",
        "create_log_features",
        "create_binned_features",
        "auto_feature_engineering",
    ],
    "model_training": [
        "train_baseline_models",
        "hyperparameter_tuning",
        "train_ensemble_models",
        "perform_cross_validation",
        "handle_imbalanced_data",
        "auto_ml_pipeline",
    ],
    "eda": [
        "profile_dataset",
        "generate_ydata_profiling_report",
        "analyze_distribution",
        "detect_trends_and_seasonality",
        "perform_hypothesis_testing",
    ],
    "time_series": [
        "create_time_features",
        "forecast_time_series",
        "detect_trends_and_seasonality",
        "generate_interactive_time_series",
    ],
    "optimization": [
        "hyperparameter_tuning",
        "auto_feature_selection",
        "detect_and_handle_multicollinearity",
    ],
    "code_execution": [
        "execute_python_code",
        "execute_code_from_file",
    ],
}

# Core tools always included (used in all workflows); get_relevant_tools()
# seeds its result with these before adding intent-specific tools.
CORE_TOOLS: List[str] = [
    "profile_dataset",
    "detect_data_quality_issues",
    "clean_missing_values",
    "encode_categorical",
]


def detect_intent(query: str) -> Set[str]:
    """
    Detect user intent from query using keyword matching.

    Keywords are matched on word boundaries so that short keywords do not
    fire on substrings of unrelated words (e.g. "na" inside "analysis" or
    "eda" inside "predator"). Multi-word keywords such as "run code" still
    match as phrases.

    Args:
        query: User's natural language query

    Returns:
        Set of intent categories detected; defaults to {"eda"} when no
        keyword matches.
    """
    query_lower = query.lower()
    detected_intents = set()

    for intent, keywords in INTENT_KEYWORDS.items():
        # any() short-circuits on the first matching keyword per category.
        if any(
            re.search(r"\b" + re.escape(keyword) + r"\b", query_lower)
            for keyword in keywords
        ):
            detected_intents.add(intent)

    # Default to EDA if no specific intent detected
    if not detected_intents:
        detected_intents.add("eda")

    return detected_intents


def get_relevant_tools(intents: Set[str]) -> List[str]:
    """
    Get list of relevant tools based on detected intents.

    Args:
        intents: Set of detected intent categories

    Returns:
        Sorted list of tool names to include in the prompt. Core tools are
        always present; intent names without a category entry are ignored.
    """
    tools = set(CORE_TOOLS)  # Always include core tools

    for intent in intents:
        # Unknown intents contribute nothing rather than raising KeyError.
        tools.update(TOOL_CATEGORIES.get(intent, ()))

    # sorted() accepts any iterable; no intermediate list() needed.
    return sorted(tools)


def build_compact_system_prompt(user_query: Optional[str] = None, detected_intents: Optional[Set[str]] = None) -> str:
    """
    Build a compact system prompt with only relevant tools.

    Args:
        user_query: Optional user query used to detect intent when
            detected_intents is not supplied
        detected_intents: Optional pre-detected intents; takes precedence
            over user_query

    Returns:
        Compact system prompt string
    """
    # Detect intents if not provided
    if detected_intents is None and user_query:
        detected_intents = detect_intent(user_query)
    elif detected_intents is None:
        detected_intents = {"eda"}  # Default

    # Get relevant tools
    relevant_tools = get_relevant_tools(detected_intents)

    # Build tool list string (one "- name" bullet per tool)
    tool_list = "\n".join(f"- {tool}" for tool in relevant_tools)

    prompt = f"""You are an autonomous Data Science Agent. You EXECUTE tasks, not advise.

**TOOL CALLING FORMAT:**
When you need to use a tool, respond with JSON:
```json
{{
  "tool": "tool_name",
  "arguments": {{"param1": "value1"}}
}}
```

**RELEVANT TOOLS FOR THIS TASK:**
{tool_list}

**WORKFLOW RULES:**
1. **Execute tools sequentially** - ONE tool per response
2. **Use tool outputs** as inputs to next tool
3. **Save outputs** to ./outputs/data/ or ./outputs/plots/
4. **Error recovery**: If tool fails, retry with corrected parameters OR skip to next step
5. **Never repeat** successful tools
6. **Stop when done** - Don't continue after fulfilling user request

**COMMON WORKFLOWS:**

**Visualization Only:**
- User wants plots/charts/dashboard
- generate_plotly_dashboard OR generate_interactive_scatter β†’ STOP

**Data Profiling:**
- User wants "detailed report"
- generate_ydata_profiling_report β†’ STOP

**Full ML Pipeline:**
- User wants model training
- profile_dataset β†’ detect_data_quality_issues β†’ clean_missing_values β†’ 
  encode_categorical β†’ train_baseline_models β†’ generate_plotly_dashboard

**PARAMETER CORRECTIONS:**
- Use exact column names from error messages
- If "Did you mean X?" β†’ retry with X
- output_path (not output or output_dir)
- file_path for data files

**ERROR RECOVERY:**
- Column not found? Use suggested column from error
- File not found? Use last successful file
- Missing param? Add the required parameter
- Tool failed? Skip to next step (don't get stuck)

Execute the user's task efficiently with relevant tools."""

    return prompt


def get_full_system_prompt() -> str:
    """
    Return the original full system prompt for large-context-window models
    (the complete version used with Gemini 2.5 Flash).

    The orchestrator instance is created via __new__, which skips
    DataScienceCopilot.__init__ and any setup it performs — only the
    prompt-builder method is needed here.
    """
    # Imported lazily to avoid a module-level dependency cycle.
    from src.orchestrator import DataScienceCopilot

    stub = DataScienceCopilot.__new__(DataScienceCopilot)
    return stub._build_system_prompt()


# Quick stats
def get_prompt_stats(prompt: str) -> Dict[str, int]:
    """Return character count, rough token estimate, and line count for a prompt."""
    char_count = len(prompt)
    # Number of lines == number of newlines + 1 (matches str.split("\n")).
    line_count = prompt.count("\n") + 1
    return {
        "characters": char_count,
        # Rough estimate: 1 token β‰ˆ 4 characters
        "estimated_tokens": char_count // 4,
        "lines": line_count,
    }


if __name__ == "__main__":
    # Demo: run three representative queries through the compact-prompt
    # pipeline and print size statistics for each.
    banner = "=" * 80
    print(banner)
    print("DYNAMIC PROMPT SYSTEM DEMO")
    print(banner)

    demo_cases = [
        ("πŸ“Š", "Generate interactive plots for magnitude and latitude"),
        ("πŸ€–", "Train a model to predict earthquake magnitude"),
        ("πŸ“ˆ", "Generate a detailed profiling report"),
    ]

    for number, (icon, query) in enumerate(demo_cases, start=1):
        intents = detect_intent(query)
        prompt = build_compact_system_prompt(user_query=query)
        stats = get_prompt_stats(prompt)

        print(f"\n{icon} Example {number}: '{query}'")
        print(f"Detected intents: {intents}")
        print(f"Tools loaded: {len(get_relevant_tools(intents))}")
        print(f"Prompt stats: {stats['estimated_tokens']} tokens, {stats['lines']} lines")

    print("\n" + banner)
    print("SUMMARY: Compact prompts reduce tokens by 80-90% for small context models!")
    print(banner)