"""
Tools Registry for Groq Function Calling
Defines all available tools in Groq's function calling format.
"""
TOOLS = [
# Data Profiling Tools
{
"type": "function",
"function": {
"name": "profile_dataset",
"description": "Get comprehensive statistics about a dataset including shape, data types, memory usage, null counts, and unique values. Use this as the first step to understand any new dataset.",
"parameters": {
"type": "object",
"properties": {
"file_path": {
"type": "string",
"description": "Absolute or relative path to the CSV or Parquet file"
}
},
"required": ["file_path"]
}
}
},
{
"type": "function",
"function": {
"name": "detect_data_quality_issues",
"description": "Detect data quality issues including outliers (using IQR method), duplicate rows, inconsistent formats, and data anomalies. Returns a prioritized list of issues with severity levels.",
"parameters": {
"type": "object",
"properties": {
"file_path": {
"type": "string",
"description": "Path to the dataset file"
}
},
"required": ["file_path"]
}
}
},
{
"type": "function",
"function": {
"name": "analyze_correlations",
"description": "Compute correlation matrix and identify top correlations. If a target column is specified, shows features most correlated with the target. Useful for feature selection and understanding relationships.",
"parameters": {
"type": "object",
"properties": {
"file_path": {
"type": "string",
"description": "Path to the dataset file"
},
"target": {
"type": "string",
"description": "Optional target column name to analyze correlations with"
}
},
"required": ["file_path"]
}
}
},
# Data Cleaning Tools
{
"type": "function",
"function": {
"name": "clean_missing_values",
"description": "Handle missing values using appropriate strategies based on column type. Strategies include median/mean for numeric, mode for categorical, forward_fill for time series, or drop. Will not impute ID columns.",
"parameters": {
"type": "object",
"properties": {
"file_path": {
"type": "string",
"description": "Path to the dataset file"
},
"strategy": {
"type": "object",
"description": "Dictionary mapping column names to strategies ('median', 'mean', 'mode', 'forward_fill', 'drop'). Use 'auto' to let the tool decide based on data type.",
"additionalProperties": {
"type": "string"
}
},
"output_path": {
"type": "string",
"description": "Path to save cleaned dataset"
}
},
"required": ["file_path", "strategy", "output_path"]
}
}
},
{
"type": "function",
"function": {
"name": "handle_outliers",
"description": "Detect and handle outliers in numeric columns using IQR method. Methods: 'clip' (cap at boundaries), 'winsorize' (cap at percentiles), or 'remove' (delete rows).",
"parameters": {
"type": "object",
"properties": {
"file_path": {
"type": "string",
"description": "Path to the dataset file"
},
"method": {
"type": "string",
"enum": ["clip", "winsorize", "remove"],
"description": "Method to handle outliers"
},
"columns": {
"type": "array",
"items": {"type": "string"},
"description": "List of column names to check for outliers. Use 'all' to check all numeric columns."
},
"output_path": {
"type": "string",
"description": "Path to save cleaned dataset"
}
},
"required": ["file_path", "method", "columns", "output_path"]
}
}
},
{
"type": "function",
"function": {
"name": "fix_data_types",
"description": "Auto-detect and fix incorrect data types. Handles dates, booleans, categoricals, and numeric columns. Fixes common issues like 'null' strings and mixed types.",
"parameters": {
"type": "object",
"properties": {
"file_path": {
"type": "string",
"description": "Path to the dataset file"
},
"type_mapping": {
"type": "object",
"description": "Optional dictionary mapping column names to target types ('int', 'float', 'string', 'date', 'bool', 'category'). Use 'auto' for automatic detection.",
"additionalProperties": {
"type": "string"
}
},
"output_path": {
"type": "string",
"description": "Path to save dataset with fixed types"
}
},
"required": ["file_path", "output_path"]
}
}
},
# Feature Engineering Tools
{
"type": "function",
"function": {
"name": "create_time_features",
"description": "Extract comprehensive time-based features from datetime columns including year, month, day, day_of_week, quarter, is_weekend, and cyclical encodings (sin/cos for month and hour).",
"parameters": {
"type": "object",
"properties": {
"file_path": {
"type": "string",
"description": "Path to the dataset file"
},
"date_col": {
"type": "string",
"description": "Name of the datetime column to extract features from"
},
"output_path": {
"type": "string",
"description": "Path to save dataset with new features"
}
},
"required": ["file_path", "date_col", "output_path"]
}
}
},
{
"type": "function",
"function": {
"name": "encode_categorical",
"description": "Encode categorical variables using one-hot encoding, target encoding, or frequency encoding. Handles high-cardinality columns intelligently.",
"parameters": {
"type": "object",
"properties": {
"file_path": {
"type": "string",
"description": "Path to the dataset file"
},
"method": {
"type": "string",
"enum": ["one_hot", "target", "frequency"],
"description": "Encoding method to use"
},
"columns": {
"type": "array",
"items": {"type": "string"},
"description": "List of categorical columns to encode. Use 'all' to encode all categorical columns."
},
"target_col": {
"type": "string",
"description": "Required for target encoding: name of the target column"
},
"output_path": {
"type": "string",
"description": "Path to save dataset with encoded features"
}
},
"required": ["file_path", "method", "columns", "output_path"]
}
}
},
# Model Training Tools
{
"type": "function",
"function": {
"name": "train_baseline_models",
"description": "Train multiple baseline models (Logistic Regression, Random Forest, XGBoost) and compare their performance. Automatically detects task type (classification/regression) and returns the best model with metrics.",
"parameters": {
"type": "object",
"properties": {
"file_path": {
"type": "string",
"description": "Path to the prepared dataset file"
},
"target_col": {
"type": "string",
"description": "Name of the target column to predict"
},
"task_type": {
"type": "string",
"enum": ["classification", "regression", "auto"],
"description": "Type of ML task. Use 'auto' to detect automatically."
},
"test_size": {
"type": "number",
"description": "Proportion of data to use for testing (default: 0.2)"
},
"random_state": {
"type": "integer",
"description": "Random seed for reproducibility (default: 42)"
}
},
"required": ["file_path", "target_col"]
}
}
},
{
"type": "function",
"function": {
"name": "generate_model_report",
"description": "Generate comprehensive model evaluation report including metrics, confusion matrix (for classification), feature importance, and SHAP values for top features. Saves report as JSON.",
"parameters": {
"type": "object",
"properties": {
"model_path": {
"type": "string",
"description": "Path to saved model file (.pkl or .joblib)"
},
"test_data_path": {
"type": "string",
"description": "Path to test dataset file"
},
"target_col": {
"type": "string",
"description": "Name of the target column"
},
"output_path": {
"type": "string",
"description": "Path to save the report JSON file"
}
},
"required": ["model_path", "test_data_path", "target_col", "output_path"]
}
}
}
]
def get_tool_by_name(tool_name: str) -> dict:
    """Look up a single tool definition in the registry by its name.

    Args:
        tool_name: Name of the tool to retrieve.

    Returns:
        The matching tool definition dictionary.

    Raises:
        ValueError: If no tool with that name is registered.
    """
    # Lazily scan the registry; stop at the first name match.
    match = next(
        (entry for entry in TOOLS if entry["function"]["name"] == tool_name),
        None,
    )
    if match is None:
        raise ValueError(f"Tool '{tool_name}' not found in registry")
    return match
def get_all_tool_names() -> list:
    """Collect the names of every registered tool.

    Returns:
        List of tool name strings, in registry order.
    """
    names = []
    for entry in TOOLS:
        names.append(entry["function"]["name"])
    return names