File size: 14,263 Bytes
e33dd31
9efb4fd
e7e9105
 
9efb4fd
1365f1b
 
9efb4fd
1365f1b
481c6c3
e7e9105
9efb4fd
e7e9105
481c6c3
e7e9105
 
 
9efb4fd
 
 
e7e9105
 
 
 
 
 
 
 
481c6c3
e7e9105
 
9efb4fd
e7e9105
481c6c3
e7e9105
 
 
 
 
 
 
9efb4fd
 
 
e7e9105
 
 
 
481c6c3
e7e9105
 
 
 
e33dd31
9efb4fd
7c67699
8c1d338
 
 
e33dd31
481c6c3
1365f1b
9efb4fd
44840a4
1365f1b
44840a4
 
481c6c3
44840a4
 
 
 
 
9efb4fd
44840a4
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1365f1b
481c6c3
1365f1b
 
481c6c3
 
 
9efb4fd
481c6c3
1365f1b
481c6c3
1365f1b
 
481c6c3
 
9efb4fd
481c6c3
1365f1b
481c6c3
 
 
 
 
 
9efb4fd
481c6c3
 
 
 
9efb4fd
481c6c3
1365f1b
9efb4fd
481c6c3
1365f1b
9efb4fd
8c1d338
9efb4fd
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1365f1b
9efb4fd
 
481c6c3
 
 
 
 
1365f1b
9efb4fd
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1365f1b
9efb4fd
 
481c6c3
 
ee6fc91
9efb4fd
 
 
 
 
 
 
 
481c6c3
 
e33dd31
9efb4fd
8c1d338
9efb4fd
 
 
 
 
 
 
 
 
 
 
 
 
 
481c6c3
 
 
 
 
 
7c67699
1365f1b
e33dd31
 
 
481c6c3
e33dd31
481c6c3
9efb4fd
8c1d338
 
 
481c6c3
1365f1b
8c1d338
e33dd31
9efb4fd
 
 
 
 
 
 
 
 
 
60101ec
9efb4fd
 
 
 
 
 
 
 
 
 
 
 
 
 
60101ec
 
 
9efb4fd
 
1365f1b
9efb4fd
 
 
481c6c3
60101ec
481c6c3
60101ec
1c8a171
60101ec
 
 
1c8a171
118e07a
60101ec
7bbf227
 
 
 
60101ec
 
 
481c6c3
 
9efb4fd
1c8a171
118e07a
60101ec
 
 
 
7bbf227
60101ec
7bbf227
60101ec
 
481c6c3
9efb4fd
60101ec
118e07a
60101ec
 
 
 
 
118e07a
60101ec
 
 
 
 
 
 
481c6c3
60101ec
1365f1b
9efb4fd
481c6c3
8c1d338
b364cf2
 
 
 
 
 
 
 
 
 
 
 
9efb4fd
481c6c3
44840a4
4c71974
8c1d338
9d5b628
8c1d338
 
0610802
6149566
0610802
 
 
6149566
0610802
0e1e6f4
 
 
 
 
 
8c1d338
 
9d5b628
8c1d338
9efb4fd
 
 
 
 
8c1d338
 
0610802
6149566
0610802
 
 
6149566
0610802
0e1e6f4
 
 
 
 
 
8c1d338
 
 
 
 
0610802
6149566
0610802
 
 
6149566
0610802
0e1e6f4
 
 
 
 
 
8c1d338
 
9d5b628
8c1d338
9efb4fd
 
 
 
 
8c1d338
 
0610802
6149566
0610802
 
 
6149566
0610802
0e1e6f4
 
5b76cf5
0e1e6f4
 
 
9efb4fd
44840a4
 
e33dd31
 
e7e9105
 
 
481c6c3
e7e9105
481c6c3
e7e9105
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
import json
import os
import subprocess
import sys

import gradio as gr
import modal
import requests

# --- Backend Management ---


def check_modal_backend_running():
    """Return True when a ready 'mlops-backend' Modal app is listed.

    Shells out to ``modal app list --json`` and scans the parsed output
    for an app named 'mlops-backend' in the 'ready' state. Any failure
    (missing CLI, timeout, non-zero exit, bad JSON) is printed and
    treated as "not running".
    """
    try:
        proc = subprocess.run(
            ["modal", "app", "list", "--json"],
            capture_output=True,
            text=True,
            timeout=30,
        )
        if proc.returncode != 0:
            return False
        apps = json.loads(proc.stdout)
        return any(
            entry.get("name") == "mlops-backend" and entry.get("state") == "ready"
            for entry in apps
        )
    except Exception as e:
        print(f"Error checking Modal backend: {e}")
        return False


def deploy_modal_backend():
    """Ensure the 'mlops-backend' Modal app is deployed.

    Skips deployment when the backend already reports ready; otherwise
    runs ``modal deploy modal_backend.py`` with a 5-minute timeout.

    Returns:
        True if the backend is (or becomes) available, False otherwise.
    """
    if check_modal_backend_running():
        print("βœ… Modal backend is already running")
        return True

    print("πŸš€ Deploying Modal backend...")
    try:
        proc = subprocess.run(
            ["modal", "deploy", "modal_backend.py"],
            capture_output=True,
            text=True,
            timeout=300,
        )
        if proc.returncode != 0:
            print(f"❌ Failed to deploy Modal backend: {proc.stderr}")
            return False
        print("βœ… Modal backend deployed successfully")
        return True
    except Exception as e:
        print(f"❌ Error deploying Modal backend: {e}")
        return False


# Initialize Modal function references
# Lazy handles resolved by (app name, function name); the "mlops-backend"
# app must be deployed before any .remote() call on these will succeed.
f_analyze = modal.Function.from_name("mlops-backend", "analyze_data")
f_train = modal.Function.from_name("mlops-backend", "train_model")
f_check = modal.Function.from_name("mlops-backend", "check_model")

# --- Core Logic ---


def get_file_content(file_input) -> str:
    """
    Retrieves content from a file path, URL, or Gradio file object.
    Handles all input types seamlessly for both web and MCP interfaces.

    Args:
        file_input: Can be:
                   - String path/URL to file
                   - Gradio file object with .name attribute
                   - File object with .path attribute

    Returns:
        File content as a string, or a string starting with "Error"
        describing what went wrong (callers check this prefix).
    """
    # Hoisted from mid-function: stdlib import used to decode "/file=" URLs.
    import urllib.parse

    if not file_input:
        return "Error: No file provided."

    # Resolve a concrete path/URL from the various accepted input shapes.
    if hasattr(file_input, "name"):
        file_path = file_input.name
        print(f"DEBUG: Processing Gradio file: {file_path}")
    elif hasattr(file_input, "path"):
        file_path = file_input.path
        print(f"DEBUG: Processing file with path: {file_path}")
    else:
        file_path = str(file_input)
        print(f"DEBUG: Processing file path/URL: {file_path}")

    # Handle URLs
    if file_path.startswith(("http://", "https://")):
        # Gradio serves uploads at .../file=<local path>; prefer reading that
        # local file directly so the server doesn't download from itself.
        if "/file=" in file_path:
            local_path = urllib.parse.unquote(file_path.split("/file=")[1])
            print(f"DEBUG: Extracted local_path: {local_path}")
            if os.path.exists(local_path):
                print("DEBUG: Local file exists. Reading directly.")
                try:
                    # Explicit encoding: CSVs from the web UI are expected UTF-8.
                    with open(local_path, "r", encoding="utf-8") as f:
                        return f.read()
                except OSError as e:
                    return f"Error reading file: {e}"
            else:
                print(f"DEBUG: Local file does not exist at {local_path}")

        # Fallback: Download from URL
        print(f"Downloading file from: {file_path}")
        try:
            response = requests.get(file_path, timeout=30)
            response.raise_for_status()
            return response.text
        except Exception as e:
            return f"Error downloading file: {str(e)}"

    # Handle local file paths; read errors become "Error..." strings for
    # consistency with the rest of this function's contract.
    if os.path.exists(file_path):
        try:
            with open(file_path, "r", encoding="utf-8") as f:
                return f.read()
        except OSError as e:
            return f"Error reading file: {e}"

    return f"Error: File not found at {file_path}"


# --- Tool Definitions (MCP & Logic) ---


def analyze_data_tool(file_path: str) -> str:
    """
    πŸ“Š Analyze CSV Dataset

    Analyzes your CSV dataset and provides comprehensive statistical metadata including:
    - Dataset shape (rows, columns)
    - Column data types
    - Missing values analysis
    - Statistical summaries for numerical columns

    Args:
        file_path: URL or local path to your CSV file

    Returns:
        JSON formatted analysis report with dataset statistics
    """
    content = get_file_content(file_path)
    # File-loading failures come back as "Error..." strings; forward them.
    if content.startswith("Error"):
        return content
    try:
        # Remote analysis on the Modal backend, serialized for display.
        return json.dumps(f_analyze.remote(content), indent=2)
    except Exception as e:
        return f"Error in analysis: {str(e)}"


def train_model_tool(
    file_path: str, target_column: str, task_type: str = "classification"
) -> str:
    """
    🚈 Train Machine Learning Model

    Trains a production-ready machine learning model on your CSV dataset.

    Args:
        file_path: URL or local path to your CSV file
        target_column: Name of the column you want to predict
        task_type: Type of machine learning task:
                  - 'classification': Predict categories (default)
                  - 'regression': Predict numerical values
                  - 'time_series': Forecast time-based data

    Returns:
        JSON response containing:
        - Training status message
        - Unique model ID for deployment
        - Performance metrics (accuracy, F1 score, etc.)
    """
    content = get_file_content(file_path)
    # Propagate file-loading failures unchanged.
    if content.startswith("Error"):
        return content
    try:
        outcome = f_train.remote(content, target_column, task_type)
        # Render each metric to 4 decimal places in a single display string.
        formatted_metrics = ", ".join(
            f"{name}: {value:.4f}" for name, value in outcome["metrics"].items()
        )
        payload = {
            "message": outcome["message"],
            "model_id": outcome["model_id"],
            "metrics": formatted_metrics,
        }
        return json.dumps(payload, indent=2)
    except Exception as e:
        return f"Error in training: {str(e)}"


def deploy_model_tool(model_id: str) -> str:
    """
    πŸš€ Deploy Model to Production API

    Deploys your trained model to a live production API endpoint.

    Args:
        model_id: Unique identifier of the trained model (returned by training tool)

    Returns:
        Deployment information including:
        - Live API endpoint URL
        - Python code examples for making predictions
        - cURL command examples for API testing
    """
    # Confirm the model exists on the backend before advertising an endpoint.
    try:
        model_exists = f_check.remote(model_id)["exists"]
    except Exception as e:
        return f"Error checking model: {str(e)}"
    if not model_exists:
        return f"Error: Model {model_id} not found."

    api_url = "https://abidali899--mlops-backend-predict-api.modal.run"
    usage_code = f"""
import requests
url = "{api_url}"
payload = {{ "model_id": "{model_id}", "data": {{ "col1": "val1" }} }}
response = requests.post(url, json=payload)
print(response.json())"""

    curl_code = f"""
curl -X POST {api_url} \\
-H "Content-Type: application/json" \\
-d '{{ "model_id": "{model_id}", "data": {{ "col1": "val1" }} }}'"""

    # Assemble the Markdown report shown in the UI / returned over MCP.
    report = (
        f"Model {model_id} is live!\n\nEndpoint: {api_url}\n\n"
        f"### Usage (Python):\n```python\n{usage_code}\n```\n\n"
        f"### Usage (cURL):\n```bash\n{curl_code}\n```"
    )
    return report


def auto_deploy_tool(
    file_path: str, target_column: str, task_type: str = "classification"
) -> str:
    """
    ⚑ Auto Deploy - Complete ML Pipeline

    One-click solution to analyze your data, train a model, and deploy to production.

    This end-to-end pipeline automatically:
    1. Performs comprehensive dataset analysis and insights
    2. Trains an optimized machine learning model
    3. Deploys the model to a live production API
    4. Provides complete deployment report with usage examples

    Args:
        file_path: URL or local path to your CSV file
        target_column: Name of the column you want to predict
        task_type: Type of machine learning task:
                  - 'classification': Predict categories (default)
                  - 'regression': Predict numerical values
                  - 'time_series': Forecast time-based data

    Returns:
        Comprehensive deployment report including:
        - Detailed dataset analysis and insights
        - Data quality assessment and recommendations
        - Model performance metrics and evaluation
        - Live API endpoint and usage examples
    """
    content = get_file_content(file_path)
    if content.startswith("Error"):
        return content

    try:
        # Step 1: remote dataset analysis.
        analysis = f_analyze.remote(content)
        dataset_shape = analysis.get('shape', 'Unknown')
        column_names = analysis.get('columns', [])

        analysis_section = f"""### πŸ“Š Dataset Analysis

- **Dataset Shape**: {dataset_shape}
- **Total Columns**: {len(column_names)}
- **Target Column**: `{target_column}`
- **Task Type**: {task_type.title()}
"""

        # Step 2: remote model training.
        train_result = f_train.remote(content, target_column, task_type)
        model_id = train_result["model_id"]

        # Step 3: render numeric metrics; rate-style metrics also get a % form.
        metric_lines = []
        for name, value in train_result.get("metrics", {}).items():
            if not isinstance(value, (int, float)):
                continue
            if name.lower() in ('accuracy', 'precision', 'recall', 'f1'):
                metric_lines.append(f"- **{name.title()}**: {value:.4f} ({value*100:.2f}%)\n")
            else:
                metric_lines.append(f"- **{name.title()}**: {value:.4f}\n")
        training_section = "### ⭐ Performance Metrics\n\n" + "".join(metric_lines)

        # Step 4: deployment details reuse the standalone deploy tool.
        deploy_info = deploy_model_tool(model_id)

        # Step 5: final combined report.
        return f"""## πŸ“‡ Auto-Deployment Complete Report

{analysis_section}

{training_section}

### 🌐 Deployment Information

{deploy_info}

---

*πŸ“ˆ Auto-generated by Auto-Deployer MCP Server*"""

    except Exception as e:
        return f"❌ Error in auto-deployment: {str(e)}"


# --- UI Construction ---
# Four tabs, one per tool. Each button is disabled ("Processing...") while
# its tool runs, then re-enabled, via a click -> then -> then event chain.
with gr.Blocks() as demo:
    gr.Markdown(
        """
        # 🚈 Auto-Deployer MCP Server
        
        >From CSV to Deployed ML API in 30 seconds.
        
        Connect your AI agent to this server to:
        *   πŸ‘€ **Analyze** datasets
        *   🚈 **Train** models on serverless CPUs (Modal)
        *   πŸš€ **Deploy** production-ready APIs
        """
    )

    # Manual Interface (Web)
    # Core tools work with both file uploads and URLs seamlessly

    # Tab 1: dataset analysis -> JSON statistics report.
    with gr.Tab("Analyze"):
        an_file = gr.File(label="CSV File", file_types=[".csv"])
        an_btn = gr.Button("Analyze Data")
        an_out = gr.JSON(label="Output")
        an_btn.click(
            fn=lambda: gr.Button(value="Processing...", interactive=False), outputs=an_btn
        ).then(
            fn=analyze_data_tool, inputs=[an_file], outputs=an_out
        ).then(
            fn=lambda: gr.Button(value="Analyze Data", interactive=True), outputs=an_btn
        )

        gr.Examples(
            examples=[[os.path.join("upload_files", "heart.csv")]],
            inputs=[an_file],
            label="Example Dataset"
        )

    # Tab 2: model training -> JSON with model_id and metrics.
    with gr.Tab("Train"):
        t_file = gr.File(label="CSV File", file_types=[".csv"])
        t_col = gr.Textbox(label="Target Column")
        t_type = gr.Dropdown(
            ["classification", "regression", "time_series"],
            label="Task Type",
            value="classification",
        )
        t_btn = gr.Button("Train")
        t_out = gr.JSON(label="Output")
        t_btn.click(
            fn=lambda: gr.Button(value="Processing...", interactive=False), outputs=t_btn
        ).then(
            fn=train_model_tool, inputs=[t_file, t_col, t_type], outputs=t_out
        ).then(
            fn=lambda: gr.Button(value="Train", interactive=True), outputs=t_btn
        )

        gr.Examples(
            examples=[[os.path.join("upload_files", "heart.csv"), "HeartDisease", "classification"]],
            inputs=[t_file, t_col, t_type],
            label="Example Training Config"
        )

    # Tab 3: deploy an already-trained model by ID -> Markdown usage report.
    with gr.Tab("Deploy"):
        d_id = gr.Textbox(label="Model ID")
        d_btn = gr.Button("Deploy")
        d_out = gr.Markdown(label="Output")
        d_btn.click(
            fn=lambda: gr.Button(value="Processing...", interactive=False), outputs=d_btn
        ).then(
            fn=deploy_model_tool, inputs=[d_id], outputs=d_out
        ).then(
            fn=lambda: gr.Button(value="Deploy", interactive=True), outputs=d_btn
        )

        gr.Examples(
            examples=[["model_1764524701"]],
            inputs=[d_id],
            label="Example Model ID"
        )

    # Tab 4: full analyze -> train -> deploy pipeline -> Markdown report.
    with gr.Tab("Auto Deploy"):
        a_file = gr.File(label="CSV File", file_types=[".csv"])
        a_col = gr.Textbox(label="Target Column")
        a_type = gr.Dropdown(
            ["classification", "regression", "time_series"],
            label="Task Type",
            value="classification",
        )
        a_btn = gr.Button("Auto Deploy")
        a_out = gr.Markdown(label="Output")
        a_btn.click(
            fn=lambda: gr.Button(value="Processing...", interactive=False), outputs=a_btn
        ).then(
            fn=auto_deploy_tool, inputs=[a_file, a_col, a_type], outputs=a_out
        ).then(
            fn=lambda: gr.Button(value="Auto Deploy", interactive=True), outputs=a_btn
        )

        gr.Examples(
            examples=[[os.path.join("upload_files", "housing.csv"), "price", "regression"]],
            inputs=[a_file, a_col, a_type],
            label="Example Auto-Deploy Config"
        )

    # MCP tools are automatically generated from the visible interface above
    # The core tools handle both file uploads and URL inputs seamlessly

if __name__ == "__main__":
    # Make sure the Modal backend app exists before serving the UI/MCP tools;
    # the tool functions call it via f_analyze / f_train / f_check.
    print("πŸ” Checking Modal backend status...")
    if deploy_modal_backend():
        print("πŸŽ‰ Backend is ready. Starting MCP server...")
        # mcp_server=True exposes the UI's tool functions over MCP as well.
        demo.launch(mcp_server=True)
    else:
        # Without the backend every tool call would fail remotely; abort.
        print("❌ Failed to deploy backend.")
        sys.exit(1)