# Hugging Face Space header (scrape residue preserved as comments):
# Status: Running — Author: Abid Ali Awan
# Commit 6149566 — feat: Display "Processing..." on buttons during execution
# and correct a column name in the README example.
import json
import os
import subprocess
import sys
import gradio as gr
import modal
import requests
# --- Backend Management ---
def check_modal_backend_running():
    """Return True when the 'mlops-backend' Modal app is listed as ready."""
    cmd = ["modal", "app", "list", "--json"]
    try:
        proc = subprocess.run(cmd, capture_output=True, text=True, timeout=30)
        if proc.returncode != 0:
            # CLI failed (not logged in, transient error, ...): treat as not running.
            return False
        listed_apps = json.loads(proc.stdout)
        return any(
            entry.get("name") == "mlops-backend" and entry.get("state") == "ready"
            for entry in listed_apps
        )
    except Exception as exc:
        # Missing CLI binary, timeout, or bad JSON all mean "can't confirm running".
        print(f"Error checking Modal backend: {exc}")
        return False
def deploy_modal_backend():
    """Deploy the Modal backend unless it is already running; return True on success."""
    if check_modal_backend_running():
        print("β Modal backend is already running")
        return True
    print("π Deploying Modal backend...")
    try:
        proc = subprocess.run(
            ["modal", "deploy", "modal_backend.py"],
            capture_output=True,
            text=True,
            timeout=300,
        )
    except Exception as exc:
        print(f"β Error deploying Modal backend: {exc}")
        return False
    if proc.returncode == 0:
        print("β Modal backend deployed successfully")
        return True
    print(f"β Failed to deploy Modal backend: {proc.stderr}")
    return False
# Initialize Modal function references
# Handles to the remote functions published by the deployed "mlops-backend"
# Modal app (see deploy_modal_backend / modal_backend.py).
f_analyze = modal.Function.from_name("mlops-backend", "analyze_data")
f_train = modal.Function.from_name("mlops-backend", "train_model")
f_check = modal.Function.from_name("mlops-backend", "check_model")
# --- Core Logic ---
def get_file_content(file_input) -> str:
    """
    Retrieves content from a file path, URL, or Gradio file object.
    Handles all input types seamlessly for both web and MCP interfaces.
    Args:
        file_input: Can be:
            - String path/URL to file
            - Gradio file object with .name attribute
            - File object with path
    Returns:
        File content as string, or a message starting with "Error" when the
        input is missing, unreachable, or not found.
    """
    if not file_input:
        return "Error: No file provided."
    # Resolve the path/URL from whichever input type we received.
    if hasattr(file_input, 'name'):
        file_path = file_input.name
        print(f"DEBUG: Processing Gradio file: {file_path}")
    elif hasattr(file_input, 'path'):
        file_path = file_input.path
        print(f"DEBUG: Processing file with path: {file_path}")
    else:
        file_path = str(file_input)
        print(f"DEBUG: Processing file path/URL: {file_path}")
    # Handle URLs
    if file_path.startswith(("http://", "https://")):
        # Gradio serves uploads at .../file=<local path>; read the local copy
        # directly when possible to avoid self-download loops.
        if "/file=" in file_path:
            import urllib.parse
            # partition (not split) keeps everything after the FIRST "/file=",
            # so a path that itself contains "/file=" is not truncated.
            local_path = urllib.parse.unquote(file_path.partition("/file=")[2])
            print(f"DEBUG: Extracted local_path: {local_path}")
            if os.path.exists(local_path):
                print("DEBUG: Local file exists. Reading directly.")
                # Explicit UTF-8: CSV decoding must not depend on the platform default.
                with open(local_path, "r", encoding="utf-8") as f:
                    return f.read()
            else:
                print(f"DEBUG: Local file does not exist at {local_path}")
        # Fallback: Download from URL
        print(f"Downloading file from: {file_path}")
        try:
            response = requests.get(file_path, timeout=30)
            response.raise_for_status()
            return response.text
        except Exception as e:
            return f"Error downloading file: {str(e)}"
    # Handle local file paths
    if os.path.exists(file_path):
        with open(file_path, "r", encoding="utf-8") as f:
            return f.read()
    return f"Error: File not found at {file_path}"
# --- Tool Definitions (MCP & Logic) ---
def analyze_data_tool(file_path: str) -> str:
    """
    π Analyze CSV Dataset
    Analyzes your CSV dataset and provides comprehensive statistical metadata including:
    - Dataset shape (rows, columns)
    - Column data types
    - Missing values analysis
    - Statistical summaries for numerical columns
    Args:
        file_path: URL or local path to your CSV file
    Returns:
        JSON formatted analysis report with dataset statistics
    """
    content = get_file_content(file_path)
    # get_file_content signals failure via an "Error..." string; pass it through.
    if content.startswith("Error"):
        return content
    try:
        report = f_analyze.remote(content)
        return json.dumps(report, indent=2)
    except Exception as e:
        return f"Error in analysis: {str(e)}"
def train_model_tool(
    file_path: str, target_column: str, task_type: str = "classification"
) -> str:
    """
    π Train Machine Learning Model
    Trains a production-ready machine learning model on your CSV dataset.
    Args:
        file_path: URL or local path to your CSV file
        target_column: Name of the column you want to predict
        task_type: Type of machine learning task:
            - 'classification': Predict categories (default)
            - 'regression': Predict numerical values
            - 'time_series': Forecast time-based data
    Returns:
        JSON response containing:
        - Training status message
        - Unique model ID for deployment
        - Performance metrics (accuracy, F1 score, etc.)
    """
    content = get_file_content(file_path)
    # Propagate file-resolution failures unchanged.
    if content.startswith("Error"):
        return content
    try:
        result = f_train.remote(content, target_column, task_type)
        # Flatten the metrics dict into one human-readable line.
        formatted = [f"{k}: {v:.4f}" for k, v in result["metrics"].items()]
        payload = {
            "message": result["message"],
            "model_id": result["model_id"],
            "metrics": ", ".join(formatted),
        }
        return json.dumps(payload, indent=2)
    except Exception as e:
        return f"Error in training: {str(e)}"
def deploy_model_tool(model_id: str) -> str:
    """
    π Deploy Model to Production API
    Deploys your trained model to a live production API endpoint.
    Args:
        model_id: Unique identifier of the trained model (returned by training tool)
    Returns:
        Deployment information including:
        - Live API endpoint URL
        - Python code examples for making predictions
        - cURL command examples for API testing
    """
    # Confirm the model artifact exists before handing out an endpoint.
    try:
        lookup = f_check.remote(model_id)
        if not lookup["exists"]:
            return f"Error: Model {model_id} not found."
    except Exception as e:
        return f"Error checking model: {str(e)}"
    api_url = "https://abidali899--mlops-backend-predict-api.modal.run"
    usage_code = f"""
import requests
url = "{api_url}"
payload = {{ "model_id": "{model_id}", "data": {{ "col1": "val1" }} }}
response = requests.post(url, json=payload)
print(response.json())"""
    curl_code = f"""
curl -X POST {api_url} \\
  -H "Content-Type: application/json" \\
  -d '{{ "model_id": "{model_id}", "data": {{ "col1": "val1" }} }}'"""
    return f"Model {model_id} is live!\n\nEndpoint: {api_url}\n\n### Usage (Python):\n```python\n{usage_code}\n```\n\n### Usage (cURL):\n```bash\n{curl_code}\n```"
def auto_deploy_tool(
    file_path: str, target_column: str, task_type: str = "classification"
) -> str:
    """
    β‘ Auto Deploy - Complete ML Pipeline
    One-click solution to analyze your data, train a model, and deploy to production.
    This end-to-end pipeline automatically:
    1. Performs comprehensive dataset analysis and insights
    2. Trains an optimized machine learning model
    3. Deploys the model to a live production API
    4. Provides complete deployment report with usage examples
    Args:
        file_path: URL or local path to your CSV file
        target_column: Name of the column you want to predict
        task_type: Type of machine learning task:
            - 'classification': Predict categories (default)
            - 'regression': Predict numerical values
            - 'time_series': Forecast time-based data
    Returns:
        Comprehensive deployment report including:
        - Detailed dataset analysis and insights
        - Data quality assessment and recommendations
        - Model performance metrics and evaluation
        - Live API endpoint and usage examples
    """
    content = get_file_content(file_path)
    # get_file_content reports failures as "Error..." strings; pass them through.
    if content.startswith("Error"):
        return content
    try:
        # 1. Comprehensive Analysis (remote call to the Modal backend)
        analysis = f_analyze.remote(content)
        # Extract basic analysis information; defaults keep the report
        # renderable even if the backend omits a key.
        shape = analysis.get('shape', 'Unknown')
        columns = analysis.get('columns', [])
        # Build simple analysis section (Markdown)
        analysis_section = f"""### π Dataset Analysis
- **Dataset Shape**: {shape}
- **Total Columns**: {len(columns)}
- **Target Column**: `{target_column}`
- **Task Type**: {task_type.title()}
"""
        # 2. Model Training (remote call; raises into the except below on failure)
        train_result = f_train.remote(content, target_column, task_type)
        model_id = train_result["model_id"]
        # Build performance metrics section
        training_section = "### β Performance Metrics\n\n"
        metrics = train_result.get("metrics", {})
        for metric_name, metric_value in metrics.items():
            if isinstance(metric_value, (int, float)):
                # Ratio-style metrics additionally get a percentage rendering.
                if metric_name.lower() in ['accuracy', 'precision', 'recall', 'f1']:
                    training_section += f"- **{metric_name.title()}**: {metric_value:.4f} ({metric_value*100:.2f}%)\n"
                else:
                    training_section += f"- **{metric_name.title()}**: {metric_value:.4f}\n"
        # 3. Deployment Information (reuses the standalone deploy tool)
        deploy_info = deploy_model_tool(model_id)
        # 4. Final comprehensive report
        return f"""## π Auto-Deployment Complete Report
{analysis_section}
{training_section}
### π Deployment Information
{deploy_info}
---
*π Auto-generated by Auto-Deployer MCP Server*"""
    except Exception as e:
        # Any stage failing collapses into a single error message for the caller.
        return f"β Error in auto-deployment: {str(e)}"
# --- UI Construction ---
# One tab per tool; each button is swapped to a disabled "Processing..." state
# while its tool runs, then restored (the .click().then().then() chains below).
with gr.Blocks() as demo:
    gr.Markdown(
        """
        # π Auto-Deployer MCP Server
        >From CSV to Deployed ML API in 30 seconds.
        Connect your AI agent to this server to:
        * π **Analyze** datasets
        * π **Train** models on serverless CPUs (Modal)
        * π **Deploy** production-ready APIs
        """
    )
    # Manual Interface (Web)
    # Core tools work with both file uploads and URLs seamlessly
    with gr.Tab("Analyze"):
        an_file = gr.File(label="CSV File", file_types=[".csv"])
        an_btn = gr.Button("Analyze Data")
        an_out = gr.JSON(label="Output")
        # Disable button -> run tool -> re-enable button.
        an_btn.click(
            fn=lambda: gr.Button(value="Processing...", interactive=False), outputs=an_btn
        ).then(
            fn=analyze_data_tool, inputs=[an_file], outputs=an_out
        ).then(
            fn=lambda: gr.Button(value="Analyze Data", interactive=True), outputs=an_btn
        )
        gr.Examples(
            examples=[[os.path.join("upload_files", "heart.csv")]],
            inputs=[an_file],
            label="Example Dataset"
        )
    with gr.Tab("Train"):
        t_file = gr.File(label="CSV File", file_types=[".csv"])
        t_col = gr.Textbox(label="Target Column")
        t_type = gr.Dropdown(
            ["classification", "regression", "time_series"],
            label="Task Type",
            value="classification",
        )
        t_btn = gr.Button("Train")
        t_out = gr.JSON(label="Output")
        # Disable button -> run tool -> re-enable button.
        t_btn.click(
            fn=lambda: gr.Button(value="Processing...", interactive=False), outputs=t_btn
        ).then(
            fn=train_model_tool, inputs=[t_file, t_col, t_type], outputs=t_out
        ).then(
            fn=lambda: gr.Button(value="Train", interactive=True), outputs=t_btn
        )
        gr.Examples(
            examples=[[os.path.join("upload_files", "heart.csv"), "HeartDisease", "classification"]],
            inputs=[t_file, t_col, t_type],
            label="Example Training Config"
        )
    with gr.Tab("Deploy"):
        d_id = gr.Textbox(label="Model ID")
        d_btn = gr.Button("Deploy")
        d_out = gr.Markdown(label="Output")
        # Disable button -> run tool -> re-enable button.
        d_btn.click(
            fn=lambda: gr.Button(value="Processing...", interactive=False), outputs=d_btn
        ).then(
            fn=deploy_model_tool, inputs=[d_id], outputs=d_out
        ).then(
            fn=lambda: gr.Button(value="Deploy", interactive=True), outputs=d_btn
        )
        gr.Examples(
            examples=[["model_1764524701"]],
            inputs=[d_id],
            label="Example Model ID"
        )
    with gr.Tab("Auto Deploy"):
        a_file = gr.File(label="CSV File", file_types=[".csv"])
        a_col = gr.Textbox(label="Target Column")
        a_type = gr.Dropdown(
            ["classification", "regression", "time_series"],
            label="Task Type",
            value="classification",
        )
        a_btn = gr.Button("Auto Deploy")
        a_out = gr.Markdown(label="Output")
        # Disable button -> run tool -> re-enable button.
        a_btn.click(
            fn=lambda: gr.Button(value="Processing...", interactive=False), outputs=a_btn
        ).then(
            fn=auto_deploy_tool, inputs=[a_file, a_col, a_type], outputs=a_out
        ).then(
            fn=lambda: gr.Button(value="Auto Deploy", interactive=True), outputs=a_btn
        )
        gr.Examples(
            examples=[[os.path.join("upload_files", "housing.csv"), "price", "regression"]],
            inputs=[a_file, a_col, a_type],
            label="Example Auto-Deploy Config"
        )
    # MCP tools are automatically generated from the visible interface above
    # The core tools handle both file uploads and URL inputs seamlessly
if __name__ == "__main__":
    print("π Checking Modal backend status...")
    # Ensure the Modal backend app exists before serving the UI / MCP tools,
    # since every tool delegates to f_analyze / f_train / f_check remotely.
    if deploy_modal_backend():
        print("π Backend is ready. Starting MCP server...")
        # mcp_server=True exposes the interface's tools over MCP as well.
        demo.launch(mcp_server=True)
    else:
        print("β Failed to deploy backend.")
        sys.exit(1)