Abid Ali Awan committed on
Commit
1365f1b
·
1 Parent(s): 4c376f5

feat: Add URL and Gradio file handling to tools and include sample heart.csv dataset.

Browse files
Files changed (4) hide show
  1. .gitattributes +1 -0
  2. README.md +35 -0
  3. app.py +89 -51
  4. upload_files/heart.csv +3 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ *.csv filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -59,3 +59,38 @@ You can now connect any MCP-compliant client (e.g., Claude Desktop) to this serv
59
  1. Upload a CSV file.
60
  2. Specify a target column.
61
  3. Receive a deployed API URL for the trained model.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
59
  1. Upload a CSV file.
60
  2. Specify a target column.
61
  3. Receive a deployed API URL for the trained model.
62
+
63
+ ## Client Configuration
64
+
65
+ To enable your AI agent to upload local files to this server, you can add the following tool definition to your MCP client configuration (e.g., `claude_desktop_config.json`).
66
+
67
+ **Note**: This tool requires `uv` to be installed on your local machine.
68
+
69
+ ```json
70
+ {
71
+ "mcpServers": {
72
+ "auto-deployer": {
73
+ "command": "uvx",
74
+ "args": [
75
+ "gradio",
76
+ "mcp",
77
+ "https://mcp-1st-birthday-auto-deployer.hf.space/gradio_api/mcp/"
78
+ ]
79
+ },
80
+ "upload_files_to_gradio": {
81
+ "command": "uvx",
82
+ "args": [
83
+ "--from",
84
+ "gradio[mcp]",
85
+ "gradio",
86
+ "upload-mcp",
87
+ "https://mcp-1st-birthday-auto-deployer.hf.space",
88
+ "./upload_files"
89
+ ]
90
+ }
91
+ }
92
+ }
93
+ ```
94
+
95
+ ### Tool Description
96
+ The `upload_files_to_gradio` tool uploads files from your local upload directory (`./upload_files` in the configuration above) or any of its subdirectories to the Gradio app. This is needed because MCP servers require files to be provided as URLs. You can omit this tool if you prefer to upload files manually.
app.py CHANGED
@@ -1,9 +1,12 @@
1
- import gradio as gr
2
- import modal
3
  import json
4
  import subprocess
5
  import sys
6
  import os
 
 
 
 
 
7
 
8
  def check_modal_backend_running():
9
  """
@@ -18,12 +21,13 @@ def check_modal_backend_running():
18
  ["modal", "app", "list", "--json"],
19
  capture_output=True,
20
  text=True,
21
- timeout=30
22
  )
23
 
24
  if result.returncode == 0:
25
  # Parse the JSON output to check if mlops-backend is running
26
  import json
 
27
  apps = json.loads(result.stdout)
28
  for app in apps:
29
  if app.get("name") == "mlops-backend" and app.get("state") == "ready":
@@ -40,6 +44,7 @@ def check_modal_backend_running():
40
  print(f"Error checking Modal backend status: {e}")
41
  return False
42
 
 
43
  def deploy_modal_backend():
44
  """
45
  Deploy the Modal backend if it's not already running.
@@ -58,7 +63,7 @@ def deploy_modal_backend():
58
  ["modal", "deploy", "modal_backend.py"],
59
  capture_output=True,
60
  text=True,
61
- timeout=300 # 5 minutes timeout
62
  )
63
 
64
  if result.returncode == 0:
@@ -75,32 +80,59 @@ def deploy_modal_backend():
75
  print(f"❌ Error deploying Modal backend: {e}")
76
  return False
77
 
 
78
  # Initialize Modal function references
79
  f_analyze = modal.Function.from_name("mlops-backend", "analyze_data")
80
  f_train = modal.Function.from_name("mlops-backend", "train_model")
81
  f_check = modal.Function.from_name("mlops-backend", "check_model")
82
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
83
  def analyze_data_tool(file_path: str) -> str:
84
  """
85
  Analyzes the uploaded CSV file and returns statistical metadata.
86
-
87
  Args:
88
  file_path: The path to the uploaded CSV file.
89
  Returns:
90
  str: JSON string with analysis results.
91
  """
92
- with open(file_path, "r") as f:
93
- content = f.read()
94
-
95
  # Call Modal backend
96
  result = f_analyze.remote(content)
97
-
98
  return json.dumps(result, indent=2)
99
 
100
- def train_model_tool(file_path: str, target_column: str, task_type: str = "classification") -> str:
 
 
 
101
  """
102
  Trains a model on the uploaded CSV file.
103
-
104
  Args:
105
  file_path: The path to the uploaded CSV file.
106
  target_column: The name of the column to predict.
@@ -108,22 +140,25 @@ def train_model_tool(file_path: str, target_column: str, task_type: str = "class
108
  Returns:
109
  str: JSON string with training results.
110
  """
111
- with open(file_path, "r") as f:
112
- content = f.read()
113
-
114
  # Call Modal backend
115
  result = f_train.remote(content, target_column, task_type)
116
-
117
- return json.dumps({
118
- "message": result['message'],
119
- "model_id": result['model_id'],
120
- "metric": f"{result['metric_name']}: {result['metric_value']:.4f}"
121
- }, indent=2)
 
 
 
 
122
 
123
  def deploy_model_tool(model_id: str) -> str:
124
  """
125
  Deploys a trained model and returns the API usage code.
126
-
127
  Args:
128
  model_id: The ID of the model to deploy.
129
  Returns:
@@ -131,12 +166,12 @@ def deploy_model_tool(model_id: str) -> str:
131
  """
132
  # Verify model exists
133
  check = f_check.remote(model_id)
134
- if not check['exists']:
135
  return f"Error: Model {model_id} not found."
136
 
137
  # Construct the API usage example
138
- api_url = "https://abidali899--mlops-backend-predict-api.modal.run"
139
-
140
  usage_code = f"""
141
  import requests
142
 
@@ -148,7 +183,7 @@ payload = {{
148
  response = requests.post(url, json=payload)
149
  print(response.json())
150
  """
151
-
152
  curl_code = f"""
153
  curl -X POST {api_url} \\
154
  -H "Content-Type: application/json" \\
@@ -157,13 +192,16 @@ curl -X POST {api_url} \\
157
  "data": {{ "col1": "val1", "col2": "val2" }}
158
  }}'
159
  """
160
-
161
  return f"Model {model_id} is live!\n\nEndpoint: {api_url}\n\n### Usage (Python):\n```python\n{usage_code}\n```\n\n### Usage (cURL):\n```bash\n{curl_code}\n```"
162
 
163
- def auto_deploy_tool(file_path: str, target_column: str, task_type: str = "classification") -> str:
 
 
 
164
  """
165
  Full Pipeline: Analyzes data, trains model, evaluates, and deploys it in one go.
166
-
167
  Args:
168
  file_path: The path to the uploaded CSV file.
169
  target_column: The name of the column to predict.
@@ -171,21 +209,20 @@ def auto_deploy_tool(file_path: str, target_column: str, task_type: str = "class
171
  Returns:
172
  str: A detailed report of the pipeline execution.
173
  """
174
- with open(file_path, "r") as f:
175
- content = f.read()
176
-
177
  # 1. Analyze
178
  analysis = f_analyze.remote(content)
179
-
180
  # 2. Train & Evaluate
181
  train_result = f_train.remote(content, target_column, task_type)
182
- model_id = train_result['model_id']
183
- metric_val = train_result['metric_value']
184
- metric_name = train_result['metric_name']
185
-
186
  # 3. Deploy (Construct Info)
187
  api_url = "https://abidali899--mlops-backend-predict-api.modal.run"
188
-
189
  usage_code = f"""
190
  import requests
191
 
@@ -206,12 +243,12 @@ curl -X POST {api_url} \\
206
  "data": {{ "col1": "val1", "col2": "val2" }}
207
  }}'
208
  """
209
-
210
  report = f"""# Auto-Deployment Report
211
 
212
  ## 1. Data Analysis
213
- - **Shape**: {analysis['shape']}
214
- - **Columns**: {', '.join(analysis['columns'])}
215
 
216
  ## 2. Model Training
217
  - **Task**: {task_type}
@@ -233,6 +270,7 @@ The model is live at: `{api_url}`
233
  """
234
  return report
235
 
 
236
  with gr.Blocks() as demo:
237
  gr.Markdown("# Auto-Deployer MCP Server")
238
  gr.Markdown("This server exposes the following tools to MCP clients:")
@@ -240,15 +278,9 @@ with gr.Blocks() as demo:
240
  gr.Markdown("- `train_model_tool`")
241
  gr.Markdown("- `deploy_model_tool`")
242
  gr.Markdown("- `auto_deploy_tool`")
243
-
244
- # # Register tools using gr.api
245
- # gr.api(analyze_data_tool)
246
- # gr.api(train_model_tool)
247
- # gr.api(deploy_model_tool)
248
- # gr.api(auto_deploy_tool)
249
 
250
  gr.Markdown("## Manual Testing Interface")
251
-
252
  with gr.Tab("Analyze"):
253
  an_file = gr.File(label="CSV File")
254
  an_btn = gr.Button("Analyze Data")
@@ -258,7 +290,11 @@ with gr.Blocks() as demo:
258
  with gr.Tab("Train"):
259
  t_file = gr.File(label="CSV File")
260
  t_col = gr.Textbox(label="Target Column")
261
- t_type = gr.Dropdown(["classification", "regression", "time_series"], label="Task Type", value="classification")
 
 
 
 
262
  t_btn = gr.Button("Train")
263
  t_out = gr.JSON(label="Output")
264
  t_btn.click(train_model_tool, [t_file, t_col, t_type], t_out)
@@ -272,7 +308,11 @@ with gr.Blocks() as demo:
272
  with gr.Tab("Auto Deploy"):
273
  a_file = gr.File(label="CSV File")
274
  a_col = gr.Textbox(label="Target Column")
275
- a_type = gr.Dropdown(["classification", "regression", "time_series"], label="Task Type", value="classification")
 
 
 
 
276
  a_btn = gr.Button("Auto Deploy")
277
  a_out = gr.Markdown(label="Output")
278
  a_btn.click(auto_deploy_tool, [a_file, a_col, a_type], a_out)
@@ -288,5 +328,3 @@ if __name__ == "__main__":
288
  sys.exit(1)
289
 
290
  demo.launch(mcp_server=True)
291
-
292
-
 
 
 
1
  import json
2
  import subprocess
3
  import sys
4
  import os
5
+ import requests
6
+
7
+ import gradio as gr
8
+ import modal
9
+
10
 
11
  def check_modal_backend_running():
12
  """
 
21
  ["modal", "app", "list", "--json"],
22
  capture_output=True,
23
  text=True,
24
+ timeout=30,
25
  )
26
 
27
  if result.returncode == 0:
28
  # Parse the JSON output to check if mlops-backend is running
29
  import json
30
+
31
  apps = json.loads(result.stdout)
32
  for app in apps:
33
  if app.get("name") == "mlops-backend" and app.get("state") == "ready":
 
44
  print(f"Error checking Modal backend status: {e}")
45
  return False
46
 
47
+
48
  def deploy_modal_backend():
49
  """
50
  Deploy the Modal backend if it's not already running.
 
63
  ["modal", "deploy", "modal_backend.py"],
64
  capture_output=True,
65
  text=True,
66
+ timeout=300, # 5 minutes timeout
67
  )
68
 
69
  if result.returncode == 0:
 
80
  print(f"❌ Error deploying Modal backend: {e}")
81
  return False
82
 
83
+
84
  # Initialize Modal function references
85
  f_analyze = modal.Function.from_name("mlops-backend", "analyze_data")
86
  f_train = modal.Function.from_name("mlops-backend", "train_model")
87
  f_check = modal.Function.from_name("mlops-backend", "check_model")
88
 
89
+
90
def get_file_content(file_path: str) -> str:
    """
    Return the text content of a CSV file given a local path or a URL.

    Gradio file URLs (containing "/file=") are resolved to their local
    path first to avoid a needless HTTP round-trip; any other URL is
    downloaded. Plain filesystem paths are read directly.

    Args:
        file_path: Local filesystem path or an http(s) URL.
    Returns:
        str: The file's text content.
    Raises:
        requests.HTTPError: If a URL download returns an error status.
        OSError: If a local file cannot be read.
    """
    if file_path.startswith(("http://", "https://")):
        # Gradio serves uploaded files at .../file=<local_path>; if that
        # path exists on this machine, read it directly instead of
        # downloading our own file over HTTP.
        if "/file=" in file_path:
            # maxsplit=1 keeps any later "/file=" occurrences inside the path
            local_path = file_path.split("/file=", 1)[1]
            if os.path.exists(local_path):
                with open(local_path, "r") as f:
                    return f.read()

        # Fall back to downloading. A timeout prevents a stalled request
        # from hanging the MCP tool call indefinitely.
        print(f"Downloading file from: {file_path}")
        response = requests.get(file_path, timeout=60)
        response.raise_for_status()
        return response.text

    with open(file_path, "r") as f:
        return f.read()
111
+
112
+
113
def analyze_data_tool(file_path: str) -> str:
    """
    Analyzes the uploaded CSV file and returns statistical metadata.

    Args:
        file_path: The path to the uploaded CSV file.
    Returns:
        str: JSON string with analysis results.
    """
    # Resolve the input (local path, Gradio file URL, or remote URL) to text.
    csv_text = get_file_content(file_path)

    # Delegate the analysis to the Modal backend and pretty-print the result.
    analysis = f_analyze.remote(csv_text)
    return json.dumps(analysis, indent=2)
  return json.dumps(result, indent=2)
128
 
129
+
130
+ def train_model_tool(
131
+ file_path: str, target_column: str, task_type: str = "classification"
132
+ ) -> str:
133
  """
134
  Trains a model on the uploaded CSV file.
135
+
136
  Args:
137
  file_path: The path to the uploaded CSV file.
138
  target_column: The name of the column to predict.
 
140
  Returns:
141
  str: JSON string with training results.
142
  """
143
+ content = get_file_content(file_path)
144
+
 
145
  # Call Modal backend
146
  result = f_train.remote(content, target_column, task_type)
147
+
148
+ return json.dumps(
149
+ {
150
+ "message": result["message"],
151
+ "model_id": result["model_id"],
152
+ "metric": f"{result['metric_name']}: {result['metric_value']:.4f}",
153
+ },
154
+ indent=2,
155
+ )
156
+
157
 
158
  def deploy_model_tool(model_id: str) -> str:
159
  """
160
  Deploys a trained model and returns the API usage code.
161
+
162
  Args:
163
  model_id: The ID of the model to deploy.
164
  Returns:
 
166
  """
167
  # Verify model exists
168
  check = f_check.remote(model_id)
169
+ if not check["exists"]:
170
  return f"Error: Model {model_id} not found."
171
 
172
  # Construct the API usage example
173
+ api_url = "https://abidali899--mlops-backend-predict-api.modal.run"
174
+
175
  usage_code = f"""
176
  import requests
177
 
 
183
  response = requests.post(url, json=payload)
184
  print(response.json())
185
  """
186
+
187
  curl_code = f"""
188
  curl -X POST {api_url} \\
189
  -H "Content-Type: application/json" \\
 
192
  "data": {{ "col1": "val1", "col2": "val2" }}
193
  }}'
194
  """
195
+
196
  return f"Model {model_id} is live!\n\nEndpoint: {api_url}\n\n### Usage (Python):\n```python\n{usage_code}\n```\n\n### Usage (cURL):\n```bash\n{curl_code}\n```"
197
 
198
+
199
+ def auto_deploy_tool(
200
+ file_path: str, target_column: str, task_type: str = "classification"
201
+ ) -> str:
202
  """
203
  Full Pipeline: Analyzes data, trains model, evaluates, and deploys it in one go.
204
+
205
  Args:
206
  file_path: The path to the uploaded CSV file.
207
  target_column: The name of the column to predict.
 
209
  Returns:
210
  str: A detailed report of the pipeline execution.
211
  """
212
+ content = get_file_content(file_path)
213
+
 
214
  # 1. Analyze
215
  analysis = f_analyze.remote(content)
216
+
217
  # 2. Train & Evaluate
218
  train_result = f_train.remote(content, target_column, task_type)
219
+ model_id = train_result["model_id"]
220
+ metric_val = train_result["metric_value"]
221
+ metric_name = train_result["metric_name"]
222
+
223
  # 3. Deploy (Construct Info)
224
  api_url = "https://abidali899--mlops-backend-predict-api.modal.run"
225
+
226
  usage_code = f"""
227
  import requests
228
 
 
243
  "data": {{ "col1": "val1", "col2": "val2" }}
244
  }}'
245
  """
246
+
247
  report = f"""# Auto-Deployment Report
248
 
249
  ## 1. Data Analysis
250
+ - **Shape**: {analysis["shape"]}
251
+ - **Columns**: {", ".join(analysis["columns"])}
252
 
253
  ## 2. Model Training
254
  - **Task**: {task_type}
 
270
  """
271
  return report
272
 
273
+
274
  with gr.Blocks() as demo:
275
  gr.Markdown("# Auto-Deployer MCP Server")
276
  gr.Markdown("This server exposes the following tools to MCP clients:")
 
278
  gr.Markdown("- `train_model_tool`")
279
  gr.Markdown("- `deploy_model_tool`")
280
  gr.Markdown("- `auto_deploy_tool`")
 
 
 
 
 
 
281
 
282
  gr.Markdown("## Manual Testing Interface")
283
+
284
  with gr.Tab("Analyze"):
285
  an_file = gr.File(label="CSV File")
286
  an_btn = gr.Button("Analyze Data")
 
290
  with gr.Tab("Train"):
291
  t_file = gr.File(label="CSV File")
292
  t_col = gr.Textbox(label="Target Column")
293
+ t_type = gr.Dropdown(
294
+ ["classification", "regression", "time_series"],
295
+ label="Task Type",
296
+ value="classification",
297
+ )
298
  t_btn = gr.Button("Train")
299
  t_out = gr.JSON(label="Output")
300
  t_btn.click(train_model_tool, [t_file, t_col, t_type], t_out)
 
308
  with gr.Tab("Auto Deploy"):
309
  a_file = gr.File(label="CSV File")
310
  a_col = gr.Textbox(label="Target Column")
311
+ a_type = gr.Dropdown(
312
+ ["classification", "regression", "time_series"],
313
+ label="Task Type",
314
+ value="classification",
315
+ )
316
  a_btn = gr.Button("Auto Deploy")
317
  a_out = gr.Markdown(label="Output")
318
  a_btn.click(auto_deploy_tool, [a_file, a_col, a_type], a_out)
 
328
  sys.exit(1)
329
 
330
  demo.launch(mcp_server=True)
 
 
upload_files/heart.csv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:948420b084d8a3a0ca42b8419fce9aee175879e43f8aedf712377899a67aa49b
3
+ size 35921