Nyha15 commited on
Commit
10f610b
·
1 Parent(s): 161ef7d

Removed LLM call

Browse files
Files changed (1) hide show
  1. app.py +31 -8
app.py CHANGED
@@ -1,5 +1,5 @@
1
  """
2
- Data Analyst Duo MCP without OpenAI Integration
3
  """
4
 
5
  import os
@@ -7,11 +7,9 @@ import json
7
  import uuid
8
  import logging
9
  import datetime
10
- from io import StringIO
11
 
12
  import pandas as pd
13
  import numpy as np
14
- import requests
15
  import gradio as gr
16
 
17
  # ——— Logging setup —————————————————————————————————————
@@ -156,7 +154,7 @@ class InterpretAgent(MCPAgent):
156
  # rule-based tools
157
  self.register_tool(MCPTool("interpret_statistics", "", self._int_stats))
158
  self.register_tool(MCPTool("interpret_correlation", "", self._int_corr))
159
- # LLM stubs
160
  self.register_tool(MCPTool("llm_interpret", "LLM stub", self._llm_interpret))
161
  self.register_tool(MCPTool("llm_report", "LLM stub", self._llm_report))
162
 
@@ -173,11 +171,9 @@ class InterpretAgent(MCPAgent):
173
  return {"status": "success", "insights": ["Correlation computed"], "summary": "Rule-based corr"}
174
 
175
  def _llm_interpret(self, params):
176
- # stubbed out; no OpenAI
177
  return {"status": "skipped", "insights": [], "summary": "LLM removed"}
178
 
179
  def _llm_report(self, params):
180
- # stubbed out; no OpenAI
181
  return {"status": "skipped", "report_md": ""}
182
 
183
  def handle_message(self, m):
@@ -196,7 +192,7 @@ class InterpretAgent(MCPAgent):
196
  llm_res = self._llm_report({})
197
  self.send_message(m.sender, "report_result", llm_res)
198
 
199
- # ——— Orchestration (unchanged) —————————————————————————————
200
  class DataAnalystDuo:
201
  def __init__(self):
202
  self.C = ComputeAgent()
@@ -205,12 +201,39 @@ class DataAnalystDuo:
205
  self.I.connect(self.C)
206
 
207
  def run(self, url):
 
208
  self.I.send_message("ComputeAgent", "request_data_load", {"url": url})
209
  self.C.process(); self.I.process()
 
210
  self.I.send_message("ComputeAgent", "request_statistics", {})
211
  self.C.process(); self.I.process()
 
212
  self.I.send_message("ComputeAgent", "request_correlation", {})
213
  self.C.process(); self.I.process()
 
214
  self.C.send_message("InterpretAgent", "request_report", {"report_title": "Analysis Report"})
215
  self.I.process(); self.C.process()
216
- return self.C.get_history(), self.I.get_history()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  """
2
+ app.py — Data Analyst Duo MCP (no OpenAI) Gradio Space
3
  """
4
 
5
  import os
 
7
  import uuid
8
  import logging
9
  import datetime
 
10
 
11
  import pandas as pd
12
  import numpy as np
 
13
  import gradio as gr
14
 
15
  # ——— Logging setup —————————————————————————————————————
 
154
  # rule-based tools
155
  self.register_tool(MCPTool("interpret_statistics", "", self._int_stats))
156
  self.register_tool(MCPTool("interpret_correlation", "", self._int_corr))
157
+ # stubbed LLM tools
158
  self.register_tool(MCPTool("llm_interpret", "LLM stub", self._llm_interpret))
159
  self.register_tool(MCPTool("llm_report", "LLM stub", self._llm_report))
160
 
 
171
  return {"status": "success", "insights": ["Correlation computed"], "summary": "Rule-based corr"}
172
 
173
  def _llm_interpret(self, params):
 
174
  return {"status": "skipped", "insights": [], "summary": "LLM removed"}
175
 
176
  def _llm_report(self, params):
 
177
  return {"status": "skipped", "report_md": ""}
178
 
179
  def handle_message(self, m):
 
192
  llm_res = self._llm_report({})
193
  self.send_message(m.sender, "report_result", llm_res)
194
 
195
+ # ——— Orchestration ———————————————————————————————————
196
  class DataAnalystDuo:
197
  def __init__(self):
198
  self.C = ComputeAgent()
 
201
  self.I.connect(self.C)
202
 
203
  def run(self, url):
204
+ # 1) load data
205
  self.I.send_message("ComputeAgent", "request_data_load", {"url": url})
206
  self.C.process(); self.I.process()
207
+ # 2) stats
208
  self.I.send_message("ComputeAgent", "request_statistics", {})
209
  self.C.process(); self.I.process()
210
+ # 3) correlation
211
  self.I.send_message("ComputeAgent", "request_correlation", {})
212
  self.C.process(); self.I.process()
213
+ # 4) report
214
  self.C.send_message("InterpretAgent", "request_report", {"report_title": "Analysis Report"})
215
  self.I.process(); self.C.process()
216
+ return {
217
+ "compute_history": self.C.get_history(),
218
+ "interpret_history": self.I.get_history()
219
+ }
220
+
221
+ # ——— Gradio app entrypoint ———————————————————————————————
222
+ def run_analysis(url: str):
223
+ duo = DataAnalystDuo()
224
+ return duo.run(url)
225
+
226
+ demo = gr.Interface(
227
+ fn=run_analysis,
228
+ inputs=gr.Textbox(label="CSV URL", placeholder="https://..."),
229
+ outputs=[
230
+ gr.JSON(label="Compute & Data-Load History"),
231
+ gr.JSON(label="Interpret & Report History")
232
+ ],
233
+ title="Data Analyst Duo",
234
+ description="Load a CSV URL and see compute + interpretation steps"
235
+ )
236
+
237
+ if __name__ == "__main__":
238
+ port = int(os.environ.get("PORT", 7860))
239
+ demo.launch(server_name="0.0.0.0", port=port)