pavanmutha committed on
Commit
b9c72b0
·
verified ·
1 Parent(s): d815800

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -14
app.py CHANGED
@@ -55,7 +55,7 @@ agent = CodeAgent(
55
 
56
  def run_agent(_):
57
  if df_global is None:
58
- return "Please upload a file first."
59
 
60
  from tempfile import NamedTemporaryFile
61
  temp_file = NamedTemporaryFile(delete=False, suffix=".csv")
@@ -78,24 +78,15 @@ def run_agent(_):
78
  - File paths of the generated visualizations.
79
  """
80
 
81
- result = agent.run(
82
- prompt,
83
- additional_args={"source_file": temp_file.name}
84
- )
85
-
86
- # Extract image paths from output (assuming SmolAgent prints them)
87
- image_paths = []
88
- for line in result.splitlines():
89
- if line.strip().endswith(".png"):
90
- image_paths.append(line.strip())
91
 
 
92
  insights = "\n".join([line for line in result.splitlines() if not line.strip().endswith(".png")])
93
 
94
  return insights, image_paths
95
 
96
 
97
 
98
-
99
  def train_model(_):
100
  wandb.login(key=os.environ.get("WANDB_API_KEY"))
101
  #wandb_run = wandb.init(project="huggingface-data-analysis", name="Optuna_Run", reinit=True)
@@ -223,7 +214,6 @@ def explainability(_):
223
 
224
 
225
 
226
-
227
  with gr.Blocks() as demo:
228
  gr.Markdown("## 📊 AI-Powered Data Analysis with Hyperparameter Optimization")
229
 
@@ -247,7 +237,13 @@ with gr.Blocks() as demo:
247
  shap_img = gr.Image(label="SHAP Summary Plot")
248
  lime_img = gr.Image(label="LIME Explanation")
249
 
250
- agent_btn.click(fn=run_agent, inputs=df_output, outputs=insights_output)
 
 
 
 
 
 
251
  train_btn.click(fn=train_model, inputs=df_output, outputs=[metrics_output, trials_output])
252
  explain_btn.click(fn=explainability, inputs=df_output, outputs=[shap_img, lime_img])
253
 
 
55
 
56
  def run_agent(_):
57
  if df_global is None:
58
+ return "Please upload a file first.", []
59
 
60
  from tempfile import NamedTemporaryFile
61
  temp_file = NamedTemporaryFile(delete=False, suffix=".csv")
 
78
  - File paths of the generated visualizations.
79
  """
80
 
81
+ result = agent.run(prompt, additional_args={"source_file": temp_file.name})
 
 
 
 
 
 
 
 
 
82
 
83
+ image_paths = [line.strip() for line in result.splitlines() if line.strip().endswith(".png")]
84
  insights = "\n".join([line for line in result.splitlines() if not line.strip().endswith(".png")])
85
 
86
  return insights, image_paths
87
 
88
 
89
 
 
90
  def train_model(_):
91
  wandb.login(key=os.environ.get("WANDB_API_KEY"))
92
  #wandb_run = wandb.init(project="huggingface-data-analysis", name="Optuna_Run", reinit=True)
 
214
 
215
 
216
 
 
217
  with gr.Blocks() as demo:
218
  gr.Markdown("## 📊 AI-Powered Data Analysis with Hyperparameter Optimization")
219
 
 
237
  shap_img = gr.Image(label="SHAP Summary Plot")
238
  lime_img = gr.Image(label="LIME Explanation")
239
 
240
+ with gr.Row():
241
+ agent_btn = gr.Button("Run AI Agent (5 Insights + 5 Visualizations)")
242
+ insights_output = gr.Textbox(label="Insights from SmolAgent", lines=15)
243
+ visual_output = gr.Gallery(label="Generated Visualizations").style(grid=3, height="auto")
244
+
245
+ #agent_btn.click(fn=run_agent, inputs=df_output, outputs=insights_output)
246
+ agent_btn.click(fn=run_agent, inputs=df_output, outputs=[insights_output, visual_output])
247
  train_btn.click(fn=train_model, inputs=df_output, outputs=[metrics_output, trials_output])
248
  explain_btn.click(fn=explainability, inputs=df_output, outputs=[shap_img, lime_img])
249