Pulastya B committed on
Commit
1470b93
·
1 Parent(s): e82da77

Fix response template with strict format, fix data file downloads with proper links, fix plot URL formatting

Browse files
FRRONTEEEND/components/ChatInterface.tsx CHANGED
@@ -746,20 +746,29 @@ export const ChatInterface: React.FC<{ onBack: () => void }> = ({ onBack }) => {
746
  <h4 className="text-xs font-bold uppercase tracking-wider text-white/60">Visualizations ({allPlots.length})</h4>
747
  </div>
748
  <div className="space-y-2">
749
- {allPlots.map((plot, idx) => (
750
- <button
751
- key={idx}
752
- onClick={() => setReportModalUrl(plot.url)}
753
- className="w-full p-3 rounded-lg bg-white/5 border border-white/10 hover:bg-emerald-500/10 hover:border-emerald-500/30 transition-all text-left group"
754
- >
755
- <div className="flex items-center justify-between">
756
- <span className="text-sm text-white/80 truncate flex-1">{plot.title}</span>
757
- <ChevronRight className="w-4 h-4 text-white/40 group-hover:text-emerald-400 transition-all" />
758
- </div>
759
- <span className="text-xs text-white/40 mt-1 block">{plot.type || 'interactive'}</span>
760
- </button>
761
- ))}
762
- </div>
 
 
 
 
 
 
 
 
 
763
  </div>
764
  )}
765
 
@@ -774,21 +783,27 @@ export const ChatInterface: React.FC<{ onBack: () => void }> = ({ onBack }) => {
774
  {uniqueDataFiles.map((file, idx) => {
775
  // Extract filename from path
776
  const fileName = file.split('/').pop() || file;
777
- // Create download URL
778
- const downloadUrl = file.startsWith('/') ? file : `/${file}`;
 
 
 
 
 
779
 
780
  return (
781
- <button
782
  key={idx}
783
- onClick={() => window.open(downloadUrl, '_blank')}
784
- className="w-full p-3 rounded-lg bg-white/5 border border-white/10 hover:bg-blue-500/10 hover:border-blue-500/30 transition-all text-left group"
 
785
  >
786
  <div className="flex items-center justify-between">
787
  <span className="text-sm text-white/80 truncate flex-1">{fileName}</span>
788
  <ChevronRight className="w-4 h-4 text-white/40 group-hover:text-blue-400 transition-all" />
789
  </div>
790
  <span className="text-xs text-white/40 mt-1 block">Click to download</span>
791
- </button>
792
  );
793
  })}
794
  </div>
 
746
  <h4 className="text-xs font-bold uppercase tracking-wider text-white/60">Visualizations ({allPlots.length})</h4>
747
  </div>
748
  <div className="space-y-2">
749
+ {allPlots.map((plot, idx) => {
750
+ // Ensure URL is properly formatted
751
+ let plotUrl = plot.url;
752
+ if (plotUrl && plotUrl.startsWith('./outputs/')) {
753
+ plotUrl = plotUrl.replace('./outputs/', '/outputs/');
754
+ } else if (plotUrl && !plotUrl.startsWith('/outputs/')) {
755
+ plotUrl = `/outputs/${plotUrl.replace(/^outputs\//, '')}`;
756
+ }
757
+
758
+ return (
759
+ <button
760
+ key={idx}
761
+ onClick={() => setReportModalUrl(plotUrl || plot.url)}
762
+ className="w-full p-3 rounded-lg bg-white/5 border border-white/10 hover:bg-emerald-500/10 hover:border-emerald-500/30 transition-all text-left group"
763
+ >
764
+ <div className="flex items-center justify-between">
765
+ <span className="text-sm text-white/80 truncate flex-1">{plot.title}</span>
766
+ <ChevronRight className="w-4 h-4 text-white/40 group-hover:text-emerald-400 transition-all" />
767
+ </div>
768
+ <span className="text-xs text-white/40 mt-1 block">{plot.type || 'interactive'}</span>
769
+ </button>
770
+ );
771
+ })}\n </div>
772
  </div>
773
  )}
774
 
 
783
  {uniqueDataFiles.map((file, idx) => {
784
  // Extract filename from path
785
  const fileName = file.split('/').pop() || file;
786
+ // Create proper download URL
787
+ let downloadUrl = file;
788
+ if (downloadUrl.startsWith('./outputs/')) {
789
+ downloadUrl = downloadUrl.replace('./outputs/', '/outputs/');
790
+ } else if (!downloadUrl.startsWith('/outputs/')) {
791
+ downloadUrl = `/outputs/${file.replace(/^outputs\//, '')}`;
792
+ }
793
 
794
  return (
795
+ <a
796
  key={idx}
797
+ href={downloadUrl}
798
+ download={fileName}
799
+ className="block w-full p-3 rounded-lg bg-white/5 border border-white/10 hover:bg-blue-500/10 hover:border-blue-500/30 transition-all group"
800
  >
801
  <div className="flex items-center justify-between">
802
  <span className="text-sm text-white/80 truncate flex-1">{fileName}</span>
803
  <ChevronRight className="w-4 h-4 text-white/40 group-hover:text-blue-400 transition-all" />
804
  </div>
805
  <span className="text-xs text-white/40 mt-1 block">Click to download</span>
806
+ </a>
807
  );
808
  })}
809
  </div>
src/orchestrator.py CHANGED
@@ -1099,16 +1099,13 @@ You are a DOER. Complete workflows based on user intent."""
1099
  "url": f"/outputs/{nested_result['output_path'].replace('./outputs/', '')}"
1100
  })
1101
 
1102
- # Build enhanced text summary - start with metrics then LLM explanation
1103
- summary_lines = [
1104
- f"## 📊 Analysis Complete",
1105
- ""
1106
- ]
1107
 
1108
- # Show all baseline models comparison FIRST (before LLM summary)
1109
  if "all_models" in metrics and metrics["all_models"]:
1110
  summary_lines.extend([
1111
- "### 🔬 Baseline Models Comparison",
1112
  ""
1113
  ])
1114
 
@@ -1124,49 +1121,70 @@ You are a DOER. Complete workflows based on user intent."""
1124
  rmse = model_metrics.get("rmse", 0)
1125
  mae = model_metrics.get("mae", 0)
1126
 
1127
- # Highlight the best model with emoji
1128
  is_best = (
1129
  "best_model" in metrics and
1130
  metrics["best_model"].get("name", "") == model_name
1131
  )
1132
- prefix = "🏆 " if is_best else " "
1133
 
1134
  summary_lines.append(
1135
  f"{prefix}**{model_name.replace('_', ' ').title()}**: "
1136
- f"R²={r2:.4f}, RMSE={rmse:.4f}, MAE={mae:.4f}"
1137
  )
1138
 
1139
- summary_lines.append("")
1140
 
1141
- # Show tuned model separately if hyperparameter tuning was done
1142
  if "tuned_model" in metrics:
1143
  tuned = metrics["tuned_model"]
1144
  summary_lines.extend([
1145
- "### ⚙️ Hyperparameter Tuning Results",
1146
- f"- **Model Type**: {tuned.get('model_type', 'N/A')}",
1147
- f"- **Optimized Score**: {tuned.get('best_score', 0):.4f}",
 
 
1148
  ""
1149
  ])
1150
 
 
1151
  if "cross_validation" in metrics:
1152
  cv = metrics["cross_validation"]
1153
  summary_lines.extend([
1154
- "### ✅ Cross-Validation Results",
1155
- f"- **Mean Score**: {cv['mean_score']:.4f} (± {cv['std_score']:.4f})",
1156
- ""
1157
- ])
1158
-
1159
- # Add LLM's explanation after metrics
1160
- if llm_summary and llm_summary.strip():
1161
- summary_lines.extend([
1162
- "---",
1163
  "",
1164
- "### 📝 Analysis Summary",
1165
  "",
1166
- llm_summary,
1167
  ""
1168
  ])
1169
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1170
  # Add artifact links
1171
  if artifacts["models"]:
1172
  summary_lines.append("### �� Trained Models")
 
1099
  "url": f"/outputs/{nested_result['output_path'].replace('./outputs/', '')}"
1100
  })
1101
 
1102
+ # Build STRICT response template to prevent malformed output
1103
+ summary_lines = []
 
 
 
1104
 
1105
+ # SECTION 1: Model Performance (if models were trained)
1106
  if "all_models" in metrics and metrics["all_models"]:
1107
  summary_lines.extend([
1108
+ "## 🎯 Model Performance",
1109
  ""
1110
  ])
1111
 
 
1121
  rmse = model_metrics.get("rmse", 0)
1122
  mae = model_metrics.get("mae", 0)
1123
 
1124
+ # Highlight the best model
1125
  is_best = (
1126
  "best_model" in metrics and
1127
  metrics["best_model"].get("name", "") == model_name
1128
  )
1129
+ prefix = "🏆 " if is_best else "📊 "
1130
 
1131
  summary_lines.append(
1132
  f"{prefix}**{model_name.replace('_', ' ').title()}**: "
1133
+ f"R²={r2:.4f} | RMSE={rmse:.4f} | MAE={mae:.4f}"
1134
  )
1135
 
1136
+ summary_lines.extend(["", ""])
1137
 
1138
+ # SECTION 2: Tuning Results (if hyperparameter tuning was done)
1139
  if "tuned_model" in metrics:
1140
  tuned = metrics["tuned_model"]
1141
  summary_lines.extend([
1142
+ "## ⚙️ Hyperparameter Tuning",
1143
+ "",
1144
+ f"**Model**: {tuned.get('model_type', 'Unknown').title()}",
1145
+ f"**Optimized Score**: {tuned.get('best_score', 0):.4f}",
1146
+ "",
1147
  ""
1148
  ])
1149
 
1150
+ # SECTION 3: Cross-Validation (if performed)
1151
  if "cross_validation" in metrics:
1152
  cv = metrics["cross_validation"]
1153
  summary_lines.extend([
1154
+ "## ✅ Cross-Validation",
 
 
 
 
 
 
 
 
1155
  "",
1156
+ f"**Mean Score**: {cv['mean_score']:.4f} ± {cv['std_score']:.4f}",
1157
  "",
 
1158
  ""
1159
  ])
1160
 
1161
+ # SECTION 4: Analysis Summary (LLM explanation - cleaned)
1162
+ if llm_summary and llm_summary.strip():
1163
+ # Clean LLM summary aggressively
1164
+ cleaned_summary = llm_summary
1165
+ # Remove all file path patterns
1166
+ import re
1167
+ cleaned_summary = re.sub(r'\./outputs/[^\s\)\]]+', '', cleaned_summary)
1168
+ cleaned_summary = re.sub(r'/outputs/[^\s\)\]]+', '', cleaned_summary)
1169
+ cleaned_summary = re.sub(r'`[^`]*\.(csv|pkl|html|png|json)[^`]*`', '', cleaned_summary)
1170
+ cleaned_summary = re.sub(r'\([^\)]*\.(csv|pkl|html|png|json)[^\)]*\)', '', cleaned_summary)
1171
+ cleaned_summary = re.sub(r'Printed in logs.*?\)', '', cleaned_summary, flags=re.IGNORECASE)
1172
+ cleaned_summary = re.sub(r'\(see above\)', '', cleaned_summary, flags=re.IGNORECASE)
1173
+ cleaned_summary = re.sub(r'see above', '', cleaned_summary, flags=re.IGNORECASE)
1174
+ # Remove broken tables
1175
+ cleaned_summary = re.sub(r'^\s*\|\s*\|\s*$', '', cleaned_summary, flags=re.MULTILINE)
1176
+ cleaned_summary = re.sub(r'^\s*[-|]+\s*$', '', cleaned_summary, flags=re.MULTILINE)
1177
+ cleaned_summary = re.sub(r'\n{3,}', '\n\n', cleaned_summary)
1178
+ cleaned_summary = cleaned_summary.strip()
1179
+
1180
+ if cleaned_summary:
1181
+ summary_lines.extend([
1182
+ "## 📝 Workflow Summary",
1183
+ "",
1184
+ cleaned_summary,
1185
+ ""
1186
+ ])
1187
+
1188
  # Add artifact links
1189
  if artifacts["models"]:
1190
  summary_lines.append("### �� Trained Models")