Inframat-x committed on
Commit
a269863
·
verified ·
1 Parent(s): ed83d97

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +186 -8
app.py CHANGED
@@ -6,6 +6,9 @@
6
  # - Stable categoricals ("NA"); no over-strict completeness gate
7
  # - Fixed [[PAGE=...]] regex
8
  # - NEW: Lightweight instrumentation (JSONL logs per RAG turn)
 
 
 
9
  # ================================================================
10
 
11
  # ---------------------- Runtime flags (HF-safe) ----------------------
@@ -17,7 +20,7 @@ os.environ["TOKENIZERS_PARALLELISM"] = "false"
17
  # ------------------------------- Imports ------------------------------
18
  import re, joblib, warnings, json, traceback, time, uuid, subprocess, sys
19
  from pathlib import Path
20
- from typing import List, Dict, Any
21
 
22
  import numpy as np
23
  import pandas as pd
@@ -740,7 +743,7 @@ def rag_reply(
740
  "retrieval": {"hits": retr_list, "latency_ms_retriever": latency_ms_retriever},
741
  "output": {
742
  "final_answer": final,
743
- "used_sentences": [{"sent": s["sent"], "doc": s["doc"], "page": s["page"]} for s in selected]
744
  },
745
  "latency_ms_total": total_ms,
746
  "latency_ms_llm": llm_latency_ms,
@@ -818,7 +821,7 @@ input[type="checkbox"], .gr-checkbox, .gr-checkbox > * { pointer-events: auto !i
818
  color: #eef6ff !important;
819
  }
820
 
821
- /* NEW — Evaluate tab dark/high-contrast styling */
822
  #eval-tab .block, #eval-tab .group, #eval-tab .accordion {
823
  background: linear-gradient(165deg, #0a0f1f 0%, #0d1a31 60%, #0a1c2e 100%) !important;
824
  border-radius: 12px;
@@ -849,6 +852,142 @@ input[type="checkbox"], .gr-checkbox, .gr-checkbox > * { pointer-events: auto !i
849
 
850
  /* Predictor output emphasis */
851
  #pred-out .wrap { font-size: 20px; font-weight: 700; color: #ecfdf5; }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
852
  """
853
 
854
  theme = gr.themes.Soft(
@@ -866,6 +1005,31 @@ theme = gr.themes.Soft(
866
  )
867
 
868
  with gr.Blocks(css=CSS, theme=theme, fill_height=True) as demo:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
869
  gr.Markdown(
870
  "<h1 style='margin:0'>Self-Sensing Concrete Assistant</h1>"
871
  "<p style='opacity:.9'>"
@@ -987,14 +1151,14 @@ with gr.Blocks(css=CSS, theme=theme, fill_height=True) as demo:
987
  gr.Markdown("Upload your **gold.csv** and compute metrics against the app logs.")
988
  with gr.Row():
989
  gold_file = gr.File(label="gold.csv", file_types=[".csv"], interactive=True)
990
- k_slider = gr.Slider(3, 12, value=8, step=1, label="k for Hit/Recall/nDCG")
991
  with gr.Row():
992
  btn_eval = gr.Button("Compute Metrics", variant="primary")
993
  with gr.Row():
994
- out_perq = gr.File(label="Per-question metrics (CSV)")
995
- out_agg = gr.File(label="Aggregate metrics (JSON)")
996
- out_json = gr.JSON(label="Aggregate summary")
997
- out_log = gr.Markdown(label="Run log")
998
 
999
  def _run_eval_inproc(gold_path: str, k: int = 8):
1000
  import json as _json
@@ -1041,3 +1205,17 @@ with gr.Blocks(css=CSS, theme=theme, fill_height=True) as demo:
1041
  # ------------- Launch -------------
1042
  if __name__ == "__main__":
1043
  demo.queue().launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6
  # - Stable categoricals ("NA"); no over-strict completeness gate
7
  # - Fixed [[PAGE=...]] regex
8
  # - NEW: Lightweight instrumentation (JSONL logs per RAG turn)
9
+ # - UPDATED THEME: Dark-blue tabs + Evaluate tab + k-slider styling
10
+ # - PATCH: Per-question/aggregate File + JSON outputs now dark-themed via elem_id hooks
11
+ # - OPTIONAL JS: Adds .eval-active class when Evaluate tab is selected
12
  # ================================================================
13
 
14
  # ---------------------- Runtime flags (HF-safe) ----------------------
 
20
  # ------------------------------- Imports ------------------------------
21
  import re, joblib, warnings, json, traceback, time, uuid, subprocess, sys
22
  from pathlib import Path
23
+ from typing import List, Dict, Any, Optional
24
 
25
  import numpy as np
26
  import pandas as pd
 
743
  "retrieval": {"hits": retr_list, "latency_ms_retriever": latency_ms_retriever},
744
  "output": {
745
  "final_answer": final,
746
+ "used_sentences": [{"sent": s['sent'], "doc": s['doc'], "page": s['page']} for s in selected]
747
  },
748
  "latency_ms_total": total_ms,
749
  "latency_ms_llm": llm_latency_ms,
 
821
  color: #eef6ff !important;
822
  }
823
 
824
+ /* Evaluate tab dark/high-contrast styling */
825
  #eval-tab .block, #eval-tab .group, #eval-tab .accordion {
826
  background: linear-gradient(165deg, #0a0f1f 0%, #0d1a31 60%, #0a1c2e 100%) !important;
827
  border-radius: 12px;
 
852
 
853
  /* Predictor output emphasis */
854
  #pred-out .wrap { font-size: 20px; font-weight: 700; color: #ecfdf5; }
855
+
856
+ /* Tab header: darker blue theme for all tabs */
857
+ .gradio-container .tab-nav button[role="tab"] {
858
+ background: #0b1b34 !important;
859
+ color: #cfe6ff !important;
860
+ border: 1px solid #1e3a8a !important;
861
+ }
862
+ .gradio-container .tab-nav button[role="tab"][aria-selected="true"] {
863
+ background: #0e2a57 !important;
864
+ color: #e0f2fe !important;
865
+ border-color: #3b82f6 !important;
866
+ }
867
+
868
+ /* Evaluate tab: enforce dark-blue text for labels/marks */
869
+ #eval-tab .label,
870
+ #eval-tab label,
871
+ #eval-tab .gr-slider .label,
872
+ #eval-tab .wrap .label,
873
+ #eval-tab .prose,
874
+ #eval-tab .markdown,
875
+ #eval-tab p,
876
+ #eval-tab span {
877
+ color: #cfe6ff !important; /* softer than pure white */
878
+ }
879
+
880
+ /* Target the specific k-slider label strongly */
881
+ #k-slider .label,
882
+ #k-slider label,
883
+ #k-slider .wrap .label {
884
+ color: #cfe6ff !important;
885
+ text-shadow: 0 1px 0 rgba(0,0,0,0.35);
886
+ }
887
+
888
+ /* Slider track/thumb (dark blue gradient + blue thumb) */
889
+ #eval-tab input[type="range"] {
890
+ accent-color: #3b82f6 !important; /* fallback */
891
+ }
892
+
893
+ /* WebKit */
894
+ #eval-tab input[type="range"]::-webkit-slider-runnable-track {
895
+ height: 6px;
896
+ background: linear-gradient(90deg, #0b3b68, #1e3a8a);
897
+ border-radius: 4px;
898
+ }
899
+ #eval-tab input[type="range"]::-webkit-slider-thumb {
900
+ -webkit-appearance: none;
901
+ appearance: none;
902
+ margin-top: -6px; /* centers thumb on 6px track */
903
+ width: 18px; height: 18px;
904
+ background: #1d4ed8;
905
+ border: 1px solid #60a5fa;
906
+ border-radius: 50%;
907
+ }
908
+
909
+ /* Firefox */
910
+ #eval-tab input[type="range"]::-moz-range-track {
911
+ height: 6px;
912
+ background: linear-gradient(90deg, #0b3b68, #1e3a8a);
913
+ border-radius: 4px;
914
+ }
915
+ #eval-tab input[type="range"]::-moz-range-thumb {
916
+ width: 18px; height: 18px;
917
+ background: #1d4ed8;
918
+ border: 1px solid #60a5fa;
919
+ border-radius: 50%;
920
+ }
921
+
922
+ /* ======== PATCH: Style the File + JSON outputs by ID ======== */
923
+ #perq-file, #agg-file {
924
+ background: rgba(8, 13, 26, 0.9) !important;
925
+ border: 1px solid #3b82f6 !important;
926
+ border-radius: 12px !important;
927
+ padding: 8px !important;
928
+ }
929
+ #perq-file * , #agg-file * { color: #dbeafe !important; }
930
+ #perq-file a, #agg-file a {
931
+ background: #0e2a57 !important;
932
+ color: #e0f2fe !important;
933
+ border: 1px solid #60a5fa !important;
934
+ border-radius: 8px !important;
935
+ padding: 6px 10px !important;
936
+ text-decoration: none !important;
937
+ }
938
+ #perq-file a:hover, #agg-file a:hover {
939
+ background: #10356f !important;
940
+ border-color: #93c5fd !important;
941
+ }
942
+ /* File preview wrappers (covers multiple Gradio render modes) */
943
+ #perq-file .file-preview, #agg-file .file-preview,
944
+ #perq-file .wrap, #agg-file .wrap {
945
+ background: rgba(2, 6, 23, 0.85) !important;
946
+ border-radius: 10px !important;
947
+ border: 1px solid rgba(148,163,184,.3) !important;
948
+ }
949
+
950
+ /* JSON output: dark panel + readable text */
951
+ #agg-json {
952
+ background: rgba(2, 6, 23, 0.85) !important;
953
+ border: 1px solid rgba(148,163,184,.35) !important;
954
+ border-radius: 12px !important;
955
+ padding: 8px !important;
956
+ }
957
+ #agg-json *, #agg-json .json, #agg-json .wrap { color: #e6f2ff !important; }
958
+ #agg-json pre, #agg-json code {
959
+ background: rgba(4, 10, 24, 0.9) !important;
960
+ color: #e2e8f0 !important;
961
+ border: 1px solid rgba(148,163,184,.35) !important;
962
+ border-radius: 10px !important;
963
+ }
964
+ /* Tree/overflow modes */
965
+ #agg-json [data-testid="json-tree"],
966
+ #agg-json [role="tree"],
967
+ #agg-json .overflow-auto {
968
+ background: rgba(4, 10, 24, 0.9) !important;
969
+ color: #e6f2ff !important;
970
+ border-radius: 10px !important;
971
+ border: 1px solid rgba(148,163,184,.35) !important;
972
+ }
973
+
974
+ /* Eval log markdown */
975
+ #eval-log, #eval-log * { color: #cfe6ff !important; }
976
+ #eval-log pre, #eval-log code {
977
+ background: rgba(2, 6, 23, 0.85) !important;
978
+ color: #e2e8f0 !important;
979
+ border: 1px solid rgba(148,163,184,.3) !important;
980
+ border-radius: 10px !important;
981
+ }
982
+
983
+ /* When Evaluate tab is active and JS has added .eval-active, bump contrast subtly */
984
+ #eval-tab.eval-active .block,
985
+ #eval-tab.eval-active .group {
986
+ border-color: #60a5fa !important;
987
+ }
988
+ #eval-tab.eval-active .label {
989
+ color: #e6f2ff !important;
990
+ }
991
  """
992
 
993
  theme = gr.themes.Soft(
 
1005
  )
1006
 
1007
  with gr.Blocks(css=CSS, theme=theme, fill_height=True) as demo:
1008
+ # Optional: JS to toggle .eval-active when Evaluate tab selected
1009
+ gr.HTML("""
1010
+ <script>
1011
+ (function(){
1012
+ const applyEvalActive = () => {
1013
+ const selected = document.querySelector('.tab-nav button[role="tab"][aria-selected="true"]');
1014
+ const evalPanel = document.querySelector('#eval-tab');
1015
+ if (!evalPanel) return;
1016
+ if (selected && /Evaluate/.test(selected.textContent)) {
1017
+ evalPanel.classList.add('eval-active');
1018
+ } else {
1019
+ evalPanel.classList.remove('eval-active');
1020
+ }
1021
+ };
1022
+ document.addEventListener('click', function(e) {
1023
+ if (e.target && e.target.getAttribute('role') === 'tab') {
1024
+ setTimeout(applyEvalActive, 50);
1025
+ }
1026
+ }, true);
1027
+ document.addEventListener('DOMContentLoaded', applyEvalActive);
1028
+ setTimeout(applyEvalActive, 300);
1029
+ })();
1030
+ </script>
1031
+ """)
1032
+
1033
  gr.Markdown(
1034
  "<h1 style='margin:0'>Self-Sensing Concrete Assistant</h1>"
1035
  "<p style='opacity:.9'>"
 
1151
  gr.Markdown("Upload your **gold.csv** and compute metrics against the app logs.")
1152
  with gr.Row():
1153
  gold_file = gr.File(label="gold.csv", file_types=[".csv"], interactive=True)
1154
+ k_slider = gr.Slider(3, 12, value=8, step=1, label="k for Hit/Recall/nDCG", elem_id="k-slider")
1155
  with gr.Row():
1156
  btn_eval = gr.Button("Compute Metrics", variant="primary")
1157
  with gr.Row():
1158
+ out_perq = gr.File(label="Per-question metrics (CSV)", elem_id="perq-file")
1159
+ out_agg = gr.File(label="Aggregate metrics (JSON)", elem_id="agg-file")
1160
+ out_json = gr.JSON(label="Aggregate summary", elem_id="agg-json")
1161
+ out_log = gr.Markdown(label="Run log", elem_id="eval-log")
1162
 
1163
  def _run_eval_inproc(gold_path: str, k: int = 8):
1164
  import json as _json
 
1205
  # ------------- Launch -------------
1206
  if __name__ == "__main__":
1207
  demo.queue().launch()
1208
# ------------- Paper inventory export (corpus listing for eval) -------------
# NOTE(review): this sits AFTER `demo.queue().launch()`, which blocks until the
# server shuts down — as written it only runs on shutdown (or at import time
# when the module is not __main__). Move it above the __main__ guard if it is
# meant to run at startup.
import os
import pandas as pd

# Folder where the RAG source documents are stored.
folder = "papers"  # change if needed

if os.path.isdir(folder):
    # Sorted for a stable, diff-friendly listing across runs.
    files = sorted(os.listdir(folder))

    # Persist the corpus inventory so evaluation tooling can reference it.
    pd.DataFrame({"doc": files}).to_csv("paper_list.csv", index=False)
    print("✅ Saved paper_list.csv with", len(files), "papers")
else:
    # Previously this crashed with FileNotFoundError when the folder was
    # absent; degrade to a warning instead so app shutdown stays clean.
    print(f"⚠️ Folder '{folder}' not found; skipped writing paper_list.csv")