Spaces:
Paused
Paused
Update app.py
Browse files
app.py
CHANGED
|
@@ -17,6 +17,10 @@ FAISS_PATH = os.environ.get("FAISS_PATH", f"{ASSETS_DIR}/index.faiss")
|
|
| 17 |
META_PATH = os.environ.get("META_PATH", f"{ASSETS_DIR}/index_meta.filtered.jsonl")
|
| 18 |
REL_CONFIG_PATH = os.environ.get("REL_CONFIG_PATH", f"{ASSETS_DIR}/relevance_config.json")
|
| 19 |
QUANTIZE = os.environ.get("QUANTIZE", "4bit") # "none" | "8bit" | "4bit"
|
|
|
|
|
|
|
|
|
|
|
|
|
| 20 |
|
| 21 |
# Models
|
| 22 |
BASE_MODEL = os.environ.get("BASE_MODEL", "mistralai/Mistral-7B-Instruct-v0.2")
|
|
@@ -810,35 +814,59 @@ def enter_app(first_name, last_name, state):
|
|
| 810 |
state["last_name"] = last_name
|
| 811 |
return gr.update(visible=False), gr.update(visible=True), state, f"Welcome, {first_name}! You can start chatting."
|
| 812 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 813 |
def predict(message, chat_history, state):
|
| 814 |
-
|
| 815 |
-
|
| 816 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 817 |
state["last_a"] = answer
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 818 |
return (
|
| 819 |
chat_history,
|
| 820 |
-
"",
|
| 821 |
-
gr.update(visible=True),
|
| 822 |
-
gr.update(value=None),
|
| 823 |
-
gr.update(value=""),
|
| 824 |
state
|
| 825 |
)
|
| 826 |
|
|
|
|
| 827 |
def _push_feedback_to_hub():
|
| 828 |
-
"""
|
| 829 |
-
if not
|
| 830 |
return
|
| 831 |
-
|
| 832 |
-
|
| 833 |
-
api.upload_file(
|
| 834 |
-
path_or_fileobj=FEEDBACK_PATH,
|
| 835 |
-
path_in_repo="analytics/feedback.csv",
|
| 836 |
-
repo_id=SPACE_REPO_ID,
|
| 837 |
-
repo_type="space",
|
| 838 |
-
)
|
| 839 |
-
dlog("FEEDBACK", "Uploaded analytics/feedback.csv to Hub")
|
| 840 |
-
except Exception as e:
|
| 841 |
-
dlog("FEEDBACK", f"Hub upload failed: {e}")
|
| 842 |
|
| 843 |
|
| 844 |
def save_feedback(rating, comment, state):
|
|
|
|
| 17 |
# Paths to retrieval assets; each may be overridden via the environment.
META_PATH = os.getenv("META_PATH", f"{ASSETS_DIR}/index_meta.filtered.jsonl")
REL_CONFIG_PATH = os.getenv("REL_CONFIG_PATH", f"{ASSETS_DIR}/relevance_config.json")
QUANTIZE = os.getenv("QUANTIZE", "4bit")  # one of: "none" | "8bit" | "4bit"

# --- Turn logging ---
TRANSCRIPT_PATH = os.getenv("TRANSCRIPT_PATH", "transcripts.jsonl")
PUSH_TRANSCRIPTS = os.getenv("PUSH_TRANSCRIPTS", "1") == "1"  # set to "0" to disable

# Models
BASE_MODEL = os.getenv("BASE_MODEL", "mistralai/Mistral-7B-Instruct-v0.2")
|
|
|
|
| 814 |
state["last_name"] = last_name
|
| 815 |
return gr.update(visible=False), gr.update(visible=True), state, f"Welcome, {first_name}! You can start chatting."
|
| 816 |
|
| 817 |
+
def _log_turn(state: Dict[str, Any], question: str, answer: str):
    """Append one Q/A turn to the local transcript JSONL file.

    Each record carries a UTC timestamp plus the session/user identity held
    in ``state``. When PUSH_TRANSCRIPTS is enabled the whole transcript file
    is mirrored to ``analytics/transcripts.jsonl`` in the Space repo.
    """
    entry = {
        "timestamp_utc": _now_iso(),
        "session_id": state.get("session_id", ""),
        "first_name": state.get("first_name", ""),
        "last_name": state.get("last_name", ""),
        "question": question,
        "answer": answer,
    }
    line = json.dumps(entry, ensure_ascii=False)
    with open(TRANSCRIPT_PATH, "a", encoding="utf-8") as fh:
        fh.write(line + "\n")

    if PUSH_TRANSCRIPTS:
        _push_file_to_hub(TRANSCRIPT_PATH, "analytics/transcripts.jsonl")
|
| 831 |
+
|
| 832 |
+
|
| 833 |
def predict(message, chat_history, state):
    """Handle one chat turn.

    Returns a 6-tuple for the Gradio outputs: updated chat history, cleared
    input box, feedback-pane visibility, reset rating, reset comment, and
    the (mutated) session state.
    """
    text = (message or "").strip()
    if not text:
        # Blank submission: leave the chat untouched and keep feedback hidden.
        return chat_history, "", gr.update(visible=False), None, "", state

    try:
        reply = ask(text)
    except Exception as exc:
        reply = f"Sorry — something went wrong: {exc!r}"

    history = list(chat_history or [])
    history.append((text, reply))
    state["last_q"] = text
    state["last_a"] = reply

    # Best-effort turn logging — never let a logging failure break the chat.
    try:
        _log_turn(state, text, reply)
    except Exception:
        pass

    return (
        history,
        "",                        # clear input
        gr.update(visible=True),   # show feedback pane
        gr.update(value=None),     # reset rating
        gr.update(value=""),       # reset comment
        state,
    )
|
| 862 |
|
| 863 |
+
|
| 864 |
def _push_feedback_to_hub():
    """Upload feedback.csv to analytics/feedback.csv in this Space repo (if enabled)."""
    if PUSH_FEEDBACK:
        _push_file_to_hub(FEEDBACK_PATH, "analytics/feedback.csv")
|
| 869 |
+
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 870 |
|
| 871 |
|
| 872 |
def save_feedback(rating, comment, state):
|