Commit
·
b4d57a6
1
Parent(s):
7006210
update
Browse files- .query_history.json +26 -0
- .uvicorn.log +88 -0
- PROJECT_COMPLETE.md +1 -0
- app/api.py +94 -27
- app/rag_service.py +167 -2
- app/static/app.js +83 -0
- app/static/index.html +67 -0
- scripts/smoke_check.py +45 -0
- uvicorn.log +0 -0
.query_history.json
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[
|
| 2 |
+
{
|
| 3 |
+
"query": "תסווג את התלונות 5 סוגים",
|
| 4 |
+
"response": {
|
| 5 |
+
"summary": "הקלות שבה ניתן להתלונן שאפשר בקלות להגיש תלונה רצוי לאפשר הגשה של כמה תלונות יחד, ממספרים שונים כדי לחסוך בירוקרטיה.\nתודה\nחנה שדות בחירת התלונה מינימליות אז איפה אפשר לכתוב תודות פרט לתלונות?"
|
| 6 |
+
}
|
| 7 |
+
},
|
| 8 |
+
{
|
| 9 |
+
"query": "תסווג את התלונות 5 סוגים",
|
| 10 |
+
"response": {
|
| 11 |
+
"summary": "הקלות שבה ניתן להתלונן שאפשר בקלות להגיש תלונה רצוי לאפשר הגשה של כמה תלונות יחד, ממספרים שונים כדי לחסוך בירוקרטיה.\nתודה\nחנה שדות בחירת התלונה מינימליות אז איפה אפשר לכתוב תודות פרט לתלונות?"
|
| 12 |
+
}
|
| 13 |
+
},
|
| 14 |
+
{
|
| 15 |
+
"query": "תסווג את התלונות 5 סוגים",
|
| 16 |
+
"response": {
|
| 17 |
+
"summary": "הקלות שבה ניתן להתלונן שאפשר בקלות להגיש תלונה רצוי לאפשר הגשה של כמה תלונות יחד, ממספרים שונים כדי לחסוך בירוקרטיה.\nתודה\nחנה שדות בחירת התלונה מינימליות אז איפה אפשר לכתוב תודות פרט לתלונות?"
|
| 18 |
+
}
|
| 19 |
+
},
|
| 20 |
+
{
|
| 21 |
+
"query": "תסווג את התלונות 5 סוגים",
|
| 22 |
+
"response": {
|
| 23 |
+
"summary": "הקלות שבה ניתן להתלונן שאפשר בקלות להגיש תלונה רצוי לאפשר הגשה של כמה תלונות יחד, ממספרים שונים כדי לחסוך בירוקרטיה.\nתודה\nחנה שדות בחירת התלונה מינימליות אז איפה אפשר לכתוב תודות פרט לתלונות?"
|
| 24 |
+
}
|
| 25 |
+
}
|
| 26 |
+
]
|
.uvicorn.log
ADDED
|
@@ -0,0 +1,88 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
INFO: Started server process [70640]
|
| 2 |
+
INFO: Waiting for application startup.
|
| 3 |
+
INFO: Application startup complete.
|
| 4 |
+
INFO: Uvicorn running on http://0.0.0.0:8000 (Press CTRL+C to quit)
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...
|
| 8 |
+
To disable this warning, you can either:
|
| 9 |
+
- Avoid using `tokenizers` before the fork if possible
|
| 10 |
+
- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)
|
| 11 |
+
INFO: 127.0.0.1:55678 - "POST /query HTTP/1.1" 200 OK
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
INFO: 127.0.0.1:55706 - "POST /query HTTP/1.1" 200 OK
|
| 15 |
+
INFO: 127.0.0.1:55717 - "GET / HTTP/1.1" 200 OK
|
| 16 |
+
INFO: 127.0.0.1:55717 - "GET /static/app.js HTTP/1.1" 200 OK
|
| 17 |
+
INFO: 127.0.0.1:55717 - "POST /health HTTP/1.1" 200 OK
|
| 18 |
+
INFO: 127.0.0.1:55717 - "GET /history HTTP/1.1" 200 OK
|
| 19 |
+
|
| 20 |
+
INFO: 127.0.0.1:55717 - "POST /query HTTP/1.1" 200 OK
|
| 21 |
+
INFO: 127.0.0.1:55717 - "GET /history HTTP/1.1" 200 OK
|
| 22 |
+
|
| 23 |
+
INFO: 127.0.0.1:55726 - "POST /query HTTP/1.1" 200 OK
|
| 24 |
+
INFO: 127.0.0.1:55737 - "GET / HTTP/1.1" 200 OK
|
| 25 |
+
INFO: 127.0.0.1:55737 - "GET /static/app.js HTTP/1.1" 304 Not Modified
|
| 26 |
+
INFO: 127.0.0.1:55737 - "POST /health HTTP/1.1" 200 OK
|
| 27 |
+
INFO: 127.0.0.1:55737 - "GET /history HTTP/1.1" 200 OK
|
| 28 |
+
INFO: 127.0.0.1:55737 - "GET / HTTP/1.1" 200 OK
|
| 29 |
+
INFO: 127.0.0.1:55737 - "POST /health HTTP/1.1" 200 OK
|
| 30 |
+
INFO: 127.0.0.1:55737 - "GET /history HTTP/1.1" 200 OK
|
| 31 |
+
|
| 32 |
+
INFO: 127.0.0.1:55737 - "POST /query HTTP/1.1" 200 OK
|
| 33 |
+
INFO: 127.0.0.1:55737 - "GET /history HTTP/1.1" 200 OK
|
| 34 |
+
INFO: 127.0.0.1:55737 - "GET / HTTP/1.1" 200 OK
|
| 35 |
+
INFO: 127.0.0.1:55737 - "POST /health HTTP/1.1" 200 OK
|
| 36 |
+
INFO: 127.0.0.1:55737 - "GET /history HTTP/1.1" 200 OK
|
| 37 |
+
INFO: 127.0.0.1:55737 - "POST /history/clear HTTP/1.1" 200 OK
|
| 38 |
+
INFO: 127.0.0.1:55737 - "GET /history HTTP/1.1" 200 OK
|
| 39 |
+
INFO: 127.0.0.1:55737 - "POST /history/clear HTTP/1.1" 200 OK
|
| 40 |
+
INFO: 127.0.0.1:55737 - "GET /history HTTP/1.1" 200 OK
|
| 41 |
+
INFO: 127.0.0.1:55737 - "POST /history/clear HTTP/1.1" 200 OK
|
| 42 |
+
INFO: 127.0.0.1:55737 - "GET /history HTTP/1.1" 200 OK
|
| 43 |
+
INFO: 127.0.0.1:55737 - "POST /history/clear HTTP/1.1" 200 OK
|
| 44 |
+
INFO: 127.0.0.1:55737 - "GET /history HTTP/1.1" 200 OK
|
| 45 |
+
INFO: 127.0.0.1:55752 - "GET /apple-touch-icon-precomposed.png HTTP/1.1" 404 Not Found
|
| 46 |
+
INFO: 127.0.0.1:55753 - "GET /apple-touch-icon.png HTTP/1.1" 404 Not Found
|
| 47 |
+
INFO: 127.0.0.1:55754 - "GET /favicon.ico HTTP/1.1" 404 Not Found
|
| 48 |
+
INFO: 127.0.0.1:55795 - "GET / HTTP/1.1" 200 OK
|
| 49 |
+
INFO: 127.0.0.1:55795 - "GET /static/app.js HTTP/1.1" 200 OK
|
| 50 |
+
INFO: 127.0.0.1:55795 - "POST /health HTTP/1.1" 200 OK
|
| 51 |
+
INFO: 127.0.0.1:55795 - "GET /history HTTP/1.1" 200 OK
|
| 52 |
+
INFO: 127.0.0.1:55795 - "GET /favicon.ico HTTP/1.1" 404 Not Found
|
| 53 |
+
|
| 54 |
+
INFO: 127.0.0.1:55795 - "POST /query HTTP/1.1" 200 OK
|
| 55 |
+
INFO: 127.0.0.1:55795 - "GET /history HTTP/1.1" 200 OK
|
| 56 |
+
|
| 57 |
+
INFO: 127.0.0.1:55821 - "POST /query HTTP/1.1" 200 OK
|
| 58 |
+
INFO: 127.0.0.1:55821 - "GET /history HTTP/1.1" 200 OK
|
| 59 |
+
|
| 60 |
+
INFO: 127.0.0.1:55824 - "POST /query HTTP/1.1" 200 OK
|
| 61 |
+
INFO: 127.0.0.1:55824 - "GET /history HTTP/1.1" 200 OK
|
| 62 |
+
|
| 63 |
+
INFO: 127.0.0.1:55824 - "POST /query HTTP/1.1" 200 OK
|
| 64 |
+
INFO: 127.0.0.1:55824 - "GET /history HTTP/1.1" 200 OK
|
| 65 |
+
|
| 66 |
+
INFO: 127.0.0.1:55824 - "POST /query HTTP/1.1" 200 OK
|
| 67 |
+
INFO: 127.0.0.1:55824 - "GET /history HTTP/1.1" 200 OK
|
| 68 |
+
|
| 69 |
+
INFO: 127.0.0.1:55839 - "POST /query HTTP/1.1" 200 OK
|
| 70 |
+
INFO: 127.0.0.1:55839 - "GET /history HTTP/1.1" 200 OK
|
| 71 |
+
INFO: 127.0.0.1:55846 - "POST /history/clear HTTP/1.1" 200 OK
|
| 72 |
+
INFO: 127.0.0.1:55846 - "GET /history HTTP/1.1" 200 OK
|
| 73 |
+
INFO: 127.0.0.1:55846 - "POST /history/clear HTTP/1.1" 200 OK
|
| 74 |
+
INFO: 127.0.0.1:55846 - "GET /history HTTP/1.1" 200 OK
|
| 75 |
+
INFO: 127.0.0.1:55846 - "POST /history/clear HTTP/1.1" 200 OK
|
| 76 |
+
INFO: 127.0.0.1:55846 - "GET /history HTTP/1.1" 200 OK
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
INFO: 127.0.0.1:55846 - "POST /query HTTP/1.1" 200 OK
|
| 81 |
+
INFO: 127.0.0.1:55846 - "GET /history HTTP/1.1" 200 OK
|
| 82 |
+
INFO: 127.0.0.1:55850 - "POST /query HTTP/1.1" 200 OK
|
| 83 |
+
INFO: 127.0.0.1:55850 - "GET /history HTTP/1.1" 200 OK
|
| 84 |
+
|
| 85 |
+
INFO: 127.0.0.1:55849 - "POST /query HTTP/1.1" 200 OK
|
| 86 |
+
INFO: 127.0.0.1:55849 - "GET /history HTTP/1.1" 200 OK
|
| 87 |
+
INFO: 127.0.0.1:55850 - "POST /query HTTP/1.1" 200 OK
|
| 88 |
+
INFO: 127.0.0.1:55850 - "GET /history HTTP/1.1" 200 OK
|
PROJECT_COMPLETE.md
CHANGED
|
@@ -30,6 +30,7 @@ Build a **Feedback Analysis RAG Agent** that:
|
|
| 30 |
- [x] Topic extraction (k-means clustering)
|
| 31 |
- [x] Sentiment analysis (multilingual)
|
| 32 |
- [x] Error handling and validation
|
|
|
|
| 33 |
|
| 34 |
### Infrastructure (Complete)
|
| 35 |
- [x] Virtual environment setup (.venv)
|
|
|
|
| 30 |
- [x] Topic extraction (k-means clustering)
|
| 31 |
- [x] Sentiment analysis (multilingual)
|
| 32 |
- [x] Error handling and validation
|
| 33 |
+
- [x] Free-form RAG synthesizer (analyst-style, broader-context responses)
|
| 34 |
|
| 35 |
### Infrastructure (Complete)
|
| 36 |
- [x] Virtual environment setup (.venv)
|
app/api.py
CHANGED
|
@@ -4,9 +4,12 @@ from typing import List, Optional, Dict, Any
|
|
| 4 |
|
| 5 |
import numpy as np
|
| 6 |
import pandas as pd
|
| 7 |
-
from fastapi import FastAPI, Query
|
| 8 |
-
from fastapi.responses import ORJSONResponse
|
| 9 |
-
from
|
|
|
|
|
|
|
|
|
|
| 10 |
|
| 11 |
from .config import settings
|
| 12 |
from .data_loader import load_feedback
|
|
@@ -21,16 +24,36 @@ app = FastAPI(title="Feedback Analysis RAG Agent", version="1.0.0", default_resp
|
|
| 21 |
svc = RAGService()
|
| 22 |
embedder = svc.embedder
|
| 23 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 24 |
|
| 25 |
class QueryRequest(BaseModel):
|
| 26 |
-
query: str
|
| 27 |
-
top_k: int = 5
|
| 28 |
|
| 29 |
|
| 30 |
class QueryResponse(BaseModel):
|
| 31 |
query: str
|
| 32 |
summary: Optional[str]
|
| 33 |
-
results:
|
|
|
|
| 34 |
|
| 35 |
|
| 36 |
@app.post("/health")
|
|
@@ -55,40 +78,54 @@ def ingest() -> Dict[str, Any]:
|
|
| 55 |
|
| 56 |
|
| 57 |
@app.post("/query", response_model=QueryResponse)
|
| 58 |
-
def query(req: QueryRequest) -> QueryResponse:
|
| 59 |
-
"""Free-form question answering over feedback data.
|
|
|
|
|
|
|
|
|
|
|
|
|
| 60 |
try:
|
| 61 |
# Use the higher-level answer pipeline which can handle counts and keyword queries
|
| 62 |
out = svc.answer(req.query, top_k=req.top_k)
|
| 63 |
-
|
| 64 |
-
|
| 65 |
-
|
| 66 |
-
|
| 67 |
-
|
| 68 |
-
|
| 69 |
-
|
| 70 |
-
|
| 71 |
-
|
| 72 |
-
|
| 73 |
-
|
| 74 |
-
|
| 75 |
-
)
|
| 76 |
except FileNotFoundError:
|
| 77 |
-
|
| 78 |
query=req.query,
|
| 79 |
summary="Error: Vector index not found. Please run /ingest first.",
|
| 80 |
-
results=[]
|
| 81 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 82 |
except Exception as e:
|
| 83 |
-
|
| 84 |
query=req.query,
|
| 85 |
summary=f"Error: {str(e)}",
|
| 86 |
-
results=[]
|
| 87 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 88 |
|
| 89 |
|
| 90 |
class TopicsRequest(BaseModel):
|
| 91 |
-
num_topics: int = 5
|
| 92 |
|
| 93 |
|
| 94 |
@app.post("/topics")
|
|
@@ -183,7 +220,7 @@ def topics(req: TopicsRequest) -> Dict[str, Any]:
|
|
| 183 |
|
| 184 |
|
| 185 |
class SentimentRequest(BaseModel):
|
| 186 |
-
limit: int = 100
|
| 187 |
|
| 188 |
|
| 189 |
@app.post("/sentiment")
|
|
@@ -198,3 +235,33 @@ def sentiment(req: SentimentRequest) -> Dict[str, Any]:
|
|
| 198 |
out = analyze_sentiments(texts)
|
| 199 |
return {"count": len(out), "results": out}
|
| 200 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 4 |
|
| 5 |
import numpy as np
|
| 6 |
import pandas as pd
|
| 7 |
+
from fastapi import FastAPI, Query, Request
|
| 8 |
+
from fastapi.responses import ORJSONResponse, HTMLResponse
|
| 9 |
+
from fastapi.staticfiles import StaticFiles
|
| 10 |
+
import json
|
| 11 |
+
from pathlib import Path
|
| 12 |
+
from pydantic import BaseModel, Field
|
| 13 |
|
| 14 |
from .config import settings
|
| 15 |
from .data_loader import load_feedback
|
|
|
|
| 24 |
svc = RAGService()
|
| 25 |
embedder = svc.embedder
|
| 26 |
|
| 27 |
+
# Simple in-memory history persisted best-effort to `.query_history.json`
|
| 28 |
+
history_file = Path(".query_history.json")
|
| 29 |
+
history = []
|
| 30 |
+
if history_file.exists():
|
| 31 |
+
try:
|
| 32 |
+
with history_file.open("r", encoding="utf-8") as f:
|
| 33 |
+
history = json.load(f)
|
| 34 |
+
except Exception:
|
| 35 |
+
history = []
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
def save_history() -> None:
|
| 39 |
+
try:
|
| 40 |
+
with history_file.open("w", encoding="utf-8") as f:
|
| 41 |
+
json.dump(history, f, ensure_ascii=False, indent=2)
|
| 42 |
+
except Exception:
|
| 43 |
+
# best-effort persistence; ignore errors
|
| 44 |
+
pass
|
| 45 |
+
|
| 46 |
|
| 47 |
class QueryRequest(BaseModel):
|
| 48 |
+
query: str = Field(..., example="תסווג את התלונות 5 סוגים")
|
| 49 |
+
top_k: int = Field(5, example=5)
|
| 50 |
|
| 51 |
|
| 52 |
class QueryResponse(BaseModel):
|
| 53 |
query: str
|
| 54 |
summary: Optional[str]
|
| 55 |
+
# `results` (example records) removed deliberately: API returns only
|
| 56 |
+
# an analyst-style `summary` to clients.
|
| 57 |
|
| 58 |
|
| 59 |
@app.post("/health")
|
|
|
|
| 78 |
|
| 79 |
|
| 80 |
@app.post("/query", response_model=QueryResponse)
|
| 81 |
+
def query(req: QueryRequest, request: Request) -> QueryResponse:
|
| 82 |
+
"""Free-form question answering over feedback data.
|
| 83 |
+
|
| 84 |
+
This endpoint also appends the (request, response) pair to an in-memory history
|
| 85 |
+
which is persisted best-effort to `.query_history.json`.
|
| 86 |
+
"""
|
| 87 |
try:
|
| 88 |
# Use the higher-level answer pipeline which can handle counts and keyword queries
|
| 89 |
out = svc.answer(req.query, top_k=req.top_k)
|
| 90 |
+
|
| 91 |
+
# Only return the analyst-style summary to the client. Do not include
|
| 92 |
+
# example records or raw contexts in the API response.
|
| 93 |
+
resp_dict = {"query": out.query, "summary": out.summary}
|
| 94 |
+
|
| 95 |
+
# append to history (store only the summary)
|
| 96 |
+
try:
|
| 97 |
+
history.append({"query": out.query, "response": {"summary": out.summary}})
|
| 98 |
+
save_history()
|
| 99 |
+
except Exception:
|
| 100 |
+
pass
|
| 101 |
+
|
| 102 |
+
return QueryResponse(**resp_dict)
|
| 103 |
except FileNotFoundError:
|
| 104 |
+
resp = QueryResponse(
|
| 105 |
query=req.query,
|
| 106 |
summary="Error: Vector index not found. Please run /ingest first.",
|
|
|
|
| 107 |
)
|
| 108 |
+
try:
|
| 109 |
+
history.append({"query": resp.query, "response": {"summary": resp.summary}})
|
| 110 |
+
save_history()
|
| 111 |
+
except Exception:
|
| 112 |
+
pass
|
| 113 |
+
return resp
|
| 114 |
except Exception as e:
|
| 115 |
+
resp = QueryResponse(
|
| 116 |
query=req.query,
|
| 117 |
summary=f"Error: {str(e)}",
|
|
|
|
| 118 |
)
|
| 119 |
+
try:
|
| 120 |
+
history.append({"query": resp.query, "response": {"summary": resp.summary}})
|
| 121 |
+
save_history()
|
| 122 |
+
except Exception:
|
| 123 |
+
pass
|
| 124 |
+
return resp
|
| 125 |
|
| 126 |
|
| 127 |
class TopicsRequest(BaseModel):
|
| 128 |
+
num_topics: int = Field(5, example=5)
|
| 129 |
|
| 130 |
|
| 131 |
@app.post("/topics")
|
|
|
|
| 220 |
|
| 221 |
|
| 222 |
class SentimentRequest(BaseModel):
|
| 223 |
+
limit: int = Field(100, example=50)
|
| 224 |
|
| 225 |
|
| 226 |
@app.post("/sentiment")
|
|
|
|
| 235 |
out = analyze_sentiments(texts)
|
| 236 |
return {"count": len(out), "results": out}
|
| 237 |
|
| 238 |
+
|
| 239 |
+
# Mount static files for a simple frontend if present
|
| 240 |
+
static_dir = Path(__file__).parent / "static"
|
| 241 |
+
if static_dir.exists():
|
| 242 |
+
# Serve static assets under /static/* (so index.html can reference /static/app.js)
|
| 243 |
+
app.mount("/static", StaticFiles(directory=str(static_dir)), name="static")
|
| 244 |
+
|
| 245 |
+
|
| 246 |
+
@app.get("/")
|
| 247 |
+
def root() -> HTMLResponse:
|
| 248 |
+
"""Serve the main index.html for the frontend."""
|
| 249 |
+
try:
|
| 250 |
+
html = (static_dir / "index.html").read_text(encoding="utf-8")
|
| 251 |
+
return HTMLResponse(html)
|
| 252 |
+
except Exception:
|
| 253 |
+
return HTMLResponse("<html><body><h1>Frontend not available</h1></body></html>", status_code=404)
|
| 254 |
+
|
| 255 |
+
|
| 256 |
+
@app.get("/history")
|
| 257 |
+
def get_history() -> Dict[str, Any]:
|
| 258 |
+
return {"history": history}
|
| 259 |
+
|
| 260 |
+
|
| 261 |
+
@app.post("/history/clear")
|
| 262 |
+
def clear_history() -> Dict[str, Any]:
|
| 263 |
+
global history
|
| 264 |
+
history = []
|
| 265 |
+
save_history()
|
| 266 |
+
return {"status": "cleared"}
|
| 267 |
+
|
app/rag_service.py
CHANGED
|
@@ -106,10 +106,175 @@ class RAGService:
|
|
| 106 |
# Fallback: simple extractive "summary"
|
| 107 |
return " ".join(contexts[:3])
|
| 108 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 109 |
def query(self, query: str, top_k: int = 5) -> RetrievalOutput:
|
| 110 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 111 |
contexts = [r.row[settings.text_column] for r in results]
|
| 112 |
-
|
|
|
|
|
|
|
| 113 |
return RetrievalOutput(query=query, results=results, summary=summary)
|
| 114 |
|
| 115 |
def answer(self, query: str, top_k: int = 5) -> RetrievalOutput:
|
|
|
|
| 106 |
# Fallback: simple extractive "summary"
|
| 107 |
return " ".join(contexts[:3])
|
| 108 |
|
| 109 |
+
def synthesize(self, query: str, results: List[SearchResult], contexts: List[str], max_contexts: int = 40) -> Optional[str]:
|
| 110 |
+
"""Produce a free-form, analyst-style answer that synthesizes the retrieved contexts.
|
| 111 |
+
|
| 112 |
+
This method asks the LLM to act as an experienced data analyst for digital business
|
| 113 |
+
processes and to synthesize insights, root causes, business impact and recommended
|
| 114 |
+
next steps. It is explicitly not an extractive response of "most relevant" snippets.
|
| 115 |
+
"""
|
| 116 |
+
if not contexts:
|
| 117 |
+
return None
|
| 118 |
+
|
| 119 |
+
# Limit number of contexts and truncate each to keep prompt size reasonable
|
| 120 |
+
safe_ctxs = []
|
| 121 |
+
for c in contexts[:max_contexts]:
|
| 122 |
+
# truncate to ~800 chars to avoid extremely long prompts
|
| 123 |
+
safe_ctxs.append((c[:800] + "...") if len(c) > 800 else c)
|
| 124 |
+
|
| 125 |
+
joined = "\n\n".join(f"- {c}" for c in safe_ctxs)
|
| 126 |
+
|
| 127 |
+
# Detect if query is in Hebrew
|
| 128 |
+
is_hebrew = any('\u0590' <= char <= '\u05FF' for char in query)
|
| 129 |
+
lang_instruction = "ענה בעברית באופן מקצועי" if is_hebrew else "Answer in the language of the query in a professional tone"
|
| 130 |
+
|
| 131 |
+
instruction = (
|
| 132 |
+
"You are an experienced information-systems analyst and senior data analyst working for a government ministry.\n"
|
| 133 |
+
"You have access to the full feedback dataset and metadata (service name, level).\n"
|
| 134 |
+
"Your task: produce a clear, structured, analyst-style answer to the user's question.\n"
|
| 135 |
+
"Requirements:\n"
|
| 136 |
+
"- Always synthesize across the provided examples and aggregate data; do NOT return raw snippets or 'most relevant' lists.\n"
|
| 137 |
+
"- Where applicable, compute simple aggregates over the dataset (counts, averages by service or by level).\n"
|
| 138 |
+
"- Provide: 1) Executive summary (2-4 sentences), 2) Key themes with short examples, 3) Likely root causes, 4) Business impact and priority, 5) Recommended next steps, 6) Suggested KPIs.\n"
|
| 139 |
+
"- If the user asks to split feedback into N topics, provide the N topics and a brief description and count for each.\n"
|
| 140 |
+
"- If the user asks about items with level < 3, filter by 'level' field accordingly and summarize services with low levels.\n"
|
| 141 |
+
"- Be explicit about uncertainty and flag where more data is needed.\n"
|
| 142 |
+
)
|
| 143 |
+
|
| 144 |
+
# Compute simple aggregates locally to include in the prompt: counts by service and average level
|
| 145 |
+
try:
|
| 146 |
+
df = load_feedback()
|
| 147 |
+
# basic aggregates
|
| 148 |
+
total = len(df)
|
| 149 |
+
counts_by_service = df.groupby(settings.service_column).size().sort_values(ascending=False).head(10).to_dict()
|
| 150 |
+
avg_level_by_service = df.groupby(settings.service_column)[settings.level_column].mean().sort_values(ascending=False).head(10).to_dict()
|
| 151 |
+
low_level_df = df[df[settings.level_column] < 3]
|
| 152 |
+
low_level_counts = low_level_df.groupby(settings.service_column).size().sort_values(ascending=False).head(10).to_dict()
|
| 153 |
+
except Exception:
|
| 154 |
+
total = None
|
| 155 |
+
counts_by_service = {}
|
| 156 |
+
avg_level_by_service = {}
|
| 157 |
+
low_level_counts = {}
|
| 158 |
+
|
| 159 |
+
aggregates_str = f"Total feedback: {total}\nTop services by count: {counts_by_service}\nTop services by avg level: {avg_level_by_service}\nLow-level counts (level<3): {low_level_counts}\n"
|
| 160 |
+
|
| 161 |
+
# Special-case: the user asked to split into N topics (e.g., "חלק את המשובים ל5 נושאים")
|
| 162 |
+
import re
|
| 163 |
+
m = re.search(r"(\d+)\s*נוש", query)
|
| 164 |
+
if ("חלק" in query and "נוש" in query) or m:
|
| 165 |
+
try:
|
| 166 |
+
n_topics = int(m.group(1)) if m else 5
|
| 167 |
+
texts = df[settings.text_column].astype(str).tolist()
|
| 168 |
+
embeddings = self.embedder.encode(texts)
|
| 169 |
+
from .topics import kmeans_topics
|
| 170 |
+
res = kmeans_topics(embeddings, num_topics=n_topics)
|
| 171 |
+
|
| 172 |
+
# Build a compact summary of clusters with sample examples
|
| 173 |
+
clusters: Dict[int, list] = {}
|
| 174 |
+
for label, text in zip(res.labels, texts):
|
| 175 |
+
clusters.setdefault(int(label), []).append(text)
|
| 176 |
+
|
| 177 |
+
cluster_summaries = []
|
| 178 |
+
for tid, items in clusters.items():
|
| 179 |
+
sample = items[:3]
|
| 180 |
+
cluster_summaries.append(f"Cluster {tid}: count={len(items)}, examples: {sample}")
|
| 181 |
+
|
| 182 |
+
clusters_str = "\n".join(cluster_summaries[:n_topics])
|
| 183 |
+
|
| 184 |
+
prompt = (
|
| 185 |
+
f"{instruction}\n\n{lang_instruction}.\n\nUser query:\n{query}\n\nDataset aggregates:\n{aggregates_str}\n\nCluster summaries (samples):\n{clusters_str}\n\n"
|
| 186 |
+
"Using the cluster summaries above, provide for each cluster: 1) a concise topic name (2-4 words), 2) 1-2 sentence description of the theme, and 3) recommended next steps/prioritization. Present as a numbered list."
|
| 187 |
+
)
|
| 188 |
+
except Exception:
|
| 189 |
+
# fallback to standard prompt if clustering fails
|
| 190 |
+
prompt = (
|
| 191 |
+
f"{instruction}\n\n{lang_instruction}.\n\nUser query:\n{query}\n\nDataset aggregates:\n{aggregates_str}\n\nFeedback examples (truncated):\n{joined}\n\nPlease present a clear, actionable, and human-readable analysis."
|
| 192 |
+
)
|
| 193 |
+
# Send to LLM below
|
| 194 |
+
elif ("נמוך" in query and ("3" in query or "שלוש" in query)) or ("level < 3" in query) or ("ציון" in query and "3" in query and ("נמוך" in query or "מתחת" in query)):
|
| 195 |
+
# User asks about items with level < 3
|
| 196 |
+
try:
|
| 197 |
+
low_texts = low_level_df[settings.text_column].astype(str).tolist()
|
| 198 |
+
if low_texts:
|
| 199 |
+
embeddings = self.embedder.encode(low_texts)
|
| 200 |
+
from .topics import kmeans_topics
|
| 201 |
+
res = kmeans_topics(embeddings, num_topics=3)
|
| 202 |
+
clusters: Dict[int, list] = {}
|
| 203 |
+
for label, text in zip(res.labels, low_texts):
|
| 204 |
+
clusters.setdefault(int(label), []).append(text)
|
| 205 |
+
clusters_str = "\n".join([f"Cluster {tid}: count={len(items)}, examples: {items[:3]}" for tid, items in clusters.items()])
|
| 206 |
+
else:
|
| 207 |
+
clusters_str = "(no low-level feedback found)"
|
| 208 |
+
|
| 209 |
+
prompt = (
|
| 210 |
+
f"{instruction}\n\n{lang_instruction}.\n\nUser query:\n{query}\n\nDataset aggregates (low-level):\n{aggregates_str}\n\nLow-level cluster samples:\n{clusters_str}\n\n"
|
| 211 |
+
"Summarize the dominant topic(s) among low-rated feedback and identify which services are most affected. Provide recommended remediation steps and priority."
|
| 212 |
+
)
|
| 213 |
+
except Exception:
|
| 214 |
+
prompt = (
|
| 215 |
+
f"{instruction}\n\n{lang_instruction}.\n\nUser query:\n{query}\n\nDataset aggregates:\n{aggregates_str}\n\nFeedback examples (truncated):\n{joined}\n\nPlease present a clear, actionable, and human-readable analysis."
|
| 216 |
+
)
|
| 217 |
+
elif "שירותים" in query or "שירות" in query:
|
| 218 |
+
# User asked about services with issues vs services working well
|
| 219 |
+
try:
|
| 220 |
+
svc_stats = df.groupby(settings.service_column)[settings.level_column].agg(['mean','count']).sort_values('mean')
|
| 221 |
+
problematic = svc_stats[svc_stats['mean'] < 3].head(10).to_dict('index')
|
| 222 |
+
good = svc_stats[svc_stats['mean'] >= 4].head(10).to_dict('index')
|
| 223 |
+
svc_str = f"Problematic (mean<3): {problematic}\nWorking well (mean>=4): {good}\n"
|
| 224 |
+
prompt = (
|
| 225 |
+
f"{instruction}\n\n{lang_instruction}.\n\nUser query:\n{query}\n\nDataset aggregates:\n{aggregates_str}\n\nService-level stats:\n{svc_str}\n\n"
|
| 226 |
+
"Using the service-level statistics and sampled feedback, describe which services have serious issues, which are working well overall, and provide prioritized recommendations for remediation and monitoring."
|
| 227 |
+
)
|
| 228 |
+
except Exception:
|
| 229 |
+
prompt = (
|
| 230 |
+
f"{instruction}\n\n{lang_instruction}.\n\nUser query:\n{query}\n\nDataset aggregates:\n{aggregates_str}\n\nFeedback examples (truncated):\n{joined}\n\nPlease present a clear, actionable, and human-readable analysis."
|
| 231 |
+
)
|
| 232 |
+
else:
|
| 233 |
+
prompt = (
|
| 234 |
+
f"{instruction}\n\n{lang_instruction}.\n\nUser query:\n{query}\n\nDataset aggregates:\n{aggregates_str}\n\nFeedback examples (truncated):\n{joined}\n\nPlease present a clear, actionable, and human-readable analysis."
|
| 235 |
+
)
|
| 236 |
+
|
| 237 |
+
# Try Gemini first
|
| 238 |
+
if settings.gemini_api_key and genai is not None:
|
| 239 |
+
try:
|
| 240 |
+
genai.configure(api_key=settings.gemini_api_key)
|
| 241 |
+
model = genai.GenerativeModel("gemini-1.5-flash")
|
| 242 |
+
resp = model.generate_content(prompt)
|
| 243 |
+
text = getattr(resp, "text", None)
|
| 244 |
+
if isinstance(text, str) and text.strip():
|
| 245 |
+
return text.strip()
|
| 246 |
+
except Exception:
|
| 247 |
+
pass
|
| 248 |
+
|
| 249 |
+
# Fallback to OpenAI if available
|
| 250 |
+
if settings.openai_api_key and OpenAI is not None:
|
| 251 |
+
client = OpenAI(api_key=settings.openai_api_key)
|
| 252 |
+
try:
|
| 253 |
+
resp = client.chat.completions.create(
|
| 254 |
+
model="gpt-4o-mini",
|
| 255 |
+
messages=[{"role": "user", "content": prompt}],
|
| 256 |
+
temperature=0.3,
|
| 257 |
+
max_tokens=700,
|
| 258 |
+
)
|
| 259 |
+
return resp.choices[0].message.content
|
| 260 |
+
except Exception:
|
| 261 |
+
pass
|
| 262 |
+
|
| 263 |
+
# Fallback: short extractive-ish synthesis
|
| 264 |
+
# Compose a short paragraph from top contexts
|
| 265 |
+
extract = " ".join(contexts[:5])
|
| 266 |
+
return extract
|
| 267 |
+
|
| 268 |
def query(self, query: str, top_k: int = 5) -> RetrievalOutput:
|
| 269 |
+
# Increase retrieval breadth by default so the LLM can synthesize across
|
| 270 |
+
# a broader set of feedback items (not only the top 5). This helps produce
|
| 271 |
+
# free-form analyst-style answers that consider more of the encoded data.
|
| 272 |
+
adjusted_k = max(top_k, 40)
|
| 273 |
+
results = self.retrieve(query, top_k=adjusted_k)
|
| 274 |
contexts = [r.row[settings.text_column] for r in results]
|
| 275 |
+
# Use the new synthesizing free-form answer by default so responses are
|
| 276 |
+
# written as an experienced data analyst and synthesize across retrieved data.
|
| 277 |
+
summary = self.synthesize(query, results, contexts, max_contexts=adjusted_k)
|
| 278 |
return RetrievalOutput(query=query, results=results, summary=summary)
|
| 279 |
|
| 280 |
def answer(self, query: str, top_k: int = 5) -> RetrievalOutput:
|
app/static/app.js
ADDED
|
@@ -0,0 +1,83 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
async function checkServer() {
|
| 2 |
+
try {
|
| 3 |
+
const r = await fetch('/health', {method: 'POST', headers: {'Content-Type':'application/json'}, body: JSON.stringify({})});
|
| 4 |
+
if (!r.ok) throw new Error('no');
|
| 5 |
+
const j = await r.json();
|
| 6 |
+
document.getElementById('server-status').textContent = j.status || 'ok';
|
| 7 |
+
} catch (e) {
|
| 8 |
+
document.getElementById('server-status').textContent = 'לא זמין';
|
| 9 |
+
}
|
| 10 |
+
}
|
| 11 |
+
|
| 12 |
+
async function refreshHistory() {
|
| 13 |
+
try {
|
| 14 |
+
const r = await fetch('/history');
|
| 15 |
+
const j = await r.json();
|
| 16 |
+
const h = j.history || [];
|
| 17 |
+
const container = document.getElementById('history');
|
| 18 |
+
container.innerHTML = '';
|
| 19 |
+
if (h.length === 0) {
|
| 20 |
+
container.textContent = 'לא נמצאו שאילתות קודמות.';
|
| 21 |
+
return;
|
| 22 |
+
}
|
| 23 |
+
// Respect the UI toggle: only include example snippets in history when
|
| 24 |
+
// the user opts to show sources. History entries are simple objects: {query, response: {summary}}
|
| 25 |
+
const showSources = document.getElementById('show-sources') && document.getElementById('show-sources').checked;
|
| 26 |
+
h.slice().reverse().forEach(entry => {
|
| 27 |
+
const el = document.createElement('div');
|
| 28 |
+
el.className = 'card';
|
| 29 |
+
const q = document.createElement('div'); q.innerHTML = '<strong>שאלה:</strong> ' + escapeHtml(entry.query);
|
| 30 |
+
const s = document.createElement('div'); s.innerHTML = '<strong>סיכום:</strong> ' + (entry.response && entry.response.summary ? escapeHtml(entry.response.summary) : '');
|
| 31 |
+
const res = document.createElement('div');
|
| 32 |
+
// We intentionally do not render example records unless the server
|
| 33 |
+
// and client explicitly provide them. By default this is hidden.
|
| 34 |
+
el.appendChild(q); el.appendChild(s); el.appendChild(res);
|
| 35 |
+
container.appendChild(el);
|
| 36 |
+
});
|
| 37 |
+
} catch (e) {
|
| 38 |
+
console.error('history fetch failed', e);
|
| 39 |
+
}
|
| 40 |
+
}
|
| 41 |
+
|
| 42 |
+
function escapeHtml(unsafe) {
|
| 43 |
+
return unsafe
|
| 44 |
+
.replace(/&/g, "&")
|
| 45 |
+
.replace(/</g, "<")
|
| 46 |
+
.replace(/>/g, ">")
|
| 47 |
+
.replace(/\"/g, """)
|
| 48 |
+
.replace(/'/g, "'");
|
| 49 |
+
}
|
| 50 |
+
|
| 51 |
+
async function sendQuery() {
|
| 52 |
+
const q = document.getElementById('query').value;
|
| 53 |
+
const top_k = parseInt(document.getElementById('top_k').value || '5', 10);
|
| 54 |
+
const body = { query: q, top_k };
|
| 55 |
+
try {
|
| 56 |
+
const r = await fetch('/query', {method: 'POST', headers: {'Content-Type':'application/json'}, body: JSON.stringify(body)});
|
| 57 |
+
const j = await r.json();
|
| 58 |
+
// show last response (summary is primary)
|
| 59 |
+
document.getElementById('last-response').style.display = 'block';
|
| 60 |
+
document.getElementById('resp-summary').textContent = j.summary || '';
|
| 61 |
+
// we no longer show raw JSON or example records by default
|
| 62 |
+
await refreshHistory();
|
| 63 |
+
} catch (e) {
|
| 64 |
+
alert('שגיאה בשליחת השאלה — בדוק את הקונסול');
|
| 65 |
+
console.error(e);
|
| 66 |
+
}
|
| 67 |
+
}
|
| 68 |
+
|
| 69 |
+
async function clearHistory() {
  // Ask the server to drop its stored query history, then re-render the
  // (now empty) history panel. Failures are logged, never surfaced.
  try {
    await fetch('/history/clear', { method: 'POST' });
    await refreshHistory();
  } catch (err) {
    console.error('clear failed', err);
  }
}
|
| 77 |
+
|
| 78 |
+
window.addEventListener('load', async () => {
  // Wire up the two action buttons, then do an initial server ping and
  // history render so the page is populated on first paint.
  const sendBtn = document.getElementById('send');
  const clearBtn = document.getElementById('clear-history');
  sendBtn.addEventListener('click', sendQuery);
  clearBtn.addEventListener('click', clearHistory);
  await checkServer();
  await refreshHistory();
});
|
app/static/index.html
ADDED
|
@@ -0,0 +1,67 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<!doctype html>
|
| 2 |
+
<html lang="he">
|
| 3 |
+
<head>
|
| 4 |
+
<meta charset="utf-8" />
|
| 5 |
+
<meta name="viewport" content="width=device-width, initial-scale=1" />
|
| 6 |
+
<title>Feedback RAG — Frontend</title>
|
| 7 |
+
<style>
|
| 8 |
+
body { font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, 'Noto Sans', sans-serif; margin: 0; direction: rtl; background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); min-height: 100vh; color: #0b2545; }
|
| 9 |
+
.container { max-width: 1000px; margin: 24px auto; padding: 24px; }
|
| 10 |
+
h1 { color: white; text-shadow: 0 2px 4px rgba(0,0,0,0.2); margin: 0 0 24px 0; }
|
| 11 |
+
header { display:flex; align-items:center; justify-content:space-between; gap:12px; margin-bottom: 0; }
|
| 12 |
+
textarea { width: 100%; height: 120px; font-size: 16px; padding:12px; border-radius:8px; border:2px solid #d0d7e6; font-family: inherit; }
|
| 13 |
+
textarea:focus { outline: none; border-color: #667eea; box-shadow: 0 0 0 3px rgba(102,126,234,0.1); }
|
| 14 |
+
input[type="number"] { padding: 8px; border-radius:6px; border:1px solid #d0d7e6; font-size:14px; }
|
| 15 |
+
button { padding: 10px 14px; margin-top: 8px; border-radius:6px; border: none; cursor: pointer; font-size:14px; font-weight:600; transition: all 0.2s; }
|
| 16 |
+
.primary { background: #0b63ff; color: white; }
|
| 17 |
+
.primary:hover { background: #0050cc; transform: translateY(-2px); box-shadow: 0 4px 12px rgba(11,99,255,0.3); }
|
| 18 |
+
.muted { background: #eef3ff; color: #0b2545; }
|
| 19 |
+
.muted:hover { background: #dde6ff; }
|
| 20 |
+
.card { border-radius: 12px; padding: 20px; margin-top: 16px; background: white; box-shadow: 0 10px 30px rgba(0,0,0,0.15); }
|
| 21 |
+
.summary { font-size: 16px; line-height: 1.7; color: #073763; white-space: pre-wrap; word-wrap: break-word; }
|
| 22 |
+
.result { white-space: pre-wrap; }
|
| 23 |
+
header .title { font-size: 20px; margin:0 }
|
| 24 |
+
.history-list { margin-top: 20px; }
|
| 25 |
+
.small { color: #666; font-size: 12px; }
|
| 26 |
+
.controls { display:flex; gap:12px; align-items:center; margin-top:12px; flex-wrap: wrap; }
|
| 27 |
+
label { display:block; margin-bottom:6px; color: #0b2545; font-weight:500; }
|
| 28 |
+
.response-badge { display: inline-block; background: #e8f5e9; color: #2e7d32; padding: 4px 8px; border-radius: 4px; font-size: 12px; margin-bottom: 12px; }
|
| 29 |
+
</style>
|
| 30 |
+
</head>
|
| 31 |
+
<body>
|
| 32 |
+
<div class="container">
|
| 33 |
+
<header>
|
| 34 |
+
<h1>Feedback RAG — ממשק</h1>
|
| 35 |
+
<div class="small">שרת: <span id="server-status">...בדיקה</span></div>
|
| 36 |
+
</header>
|
| 37 |
+
|
| 38 |
+
<section class="card">
|
| 39 |
+
<label for="query">שאלה</label>
|
| 40 |
+
<textarea id="query">תסווג את התלונות 5 סוגים</textarea>
|
| 41 |
+
<div>
|
| 42 |
+
<label for="top_k">מספר תוצאות (top_k)</label>
|
| 43 |
+
<input id="top_k" type="number" value="5" min="1" max="50" style="width:80px;" />
|
| 44 |
+
</div>
|
| 45 |
+
<div style="margin-top:8px;">
|
| 46 |
+
<label><input type="checkbox" id="show-sources" /> הצג דוגמאות (Sources)</label>
|
| 47 |
+
<span class="small" style="margin-left:12px;">ברירת מחדל: מוסתר — יוצג רק הסיכום האנליטי</span>
|
| 48 |
+
</div>
|
| 49 |
+
<div style="display:flex;gap:8px;margin-top:12px;">
|
| 50 |
+
<button id="send" class="primary">🔍 שאל</button>
|
| 51 |
+
<button id="clear-history" class="muted">🗑️ נקה היסטוריה</button>
|
| 52 |
+
</div>
|
| 53 |
+
</section>
|
| 54 |
+
|
| 55 |
+
<section id="last-response" class="card" style="display:none;">
|
| 56 |
+
<h3>✓ תגובה אנליטית</h3>
|
| 57 |
+
<div id="resp-summary" class="summary"></div>
|
| 58 |
+
</section>
|
| 59 |
+
|
| 60 |
+
<section class="card history-list">
|
| 61 |
+
<h3>היסטוריית שאלות</h3>
|
| 62 |
+
<div id="history"></div>
|
| 63 |
+
</section>
|
| 64 |
+
</div>
|
| 65 |
+
<script src="/static/app.js"></script>
|
| 66 |
+
</body>
|
| 67 |
+
</html>
|
scripts/smoke_check.py
ADDED
|
@@ -0,0 +1,45 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
"""Simple smoke check that hits the running FastAPI server root and /query.
|
| 3 |
+
|
| 4 |
+
This uses only the standard library so it should work in the project's venv.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
from __future__ import annotations
|
| 8 |
+
|
| 9 |
+
import json
|
| 10 |
+
import urllib.request
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
def get_root() -> str:
    """GET the server root page and return the decoded response body.

    Fix: add a timeout so the smoke check fails fast instead of hanging
    forever when the server is down — consistent with ``post_query``,
    which already uses ``timeout=30``.
    """
    with urllib.request.urlopen("http://127.0.0.1:8000/", timeout=30) as resp:
        return resp.read().decode("utf-8")
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def post_query(q: str):
    """POST *q* to the local /query endpoint and return the parsed JSON reply.

    Uses a fixed ``top_k`` of 5 and a 30-second timeout.
    """
    payload = json.dumps({"query": q, "top_k": 5}).encode("utf-8")
    request = urllib.request.Request(
        "http://127.0.0.1:8000/query",
        data=payload,
        headers={"Content-Type": "application/json"},
    )
    with urllib.request.urlopen(request, timeout=30) as resp:
        return json.load(resp)
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def main() -> None:
    """Smoke-check the running server: fetch the root page, then POST one query.

    Prints progress and any failure; a failed root fetch aborts the check
    early since the /query call would be pointless.
    """
    print("Checking http://127.0.0.1:8000/ ...")
    try:
        root = get_root()
        print("Root response length:", len(root))
    except Exception as exc:
        print("Failed to GET root:", exc)
        return

    sample_q = "מה הבעיות העיקריות שמשתמשים מציינים?"
    print("Posting sample query to /query ...")
    try:
        reply = post_query(sample_q)
        print("Query response keys:", list(reply.keys()))
        summary = reply.get("summary") or "(no summary)"
        print("Summary (truncated):\n", summary[:800])
    except Exception as exc:
        print("Failed to POST /query:", exc)
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
# Allow running this file directly as a CLI smoke check without importing it.
if __name__ == "__main__":
    main()
|
uvicorn.log
CHANGED
|
Binary files a/uvicorn.log and b/uvicorn.log differ
|
|
|