galbendavids committed on
Commit
2ebebf3
·
1 Parent(s): 75c53f5

UI/UX: RTL layout, dark mode, stable header logo, chat/input RTL, scrollbar, focus states; agent: retrieve→generate→end, final answer separator

Browse files
Files changed (4) hide show
  1. .gitignore +30 -0
  2. agent.py +9 -69
  3. app.py +125 -32
  4. rag_engine.py +42 -7
.gitignore ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Secrets – never commit to cloud
2
+ .env
3
+ .env.local
4
+ .env.*.local
5
+ *.token
6
+ .secrets
7
+ hf_token*
8
+
9
+ # Python
10
+ __pycache__/
11
+ *.py[cod]
12
+ *$py.class
13
+ *.so
14
+ .Python
15
+ venv/
16
+ .venv/
17
+ env/
18
+
19
+ # IDE / OS
20
+ .idea/
21
+ .vscode/
22
+ .DS_Store
23
+ *.swp
24
+ *.swo
25
+
26
+ # Build / cache
27
+ *.egg-info/
28
+ dist/
29
+ build/
30
+ .cache/
agent.py CHANGED
@@ -1,6 +1,6 @@
1
  """
2
- LangGraph agent: orchestrates RAG pipeline and verifies answer quality.
3
- If the planned answer is not good, the agent directs the pipeline toward correction (retry with feedback).
4
  """
5
 
6
  from typing import Optional, List, TypedDict, Literal
@@ -17,16 +17,12 @@ class AgentState(TypedDict, total=False):
17
  user_prompt: Optional[str]
18
  steps_log: List[str]
19
  draft_answer: Optional[str]
20
- is_good: bool
21
  feedback: Optional[str]
22
  iteration: int
23
 
24
 
25
- MAX_REFINE_ITERATIONS = 2
26
-
27
-
28
  def build_agent_graph(engine: RAGEngine):
29
- """Build the LangGraph: retrieve -> generate -> evaluate -> (end | generate with feedback)."""
30
 
31
  def retrieve(state: AgentState) -> dict:
32
  """Run RAG up to (not including) LLM. Fill refusal or prompts + steps_log."""
@@ -63,76 +59,19 @@ def build_agent_graph(engine: RAGEngine):
63
  steps_log.append("✅ Draft generated")
64
  return {"draft_answer": draft, "steps_log": steps_log}
65
 
66
- def evaluate(state: AgentState) -> dict:
67
- """Check if the answer is good. Set is_good and optionally feedback for refinement."""
68
- query = state["query"]
69
- draft = state.get("draft_answer") or ""
70
- steps_log = list(state.get("steps_log") or [])
71
- iteration = state.get("iteration", 0)
72
-
73
- # Error / timeout / rate limit responses are not "good" but we don't refine them
74
- if draft.startswith("⚠️") or draft.startswith("❌") or draft.startswith("⏱️"):
75
- return {"is_good": True, "steps_log": steps_log} # Treat as final
76
-
77
- steps_log.append("🔍 Evaluating answer quality...")
78
- engine.configure_api(state["api_key"])
79
- eval_prompt = f"""You are a quality checker. Given the user question and the assistant's answer, decide if the answer is good.
80
-
81
- User question: {query[:300]}
82
-
83
- Assistant answer: {draft[:1500]}
84
-
85
- Reply with exactly one of:
86
- - YES
87
- - NO: <one short line explaining what to improve>
88
-
89
- Reply:"""
90
- models = ["gemini-2.0-flash", "gemini-1.5-flash"]
91
- try:
92
- raw = engine._call_api_with_backoff(
93
- "You reply only with YES or NO: <feedback>. No other text.",
94
- eval_prompt,
95
- models,
96
- )
97
- except Exception:
98
- raw = "YES"
99
- raw = (raw or "").strip().upper()
100
- is_good = raw.startswith("YES") or "NO" not in raw[:10]
101
- feedback = ""
102
- if not is_good and "NO" in raw:
103
- idx = raw.find(":")
104
- if idx != -1:
105
- feedback = raw[idx + 1 :].strip()[:200]
106
- else:
107
- feedback = "Improve relevance and completeness."
108
-
109
- steps_log.append("✅ Good" if is_good else f"⚠️ Needs improvement: {feedback[:60]}...")
110
- return {
111
- "is_good": is_good,
112
- "feedback": feedback if not is_good else None,
113
- "iteration": iteration + 1,
114
- "steps_log": steps_log,
115
- }
116
-
117
  def route_after_retrieve(state: AgentState) -> Literal["end", "generate"]:
118
  if state.get("refusal"):
119
  return "end"
120
  return "generate"
121
 
122
- def route_after_evaluate(state: AgentState) -> Literal["end", "generate"]:
123
- if state.get("is_good") or (state.get("iteration") or 0) >= MAX_REFINE_ITERATIONS:
124
- return "end"
125
- return "generate"
126
-
127
  workflow = StateGraph(AgentState)
128
  workflow.add_node("retrieve", retrieve)
129
  workflow.add_node("generate", generate)
130
- workflow.add_node("evaluate", evaluate)
131
 
132
  workflow.set_entry_point("retrieve")
133
  workflow.add_conditional_edges("retrieve", route_after_retrieve, {"end": END, "generate": "generate"})
134
- workflow.add_edge("generate", "evaluate")
135
- workflow.add_conditional_edges("evaluate", route_after_evaluate, {"end": END, "generate": "generate"})
136
 
137
  return workflow.compile()
138
 
@@ -156,12 +95,11 @@ def run_stream(engine: RAGEngine, graph, query: str, api_key: str):
156
  text = f"{text}\n\n{body}"
157
  yield text
158
 
159
- # Final state: ensure we have a visible answer
160
  final_answer = (last_state.get("refusal") or last_state.get("draft_answer") or "").strip()
161
  steps_log = list(last_state.get("steps_log") or [])
162
  steps_log.append("✅ Done")
163
 
164
- # If no answer at all, show a clear fallback (user expects a verbal, detailed response)
165
  if not final_answer:
166
  final_answer = (
167
  "לא התקבלה תשובה מהמודל. ייתכן שנחסמה או שהבקשה ארכה מדי. "
@@ -173,4 +111,6 @@ def run_stream(engine: RAGEngine, graph, query: str, api_key: str):
173
  engine.response_cache[cache_key] = final_answer
174
  engine._maintain_conversation_history(query, final_answer)
175
 
176
- yield f"{chr(10).join(steps_log)}\n\n{final_answer}"
 
 
 
1
  """
2
+ LangGraph agent: orchestrates RAG pipeline (retrieve → generate → end).
3
+ Single LLM call so the user always gets a final verbal answer that aggregates the retrieved context.
4
  """
5
 
6
  from typing import Optional, List, TypedDict, Literal
 
17
  user_prompt: Optional[str]
18
  steps_log: List[str]
19
  draft_answer: Optional[str]
 
20
  feedback: Optional[str]
21
  iteration: int
22
 
23
 
 
 
 
24
  def build_agent_graph(engine: RAGEngine):
25
+ """Build the LangGraph: retrieve → generate → end (one LLM call for final answer)."""
26
 
27
  def retrieve(state: AgentState) -> dict:
28
  """Run RAG up to (not including) LLM. Fill refusal or prompts + steps_log."""
 
59
  steps_log.append("✅ Draft generated")
60
  return {"draft_answer": draft, "steps_log": steps_log}
61
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
62
  def route_after_retrieve(state: AgentState) -> Literal["end", "generate"]:
63
  if state.get("refusal"):
64
  return "end"
65
  return "generate"
66
 
67
+ # Pipeline: retrieve → generate → end (no evaluate/refine – one LLM call so user always gets final answer)
 
 
 
 
68
  workflow = StateGraph(AgentState)
69
  workflow.add_node("retrieve", retrieve)
70
  workflow.add_node("generate", generate)
 
71
 
72
  workflow.set_entry_point("retrieve")
73
  workflow.add_conditional_edges("retrieve", route_after_retrieve, {"end": END, "generate": "generate"})
74
+ workflow.add_edge("generate", END)
 
75
 
76
  return workflow.compile()
77
 
 
95
  text = f"{text}\n\n{body}"
96
  yield text
97
 
98
+ # Final state: ensure user always sees the verbal answer (aggregated from pipeline)
99
  final_answer = (last_state.get("refusal") or last_state.get("draft_answer") or "").strip()
100
  steps_log = list(last_state.get("steps_log") or [])
101
  steps_log.append("✅ Done")
102
 
 
103
  if not final_answer:
104
  final_answer = (
105
  "לא התקבלה תשובה מהמודל. ייתכן שנחסמה או שהבקשה ארכה מדי. "
 
111
  engine.response_cache[cache_key] = final_answer
112
  engine._maintain_conversation_history(query, final_answer)
113
 
114
+ # One final yield: steps + clear separator + the verbal answer (so user always sees it)
115
+ steps_text = chr(10).join(steps_log)
116
+ yield f"{steps_text}\n\n--- התשובה ---\n\n{final_answer}"
app.py CHANGED
@@ -102,7 +102,7 @@ def create_comparison_start():
102
  """Generate a comparison question template"""
103
  return "Compare between the cars: "
104
 
105
- # Premium, inviting UI – automotive vibe with warmth
106
  custom_css = """
107
  @import url('https://fonts.googleapis.com/css2?family=Outfit:wght@400;500;600;700;800&family=Heebo:wght@400;500;600;700&display=swap');
108
 
@@ -127,17 +127,40 @@ custom_css = """
127
  --font-body: 'Heebo', -apple-system, sans-serif;
128
  }
129
 
130
- body.dark, [data-theme="dark"], .dark {
131
- --primary: #ea580c;
132
- --primary-hover: #f97316;
133
- --primary-soft: rgba(234, 88, 12, 0.2);
 
 
134
  --text-primary: #fafaf9;
135
  --text-secondary: #d6d3d1;
136
  --text-muted: #a8a29e;
137
- --bg-app: linear-gradient(160deg, #1c1917 0%, #292524 100%);
138
- --bg-surface: #292524;
139
- --bg-chat: #1c1917;
140
  --border: #44403c;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
141
  }
142
 
143
  * {
@@ -153,9 +176,11 @@ body {
153
  max-width: 900px !important;
154
  margin: 0 auto !important;
155
  padding: 24px 16px 40px !important;
 
 
156
  }
157
 
158
- /* Header – hero style */
159
  .header-section {
160
  background: var(--bg-surface) !important;
161
  border-radius: var(--radius) !important;
@@ -164,6 +189,23 @@ body {
164
  text-align: center !important;
165
  box-shadow: var(--shadow-md) !important;
166
  border: 1px solid var(--border) !important;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
167
  }
168
 
169
  .header-section h1 {
@@ -199,13 +241,15 @@ body {
199
  color: var(--text-muted) !important;
200
  }
201
 
202
- /* Chat container card with shadow */
203
  .chat-container {
204
  background: var(--bg-surface) !important;
205
  border: 1px solid var(--border) !important;
206
  border-radius: var(--radius) !important;
207
  overflow: hidden !important;
208
  box-shadow: var(--shadow-lg) !important;
 
 
209
  }
210
 
211
  .gradio-chatbot {
@@ -213,13 +257,34 @@ body {
213
  padding: 24px !important;
214
  height: 520px !important;
215
  border-radius: 0 !important;
 
216
  }
217
 
218
  [data-testid="chatbot"] {
219
  background: var(--bg-chat) !important;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
220
  }
221
 
222
- /* Message bubbles */
223
  .message {
224
  padding: 14px 0 !important;
225
  margin: 0 !important;
@@ -229,23 +294,24 @@ body {
229
  border: none !important;
230
  background: transparent !important;
231
  border-radius: 0 !important;
 
 
232
  }
233
 
234
  .message.user {
235
- margin: 12px 0 !important;
236
  padding: 14px 18px !important;
237
  text-align: right !important;
238
  background: linear-gradient(135deg, var(--primary-soft) 0%, rgba(13, 148, 136, 0.08) 100%) !important;
239
- border-radius: var(--radius-sm) var(--radius-sm) 4px var(--radius-sm) !important;
240
- border: 1px solid rgba(194, 65, 12, 0.15) !important;
241
  max-width: 85% !important;
242
- margin-left: auto !important;
243
  }
244
 
245
  .message.assistant {
246
- margin: 12px 0 !important;
247
  padding: 16px 18px !important;
248
- text-align: left !important;
249
  background: var(--bg-surface) !important;
250
  border-radius: 4px var(--radius-sm) var(--radius-sm) var(--radius-sm) !important;
251
  border: 1px solid var(--border) !important;
@@ -258,6 +324,8 @@ body {
258
  background: transparent !important;
259
  font-size: 15px !important;
260
  line-height: 1.7 !important;
 
 
261
  }
262
 
263
  .message a {
@@ -277,14 +345,25 @@ body {
277
  font-family: var(--font-head) !important;
278
  }
279
 
280
- /* Input area pill style */
 
 
 
 
 
 
 
 
 
 
281
  .gr-box.gr-input-box {
282
  background: var(--bg-surface) !important;
283
  border: 1px solid var(--border) !important;
284
  border-radius: 999px !important;
285
  margin: 12px 16px 16px !important;
286
- padding: 8px 8px 8px 20px !important;
287
  box-shadow: var(--shadow-sm) !important;
 
288
  }
289
 
290
  .gr-textbox textarea {
@@ -295,6 +374,8 @@ body {
295
  font-size: 15px !important;
296
  min-height: 48px !important;
297
  border-radius: 0 !important;
 
 
298
  }
299
 
300
  .gr-textbox textarea:focus {
@@ -306,7 +387,7 @@ body {
306
  color: var(--text-muted) !important;
307
  }
308
 
309
- /* Primary button – Send */
310
  .gr-button.primary {
311
  background: linear-gradient(135deg, var(--primary) 0%, var(--primary-hover) 100%) !important;
312
  color: white !important;
@@ -323,9 +404,18 @@ body {
323
  box-shadow: 0 4px 14px rgba(194, 65, 12, 0.4) !important;
324
  }
325
 
326
- /* Example chips */
327
- .gr-examples .gr-sample-button,
328
- .gr-sample-button {
 
 
 
 
 
 
 
 
 
329
  background: var(--bg-surface) !important;
330
  border: 1px solid var(--border) !important;
331
  color: var(--text-primary) !important;
@@ -333,36 +423,35 @@ body {
333
  padding: 10px 18px !important;
334
  font-size: 0.9rem !important;
335
  transition: all 0.2s ease !important;
 
336
  }
337
 
338
- .gr-examples .gr-sample-button:hover,
339
- .gr-sample-button:hover {
340
  border-color: var(--primary) !important;
341
  color: var(--primary) !important;
342
  background: var(--primary-soft) !important;
343
  }
344
 
345
- /* Stop button */
346
- .gr-button-secondary,
347
- .gr-button:not(.primary) {
348
  border-radius: var(--radius-sm) !important;
349
  font-weight: 500 !important;
350
  }
351
 
352
- /* Tables in answers */
353
  table {
354
  border-collapse: collapse !important;
355
  width: 100% !important;
356
  margin: 14px 0 !important;
357
  border-radius: var(--radius-sm) !important;
358
  overflow: hidden !important;
 
359
  }
360
 
361
  table th {
362
  background: var(--primary) !important;
363
  color: white !important;
364
  padding: 12px 14px !important;
365
- text-align: left !important;
366
  font-weight: 600 !important;
367
  }
368
 
@@ -370,6 +459,7 @@ table td {
370
  padding: 12px 14px !important;
371
  border-bottom: 1px solid var(--border) !important;
372
  color: var(--text-primary) !important;
 
373
  }
374
 
375
  /* Code */
@@ -388,6 +478,8 @@ pre {
388
  padding: 14px !important;
389
  border-radius: var(--radius-sm) !important;
390
  overflow-x: auto !important;
 
 
391
  }
392
 
393
  pre code {
@@ -420,8 +512,9 @@ with gr.Blocks(theme=theme, css=custom_css, title="CarsRUS – השוואת רכ
420
 
421
  with gr.Column(elem_classes="header-section"):
422
  gr.HTML("""
423
- <div>
424
- <h1>🚗 CarsRUS</h1>
 
425
  <p class="header-tagline">השוואת רכבים והמלצות מבוססות כתבות מ־auto.co.il</p>
426
  <p class="header-note">
427
  שאל על דגם בודד או בקש השוואה בין שני דגמים. התשובות מבוססות רק על כתבות מבסיס הידע המקומי.
 
102
  """Generate a comparison question template"""
103
  return "Compare between the cars: "
104
 
105
+ # Premium UI – RTL-first, dark mode, automotive vibe
106
  custom_css = """
107
  @import url('https://fonts.googleapis.com/css2?family=Outfit:wght@400;500;600;700;800&family=Heebo:wght@400;500;600;700&display=swap');
108
 
 
127
  --font-body: 'Heebo', -apple-system, sans-serif;
128
  }
129
 
130
+ /* Dark mode – full coverage so UI stays readable */
131
+ body.dark, [data-theme="dark"], .dark, html.dark {
132
+ --primary: #f97316;
133
+ --primary-hover: #fb923c;
134
+ --primary-soft: rgba(249, 115, 22, 0.2);
135
+ --accent: #2dd4bf;
136
  --text-primary: #fafaf9;
137
  --text-secondary: #d6d3d1;
138
  --text-muted: #a8a29e;
139
+ --bg-app: #0c0a09;
140
+ --bg-surface: #1c1917;
141
+ --bg-chat: #141211;
142
  --border: #44403c;
143
+ --shadow-sm: 0 1px 2px rgba(0,0,0,0.3);
144
+ --shadow-md: 0 4px 12px rgba(0,0,0,0.4);
145
+ --shadow-lg: 0 12px 40px rgba(0,0,0,0.5);
146
+ }
147
+
148
+ body.dark .header-section, .dark .header-section,
149
+ body.dark .chat-container, .dark .chat-container {
150
+ background: var(--bg-surface) !important;
151
+ border-color: var(--border) !important;
152
+ }
153
+
154
+ body.dark .gradio-chatbot, .dark .gradio-chatbot,
155
+ body.dark [data-testid="chatbot"], .dark [data-testid="chatbot"] {
156
+ background: var(--bg-chat) !important;
157
+ }
158
+
159
+ body.dark .gr-box, .dark .gr-box,
160
+ body.dark .gr-textbox textarea, .dark .gr-textbox textarea {
161
+ background: var(--bg-surface) !important;
162
+ border-color: var(--border) !important;
163
+ color: var(--text-primary) !important;
164
  }
165
 
166
  * {
 
176
  max-width: 900px !important;
177
  margin: 0 auto !important;
178
  padding: 24px 16px 40px !important;
179
+ direction: rtl !important;
180
+ text-align: right !important;
181
  }
182
 
183
+ /* Header – logo area stable (no disappearing image) */
184
  .header-section {
185
  background: var(--bg-surface) !important;
186
  border-radius: var(--radius) !important;
 
189
  text-align: center !important;
190
  box-shadow: var(--shadow-md) !important;
191
  border: 1px solid var(--border) !important;
192
+ direction: rtl !important;
193
+ }
194
+
195
+ .header-logo {
196
+ display: inline-flex !important;
197
+ align-items: center !important;
198
+ justify-content: center !important;
199
+ width: 64px !important;
200
+ height: 64px !important;
201
+ min-width: 64px !important;
202
+ min-height: 64px !important;
203
+ font-size: 2.5rem !important;
204
+ line-height: 1 !important;
205
+ background: var(--primary-soft) !important;
206
+ border-radius: var(--radius) !important;
207
+ margin-bottom: 12px !important;
208
+ border: 1px solid var(--border) !important;
209
  }
210
 
211
  .header-section h1 {
 
241
  color: var(--text-muted) !important;
242
  }
243
 
244
+ /* Chat – RTL layout */
245
  .chat-container {
246
  background: var(--bg-surface) !important;
247
  border: 1px solid var(--border) !important;
248
  border-radius: var(--radius) !important;
249
  overflow: hidden !important;
250
  box-shadow: var(--shadow-lg) !important;
251
+ direction: rtl !important;
252
+ text-align: right !important;
253
  }
254
 
255
  .gradio-chatbot {
 
257
  padding: 24px !important;
258
  height: 520px !important;
259
  border-radius: 0 !important;
260
+ direction: rtl !important;
261
  }
262
 
263
  [data-testid="chatbot"] {
264
  background: var(--bg-chat) !important;
265
+ direction: rtl !important;
266
+ }
267
+
268
+ /* Scrollbar – subtle in light/dark */
269
+ .gradio-chatbot .overflow-y-auto,
270
+ [data-testid="chatbot"] .overflow-y-auto {
271
+ direction: rtl !important;
272
+ }
273
+
274
+ .gradio-chatbot::-webkit-scrollbar, [data-testid="chatbot"]::-webkit-scrollbar {
275
+ width: 8px !important;
276
+ }
277
+
278
+ .gradio-chatbot::-webkit-scrollbar-track, [data-testid="chatbot"]::-webkit-scrollbar-track {
279
+ background: var(--bg-chat) !important;
280
+ }
281
+
282
+ .gradio-chatbot::-webkit-scrollbar-thumb, [data-testid="chatbot"]::-webkit-scrollbar-thumb {
283
+ background: var(--border) !important;
284
+ border-radius: 4px !important;
285
  }
286
 
287
+ /* Message bubbles – RTL */
288
  .message {
289
  padding: 14px 0 !important;
290
  margin: 0 !important;
 
294
  border: none !important;
295
  background: transparent !important;
296
  border-radius: 0 !important;
297
+ direction: rtl !important;
298
+ text-align: right !important;
299
  }
300
 
301
  .message.user {
302
+ margin: 12px 0 12px auto !important;
303
  padding: 14px 18px !important;
304
  text-align: right !important;
305
  background: linear-gradient(135deg, var(--primary-soft) 0%, rgba(13, 148, 136, 0.08) 100%) !important;
306
+ border-radius: var(--radius-sm) 4px var(--radius-sm) var(--radius-sm) !important;
307
+ border: 1px solid rgba(194, 65, 12, 0.2) !important;
308
  max-width: 85% !important;
 
309
  }
310
 
311
  .message.assistant {
312
+ margin: 12px auto 12px 0 !important;
313
  padding: 16px 18px !important;
314
+ text-align: right !important;
315
  background: var(--bg-surface) !important;
316
  border-radius: 4px var(--radius-sm) var(--radius-sm) var(--radius-sm) !important;
317
  border: 1px solid var(--border) !important;
 
324
  background: transparent !important;
325
  font-size: 15px !important;
326
  line-height: 1.7 !important;
327
+ direction: rtl !important;
328
+ text-align: right !important;
329
  }
330
 
331
  .message a {
 
345
  font-family: var(--font-head) !important;
346
  }
347
 
348
+ /* Separator "--- התשובה ---" inside message */
349
+ .message mark, .message [data-sep] {
350
+ display: block !important;
351
+ margin: 12px 0 !important;
352
+ padding: 8px 0 !important;
353
+ color: var(--primary) !important;
354
+ font-weight: 600 !important;
355
+ border-bottom: 1px solid var(--border) !important;
356
+ }
357
+
358
+ /* Input – RTL pill */
359
  .gr-box.gr-input-box {
360
  background: var(--bg-surface) !important;
361
  border: 1px solid var(--border) !important;
362
  border-radius: 999px !important;
363
  margin: 12px 16px 16px !important;
364
+ padding: 8px 20px 8px 8px !important;
365
  box-shadow: var(--shadow-sm) !important;
366
+ direction: rtl !important;
367
  }
368
 
369
  .gr-textbox textarea {
 
374
  font-size: 15px !important;
375
  min-height: 48px !important;
376
  border-radius: 0 !important;
377
+ direction: rtl !important;
378
+ text-align: right !important;
379
  }
380
 
381
  .gr-textbox textarea:focus {
 
387
  color: var(--text-muted) !important;
388
  }
389
 
390
+ /* Buttons */
391
  .gr-button.primary {
392
  background: linear-gradient(135deg, var(--primary) 0%, var(--primary-hover) 100%) !important;
393
  color: white !important;
 
404
  box-shadow: 0 4px 14px rgba(194, 65, 12, 0.4) !important;
405
  }
406
 
407
+ .gr-button:focus-visible, .gr-textbox textarea:focus-visible {
408
+ outline: 2px solid var(--primary) !important;
409
+ outline-offset: 2px !important;
410
+ }
411
+
412
+ /* Example chips – RTL */
413
+ .gr-examples, .gr-samples {
414
+ direction: rtl !important;
415
+ text-align: right !important;
416
+ }
417
+
418
+ .gr-examples .gr-sample-button, .gr-sample-button {
419
  background: var(--bg-surface) !important;
420
  border: 1px solid var(--border) !important;
421
  color: var(--text-primary) !important;
 
423
  padding: 10px 18px !important;
424
  font-size: 0.9rem !important;
425
  transition: all 0.2s ease !important;
426
+ direction: rtl !important;
427
  }
428
 
429
+ .gr-examples .gr-sample-button:hover, .gr-sample-button:hover {
 
430
  border-color: var(--primary) !important;
431
  color: var(--primary) !important;
432
  background: var(--primary-soft) !important;
433
  }
434
 
435
+ .gr-button-secondary, .gr-button:not(.primary) {
 
 
436
  border-radius: var(--radius-sm) !important;
437
  font-weight: 500 !important;
438
  }
439
 
440
+ /* Tables RTL */
441
  table {
442
  border-collapse: collapse !important;
443
  width: 100% !important;
444
  margin: 14px 0 !important;
445
  border-radius: var(--radius-sm) !important;
446
  overflow: hidden !important;
447
+ direction: rtl !important;
448
  }
449
 
450
  table th {
451
  background: var(--primary) !important;
452
  color: white !important;
453
  padding: 12px 14px !important;
454
+ text-align: right !important;
455
  font-weight: 600 !important;
456
  }
457
 
 
459
  padding: 12px 14px !important;
460
  border-bottom: 1px solid var(--border) !important;
461
  color: var(--text-primary) !important;
462
+ text-align: right !important;
463
  }
464
 
465
  /* Code */
 
478
  padding: 14px !important;
479
  border-radius: var(--radius-sm) !important;
480
  overflow-x: auto !important;
481
+ direction: ltr !important;
482
+ text-align: left !important;
483
  }
484
 
485
  pre code {
 
512
 
513
  with gr.Column(elem_classes="header-section"):
514
  gr.HTML("""
515
+ <div dir="rtl" style="text-align: center;">
516
+ <div class="header-logo" aria-hidden="true">🚗</div>
517
+ <h1>CarsRUS</h1>
518
  <p class="header-tagline">השוואת רכבים והמלצות מבוססות כתבות מ־auto.co.il</p>
519
  <p class="header-note">
520
  שאל על דגם בודד או בקש השוואה בין שני דגמים. התשובות מבוססות רק על כתבות מבסיס הידע המקומי.
rag_engine.py CHANGED
@@ -793,10 +793,8 @@ class RAGEngine:
793
  steps_log.append("🔍 Normalizing car names...")
794
  canonical = self._normalize_car_name(query)
795
  if canonical:
796
- steps_log.append(f"✅ Recognized canonical id: {canonical}")
797
  search_query = canonical
798
  else:
799
- steps_log.append("ℹ️ No canonical car found; using full query for search")
800
  search_query = query
801
 
802
  is_comparison = self._is_comparison_question(query)
@@ -806,6 +804,20 @@ class RAGEngine:
806
  steps_log.append("📋 Detected: single-model question (rule-based)")
807
 
808
  ordered_supported = self._get_ordered_supported_canonicals_in_text(query)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
809
  if is_comparison:
810
  if len(ordered_supported) == 0:
811
  return (self._unsupported_car_refusal(query, is_comparison=True), None, None, steps_log)
@@ -921,10 +933,8 @@ Based on the context above, provide a clear answer that aggregates the informati
921
  processing_steps.append("🔍 Normalizing car names...")
922
  canonical = self._normalize_car_name(query)
923
  if canonical:
924
- processing_steps.append(f"✅ Recognized canonical id: {canonical}")
925
  search_query = canonical
926
  else:
927
- processing_steps.append("ℹ️ No canonical car found; using full query for search")
928
  search_query = query
929
 
930
  # עצה 7: זיהוי שאלות השוואתיות
@@ -932,6 +942,19 @@ Based on the context above, provide a clear answer that aggregates the informati
932
 
933
  # Policy guard: do not recommend models without auto.co.il articles in our KB
934
  ordered_supported = self._get_ordered_supported_canonicals_in_text(query)
 
 
 
 
 
 
 
 
 
 
 
 
 
935
  if is_comparison:
936
  if len(ordered_supported) == 0:
937
  return self._unsupported_car_refusal(query, is_comparison=True)
@@ -1072,12 +1095,9 @@ Based on the context above, provide a clear answer that aggregates the informati
1072
 
1073
  canonical = self._normalize_car_name(query)
1074
  if canonical:
1075
- processing_steps.append(f"✅ Recognized canonical id: {canonical}")
1076
  search_query = canonical
1077
  else:
1078
- processing_steps.append("ℹ️ No canonical car found; using full query for search")
1079
  search_query = query
1080
- yield steps_text()
1081
 
1082
  # --- Step 2: Question type (rule-based regex/keywords, no LLM) ---
1083
  is_comparison = self._is_comparison_question(query)
@@ -1088,6 +1108,21 @@ Based on the context above, provide a clear answer that aggregates the informati
1088
  yield steps_text()
1089
 
1090
  ordered_supported = self._get_ordered_supported_canonicals_in_text(query)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1091
  if is_comparison:
1092
  if len(ordered_supported) == 0:
1093
  yield self._unsupported_car_refusal(query, is_comparison=True)
 
793
  steps_log.append("🔍 Normalizing car names...")
794
  canonical = self._normalize_car_name(query)
795
  if canonical:
 
796
  search_query = canonical
797
  else:
 
798
  search_query = query
799
 
800
  is_comparison = self._is_comparison_question(query)
 
804
  steps_log.append("📋 Detected: single-model question (rule-based)")
805
 
806
  ordered_supported = self._get_ordered_supported_canonicals_in_text(query)
807
+ # Show user which cars were identified (for comparison: both; for single: one)
808
+ if is_comparison:
809
+ if len(ordered_supported) >= 2:
810
+ names = ", ".join(self.CANONICAL_TO_DISPLAY.get(c, c) for c in ordered_supported[:2])
811
+ steps_log.append(f"✅ זיהוי דגמים להשוואה: {names}")
812
+ elif len(ordered_supported) == 1:
813
+ one_display = self.CANONICAL_TO_DISPLAY.get(ordered_supported[0], ordered_supported[0])
814
+ steps_log.append(f"✅ זיהוי דגם אחד (השני לא ברשימה): {one_display}")
815
+ else:
816
+ if canonical:
817
+ steps_log.append(f"✅ Recognized canonical id: {canonical}")
818
+ elif not ordered_supported:
819
+ steps_log.append("ℹ️ No canonical car found; using full query for search")
820
+
821
  if is_comparison:
822
  if len(ordered_supported) == 0:
823
  return (self._unsupported_car_refusal(query, is_comparison=True), None, None, steps_log)
 
933
  processing_steps.append("🔍 Normalizing car names...")
934
  canonical = self._normalize_car_name(query)
935
  if canonical:
 
936
  search_query = canonical
937
  else:
 
938
  search_query = query
939
 
940
  # עצה 7: זיהוי שאלות השוואתיות
 
942
 
943
  # Policy guard: do not recommend models without auto.co.il articles in our KB
944
  ordered_supported = self._get_ordered_supported_canonicals_in_text(query)
945
+ # Show user which cars were identified (for comparison: both; for single: one)
946
+ if is_comparison:
947
+ if len(ordered_supported) >= 2:
948
+ names = ", ".join(self.CANONICAL_TO_DISPLAY.get(c, c) for c in ordered_supported[:2])
949
+ processing_steps.append(f"✅ זיהוי דגמים להשוואה: {names}")
950
+ elif len(ordered_supported) == 1:
951
+ one_display = self.CANONICAL_TO_DISPLAY.get(ordered_supported[0], ordered_supported[0])
952
+ processing_steps.append(f"✅ זיהוי דגם אחד (השני לא ברשימה): {one_display}")
953
+ else:
954
+ if canonical:
955
+ processing_steps.append(f"✅ Recognized canonical id: {canonical}")
956
+ else:
957
+ processing_steps.append("ℹ️ No canonical car found; using full query for search")
958
  if is_comparison:
959
  if len(ordered_supported) == 0:
960
  return self._unsupported_car_refusal(query, is_comparison=True)
 
1095
 
1096
  canonical = self._normalize_car_name(query)
1097
  if canonical:
 
1098
  search_query = canonical
1099
  else:
 
1100
  search_query = query
 
1101
 
1102
  # --- Step 2: Question type (rule-based regex/keywords, no LLM) ---
1103
  is_comparison = self._is_comparison_question(query)
 
1108
  yield steps_text()
1109
 
1110
  ordered_supported = self._get_ordered_supported_canonicals_in_text(query)
1111
+ # Show user which cars were identified (for comparison: both; for single: one)
1112
+ if is_comparison:
1113
+ if len(ordered_supported) >= 2:
1114
+ names = ", ".join(self.CANONICAL_TO_DISPLAY.get(c, c) for c in ordered_supported[:2])
1115
+ processing_steps.append(f"✅ זיהוי דגמים להשוואה: {names}")
1116
+ elif len(ordered_supported) == 1:
1117
+ one_display = self.CANONICAL_TO_DISPLAY.get(ordered_supported[0], ordered_supported[0])
1118
+ processing_steps.append(f"✅ זיהוי דגם אחד (השני לא ברשימה): {one_display}")
1119
+ else:
1120
+ if canonical:
1121
+ processing_steps.append(f"✅ Recognized canonical id: {canonical}")
1122
+ else:
1123
+ processing_steps.append("ℹ️ No canonical car found; using full query for search")
1124
+ yield steps_text()
1125
+
1126
  if is_comparison:
1127
  if len(ordered_supported) == 0:
1128
  yield self._unsupported_car_refusal(query, is_comparison=True)