Alleinzellgaenger committed on
Commit
49058ae
·
2 Parent(s): 450a52b d82d893

Merge branch 'dev' into main

Browse files
backend/app.py CHANGED
@@ -1,13 +1,14 @@
1
  from fastapi import FastAPI, File, UploadFile, HTTPException
2
  from fastapi.middleware.cors import CORSMiddleware
3
  from fastapi.staticfiles import StaticFiles
4
- from fastapi.responses import FileResponse
5
  import os
6
  import tempfile
7
  from dotenv import load_dotenv
8
  from pydantic import BaseModel
9
  from typing import Optional, List
10
  import anthropic
 
11
 
12
  # Load environment variables
13
  load_dotenv()
@@ -34,14 +35,21 @@ class ChatMessage(BaseModel):
34
 
35
  class ChatRequest(BaseModel):
36
  messages: List[ChatMessage]
37
- chunk: Optional[str] = None
 
 
 
38
  document: Optional[str] = None
39
 
40
  @app.post("/api/chat")
41
  async def chat_endpoint(request: ChatRequest):
42
- print(f"💬 Received chat with {len(request.messages)} messages")
43
- # Use provided chunk and document, or fallback to hardcoded content
44
- chunk = request.chunk or "No specific chunk provided"
 
 
 
 
45
  document = request.document or """
46
  # Auswertung Versuch F44: Zeeman Effekt
47
  Dominic Holst, Moritz Pfau
@@ -163,25 +171,54 @@ async def chat_endpoint(request: ChatRequest):
163
 
164
  *Figure 9: Cadmium rote Linie*
165
  """
166
- # Create system prompt for research paper tutor
167
- system_prompt = f"""
168
- You are PaperMentor, an expert academic tutor. Your purpose is to guide a user to a deep, phenomenological understanding of an academic paper.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
169
 
170
- The user's primary goal is to: "phänomenologisch verstehen, was passiert, was beobachtet wurde und warum das so ist, mit wenig Fokus auf Formeln, sondern Fokus auf intuitivem Verständnis und dem experimentellen Ansatz." (phenomenologically understand what is happening, what was observed, and why, with little focus on formulas but a strong focus on intuitive understanding and the experimental approach).
171
 
172
- Your entire interaction must be guided by this goal.
173
 
174
- You will be given a specific chunk of the paper to discuss, as well as the full document for context.
175
 
176
- ---
177
- Current Chunk:
178
- {chunk}
179
- ---
180
- Full Document for Context:
181
- {document}
182
- ---
183
 
184
- Your interaction must follow this specific conversational flow:
185
 
186
  1. **Greeting and Contextualization:**
187
  * Begin with a friendly greeting.
@@ -226,8 +263,12 @@ async def chat_endpoint(request: ChatRequest):
226
  for msg in request.messages
227
  if msg.role in ["user", "assistant"]
228
  ]
 
229
  if not any(msg["role"] == "user" for msg in anthropic_messages):
230
- return {"role": "assistant", "content": "I didn't receive your message. Could you please ask again?"}
 
 
 
231
 
232
  print("🤖 Calling Claude for chat response...")
233
  response = client.messages.create(
@@ -235,13 +276,9 @@ async def chat_endpoint(request: ChatRequest):
235
  max_tokens=10000,
236
  system=system_prompt, # system prompt here
237
  messages=anthropic_messages,
238
- thinking={
239
- "type": "enabled",
240
- "budget_tokens": 5000
241
- },
242
  )
243
 
244
- response_text = response.content[1].text
245
  print(f"✅ Received response from Claude: {response_text[:100]}...")
246
  return {"role": "assistant", "content": response_text}
247
 
@@ -279,6 +316,269 @@ async def upload_pdf(file: UploadFile = File(...)):
279
  print(f"❌ Error uploading PDF: {e}")
280
  raise HTTPException(status_code=500, detail=f"PDF upload error: {str(e)}")
281
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
282
  # Mount static files for production deployment
283
  frontend_path = os.path.join(os.path.dirname(__file__), "..", "frontend")
284
  assets_path = os.path.join(frontend_path, "assets")
 
1
  from fastapi import FastAPI, File, UploadFile, HTTPException
2
  from fastapi.middleware.cors import CORSMiddleware
3
  from fastapi.staticfiles import StaticFiles
4
+ from fastapi.responses import FileResponse, StreamingResponse
5
  import os
6
  import tempfile
7
  from dotenv import load_dotenv
8
  from pydantic import BaseModel
9
  from typing import Optional, List
10
  import anthropic
11
+ import json
12
 
13
  # Load environment variables
14
  load_dotenv()
 
35
 
36
class ChatRequest(BaseModel):
    """Payload accepted by the /api/chat and /api/chat/stream endpoints."""

    # Conversation so far; only 'user'/'assistant' roles are forwarded to Claude.
    messages: List[ChatMessage]
    chunk: Optional[str] = None # Legacy support
    # Section of the paper currently under discussion (preferred over `chunk`).
    currentChunk: Optional[str] = None
    # Upcoming section; used to build the transition prompt when `action` is set.
    nextChunk: Optional[str] = None
    action: Optional[str] = None # 'skip', 'understood', or None
    # Full paper text; a hardcoded lab report is used as fallback when omitted.
    document: Optional[str] = None
43
 
44
  @app.post("/api/chat")
45
  async def chat_endpoint(request: ChatRequest):
46
+ print(f"💬 Received chat with {len(request.messages)} messages, action: {request.action}")
47
+
48
+ # Use new format if available, otherwise fall back to legacy
49
+ current_chunk = request.currentChunk or request.chunk or "No specific chunk provided"
50
+ next_chunk = request.nextChunk or ""
51
+ action = request.action
52
+
53
  document = request.document or """
54
  # Auswertung Versuch F44: Zeeman Effekt
55
  Dominic Holst, Moritz Pfau
 
171
 
172
  *Figure 9: Cadmium rote Linie*
173
  """
174
+ # Create system prompt for research paper tutor with transition support
175
+ is_transition = action in ['skip', 'understood']
176
+
177
+ if is_transition:
178
+ system_prompt = f"""
179
+ You are PaperMentor, an expert academic tutor guiding the user through a continuous learning journey of an academic paper.
180
+
181
+ The user has just {action} the previous section and is transitioning to a new topic. This is part of a continuous conversation where you maintain context and adapt based on the user's actions.
182
+
183
+ User's Action: {action}
184
+
185
+ Previous Section:
186
+ {current_chunk}
187
+
188
+ New Section to Introduce:
189
+ {next_chunk}
190
+
191
+ Full Document for Context:
192
+ {document}
193
+
194
+ Your response should:
195
+ 1. **Acknowledge the transition**: Briefly reference their choice to {action} the previous section
196
+ 2. **Provide smooth continuity**: Connect the previous section to this new one naturally
197
+ 3. **Introduce the new section**: Present the new topic with enthusiasm and context
198
+ 4. **Adapt your approach**: If they skipped, perhaps adjust to be more engaging. If they understood, acknowledge their progress
199
+ 5. **Begin new exploration**: Start the 3-question sequence for this new section
200
+
201
+ Maintain the same conversational style and focus on phenomenological understanding.
202
+ """
203
+ else:
204
+ system_prompt = f"""
205
+ You are PaperMentor, an expert academic tutor. Your purpose is to guide a user to a deep, phenomenological understanding of an academic paper.
206
 
207
+ The user's primary goal is to: "phänomenologisch verstehen, was passiert, was beobachtet wurde und warum das so ist, mit wenig Fokus auf Formeln, sondern Fokus auf intuitivem Verständnis und dem experimentellen Ansatz." (phenomenologically understand what is happening, what was observed, and why, with little focus on formulas but a strong focus on intuitive understanding and the experimental approach).
208
 
209
+ Your entire interaction must be guided by this goal.
210
 
211
+ You will be given a specific chunk of the paper to discuss, as well as the full document for context.
212
 
213
+ ---
214
+ Current Chunk:
215
+ {current_chunk}
216
+ ---
217
+ Full Document for Context:
218
+ {document}
219
+ ---
220
 
221
+ Your interaction must follow this specific conversational flow:
222
 
223
  1. **Greeting and Contextualization:**
224
  * Begin with a friendly greeting.
 
263
  for msg in request.messages
264
  if msg.role in ["user", "assistant"]
265
  ]
266
+ # For transitions, add a dummy user message to trigger Claude response
267
  if not any(msg["role"] == "user" for msg in anthropic_messages):
268
+ if is_transition:
269
+ anthropic_messages.append({"role": "user", "content": "Please continue to the next section."})
270
+ else:
271
+ return {"role": "assistant", "content": "I didn't receive your message. Could you please ask again?"}
272
 
273
  print("🤖 Calling Claude for chat response...")
274
  response = client.messages.create(
 
276
  max_tokens=10000,
277
  system=system_prompt, # system prompt here
278
  messages=anthropic_messages,
 
 
 
 
279
  )
280
 
281
+ response_text = response.content[0].text
282
  print(f"✅ Received response from Claude: {response_text[:100]}...")
283
  return {"role": "assistant", "content": response_text}
284
 
 
316
  print(f"❌ Error uploading PDF: {e}")
317
  raise HTTPException(status_code=500, detail=f"PDF upload error: {str(e)}")
318
 
319
@app.post("/api/chat/stream")
async def chat_stream(request: ChatRequest):
    """Streaming chat endpoint for continuous conversation.

    Streams Claude's reply as Server-Sent Events: each text delta is emitted as
    a JSON-encoded string in a `data:` line, followed by a final
    `{"done": true}` object on success or `{"error": ...}` on failure.
    """
    print(f"💬 Received chat with {len(request.messages)} messages, action: {request.action}")

    # Use new format if available, otherwise fall back to legacy
    current_chunk = request.currentChunk or request.chunk or "No specific chunk provided"
    next_chunk = request.nextChunk or ""
    action = request.action

    # Fallback document: the bundled Zeeman-effect lab report (same default as /api/chat).
    document = request.document or """
# Auswertung Versuch F44: Zeeman Effekt
Dominic Holst, Moritz Pfau
October 23, 2020

## 1 Magnetfeld und Hysterese
Zu Beginn des Versuchs haben wir mit Hilfe des Teslameters die Magnetfeldstärke B an der Position der Cd-Lampe bei verschiedenen Spulenströmen gemessen (siehe Messwerte in Tabelle 1 im Laborbuch). In Figure 1 sind die gemessenen Feldstärken als Funktion der Stromstärke aufgetragen.

Anhand der Fehlerbalken und der praktisch identischen Überlagerung der beiden linearen Fitgeraden für auf- und absteigende Stromstärken, wird deutlich, dass **keine Hystereseeffekte vorliegen**. Der lineare Fit wurde hierbei nur auf die Stromstärken bis einschl. 10A angewandt, da für größere Stromstärken das Magnetfeld nicht in direktem proportionalen Zusammenhang ansteigt. Dies ist mit Sättigungseffekten der Magnetisierung des Eisenkerns der verwendeten Spule zu erklären.

*Figure 1: Messung des Magnetfelds als Funktion der Stromstärke*

## 2 Qualitative Beobachtung des Zeeman Effekts
Mit Hilfe der CMOS Kamera wurde das Spektrum des emittierten Lichts der Cadmiumlampe unter Verwendung des Lummer Gehercke Interferometers beobachtet. Die Beobachtungen wurden in longitudinaler und transversaler Richtung zum Magnetfeld durchgeführt.

### 2.1 Longitudinale Richtung:
**ohne Filter:**
Es sind deutlich zwei Linien pro Ordnung zu erkennen. Dies sind die σ+ und σ' Linien. Die π Linie ist in longitudinaler Richtung nicht zu beobachten

**mit λ/4-Plättchen und Polarisationsfilter:**
Von der Cadmiumlampe aus betrachtet wird zuerst ein λ/4-Plättchen und danach ein Polarisationsfilter in den Strahlengang gebracht. Je nach Ausrichtung der Filter zueinander wird nun eine der beiden Linien ausgeblendet.

**-45° Winkel:**
Stehen λ/4-Plättchen und Polarisationsfilter zueinander im −45° Winkel, wird das zirkular polarisierte Licht der σ¯ Linie um 45° verschoben linear polarisiert und somit vom Polarisationsfilter abgeschirmt. Folglich ist in dieser Konstellation nur die linke der beiden σ Linien zu beobachten.

**+45° Winkel:**
Stehen λ/4-Plättchen und Polarisationsfilter zueinander im +45° Winkel, ist nach analogem Prinzip wie zuvor nur die rechte Linie auf dem Kamerabild zu beobachten.

*Figure 2: Bilder der CMOS Kamera in longitudinaler Richtung mit a) λ/4-Plättchen und Polarisationsfilter im −45° Winkel, b) ohne Filter und c) Filter im +45° Winkel*

### 2.2 Transversale Richtung:
**ohne Filter:**
Es sind deutlich drei Linien pro Ordnung zu erkennen. Dies sind die σ⁺, π und σ⁻ Linien.

**mit Polarisationsfilter horizontal (in B-Feld Richtung):**
Die beiden σ-Linien sind vollständig ausgeblendet. Die π- Linie ist deutlich sichtbar.

**mit Polarisationsfilter vertikal (90° zu B-Feld Richtung):**
Die beiden σ-Linien sind klar sichtbar. Die π-Linie ist ausgeblendet.

*Figure 3: Bilder der CMOS Kamera in vertikaler Richtung mit a) keinem Filter, b) Polarisationsfilter horizontal und c) Polarisationsfilter vertikal*

Wie in Figure 3 gut zu erkennen ist, sind die ausgeblendeten Linien in beiden Konfigurationen weiterhin leicht sichtbar. Dies ist auf das nicht perfekt homogene Magnetfeld am Ort der Ca-Lampe zurückzuführen. Das Licht ist also nicht perfekt zirkular bzw. in B-Feld Richtung polarisiert, weshalb ein vollständiges Ausblenden im Experiment nicht zu beobachten ist.

## 3 Spektroskopie des Zeemaneffekts

### 3.1 Bestimmen des Zeemanshifts
Die Messdaten bei verschiedene Stromstärken wurden jeweils in einem Plot dargestellt. Um für den Fit möglichst saubere Messkurven des Spektrums zu verwenden, wurde die Messreihe bei I = 8A nicht in die Datenauswertung einbezogen, da die Aufspaltung der Cadmiumlinie nur schwer zu beobachten war. Das gleich gilt für die 8. Interferenzodnung, die nicht berücksichtigt wurde. Für die Datenauswertung fließen also die Nullte bis 7. Ordnung jeweils bei 9 bis 13 Ampere ein.
Als Funktion um die Messdaten zu fitten wurde ein Pseudo-Voigt-Profil verwendet. Die drei Kurven einer Ordnung wurden hierbei gemeinsam mit der Summe dreier Pseudo-Voigt-Profile gefittet. In Figure 4 sind exemplarisch anhand der Daten für I = 12A die Messdaten und der abschnittsweise Fit zu erkennen.

*Figure 4: Messdaten und Voigt-Fit bei Spulenstrom I = 12A*

Anhand der Fitparameter wird die Position der σ und π Linien bestimmt. Die Fehler der Fitparameter sind extrem klein (≈ 0,1px) und eigenen sich nicht als realistische Fehler für unsere weitere Rechnung. Als minimalen Fehler nehmen wir daher die Auflösung der Kamera an (1px) und skalieren alle Fehler so, dass der kleineste Fehler exakt 1px beträgt. Die anderen Fehler sind dann entsprechend linear skaliert größer. Dies berücksichtigt die unterschiedliche Qualität der Fits auf unterschiedliche Interferenz-Ordnungen, bringt die Fehler aber in einen experimentell realistischen Bereich.
Für die Berechnung des Zeemanshifts müssen die Verzerrungseffekte der Lummer-Gehrcke-Platte beachtet werden. Hierfür wird die Position der π-Linien gegen der Interferenzordnung k der entsprechenden Linie aufgetragen. Der funktionelle Zusammenhang dieser beiden Größen wird durch eine quadratische Funktion k = f(a) approximiert:

k = f(a) = ba² + ca + d (1)

Wir verwenden hier eine Taylor-Näherung für eine in der Realität deutlich kompliziertere Funktion. Dies ist aber, wie in Figure 5 gut ersichtlich, für unsere Zwecke weitaus ausreichend.
Die beiden σ-Linien können auf den quadratischen Fit f(a) projiziert werden, wodurch wir die jeweilige (nicht mehr ganzzahligen) Ordnung der σ-Linien erhalten. In Figure 5 ist (wieder exemplarisch für I = 12A) die optische Verzerrung der Platte aufgetragen.

*Figure 5: Verzerrungseffekte der Lummer-Gehrcke-Platte bei I = 12A*

Die Differenz zur ganzzahligen Ordnung der zugehörigen π-Linie ergibt δk. Für eine (kleine) Wellenlängenverschiebung δλ gilt:

δλ = δk / Δk * λ² / (2d * sqrt(n² − 1)) (2)

Für den Abstand Δk zweier Ordnungen gilt Δk = 1. Für die Wellenlänge λ der betrachten Linie verwenden wir den in Part 2 bestimmten Wert von λ = (643, 842 ± 0, 007)nm.
Wir kennen nun die Wellenlänge des Zeemanshift für jede von uns betrachtete Linie. Mit dem Zusammenhang zwischen Wellenlänge und Energie E = hc/λ lässt sich nun die Energieverschiebung der Linine bestimmen. Wir nehmen an, dass die Wellenlängenverschiebung δλ klein gegenüber der absoluten Wellenlänge λ ist, und erhalten daher für die Energieverschiebung δE in guter Näherung:

δE = (hc/λ²) * δλ (3)

Abschließend nehmen wir den Durchschnitt aller Werte δE für eine Stromstärke I.

### 3.2 Bestimmen des Bohrschen Magnetons μB
Für die Energieverschiebung beim Zeemaneffekt gilt:

δE = μB · ml · B (4)

Da es sich bei der betrachteten Cadmiumlinie um einen ¹D₂ → ¹P₁ Übergang handelt gilt hier ml = ±1. Somit folgt für das Bohrsche Magneton μB als Funktion des Spulenstroms I:

μB(I) = δE(I) / B(I) (5)

Die Magnetfeldstärke B(I) wurde hier anhand der Messwerte aus Teil 1 des Experiments bestimmt.
Wir erhalten für jeden Spulenstrom I einen experimentell bestimmten Wert des Bohrschen Magnetons μB. Unsere Ergebnisse sind in Figure 6 graphisch dargestellt.

*Figure 6: Experimentell bestimmte Werte für das Bohrsche Magneton bei unterschiedlichen Spulenströmen I*

Für den experimentellen Mittelwert erhalten wir:
μB,exp = (10, 1 ± 0.8) · 10⁻²⁴ J/T

Der Literaturwert beträgt:
μB,lit = 9, 27400949 · 10⁻²⁴ J/T

Unsere experimentell ermittelte Wert weicht also um 1,2 Sigma vom Literaturwert ab. Die Abweichung ist folglich nicht signifikant.

### 3.3 Kritische Betrachtung der Ergebnisse
Erfreulicherweise scheint unsere experimentelle Methode keine signifikante Abweichung zwischen Literaturwert und experimentellem Wert des Bohrschen Magnetons zu ergeben. Wir befinden uns mit unserem Wert im niedrigen 2-Sigma-Intervall. Dennoch ist kritisch anzumerken, dass wir einen vergleichsweise großen realtiven Fehler auf unser Messergebnis von 7,1% erhalten. Das bedeutet, unsere Abweichung ist zwar nicht sigifikant, dennoch weicht unser experimenteller Wert um knapp 10% vom Literaturwert ab. Der verwendete experimentelle Aufbau ist folglich nur bedingt für eine exakte Bestimmung des Bohrschen Magnetons geeigent.

Die beiden dominierenden Fehlerquellen sind zum einen die Bestimmung des Magnetfeldes B am Ort der Cadmium Lampe (Inhomogenitäten, exakte Platzierung der Lampe) und zum anderen die Wahl der Fehler der Positionen der π- und σ -Linien im Spektrum.
Zum Vergleich: Legt man den Fehler prinzipiell für alle Linien auf 1px, also die maximale Auflösung der Kamera, fest und verzichtet auf eine Skalierung der Fehler, beträgt die Abweichung des exp. Werts zum Literaturwert schon 2,8 Sigma. Wählt man analog für den Fehler der Linien 2px, da beispielsweise ein Maximum auch exakt zwischen zwei Pixelreihen liegen kann, liegt die Abweichung bei 1,4 Sigma.

## 4 Quantitative Betrachtung des Spektrums

### 4.1 Wellenlänge rote Cd-Linie

*Figure 7: Neonspektrum*

Zunächst wird der Untergrund von den Messdaten abgezogen, um Störungen durch Rauschen oder Sondereffekte wie kosmische Strahlung oder Umgebungsquellen zu eliminieren. Sollten sich in den Spektren negative Werte befinden, ist dies auf zufällige Unterschiede im Rauschen zurückzuführen. Anhand bekannter Linien des Neonspektrums werden den Pixeln nun Wellenlängen zugeordnet. Hierfür wurde der Bereich des Neonspektrums aufgenommen, in dem sich auch die rote Linie des Cadmiumspektrums befindet. In 7 sieht man das Neonspektrum und die Peaks, an die jeweils ein Voigt-Profil gelegt wurde. Jetzt kann man den identifizierten Linien ihre jeweilige Wellenlänge zuordnen und einen polynomiellen Zusammenhang finden. Wir haben uns für eine Gerade entschieden, die wie in Figure 8 zu sehen gut zu den Daten passt.
Schließlich wird ein Voigt-Profil an die gemessene rote Cd-Linie gelegt, wie in Figure 9 gezeigt. Umrechnung anhand der Kalibrierung führt auf einen Wert von λcd = (643,842 ± 0,007)nm. Dies befindet sich im 1σ-Bereich des Literaturwertes von λlit = 643, 84695nm. Der Fehler ist Ergebnis der Gauß'schen Fehlerfortpflanzung.

*Figure 8: Kalibrationsgerade*

### 4.2 Kritische Betrachtung der Ergebnisse
Messwert und theoretische Vorhersage für die bestimmte Linie stimmen innerhalb statistischer Schwankungen überein. Dies ist umso interessanter, wenn man die Unsicherheit des Messergebnisses betrachtet, die kleiner als 0,002% ist. Der absolute Fehler ist, wenn man die Steigung der Kalibrationsgeraden betrachtet, kleiner als 1px. Er besteht ausschließlich aus Abweichungen der numerischen Fits. Berücksichtigt man Ungenauigkeiten des CMOS Sensors oder die Möglichkeit, dass je nach Lage des Messwerts auch eine Abweichung um weniger als 1px eine größere Messwertschwankung verursachen kann, da die Pixel nur diskrete Werte messen können, liegt eine nachträgliche Anpassung nahe. Skaliert man die Unsicherheit auf 1px, liegt der Fehler des Messwerts bei 0,012nm. Damit ist der relative Fehler weiterhin kleiner 0,005%.

Zur hohen Genauigkeit trägt vor allem das gute Messverfahren bei. Spektrometer und Datenaufnahme per Computer lassen wenig Raum für Abweichungen. Wie die Daten zeigen, haben wir dabei eine Quelle für einen möglichen großen systematischen Fehler umgangen: Die Kamera wurde auf das Spektrometer nur locker aufgesteckt. Hätte sich deren Position zwischen Neon- und Cadmiummmessung z.B. durch Erschütterung des Labortisches verändert, hätte die Energiekalibrierung nicht mehr zur Messung der Cadmiumlinie gepasst.

Abbildung 6 zeigt unerwartetes Verhalten. Obwohl der Magnet ausgeschaltet war, sind drei Maxima zu sehen, deren Flanken sehr steil abfallen. Vergleicht man mit den Messungen im Magnetfeld, ähneln sich die Strukturen. Möglich ist, dass die Eisenkernspule, in der sich die Lampe während der Messung befand eine Restmagnetisierung aufwies, die eine Aufspaltung herbeigeführt hat.

*Figure 9: Cadmium rote Linie*
"""
    # Create system prompt for research paper tutor with transition support
    is_transition = action in ['skip', 'understood']

    if is_transition:
        # Transition prompt: acknowledge the user's skip/understood action and
        # bridge from the previous section into the next one.
        system_prompt = f"""
You are PaperMentor, an expert academic tutor guiding the user through a continuous learning journey of an academic paper.

The user has just {action} the previous section and is transitioning to a new topic. This is part of a continuous conversation where you maintain context and adapt based on the user's actions.

User's Action: {action}

Previous Section:
{current_chunk}

New Section to Introduce:
{next_chunk}

Full Document for Context:
{document}

Your response should:
1. **Acknowledge the transition**: Briefly reference their choice to {action} the previous section
2. **Provide smooth continuity**: Connect the previous section to this new one naturally
3. **Introduce the new section**: Present the new topic with enthusiasm and context
4. **Adapt your approach**: If they skipped, perhaps adjust to be more engaging. If they understood, acknowledge their progress
5. **Begin new exploration**: Start the 3-question sequence for this new section

Maintain the same conversational style and focus on phenomenological understanding.
"""
    else:
        # Standard tutoring prompt: Socratic 3-question flow on the current chunk.
        system_prompt = f"""
You are PaperMentor, an expert academic tutor. Your purpose is to guide a user to a deep, phenomenological understanding of an academic paper.

The user's primary goal is to: "phänomenologisch verstehen, was passiert, was beobachtet wurde und warum das so ist, mit wenig Fokus auf Formeln, sondern Fokus auf intuitivem Verständnis und dem experimentellen Ansatz." (phenomenologically understand what is happening, what was observed, and why, with little focus on formulas but a strong focus on intuitive understanding and the experimental approach).

Your entire interaction must be guided by this goal.

You will be given a specific chunk of the paper to discuss, as well as the full document for context.

---
Current Chunk:
{current_chunk}
---
Full Document for Context:
{document}
---

Your interaction must follow this specific conversational flow:

1. **Greeting and Contextualization:**
* Begin with a friendly greeting.
* First, briefly explain what this chunk is about in simple terms.
* Then, place this chunk within the larger context of the paper. Explain its purpose in the overall argument. For instance: "Here, the authors are presenting the core observation that the rest of the paper will attempt to explain," or "This section lays the theoretical groundwork for the experiment they describe later."

2. **Socratic Questioning (The 3-Question Rule):**
* Your main task is to test and deepen the user's understanding through a series of exactly three questions about the current chunk.
* **First Question:** Ask a single, open-ended question that probes the user's intuitive grasp of the chunk's most important concept. The question must align with the user's goal (e.g., "In simple terms, what did the researchers actually observe here?" or "Why was it necessary for them to design the experiment in this specific way?"). **Always ask only one question at a time.**
* **If the user answers correctly:** Affirm their understanding (e.g., "Exactly," "That's a great way to put it") and immediately ask a *second, deeper question*. This question should build upon their correct answer, asking for more detail or to consider the implications.
* **If the user answers the second question correctly:** Again, affirm their response and ask a *third, even more probing question*. This final question should challenge them to think about the "why" or the broader significance of the information.
* **If the user answers incorrectly at any point:** Gently correct the misunderstanding. Provide a clear, intuitive explanation, always connecting it to the experimental observations and the "why." After your explanation, re-ask the question in a slightly different way to ensure they now understand, then continue the 3-question sequence.

3. **Moving On:**
* After the user has successfully answered all three questions, congratulate them on their solid understanding.
* Conclude by explicitly giving them the choice to continue or stay. Say something like: "Excellent, it seems you have a very solid grasp of this part. Shall we move on to the next section, or is there anything here you'd like to explore further?"

**Important Behaviors:**
* **Language:** The entire conversation must be in English, as indicated by the user's goal.
* **Focus:** Always prioritize intuitive, conceptual, and experimental understanding over formal, mathematical details.
* **Pacing:** The flow is dictated by the user's successful answers. Move from one question to the next smoothly.
* **Structure:** Importantly, maintain a clear and logical flow in the conversation. Never loose track of the objective.
* **Markdown:** Output markdown if you think it is useful. Break your response into reasonable sections.
Begin the conversation.
"""

    anthropic_api_key = os.environ.get("ANTHROPIC_API_KEY")
    if not anthropic_api_key:
        return {"role": "assistant", "content": "I'm sorry, but the chat service is not configured. Please check the API key configuration."}

    client = anthropic.Anthropic(api_key=anthropic_api_key)

    if not request.messages:
        # No conversation yet — assistant should speak first
        anthropic_messages = [
            {"role": "user", "content": "Please start the conversation based on the provided context."}
        ]
    else:
        anthropic_messages = [
            {"role": msg.role, "content": msg.content}
            for msg in request.messages
            if msg.role in ["user", "assistant"]
        ]
    print(anthropic_messages)
    # For transitions, add a dummy user message to trigger Claude response
    if not any(msg["role"] == "user" for msg in anthropic_messages):
        if is_transition:
            anthropic_messages.append({"role": "user", "content": "Please continue to the next section."})
        else:
            def generate_error():
                yield f"data: {json.dumps({'error': 'I did not receive your message. Could you please ask again?'})}\n\n"
            return StreamingResponse(
                media_type="text/event-stream",
                content=generate_error(),
                headers={"Cache-Control": "no-cache",
                         "Connection": "keep-alive",
                         "Access-Control-Allow-Origin": "*"},
            )

    def generate():
        """Yield SSE `data:` events from Claude's streaming API."""
        try:
            with client.messages.stream(
                model="claude-sonnet-4-20250514",
                max_tokens=10000,
                system=system_prompt,  # system prompt here
                messages=anthropic_messages,
            ) as stream:
                for text in stream.text_stream:
                    print(f"Raw text chunk: {repr(text)}")
                    yield f"data: {json.dumps(text)}\n\n"
            # Sentinel object so the client can distinguish end-of-stream
            # (an object with "done") from text deltas (plain JSON strings).
            yield f"data: {json.dumps({'done': True})}\n\n"
        except Exception as e:
            yield f"data: {json.dumps({'error': str(e)})}\n\n"

    # BUG FIX: was media_type="text/event_stream" (underscore). That is not a
    # valid MIME type for Server-Sent Events — browsers' EventSource requires
    # exactly "text/event-stream" (hyphen), and the error path above already
    # used the correct form.
    return StreamingResponse(
        media_type="text/event-stream",
        content=generate(),
        headers={"Cache-Control": "no-cache",
                 "Connection": "keep-alive",
                 "Access-Control-Allow-Origin": "*"},
    )
580
+
581
+
582
  # Mount static files for production deployment
583
  frontend_path = os.path.join(os.path.dirname(__file__), "..", "frontend")
584
  assets_path = os.path.join(frontend_path, "assets")
frontend/src/App.css CHANGED
@@ -40,3 +40,18 @@
40
  .read-the-docs {
41
  color: #888;
42
  }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
40
  .read-the-docs {
41
  color: #888;
42
  }
43
+
44
/* Fade/slide-in effect; applied via .animate-fade-in each time a new
   loading tip is rendered (see ChunkLoadingTips.jsx, which keys the tip
   element to force a remount and replay this animation). */
@keyframes fade-in {
  from {
    opacity: 0;
    transform: translateY(5px);
  }
  to {
    opacity: 1;
    transform: translateY(0);
  }
}

.animate-fade-in {
  animation: fade-in 0.3s ease-out;
}
frontend/src/components/ChunkLoadingTips.jsx ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import React, { useState, useEffect } from 'react';
2
+
3
+ const tips = [
4
+ "You can always skip a chunk if it's not relevant to your learning goals.",
5
+ "Be sure to mark a chunk as understood when you've grasped the concept.",
6
+ "Ask the mentor to confirm your understanding before moving forward.",
7
+ "Don't hesitate to ask follow-up questions about complex topics.",
8
+ "Use the previous chunk button to review earlier concepts.",
9
+ "The mentor adapts explanations based on your questions and responses.",
10
+ "Take your time - there's no rush to complete chunks quickly.",
11
+ "Ask for examples when abstract concepts seem unclear.",
12
+ "Request connections between current and previous chunks when helpful.",
13
+ "The mentor can explain mathematical formulas step by step."
14
+ ];
15
+
16
+ const ChunkLoadingTips = ({ message = "Preparing your document..." }) => {
17
+ const [currentTipIndex, setCurrentTipIndex] = useState(0);
18
+
19
+ useEffect(() => {
20
+ const interval = setInterval(() => {
21
+ setCurrentTipIndex((prev) => (prev + 1) % tips.length);
22
+ }, 4000); // Change tip every 4 seconds
23
+
24
+ return () => clearInterval(interval);
25
+ }, []);
26
+
27
+ return (
28
+ <div className="absolute top-1/2 left-1/2 transform -translate-x-1/2 -translate-y-1/2 z-50">
29
+ <div className="bg-white/90 backdrop-blur-sm rounded-xl shadow-lg border border-gray-200 p-6 max-w-sm text-center">
30
+ {/* Loading spinner */}
31
+ <div className="relative mb-4">
32
+ <div className="w-8 h-8 border-3 border-blue-200 rounded-full animate-spin border-t-blue-600 mx-auto"></div>
33
+ </div>
34
+
35
+ {/* Main message */}
36
+ <h3 className="text-sm font-medium text-gray-900 mb-3">
37
+ {message}
38
+ </h3>
39
+ <p key={currentTipIndex} className="text-xs text-gray-600 animate-fade-in leading-relaxed">
40
+ {tips[currentTipIndex]}
41
+ </p>
42
+ </div>
43
+ </div>
44
+ );
45
+ };
46
+
47
+ export default ChunkLoadingTips;
frontend/src/components/ChunkPanel.jsx CHANGED
@@ -2,84 +2,336 @@ import ReactMarkdown from 'react-markdown';
2
  import remarkMath from 'remark-math';
3
  import rehypeKatex from 'rehype-katex';
4
  import rehypeRaw from 'rehype-raw';
5
- import { getChatMarkdownComponents } from '../utils/markdownComponents.jsx';
6
  import SimpleChat from './SimpleChat.jsx';
 
7
  import React, { useState, useEffect } from 'react';
8
 
9
  const ChunkPanel = ({
10
  documentData,
11
  currentChunkIndex,
12
- chunkExpanded,
13
- setChunkExpanded,
14
- chunkStates,
15
- skipChunk,
16
- markChunkUnderstood,
17
- startInteractiveLesson,
18
  showChat,
19
- setShowChat,
20
- setChunkAsInteractive,
21
- updateChunkChatHistory,
22
- getCurrentChunkChatHistory
 
 
 
 
 
 
 
 
23
  }) => {
24
 
25
  const chatMarkdownComponents = getChatMarkdownComponents();
 
26
  const [isLoading, setIsLoading] = useState(false);
27
- const currentChunk = documentData?.chunks?.[currentChunkIndex] ?? null;
28
- const chunkText = currentChunk?.text || '';
29
 
 
 
 
 
 
 
 
 
 
 
30
 
 
 
31
 
32
- // Generate greeting when chat opens and no messages
33
- useEffect(() => {
34
- if (showChat && (getCurrentChunkChatHistory()?.length ?? 0) === 0) {
35
- generateGreeting();
36
  }
37
- }, [showChat]);
38
- const handleChatToggle = () => {
39
- if (!showChat) {
40
- setChunkAsInteractive();
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
41
  }
42
- setShowChat(!showChat);
43
  };
44
 
45
  const generateGreeting = async () => {
46
  setIsLoading(true);
 
 
 
47
  try {
48
  const response = await fetch('/api/chat', {
49
  method: 'POST',
50
  headers: { 'Content-Type': 'application/json' },
51
  body: JSON.stringify({
52
  messages: [],
53
- chunk: documentData?.chunks?.[currentChunkIndex]?.text || '',
54
  document: documentData ? JSON.stringify(documentData) : ''
55
  })
56
  });
57
 
58
  const data = await response.json();
59
 
60
- updateChunkChatHistory([
61
  {
62
  role: 'assistant',
63
- content: data.content || 'Hi! Ask me anything about this section.'
64
- }
65
- ]);
 
66
  } catch (error) {
67
  console.error('Error generating greeting:', error);
68
- updateChunkChatHistory([
69
  {
70
  role: 'assistant',
71
- content: 'Hi! Ask me anything about this section.'
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
72
  }
73
- ]);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
74
  } finally {
75
  setIsLoading(false);
76
  }
77
  };
78
 
79
  const handleSend = async (text) => {
80
- const userMessage = { role: 'user', content: text };
81
- const newMessages = [...getCurrentChunkChatHistory(), userMessage];
82
- updateChunkChatHistory(newMessages);
83
  setIsLoading(true);
84
 
85
  try {
@@ -87,23 +339,23 @@ const ChunkPanel = ({
87
  method: 'POST',
88
  headers: { 'Content-Type': 'application/json' },
89
  body: JSON.stringify({
90
- messages: newMessages,
91
- chunk: documentData?.chunks?.[currentChunkIndex]?.text || '',
92
  document: documentData ? JSON.stringify(documentData) : ''
93
  })
94
  });
95
 
96
  const data = await response.json();
97
- updateChunkChatHistory([
98
- ...newMessages,
99
- { role: 'assistant', content: data.content || 'Sorry, no response received.' }
100
- ]);
101
  } catch (error) {
102
  console.error('Error:', error);
103
- updateChunkChatHistory([
104
- ...newMessages,
105
- { role: 'assistant', content: 'Sorry, something went wrong. Please try again.' }
106
- ]);
107
  } finally {
108
  setIsLoading(false);
109
  }
@@ -112,130 +364,96 @@ const ChunkPanel = ({
112
  return (
113
  <>
114
  {/* Chunk Header */}
115
- <div className="px-6 py-4 flex-shrink-0 bg-white rounded-t-lg border-b border-gray-200 z-10">
116
- <div className="flex items-center justify-between">
117
- <button
118
- onClick={() => setChunkExpanded(!chunkExpanded)}
119
- className="flex items-center hover:bg-gray-50 py-2 px-3 rounded-lg transition-all -ml-3"
120
- >
121
- <div className="font-semibold text-gray-900 text-left flex-1">
122
- <ReactMarkdown
123
- remarkPlugins={[remarkMath]}
124
- rehypePlugins={[rehypeRaw, rehypeKatex]}
125
- components={{
126
- p: ({ children }) => <span>{children}</span>, // Render as inline span
127
- ...chatMarkdownComponents
128
- }}
129
- >
130
- {documentData?.chunks?.[currentChunkIndex]?.topic || "Loading..."}
131
- </ReactMarkdown>
132
- </div>
133
- <span className="text-gray-400 ml-3">
134
- {chunkExpanded ? '▲' : '▼'}
135
- </span>
136
- </button>
137
-
138
- </div>
139
-
140
- {/* Expandable Chunk Content */}
141
- {chunkExpanded && documentData?.chunks?.[currentChunkIndex] && (
142
- <>
143
- <div className="prose prose-sm max-w-none">
144
- <ReactMarkdown
145
- remarkPlugins={[remarkMath]}
146
- rehypePlugins={[rehypeRaw, rehypeKatex]}
147
- components={chatMarkdownComponents}
148
  >
149
- {documentData.chunks[currentChunkIndex].text}
150
- </ReactMarkdown>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
151
  </div>
152
 
153
- {/* Action Buttons */}
154
- <div className="flex items-center justify-center gap-4 mt-4 pt-4 border-gray-200">
155
- <button
156
- onClick={skipChunk}
157
- className={`py-2 px-4 border rounded-lg transition-all text-sm ${
158
- chunkStates[currentChunkIndex] === 'skipped'
159
- ? 'bg-red-500 text-white border-red-500 hover:bg-red-600'
160
- : 'bg-white hover:bg-gray-50 border-gray-300'
161
- }`}
162
- >
163
- Skip
164
- </button>
165
  <button
166
- onClick={handleChatToggle}
167
- className={`py-2 px-4 border rounded-lg transition-all text-sm flex items-center gap-1 ${
168
- chunkStates[currentChunkIndex] === 'interactive'
169
- ? 'bg-blue-500 text-white border-blue-500 hover:bg-blue-600'
170
- : 'bg-white hover:bg-gray-50 border-gray-300'
171
- }`}
172
  >
173
- <svg width="16" height="16" viewBox="0 0 24 24" fill="none" stroke="currentColor" strokeWidth="1.5">
174
- <path d="M21 15a2 2 0 0 1-2 2H7l-4 4V5a2 2 0 0 1 2-2h14a2 2 0 0 1 2 2z"/>
 
 
 
 
 
 
 
 
 
 
 
 
 
175
  </svg>
176
- Chat
177
  </button>
178
- <button
 
 
179
  onClick={markChunkUnderstood}
180
- className={`py-2 px-4 border rounded-lg transition-all text-sm ${
181
- chunkStates[currentChunkIndex] === 'understood'
182
- ? 'bg-green-500 text-white border-green-500 hover:bg-green-600'
183
- : 'bg-white hover:bg-gray-50 border-gray-300'
184
- }`}
185
  >
186
- Understood
 
 
 
 
 
 
 
 
 
 
187
  </button>
188
  </div>
189
- </>
190
- )}
191
-
192
- {/* Show buttons even when chunk is collapsed */}
193
- {!chunkExpanded && (
194
- <div className="flex items-center justify-center gap-4 mt-4 pt-4 border-t border-gray-200">
195
- <button
196
- onClick={skipChunk}
197
- className={`py-2 px-4 border rounded-lg transition-all text-sm ${
198
- chunkStates[currentChunkIndex] === 'skipped'
199
- ? 'bg-red-500 text-white border-red-500 hover:bg-red-600'
200
- : 'bg-white hover:bg-gray-50 border-gray-300'
201
- }`}
202
- >
203
- Skip
204
- </button>
205
- <button
206
- onClick={handleChatToggle}
207
- className={`py-2 px-4 border rounded-lg transition-all text-sm flex items-center gap-1 ${
208
- chunkStates[currentChunkIndex] === 'interactive'
209
- ? 'bg-blue-500 text-white border-blue-500 hover:bg-blue-600'
210
- : 'bg-white hover:bg-gray-50 border-gray-300'
211
- }`}
212
- >
213
- <svg width="16" height="16" viewBox="0 0 24 24" fill="none" stroke="currentColor" strokeWidth="1.5">
214
- <path d="M21 15a2 2 0 0 1-2 2H7l-4 4V5a2 2 0 0 1 2-2h14a2 2 0 0 1 2 2z"/>
215
- </svg>
216
- Chat
217
- </button>
218
- <button
219
- onClick={markChunkUnderstood}
220
- className={`py-2 px-4 border rounded-lg transition-all text-sm ${
221
- chunkStates[currentChunkIndex] === 'understood'
222
- ? 'bg-green-500 text-white border-green-500 hover:bg-green-600'
223
- : 'bg-white hover:bg-gray-50 border-gray-300'
224
- }`}
225
- >
226
- Understood
227
- </button>
228
- </div>
229
- )}
230
  </div>
231
 
232
- {/* Chat Interface - Only shown when showChat is true */}
233
  {showChat && (
234
- <div className="flex-1 flex flex-col min-h-0 bg-white rounded-lg m-2 shadow-lg">
235
  <SimpleChat
236
- messages={getCurrentChunkChatHistory()}
237
- onSend={handleSend}
238
- isLoading={isLoading}
 
 
239
  />
240
  </div>
241
  )}
 
2
  import remarkMath from 'remark-math';
3
  import rehypeKatex from 'rehype-katex';
4
  import rehypeRaw from 'rehype-raw';
5
+ import { getChatMarkdownComponents, getTitleMarkdownComponents } from '../utils/markdownComponents.jsx';
6
  import SimpleChat from './SimpleChat.jsx';
7
+ import ChunkLoadingTips from './ChunkLoadingTips.jsx';
8
  import React, { useState, useEffect } from 'react';
9
 
10
  const ChunkPanel = ({
11
  documentData,
12
  currentChunkIndex,
 
 
 
 
 
 
13
  showChat,
14
+ isTransitioning,
15
+ updateGlobalChatHistory,
16
+ getGlobalChatHistory,
17
+ addMessageToChunk,
18
+ getCurrentChunkMessages,
19
+ hasChunkMessages,
20
+ isChunkCompleted,
21
+ canEditChunk,
22
+ setWaitingForFirstResponse,
23
+ markChunkUnderstood,
24
+ skipChunk,
25
+ goToPrevChunk
26
  }) => {
27
 
28
  const chatMarkdownComponents = getChatMarkdownComponents();
29
+ const titleMarkdownComponents = getTitleMarkdownComponents();
30
  const [isLoading, setIsLoading] = useState(false);
 
 
31
 
32
+ // Generate greeting for chunks that don't have messages yet
33
+ // Only for initial chunk (0) and when not transitioning
34
+ useEffect(() => {
35
+ if (documentData && showChat && !hasChunkMessages(currentChunkIndex) && currentChunkIndex === 0 && !isTransitioning) {
36
+ generateGreetingStreaming();
37
+ }
38
+ }, [currentChunkIndex, documentData, showChat, isTransitioning]);
39
+
40
+ const updateLastAssistantMessage = (delta) => {
41
+ const allMessages = getGlobalChatHistory();
42
 
43
+ const currentChunkMessages = allMessages.filter(msg => msg.chunkIndex === currentChunkIndex);
44
+ const lastAssistantInChunk = [...currentChunkMessages].reverse().find(msg => msg.role === 'assistant');
45
 
46
+ if (!lastAssistantInChunk) {
47
+ console.warn("No assistant message found for current chunk — adding new one.");
48
+ addMessageToChunk({ role: 'assistant', content: delta }, currentChunkIndex);
49
+ return;
50
  }
51
+
52
+ const updatedMessages = allMessages.map(msg => {
53
+ if (msg === lastAssistantInChunk) {
54
+ return { ...msg, content: msg.content + (typeof delta === "string" ? delta : delta?.content || "") };
55
+ }
56
+ return msg;
57
+ });
58
+
59
+ updateGlobalChatHistory(updatedMessages);
60
+ };
61
+
62
+ const generateGreetingStreaming = async () => {
63
+ setIsLoading(true);
64
+ try {
65
+ const response = await fetch('/api/chat/stream', {
66
+ method: 'POST',
67
+ headers: { 'Content-Type': 'application/json' },
68
+ body: JSON.stringify({
69
+ messages: [],
70
+ currentChunk: documentData?.chunks?.[currentChunkIndex]?.text || '',
71
+ document: documentData ? JSON.stringify(documentData) : ''
72
+ })
73
+ });
74
+
75
+ const reader = response.body.getReader();
76
+ let shouldStop = false;
77
+
78
+ // Local snapshot to avoid stale reads
79
+ let localMessages = getGlobalChatHistory();
80
+ const createTempId = () => `assistant_${Date.now()}_${Math.random().toString(36).slice(2)}`;
81
+ let assistantId = null;
82
+
83
+ // SSE read buffer
84
+ let sseBuffer = '';
85
+
86
+ // Streaming smoothness buffer
87
+ let textBuffer = '';
88
+ let frameScheduled = false;
89
+
90
+ const flushBuffer = (isFinal = false) => {
91
+ if (!assistantId) return;
92
+
93
+ const lastMsg = localMessages[localMessages.length - 1];
94
+ if (lastMsg.id === assistantId) {
95
+ // Append buffered text
96
+ lastMsg.content += textBuffer;
97
+ textBuffer = '';
98
+ }
99
+ updateGlobalChatHistory([...localMessages]);
100
+ };
101
+
102
+ const scheduleFlush = () => {
103
+ if (!frameScheduled) {
104
+ frameScheduled = true;
105
+ requestAnimationFrame(() => {
106
+ flushBuffer();
107
+ frameScheduled = false;
108
+ });
109
+ }
110
+ };
111
+
112
+ while (!shouldStop) {
113
+ const { done, value } = await reader.read();
114
+ if (done) break;
115
+
116
+ sseBuffer += new TextDecoder().decode(value);
117
+ const parts = sseBuffer.split('\n\n');
118
+ sseBuffer = parts.pop(); // keep last partial
119
+
120
+ for (const part of parts) {
121
+ if (!part.startsWith('data:')) continue;
122
+ const jsonStr = part.slice(5).trim();
123
+ if (!jsonStr) continue;
124
+
125
+ let parsed;
126
+ try {
127
+ parsed = JSON.parse(jsonStr);
128
+ } catch (err) {
129
+ console.warn('Could not JSON.parse stream chunk', jsonStr);
130
+ continue;
131
+ }
132
+
133
+ if (parsed.error) {
134
+ console.error('streaming error', parsed.error);
135
+ shouldStop = true;
136
+ break;
137
+ }
138
+ if (parsed.done) {
139
+ shouldStop = true;
140
+ flushBuffer(true); // final flush, remove cursor
141
+ break;
142
+ }
143
+
144
+ const delta = typeof parsed === 'string' ? parsed : parsed?.content ?? '';
145
+
146
+ if (!assistantId) {
147
+ assistantId = createTempId();
148
+ localMessages.push({
149
+ id: assistantId,
150
+ role: 'assistant',
151
+ content: delta,
152
+ chunkIndex: currentChunkIndex
153
+ });
154
+ } else {
155
+ textBuffer += delta;
156
+ }
157
+
158
+ // Schedule smooth UI update
159
+ scheduleFlush();
160
+ }
161
+ }
162
+ } catch (error) {
163
+ console.error(error);
164
+ addMessageToChunk(
165
+ { role: 'assistant', content: 'Sorry, something went wrong. Please try again.' },
166
+ currentChunkIndex
167
+ );
168
+ } finally {
169
+ setIsLoading(false);
170
  }
 
171
  };
172
 
173
  const generateGreeting = async () => {
174
  setIsLoading(true);
175
+ if (setWaitingForFirstResponse) {
176
+ setWaitingForFirstResponse(true);
177
+ }
178
  try {
179
  const response = await fetch('/api/chat', {
180
  method: 'POST',
181
  headers: { 'Content-Type': 'application/json' },
182
  body: JSON.stringify({
183
  messages: [],
184
+ currentChunk: documentData?.chunks?.[currentChunkIndex]?.text || '',
185
  document: documentData ? JSON.stringify(documentData) : ''
186
  })
187
  });
188
 
189
  const data = await response.json();
190
 
191
+ addMessageToChunk(
192
  {
193
  role: 'assistant',
194
+ content: data.content || 'Hi! Welcome to your learning session. Let\'s explore this document together!'
195
+ },
196
+ currentChunkIndex
197
+ );
198
  } catch (error) {
199
  console.error('Error generating greeting:', error);
200
+ addMessageToChunk(
201
  {
202
  role: 'assistant',
203
+ content: 'Hi! Welcome to your learning session. Let\'s explore this document together!'
204
+ },
205
+ currentChunkIndex
206
+ );
207
+ } finally {
208
+ setIsLoading(false);
209
+ if (setWaitingForFirstResponse) {
210
+ setWaitingForFirstResponse(false);
211
+ }
212
+ }
213
+ };
214
+
215
+ const handleSendStreaming = async (text) => {
216
+ const userMessage = { role: 'user', content: text, chunkIndex: currentChunkIndex };
217
+ addMessageToChunk(userMessage, currentChunkIndex);
218
+ setIsLoading(true);
219
+
220
+ try {
221
+ // Get the updated messages after adding the user message
222
+ const updatedMessages = [...getGlobalChatHistory(), userMessage];
223
+
224
+ const response = await fetch('/api/chat/stream', {
225
+ method: 'POST',
226
+ headers: { 'Content-Type': 'application/json' },
227
+ body: JSON.stringify({
228
+ messages: updatedMessages,
229
+ currentChunk: documentData?.chunks?.[currentChunkIndex]?.text || '',
230
+ document: documentData ? JSON.stringify(documentData) : ''
231
+ })
232
+ });
233
+
234
+ const reader = await response.body.getReader();
235
+
236
+ let shouldStop = false;
237
+
238
+ // Local snapshot to avoid stale reads - include the user message we just added
239
+ let localMessages = updatedMessages;
240
+ const createTempId = () => `assistant_${Date.now()}_${Math.random().toString(36).slice(2)}`;
241
+ let assistantId = null;
242
+
243
+ // SSE read buffer
244
+ let sseBuffer = '';
245
+
246
+ // Streaming smoothness buffer
247
+ let textBuffer = '';
248
+ let frameScheduled = false;
249
+
250
+ const flushBuffer = (isFinal = false) => {
251
+ if (!assistantId) return;
252
+
253
+ const lastMsg = localMessages[localMessages.length - 1];
254
+ if (lastMsg.id === assistantId) {
255
+ // Append buffered text
256
+ lastMsg.content += textBuffer;
257
+ textBuffer = '';
258
+ }
259
+ updateGlobalChatHistory([...localMessages]);
260
+ };
261
+
262
+ const scheduleFlush = () => {
263
+ if (!frameScheduled) {
264
+ frameScheduled = true;
265
+ requestAnimationFrame(() => {
266
+ flushBuffer();
267
+ frameScheduled = false;
268
+ });
269
  }
270
+ };
271
+ while (!shouldStop) {
272
+ const { done, value } = await reader.read();
273
+ if (done) break;
274
+
275
+ sseBuffer += new TextDecoder().decode(value);
276
+ const parts = sseBuffer.split('\n\n');
277
+ sseBuffer = parts.pop(); // keep last partial
278
+
279
+ for (const part of parts) {
280
+ if (!part.startsWith('data:')) continue;
281
+ const jsonStr = part.slice(5).trim();
282
+ if (!jsonStr) continue;
283
+
284
+ let parsed;
285
+ try {
286
+ parsed = JSON.parse(jsonStr);
287
+ } catch (err) {
288
+ console.warn('Could not JSON.parse stream chunk', jsonStr);
289
+ continue;
290
+ }
291
+
292
+ if (parsed.error) {
293
+ console.error('streaming error', parsed.error);
294
+ shouldStop = true;
295
+ break;
296
+ }
297
+ if (parsed.done) {
298
+ shouldStop = true;
299
+ flushBuffer(true); // final flush, remove cursor
300
+ break;
301
+ }
302
+
303
+ const delta = typeof parsed === 'string' ? parsed : parsed?.content ?? '';
304
+
305
+ if (!assistantId) {
306
+ assistantId = createTempId();
307
+ localMessages.push({
308
+ id: assistantId,
309
+ role: 'assistant',
310
+ content: delta,
311
+ chunkIndex: currentChunkIndex
312
+ });
313
+ } else {
314
+ textBuffer += delta;
315
+ }
316
+
317
+ // Schedule smooth UI update
318
+ scheduleFlush();
319
+ }
320
+ }
321
+ } catch (error) {
322
+ console.error(error);
323
+ addMessageToChunk(
324
+ { role: 'assistant', content: 'Sorry, something went wrong. Please try again.' },
325
+ currentChunkIndex
326
+ );
327
  } finally {
328
  setIsLoading(false);
329
  }
330
  };
331
 
332
  const handleSend = async (text) => {
333
+ const userMessage = { role: 'user', content: text, chunkIndex: currentChunkIndex };
334
+ addMessageToChunk(userMessage, currentChunkIndex);
 
335
  setIsLoading(true);
336
 
337
  try {
 
339
  method: 'POST',
340
  headers: { 'Content-Type': 'application/json' },
341
  body: JSON.stringify({
342
+ messages: getGlobalChatHistory(),
343
+ currentChunk: documentData?.chunks?.[currentChunkIndex]?.text || '',
344
  document: documentData ? JSON.stringify(documentData) : ''
345
  })
346
  });
347
 
348
  const data = await response.json();
349
+ addMessageToChunk(
350
+ { role: 'assistant', content: data.content || 'Sorry, no response received.' },
351
+ currentChunkIndex
352
+ );
353
  } catch (error) {
354
  console.error('Error:', error);
355
+ addMessageToChunk(
356
+ { role: 'assistant', content: 'Sorry, something went wrong. Please try again.' },
357
+ currentChunkIndex
358
+ );
359
  } finally {
360
  setIsLoading(false);
361
  }
 
364
  return (
365
  <>
366
  {/* Chunk Header */}
367
+ <div className="px-6 py-4 flex-shrink-0 bg-white rounded-t-lg border-b border-gray-200 z-10 flex items-center justify-between">
368
+ <div className="flex items-center flex-1">
369
+ {/* Previous Chunk Button */}
370
+ <button
371
+ onClick={goToPrevChunk}
372
+ disabled={currentChunkIndex === 0}
373
+ className="mr-3 p-2 rounded-full bg-gray-100 hover:bg-gray-200 text-gray-600 transition-colors duration-200 disabled:opacity-50 disabled:cursor-not-allowed"
374
+ title="Go to previous chunk"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
375
  >
376
+ <svg
377
+ className="w-5 h-5"
378
+ fill="currentColor"
379
+ viewBox="0 0 20 20"
380
+ >
381
+ <path
382
+ fillRule="evenodd"
383
+ d="M12.707 5.293a1 1 0 010 1.414L9.414 10l3.293 3.293a1 1 0 01-1.414 1.414l-4-4a1 1 0 010-1.414l4-4a1 1 0 011.414 0z"
384
+ clipRule="evenodd"
385
+ />
386
+ </svg>
387
+ </button>
388
+
389
+ {/* Chunk Title */}
390
+ <div className="font-semibold text-xl text-gray-900 text-left">
391
+ <ReactMarkdown
392
+ remarkPlugins={[remarkMath]}
393
+ rehypePlugins={[rehypeRaw, rehypeKatex]}
394
+ components={titleMarkdownComponents}
395
+ >
396
+ {documentData?.chunks?.[currentChunkIndex]?.topic || "Loading..."}
397
+ </ReactMarkdown>
398
+ </div>
399
  </div>
400
 
401
+ <div className="flex items-center gap-2">
402
+ {/* Skip Button */}
 
 
 
 
 
 
 
 
 
 
403
  <button
404
+ onClick={skipChunk}
405
+ className="p-2 rounded-full bg-gray-100 hover:bg-gray-200 text-gray-600 transition-colors duration-200"
406
+ title="Skip this chunk"
 
 
 
407
  >
408
+ <svg
409
+ className="w-5 h-5"
410
+ fill="currentColor"
411
+ viewBox="0 0 20 20"
412
+ >
413
+ <path
414
+ fillRule="evenodd"
415
+ d="M7.293 14.707a1 1 0 010-1.414L10.586 10 7.293 6.707a1 1 0 011.414-1.414l4 4a1 1 0 010 1.414l-4 4a1 1 0 01-1.414 0z"
416
+ clipRule="evenodd"
417
+ />
418
+ <path
419
+ fillRule="evenodd"
420
+ d="M12.293 14.707a1 1 0 010-1.414L15.586 10l-3.293-3.293a1 1 0 011.414-1.414l4 4a1 1 0 010 1.414l-4 4a1 1 0 01-1.414 0z"
421
+ clipRule="evenodd"
422
+ />
423
  </svg>
 
424
  </button>
425
+
426
+ {/* Understood Button */}
427
+ <button
428
  onClick={markChunkUnderstood}
429
+ className="p-2 rounded-full bg-green-100 hover:bg-green-200 text-green-600 transition-colors duration-200"
430
+ title="Mark chunk as understood"
 
 
 
431
  >
432
+ <svg
433
+ className="w-5 h-5"
434
+ fill="currentColor"
435
+ viewBox="0 0 20 20"
436
+ >
437
+ <path
438
+ fillRule="evenodd"
439
+ d="M16.707 5.293a1 1 0 010 1.414l-8 8a1 1 0 01-1.414 0l-4-4a1 1 0 011.414-1.414L8 12.586l7.293-7.293a1 1 0 011.414 0z"
440
+ clipRule="evenodd"
441
+ />
442
+ </svg>
443
  </button>
444
  </div>
445
+
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
446
  </div>
447
 
448
+ {/* Chat Interface - Always shown when showChat is true */}
449
  {showChat && (
450
+ <div className="relative flex-1 overflow-hidden">
451
  <SimpleChat
452
+ messages={getGlobalChatHistory()}
453
+ currentChunkIndex={currentChunkIndex}
454
+ canEdit={canEditChunk(currentChunkIndex)}
455
+ onSend={handleSendStreaming}
456
+ isLoading={isLoading || isTransitioning}
457
  />
458
  </div>
459
  )}
frontend/src/components/DocumentProcessor.jsx CHANGED
@@ -9,12 +9,14 @@ import { usePanelResize } from '../hooks/usePanelResize';
9
  // Import components
10
  import LoadingAnimation from './LoadingAnimation';
11
  import DocumentViewer from './DocumentViewer';
12
- import ChunkNavigation from './ChunkNavigation';
13
  import ChunkPanel from './ChunkPanel';
 
14
 
15
  function DocumentProcessor() {
16
  // State for PDF navigation
17
  const [pdfNavigation, setPdfNavigation] = useState(null);
 
 
18
 
19
  // Custom hooks
20
  const {
@@ -33,6 +35,7 @@ function DocumentProcessor() {
33
  currentChunkIndex,
34
  chunkExpanded,
35
  showChat,
 
36
  goToNextChunk,
37
  goToPrevChunk,
38
  skipChunk,
@@ -41,8 +44,13 @@ function DocumentProcessor() {
41
  setChunkExpanded,
42
  setShowChat,
43
  setChunkAsInteractive,
44
- updateChunkChatHistory,
45
- getCurrentChunkChatHistory
 
 
 
 
 
46
  } = useChunkNavigation(documentData, null);
47
 
48
  const {
@@ -88,10 +96,6 @@ function DocumentProcessor() {
88
  );
89
  }
90
 
91
- if (processing) {
92
- return <LoadingAnimation uploadProgress={uploadProgress} />;
93
- }
94
-
95
  if (!documentData) {
96
  return (
97
  <div className="h-screen bg-gray-50 flex items-center justify-center">
@@ -121,12 +125,17 @@ function DocumentProcessor() {
121
  style={{ cursor: isDragging ? 'col-resize' : 'default' }}
122
  >
123
  {/* Left Panel - Document */}
124
- <div style={{ width: `${leftPanelWidth}%`, height: '100%' }}>
125
- <DocumentViewer
126
- selectedFile={selectedFile}
127
- documentData={documentData}
128
- onPageChange={setPdfNavigation}
129
- />
 
 
 
 
 
130
  </div>
131
 
132
  {/* Resizable Divider */}
@@ -148,32 +157,33 @@ function DocumentProcessor() {
148
  <div
149
  className="flex flex-col"
150
  style={{ width: `${100 - leftPanelWidth}%` }}
151
- >
152
- {/* Navigation Bar */}
153
- <ChunkNavigation
154
- currentChunkIndex={currentChunkIndex}
155
- documentData={documentData}
156
- chunkStates={chunkStates}
157
- goToPrevChunk={goToPrevChunk}
158
- goToNextChunk={goToNextChunk}
159
- />
160
-
161
  {/* Chunk Panel */}
162
  <div className="flex-1 flex flex-col min-h-0 bg-white rounded-lg shadow-sm">
163
  <ChunkPanel
164
  documentData={documentData}
165
  currentChunkIndex={currentChunkIndex}
166
- chunkExpanded={chunkExpanded}
167
- setChunkExpanded={setChunkExpanded}
168
- chunkStates={chunkStates}
169
- skipChunk={skipChunk}
170
- markChunkUnderstood={markChunkUnderstood}
171
- startInteractiveLesson={handleStartInteractiveLesson}
172
  showChat={showChat}
173
- setShowChat={setShowChat}
174
- setChunkAsInteractive={setChunkAsInteractive}
175
- updateChunkChatHistory={updateChunkChatHistory}
176
- getCurrentChunkChatHistory={getCurrentChunkChatHistory}
 
 
 
 
 
 
 
 
177
  />
178
  </div>
179
  </div>
 
9
  // Import components
10
  import LoadingAnimation from './LoadingAnimation';
11
  import DocumentViewer from './DocumentViewer';
 
12
  import ChunkPanel from './ChunkPanel';
13
+ import ProgressBar from './ProgressBar';
14
 
15
  function DocumentProcessor() {
16
  // State for PDF navigation
17
  const [pdfNavigation, setPdfNavigation] = useState(null);
18
+ // State for first LLM response loading
19
+ const [waitingForFirstResponse, setWaitingForFirstResponse] = useState(false);
20
 
21
  // Custom hooks
22
  const {
 
35
  currentChunkIndex,
36
  chunkExpanded,
37
  showChat,
38
+ isTransitioning,
39
  goToNextChunk,
40
  goToPrevChunk,
41
  skipChunk,
 
44
  setChunkExpanded,
45
  setShowChat,
46
  setChunkAsInteractive,
47
+ updateGlobalChatHistory,
48
+ getGlobalChatHistory,
49
+ addMessageToChunk,
50
+ getCurrentChunkMessages,
51
+ hasChunkMessages,
52
+ isChunkCompleted,
53
+ canEditChunk
54
  } = useChunkNavigation(documentData, null);
55
 
56
  const {
 
96
  );
97
  }
98
 
 
 
 
 
99
  if (!documentData) {
100
  return (
101
  <div className="h-screen bg-gray-50 flex items-center justify-center">
 
125
  style={{ cursor: isDragging ? 'col-resize' : 'default' }}
126
  >
127
  {/* Left Panel - Document */}
128
+ <div style={{ width: `${leftPanelWidth}%`, height: '100%' }} className="flex flex-col">
129
+
130
+
131
+ {/* Document Viewer */}
132
+ <div className="flex-1 min-h-0">
133
+ <DocumentViewer
134
+ selectedFile={selectedFile}
135
+ documentData={documentData}
136
+ onPageChange={setPdfNavigation}
137
+ />
138
+ </div>
139
  </div>
140
 
141
  {/* Resizable Divider */}
 
157
  <div
158
  className="flex flex-col"
159
  style={{ width: `${100 - leftPanelWidth}%` }}
160
+ > {/* Progress Bar */}
161
+ <div className="mb-4 flex-shrink-0">
162
+ <ProgressBar
163
+ currentChunkIndex={currentChunkIndex}
164
+ totalChunks={documentData?.chunks?.length || 0}
165
+ onChunkClick={null} // Start with linear progression only
166
+ />
167
+ </div>
168
+
 
169
  {/* Chunk Panel */}
170
  <div className="flex-1 flex flex-col min-h-0 bg-white rounded-lg shadow-sm">
171
  <ChunkPanel
172
  documentData={documentData}
173
  currentChunkIndex={currentChunkIndex}
 
 
 
 
 
 
174
  showChat={showChat}
175
+ isTransitioning={isTransitioning}
176
+ updateGlobalChatHistory={updateGlobalChatHistory}
177
+ getGlobalChatHistory={getGlobalChatHistory}
178
+ addMessageToChunk={addMessageToChunk}
179
+ getCurrentChunkMessages={getCurrentChunkMessages}
180
+ hasChunkMessages={hasChunkMessages}
181
+ isChunkCompleted={isChunkCompleted}
182
+ canEditChunk={canEditChunk}
183
+ setWaitingForFirstResponse={setWaitingForFirstResponse}
184
+ markChunkUnderstood={markChunkUnderstood}
185
+ skipChunk={skipChunk}
186
+ goToPrevChunk={goToPrevChunk}
187
  />
188
  </div>
189
  </div>
frontend/src/components/ProgressBar.jsx ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import React from 'react';
2
+
3
+ const ProgressBar = ({
4
+ currentChunkIndex,
5
+ totalChunks,
6
+ onChunkClick = null // Optional: allow clicking on progress bar segments
7
+ }) => {
8
+ const progressPercentage = totalChunks > 0 ? ((currentChunkIndex) / totalChunks) * 100 : 0;
9
+
10
+ return (
11
+ <div className="w-full">
12
+ {/* Progress Label */}
13
+ <div className="flex justify-between items-center mb-2">
14
+ <span className="text-sm font-medium text-gray-700">
15
+ Progress: {Math.round(progressPercentage)}%
16
+ </span>
17
+ <span className="text-sm text-gray-500">
18
+ {currentChunkIndex + 1} of {totalChunks} sections
19
+ </span>
20
+ </div>
21
+
22
+ {/* Progress Bar */}
23
+ <div className="w-full bg-gray-200 rounded-lg h-3 overflow-hidden shadow-sm relative">
24
+ {/* Progress fill */}
25
+ <div
26
+ className="h-full bg-green-500 transition-all duration-500 ease-out rounded-lg"
27
+ style={{ width: `${progressPercentage}%` }}
28
+ />
29
+
30
+ {/* Optional: Clickable segments overlay */}
31
+ {onChunkClick && (
32
+ <div className="absolute inset-0 flex">
33
+ {Array.from({ length: totalChunks }, (_, index) => (
34
+ <button
35
+ key={index}
36
+ onClick={() => onChunkClick(index)}
37
+ className="flex-1 hover:bg-white hover:bg-opacity-20 transition-colors duration-200"
38
+ title={`Go to chunk ${index + 1}`}
39
+ />
40
+ ))}
41
+ </div>
42
+ )}
43
+ </div>
44
+ </div>
45
+ );
46
+ };
47
+
48
+ export default ProgressBar;
frontend/src/components/SimpleChat.jsx CHANGED
@@ -1,81 +1,177 @@
1
- import { useState } from 'react';
2
  import ReactMarkdown from 'react-markdown';
3
  import remarkMath from 'remark-math';
4
  import rehypeKatex from 'rehype-katex';
5
  import rehypeRaw from 'rehype-raw';
6
  import { getChatMarkdownComponents } from '../utils/markdownComponents.jsx';
7
 
8
-
9
- const SimpleChat = ({ messages, onSend, isLoading }) => {
10
  const [input, setInput] = useState('');
 
 
11
 
12
  const handleSubmit = (e) => {
13
  e.preventDefault();
14
- if (!input.trim() || isLoading) return;
15
  onSend(input.trim());
16
  setInput('');
17
  };
18
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
19
  return (
20
- <div className="flex flex-col h-full">
21
- {/* Messages */}
22
- <div className="flex-1 overflow-y-auto p-4 space-y-3">
23
- {messages.map((message, idx) => (
24
- <div
25
- key={idx}
26
- className={`flex ${message.role === 'user' ? 'justify-end' : 'justify-start'}`}
27
- >
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
28
  <div
29
- className={`max-w-[90%] p-3 rounded-lg ${
30
- message.role === 'user'
31
- ? 'bg-gray-100 text-white'
32
- : 'bg-white text-gray-900'
33
- }`}
34
  >
35
- <ReactMarkdown
 
 
 
 
 
 
 
36
  remarkPlugins={[remarkMath]}
37
  rehypePlugins={[rehypeRaw, rehypeKatex]}
38
  components={getChatMarkdownComponents()}
39
  >
40
  {message.content}
41
  </ReactMarkdown>
 
42
  </div>
43
- </div>
44
- ))}
45
 
46
- {isLoading && (
47
- <div className="flex justify-start">
48
- <div className="bg-gray-100 p-3 rounded-lg">
49
- <div className="flex space-x-1">
50
- <div className="w-2 h-2 bg-gray-400 rounded-full animate-bounce"></div>
51
- <div
52
- className="w-2 h-2 bg-gray-400 rounded-full animate-bounce"
53
- style={{ animationDelay: '0.1s' }}
54
- ></div>
55
- <div
56
- className="w-2 h-2 bg-gray-400 rounded-full animate-bounce"
57
- style={{ animationDelay: '0.2s' }}
58
- ></div>
59
  </div>
60
- </div>
 
61
  </div>
62
  )}
63
  </div>
64
 
65
- {/* Input */}
66
  <form onSubmit={handleSubmit} className="p-4 border-t">
67
  <div className="flex space-x-2">
68
  <input
69
  type="text"
70
  value={input}
71
  onChange={(e) => setInput(e.target.value)}
72
- placeholder="Type your message..."
73
- disabled={isLoading}
74
- className="flex-1 px-3 py-2 border rounded-lg focus:outline-none focus:ring-2 focus:ring-blue-500 disabled:bg-gray-100"
75
  />
76
  <button
77
  type="submit"
78
- disabled={!input.trim() || isLoading}
79
  className="px-4 py-2 bg-blue-500 text-white rounded-lg hover:bg-blue-600 disabled:bg-gray-300 disabled:cursor-not-allowed"
80
  >
81
  {isLoading ? '...' : 'Send'}
 
1
+ import { useState, useEffect, useRef, useMemo } from 'react';
2
  import ReactMarkdown from 'react-markdown';
3
  import remarkMath from 'remark-math';
4
  import rehypeKatex from 'rehype-katex';
5
  import rehypeRaw from 'rehype-raw';
6
  import { getChatMarkdownComponents } from '../utils/markdownComponents.jsx';
7
 
8
+ const SimpleChat = ({ messages, currentChunkIndex, canEdit, onSend, isLoading }) => {
 
9
  const [input, setInput] = useState('');
10
+ const containerRef = useRef(null);
11
+ const anchorRef = useRef(null); // <- will be a tiny zero-height anchor BEFORE the bubble
12
 
13
// Submit the draft message unless a reply is pending or the chunk is
// read-only; clears the input on send.
const handleSubmit = (e) => {
  e.preventDefault();
  const text = input.trim();
  if (!text || isLoading || !canEdit) return;
  onSend(text);
  setInput('');
};
19
 
20
// Indices (into the flat message list) of the first and latest message
// belonging to the chunk currently in view; anchorIndex is the message the
// scroll logic pins to the top of the container.
const { anchorIndex, firstInChunkIndex } = useMemo(() => {
  const hits = [];
  messages.forEach((msg, i) => {
    if (msg.chunkIndex === currentChunkIndex) hits.push(i);
  });
  const first = hits.length > 0 ? hits[0] : -1;
  const last = hits.length > 0 ? hits[hits.length - 1] : -1;
  return { anchorIndex: last !== -1 ? last : first, firstInChunkIndex: first };
}, [messages, currentChunkIndex]);
32
+
33
// Pin the zero-height anchor to the top of the scroll container. The two
// nested requestAnimationFrame calls make sure this runs only after React
// has committed AND the browser has laid out the new content.
const scrollAfterLayout = () => {
  requestAnimationFrame(() =>
    requestAnimationFrame(() => {
      const anchor = anchorRef.current;
      if (anchor) {
        // Align the anchor with the top of the nearest scrollable ancestor.
        anchor.scrollIntoView({ behavior: 'smooth', block: 'start', inline: 'nearest' });
        return;
      }
      // Fallback when no anchor is rendered yet: scroll to the top.
      if (containerRef.current) {
        containerRef.current.scrollTo({ top: 0, behavior: 'smooth' });
      }
    })
  );
};
47
+
48
// Re-pin whenever the visible chunk changes.
useEffect(() => {
  if (anchorIndex === -1) {
    // No messages for this chunk yet: just scroll the container to the top.
    if (containerRef.current) {
      requestAnimationFrame(() => containerRef.current.scrollTo({ top: 0, behavior: 'smooth' }));
    }
  } else {
    scrollAfterLayout();
  }
  // eslint-disable-next-line react-hooks/exhaustive-deps
}, [currentChunkIndex, anchorIndex]);

// Re-pin after a new message lands and its layout has settled.
useEffect(() => {
  if (anchorIndex !== -1) scrollAfterLayout();
  // eslint-disable-next-line react-hooks/exhaustive-deps
}, [messages.length, anchorIndex]);
63
+
64
  return (
65
+ <div className="flex flex-col h-full min-h-0">
66
+ <div
67
+ ref={containerRef}
68
+ className="flex-1 min-h-0 overflow-y-auto p-4 flex flex-col space-y-3"
69
+ >
70
+ {messages.map((message, idx) => {
71
+ const isCurrentChunk = message.chunkIndex === currentChunkIndex;
72
+ const isAnchor = idx === anchorIndex;
73
+
74
+ // Render a zero-height anchor just BEFORE the bubble for the anchor index.
75
+ if (isAnchor) {
76
+ return (
77
+ <div key={idx} className="flex flex-col">
78
+ {/* <-- ZERO-HEIGHT anchor: deterministic top-of-message alignment */}
79
+ <div ref={anchorRef} style={{ height: 0, margin: 0, padding: 0 }} />
80
+
81
+ <div className={`flex ${message.role === 'user' ? 'justify-end' : 'justify-start'}`}>
82
+ <div
83
+ className={`max-w-[90%] p-3 rounded-lg transition-opacity ${
84
+ message.role === 'user'
85
+ ? `bg-gray-100 text-white ${isCurrentChunk ? 'opacity-100' : 'opacity-40'}`
86
+ : `bg-white text-gray-900 ${isCurrentChunk ? 'opacity-100' : 'opacity-40'}`
87
+ }`}
88
+ >
89
+ <ReactMarkdown
90
+ remarkPlugins={[remarkMath]}
91
+ rehypePlugins={[rehypeRaw, rehypeKatex]}
92
+ components={getChatMarkdownComponents()}
93
+ >
94
+ {message.content}
95
+ </ReactMarkdown>
96
+ </div>
97
+ </div>
98
+
99
+ {isLoading && (
100
+ <div className="flex justify-start mt-3">
101
+ <div className="bg-gray-100 p-3 rounded-lg">
102
+ <div className="flex space-x-1">
103
+ <div className="w-2 h-2 bg-gray-400 rounded-full animate-bounce"></div>
104
+ <div className="w-2 h-2 bg-gray-400 rounded-full animate-bounce" style={{ animationDelay: '0.1s' }}></div>
105
+ <div className="w-2 h-2 bg-gray-400 rounded-full animate-bounce" style={{ animationDelay: '0.2s' }}></div>
106
+ </div>
107
+ </div>
108
+ </div>
109
+ )}
110
+
111
+ {/* filler to push remaining whitespace below the pinned message */}
112
+ <div className="flex-1" />
113
+ </div>
114
+ );
115
+ }
116
+
117
+ // Non-anchor message: render normally
118
+ return (
119
  <div
120
+ key={idx}
121
+ className={`flex ${message.role === 'user' ? 'justify-end' : 'justify-start'}`}
 
 
 
122
  >
123
+ <div
124
+ className={`max-w-[90%] p-3 rounded-lg transition-opacity ${
125
+ message.role === 'user'
126
+ ? `bg-gray-100 text-white ${isCurrentChunk ? 'opacity-100' : 'opacity-40'}`
127
+ : `bg-white text-gray-900 ${isCurrentChunk ? 'opacity-100' : 'opacity-40'}`
128
+ }`}
129
+ >
130
+ <ReactMarkdown
131
  remarkPlugins={[remarkMath]}
132
  rehypePlugins={[rehypeRaw, rehypeKatex]}
133
  components={getChatMarkdownComponents()}
134
  >
135
  {message.content}
136
  </ReactMarkdown>
137
+ </div>
138
  </div>
139
+ );
140
+ })}
141
 
142
+ {/* if no messages in chunk yet, render typing+filler */}
143
+ {firstInChunkIndex === -1 && (
144
+ <div className="flex flex-col">
145
+ {isLoading && (
146
+ <div className="flex justify-start">
147
+ <div className="bg-gray-100 p-3 rounded-lg">
148
+ <div className="flex space-x-1">
149
+ <div className="w-2 h-2 bg-gray-400 rounded-full animate-bounce"></div>
150
+ <div className="w-2 h-2 bg-gray-400 rounded-full animate-bounce" style={{ animationDelay: '0.1s' }}></div>
151
+ <div className="w-2 h-2 bg-gray-400 rounded-full animate-bounce" style={{ animationDelay: '0.2s' }}></div>
152
+ </div>
153
+ </div>
 
154
  </div>
155
+ )}
156
+ <div className="flex-1" />
157
  </div>
158
  )}
159
  </div>
160
 
161
+ {/* Input (unchanged) */}
162
  <form onSubmit={handleSubmit} className="p-4 border-t">
163
  <div className="flex space-x-2">
164
  <input
165
  type="text"
166
  value={input}
167
  onChange={(e) => setInput(e.target.value)}
168
+ placeholder={canEdit ? 'Type your message...' : 'This chunk is completed - navigation only'}
169
+ disabled={isLoading || !canEdit}
170
+ className="flex-1 px-3 py-2 border rounded-lg focus:outline-none focus:ring-2 focus:ring-blue-500 disabled:bg-gray-100 disabled:text-gray-500"
171
  />
172
  <button
173
  type="submit"
174
+ disabled={!input.trim() || isLoading || !canEdit}
175
  className="px-4 py-2 bg-blue-500 text-white rounded-lg hover:bg-blue-600 disabled:bg-gray-300 disabled:cursor-not-allowed"
176
  >
177
  {isLoading ? '...' : 'Send'}
frontend/src/hooks/useChunkNavigation.js CHANGED
@@ -4,8 +4,10 @@ export const useChunkNavigation = (documentData, clearTypingAnimation) => {
4
  const [chunkStates, setChunkStates] = useState({});
5
  const [currentChunkIndex, setCurrentChunkIndex] = useState(0);
6
  const [chunkExpanded, setChunkExpanded] = useState(true);
7
- const [chunkChatHistories, setChunkChatHistories] = useState({});
8
- const [showChat, setShowChat] = useState(false);
 
 
9
 
10
  const goToNextChunk = () => {
11
  if (documentData && currentChunkIndex < documentData.chunks.length - 1) {
@@ -14,7 +16,6 @@ export const useChunkNavigation = (documentData, clearTypingAnimation) => {
14
  }
15
  setCurrentChunkIndex(currentChunkIndex + 1);
16
  setChunkExpanded(true);
17
- setShowChat(false);
18
  }
19
  };
20
 
@@ -25,56 +26,70 @@ export const useChunkNavigation = (documentData, clearTypingAnimation) => {
25
  }
26
  setCurrentChunkIndex(currentChunkIndex - 1);
27
  setChunkExpanded(true);
28
- setShowChat(false);
29
  }
30
  };
31
 
32
- const skipChunk = () => {
33
- setChunkStates(prev => {
34
- const currentState = prev[currentChunkIndex];
35
- const newState = currentState === 'skipped' ? undefined : 'skipped';
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
36
 
37
- // Auto-advance to next chunk if setting to skipped (not toggling off)
38
- if (newState === 'skipped' && documentData && currentChunkIndex < documentData.chunks.length - 1) {
39
- setTimeout(() => {
40
- if (clearTypingAnimation) {
41
- clearTypingAnimation();
42
- }
43
- setCurrentChunkIndex(currentChunkIndex + 1);
44
- setChunkExpanded(true);
45
- setShowChat(false);
46
- }, 100); // Small delay to allow state update to complete
47
  }
48
-
49
- return {
50
- ...prev,
51
- [currentChunkIndex]: newState
52
- };
53
- });
 
 
 
 
 
 
 
54
  };
55
 
56
  const markChunkUnderstood = () => {
57
- setChunkStates(prev => {
58
- const currentState = prev[currentChunkIndex];
59
- const newState = currentState === 'understood' ? undefined : 'understood';
60
-
61
- // Auto-advance to next chunk if setting to understood (not toggling off)
62
- if (newState === 'understood' && documentData && currentChunkIndex < documentData.chunks.length - 1) {
63
- setTimeout(() => {
64
- if (clearTypingAnimation) {
65
- clearTypingAnimation();
66
- }
67
- setCurrentChunkIndex(currentChunkIndex + 1);
68
- setChunkExpanded(true);
69
- setShowChat(false);
70
- }, 100); // Small delay to allow state update to complete
71
- }
72
-
73
- return {
74
- ...prev,
75
- [currentChunkIndex]: newState
76
- };
77
- });
78
  };
79
 
80
  const startInteractiveLesson = (startChunkLessonFn) => {
@@ -86,21 +101,36 @@ export const useChunkNavigation = (documentData, clearTypingAnimation) => {
86
  };
87
 
88
  const setChunkAsInteractive = () => {
89
- setChunkStates(prev => ({
90
- ...prev,
91
- [currentChunkIndex]: 'interactive'
92
- }));
93
  };
94
 
95
- const updateChunkChatHistory = (messages) => {
96
- setChunkChatHistories(prev => ({
97
- ...prev,
98
- [currentChunkIndex]: messages
99
- }));
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
100
  };
101
 
102
- const getCurrentChunkChatHistory = () => {
103
- return chunkChatHistories[currentChunkIndex] || [];
104
  };
105
 
106
  return {
@@ -108,6 +138,7 @@ export const useChunkNavigation = (documentData, clearTypingAnimation) => {
108
  currentChunkIndex,
109
  chunkExpanded,
110
  showChat,
 
111
  goToNextChunk,
112
  goToPrevChunk,
113
  skipChunk,
@@ -116,7 +147,12 @@ export const useChunkNavigation = (documentData, clearTypingAnimation) => {
116
  setChunkExpanded,
117
  setShowChat,
118
  setChunkAsInteractive,
119
- updateChunkChatHistory,
120
- getCurrentChunkChatHistory
 
 
 
 
 
121
  };
122
  };
 
4
  const [chunkStates, setChunkStates] = useState({});
5
  const [currentChunkIndex, setCurrentChunkIndex] = useState(0);
6
  const [chunkExpanded, setChunkExpanded] = useState(true);
7
+ const [globalChatHistory, setGlobalChatHistory] = useState([]);
8
+ const [showChat, setShowChat] = useState(true);
9
+ const [isTransitioning, setIsTransitioning] = useState(false);
10
+ const [completedChunks, setCompletedChunks] = useState(new Set());
11
 
12
  const goToNextChunk = () => {
13
  if (documentData && currentChunkIndex < documentData.chunks.length - 1) {
 
16
  }
17
  setCurrentChunkIndex(currentChunkIndex + 1);
18
  setChunkExpanded(true);
 
19
  }
20
  };
21
 
 
26
  }
27
  setCurrentChunkIndex(currentChunkIndex - 1);
28
  setChunkExpanded(true);
 
29
  }
30
  };
31
 
32
// Complete the current chunk (action is 'skip' or 'understood'), advance to
// the next one, and — unless the next chunk already has chat history — ask
// the backend for an opening assistant message for it.
// Fix: previously a 4xx/5xx response was fed straight into response.json()
// and could be injected into the chat as an assistant message; we now check
// response.ok and route HTTP errors through the existing catch branch.
const sendAutomatedMessage = async (action) => {
  // Nothing to do without a document or when already on the last chunk.
  if (!documentData || currentChunkIndex >= documentData.chunks.length - 1) return;

  setIsTransitioning(true);
  const nextChunkIndex = currentChunkIndex + 1;
  const nextChunk = documentData.chunks[nextChunkIndex];

  // Mark current chunk as completed
  setCompletedChunks(prev => new Set(prev).add(currentChunkIndex));

  // Update chunk index immediately for UI feedback
  setCurrentChunkIndex(nextChunkIndex);
  setChunkExpanded(true);

  // Revisiting a chunk that already has messages: navigate only, no request.
  if (hasChunkMessages(nextChunkIndex)) {
    setIsTransitioning(false);
    return;
  }

  try {
    const response = await fetch('/api/chat', {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify({
        messages: globalChatHistory,
        // currentChunkIndex still holds the chunk being completed here:
        // the setCurrentChunkIndex above does not re-run this closure.
        currentChunk: documentData.chunks[currentChunkIndex]?.text || '',
        nextChunk: nextChunk.text,
        action: action,
        document: documentData ? JSON.stringify(documentData) : ''
      })
    });

    // fetch() only rejects on network failure — surface HTTP errors too,
    // so an error body is never parsed and shown as a chat message.
    if (!response.ok) {
      throw new Error(`Chat request failed with status ${response.status}`);
    }

    const data = await response.json();
    addMessageToChunk(
      { role: 'assistant', content: data.content || 'Let\'s continue to the next section.' },
      nextChunkIndex
    );

    // Clear any animations after successful response
    if (clearTypingAnimation) {
      clearTypingAnimation();
    }
  } catch (error) {
    console.error('Error in automated transition:', error);
    // Clear animations on error too
    if (clearTypingAnimation) {
      clearTypingAnimation();
    }
  } finally {
    setIsTransitioning(false);
  }
};
86
+
87
// Both completion actions reuse the automated-transition flow; they only
// differ in the action tag sent to the backend. Each returns the
// transition's promise so callers may await it.
const skipChunk = () => sendAutomatedMessage('skip');

const markChunkUnderstood = () => sendAutomatedMessage('understood');
94
 
95
  const startInteractiveLesson = (startChunkLessonFn) => {
 
101
  };
102
 
103
// Intentional no-op: per-chunk "interactive" status is no longer tracked,
// but the function is kept so existing callers continue to work.
const setChunkAsInteractive = () => {};
106
 
107
// Replace the whole chat history (e.g. when restoring a session).
const updateGlobalChatHistory = (messages) => setGlobalChatHistory(messages);

// Read access to the full cross-chunk history.
const getGlobalChatHistory = () => globalChatHistory;

// Append one message, tagging it with the chunk it belongs to so the chat
// view can filter and dim messages per chunk.
const addMessageToChunk = (message, chunkIndex) =>
  setGlobalChatHistory(prev => [...prev, { ...message, chunkIndex }]);
119
+
120
// Messages belonging to the chunk currently in view.
const getCurrentChunkMessages = () =>
  globalChatHistory.filter(msg => msg.chunkIndex === currentChunkIndex);

// True when at least one message is already tagged with the given chunk.
const hasChunkMessages = (chunkIndex) =>
  globalChatHistory.some(msg => msg.chunkIndex === chunkIndex);

// A chunk counts as completed once the user skipped it or marked it understood.
const isChunkCompleted = (chunkIndex) => completedChunks.has(chunkIndex);

// Input is only allowed on the active, not-yet-completed chunk.
const canEditChunk = (chunkIndex) =>
  chunkIndex === currentChunkIndex && !isChunkCompleted(chunkIndex);
135
 
136
  return {
 
138
  currentChunkIndex,
139
  chunkExpanded,
140
  showChat,
141
+ isTransitioning,
142
  goToNextChunk,
143
  goToPrevChunk,
144
  skipChunk,
 
147
  setChunkExpanded,
148
  setShowChat,
149
  setChunkAsInteractive,
150
+ updateGlobalChatHistory,
151
+ getGlobalChatHistory,
152
+ addMessageToChunk,
153
+ getCurrentChunkMessages,
154
+ hasChunkMessages,
155
+ isChunkCompleted,
156
+ canEditChunk
157
  };
158
  };
frontend/src/main.jsx CHANGED
@@ -4,7 +4,5 @@ import './index.css'
4
  import App from './App.jsx'
5
 
6
  createRoot(document.getElementById('root')).render(
7
- <StrictMode>
8
  <App />
9
- </StrictMode>,
10
  )
 
4
  import App from './App.jsx'
5
 
6
  createRoot(document.getElementById('root')).render(
 
7
  <App />
 
8
  )
frontend/src/utils/markdownComponents.jsx CHANGED
@@ -1,11 +1,13 @@
 
1
  export const getChatMarkdownComponents = () => ({
2
- p: ({ children }) => <p className="mb-2 text-gray-800 leading-relaxed">{children}</p>,
3
  h1: ({ children }) => <h1 className="text-xl font-bold mb-3 text-gray-900">{children}</h1>,
4
  h2: ({ children }) => <h2 className="text-lg font-bold mb-2 text-gray-900">{children}</h2>,
5
  h3: ({ children }) => <h3 className="text-base font-bold mb-2 text-gray-900">{children}</h3>,
6
  ul: ({ children }) => <ul className="mb-2 ml-4 list-disc">{children}</ul>,
7
  ol: ({ children }) => <ol className="mb-2 ml-4 list-decimal">{children}</ol>,
8
  li: ({ children }) => <li className="mb-1 text-gray-800">{children}</li>,
 
9
  strong: ({ children }) => <strong className="font-semibold text-gray-900">{children}</strong>,
10
  em: ({ children }) => <em className="italic">{children}</em>,
11
  code: ({ inline, children }) =>
@@ -19,4 +21,20 @@ export const getChatMarkdownComponents = () => ({
19
  {children}
20
  </blockquote>
21
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
22
  });
 
1
+ // Default markdown components for chat content
2
  export const getChatMarkdownComponents = () => ({
3
+ p: ({ children }) => <p className="mb-3 text-gray-800 leading-relaxed">{children}</p>,
4
  h1: ({ children }) => <h1 className="text-xl font-bold mb-3 text-gray-900">{children}</h1>,
5
  h2: ({ children }) => <h2 className="text-lg font-bold mb-2 text-gray-900">{children}</h2>,
6
  h3: ({ children }) => <h3 className="text-base font-bold mb-2 text-gray-900">{children}</h3>,
7
  ul: ({ children }) => <ul className="mb-2 ml-4 list-disc">{children}</ul>,
8
  ol: ({ children }) => <ol className="mb-2 ml-4 list-decimal">{children}</ol>,
9
  li: ({ children }) => <li className="mb-1 text-gray-800">{children}</li>,
10
+ hr: () => <hr className="my-4 border-gray-300" />,
11
  strong: ({ children }) => <strong className="font-semibold text-gray-900">{children}</strong>,
12
  em: ({ children }) => <em className="italic">{children}</em>,
13
  code: ({ inline, children }) =>
 
21
  {children}
22
  </blockquote>
23
  )
24
+ });
25
+
26
+ // Title-specific markdown components with no bottom margins
27
+ export const getTitleMarkdownComponents = () => ({
28
+ p: ({ children }) => <span className="text-gray-900">{children}</span>,
29
+ h1: ({ children }) => <span className="text-xl font-bold text-gray-900">{children}</span>,
30
+ h2: ({ children }) => <span className="text-lg font-bold text-gray-900">{children}</span>,
31
+ h3: ({ children }) => <span className="text-base font-bold text-gray-900">{children}</span>,
32
+ strong: ({ children }) => <strong className="font-semibold text-gray-900">{children}</strong>,
33
+ em: ({ children }) => <em className="italic">{children}</em>,
34
+ code: ({ children }) => <code className="bg-gray-100 px-1 py-0.5 rounded text-sm font-mono">{children}</code>,
35
+ // Convert block elements to inline for titles
36
+ ul: ({ children }) => <span>{children}</span>,
37
+ ol: ({ children }) => <span>{children}</span>,
38
+ li: ({ children }) => <span>{children} </span>,
39
+ blockquote: ({ children }) => <span className="italic text-gray-700">{children}</span>
40
  });