Deevyankar committed on
Commit
048e2df
·
verified ·
1 Parent(s): b9eb209

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +706 -0
app.py ADDED
@@ -0,0 +1,706 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import re
3
+ import json
4
+ import pickle
5
+ from urllib.parse import quote
6
+
7
+ import numpy as np
8
+ import gradio as gr
9
+ from rank_bm25 import BM25Okapi
10
+ from sentence_transformers import SentenceTransformer
11
+ from openai import OpenAI
12
+
13
# ============================================================
# BrainChat: High-contrast phone UI + evaluated quizzes (RAG)
# ============================================================

# --------- Build artifacts (RAG) ---------
# Pre-computed offline artifacts consumed by ensure_loaded().
BUILD_DIR = "brainchat_build"
CHUNKS_PATH = os.path.join(BUILD_DIR, "chunks.pkl")            # chunk records (dicts)
TOKENS_PATH = os.path.join(BUILD_DIR, "tokenized_chunks.pkl")  # per-chunk token lists for BM25
EMBED_PATH = os.path.join(BUILD_DIR, "embeddings.npy")         # dense vectors, row-aligned with chunks
CONFIG_PATH = os.path.join(BUILD_DIR, "config.json")           # records the embedding model name

# --------- Logo auto-detection ---------
# Checked in order; the first file that exists on disk wins (see find_logo_filename).
LOGO_CANDIDATES = [
    "brainchat_logo.png.png",
    "Brain chat-09.png",
    "Brain Chat Imagen.svg",
    "ebcbb9f5-022f-473a-bf51-7e7974f794b4.png",
    "logo.png",
    "logo.svg",
]

# Chat model name; overridable via the OPENAI_MODEL environment variable.
OPENAI_MODEL = os.getenv("OPENAI_MODEL", "gpt-4o-mini")

# --------- Globals (lazy loaded) ---------
# All populated exactly once by ensure_loaded(); None until first use.
BM25 = None         # rank_bm25 BM25Okapi index over the tokenized chunks
CHUNKS = None       # list of chunk record dicts
EMBEDDINGS = None   # numpy array of chunk embeddings
EMBED_MODEL = None  # SentenceTransformer instance
CLIENT = None       # OpenAI API client
42
+
43
+
44
+ # ============================================================
45
+ # Retrieval
46
+ # ============================================================
47
def tokenize(text: str):
    """Lowercase *text* and split it into Unicode word tokens."""
    return [match.group(0) for match in re.finditer(r"\w+", text.lower(), flags=re.UNICODE)]
49
+
50
+
51
def ensure_loaded():
    """Lazily populate the module globals used for retrieval and generation.

    Idempotent: the heavy loading runs only on the first call (guarded by
    ``CHUNKS is None`` / ``CLIENT is None``).

    Raises:
        FileNotFoundError: when any build artifact under brainchat_build/ is missing.
        ValueError: when the OPENAI_API_KEY environment variable is not set.
    """
    global BM25, CHUNKS, EMBEDDINGS, EMBED_MODEL, CLIENT

    if CHUNKS is None:
        # Fail fast with an explicit list of whichever artifacts are absent.
        missing = [p for p in [CHUNKS_PATH, TOKENS_PATH, EMBED_PATH, CONFIG_PATH] if not os.path.exists(p)]
        if missing:
            raise FileNotFoundError(
                "Missing build files. Ensure brainchat_build/ exists and contains:\n"
                "chunks.pkl, tokenized_chunks.pkl, embeddings.npy, config.json\n\nMissing:\n"
                + "\n".join(missing)
            )

        with open(CHUNKS_PATH, "rb") as f:
            CHUNKS = pickle.load(f)

        with open(TOKENS_PATH, "rb") as f:
            tokenized_chunks = pickle.load(f)

        EMBEDDINGS = np.load(EMBED_PATH)

        with open(CONFIG_PATH, "r", encoding="utf-8") as f:
            cfg = json.load(f)

        # BM25 index over the pre-tokenized corpus; the dense model must be the
        # same one used at build time, so its name is read from config.json.
        BM25 = BM25Okapi(tokenized_chunks)
        EMBED_MODEL = SentenceTransformer(cfg["embedding_model"])

    if CLIENT is None:
        api_key = os.getenv("OPENAI_API_KEY")
        if not api_key:
            raise ValueError("OPENAI_API_KEY missing. Add it in your Space Secrets.")
        CLIENT = OpenAI(api_key=api_key)
82
+
83
+
84
def search_hybrid(query: str, shortlist_k: int = 30, final_k: int = 5):
    """Two-stage hybrid retrieval.

    Stage 1 builds a BM25 shortlist of ``shortlist_k`` chunks; stage 2
    re-ranks that shortlist by dense cosine similarity and returns the top
    ``final_k`` chunk records.
    """
    ensure_loaded()

    # Stage 1: lexical shortlist via BM25.
    sparse_scores = BM25.get_scores(tokenize(query))
    shortlist = np.argsort(sparse_scores)[::-1][:shortlist_k]

    # Stage 2: cosine re-rank. Embeddings are unit-normalized, so the dot
    # product equals cosine similarity.
    query_vec = EMBED_MODEL.encode([query], normalize_embeddings=True).astype("float32")[0]
    cosine = EMBEDDINGS[shortlist] @ query_vec

    order = np.argsort(cosine)[::-1][:final_k]
    return [CHUNKS[int(idx)] for idx in shortlist[order]]
102
+
103
+
104
def build_context(records):
    """Render retrieved chunk records as numbered ``[Source i]`` blocks.

    Missing record keys degrade to empty strings; blocks are separated by a
    blank line.
    """
    rendered = [
        f"""[Source {n}]
Book: {rec.get('book','')}
Section: {rec.get('section_title','')}
Pages: {rec.get('page_start','')}-{rec.get('page_end','')}
Text:
{rec.get('text','')}"""
        for n, rec in enumerate(records, start=1)
    ]
    return "\n\n".join(rendered)
116
+
117
+
118
def make_sources(records):
    """Build a deduplicated bullet list of citations, first-seen order kept.

    Two records are duplicates when book, section title, and page range all
    match.
    """
    seen_keys = set()
    bullets = []
    for rec in records:
        key = (rec.get("book"), rec.get("section_title"), rec.get("page_start"), rec.get("page_end"))
        if key not in seen_keys:
            seen_keys.add(key)
            bullets.append(
                f"• {rec.get('book','')} | {rec.get('section_title','')} | pp. {rec.get('page_start','')}-{rec.get('page_end','')}"
            )
    return "\n".join(bullets)
130
+
131
+
132
+ # ============================================================
133
+ # Prompts
134
+ # ============================================================
135
def language_instruction(language_mode: str) -> str:
    """Map a UI language setting to the instruction line injected into prompts.

    Any unrecognized mode (including "Auto") falls back to mirror-the-user
    language detection.
    """
    fixed = {
        "English": "Answer only in English.",
        "Spanish": "Answer only in Spanish.",
        "Bilingual": "Answer first in English, then provide a Spanish version under the heading 'Español:'.",
    }
    fallback = "If the user's message is in Spanish, answer in Spanish; otherwise answer in English."
    return fixed.get(language_mode, fallback)
143
+
144
+
145
def choose_quiz_count(user_text: str, selector: str) -> int:
    """Resolve how many quiz questions to generate.

    An explicit selector ("3"/"5"/"7") always wins; otherwise the count is
    inferred from intent keywords in the user's message, defaulting to 3.
    """
    if selector in ("3", "5", "7"):
        return int(selector)

    lowered = user_text.lower()
    exam_cues = ("mock test", "final exam", "exam practice", "full test")
    study_cues = ("detailed", "revision", "comprehensive", "study")
    if any(cue in lowered for cue in exam_cues):
        return 7
    if any(cue in lowered for cue in study_cues):
        return 5
    return 3
155
+
156
+
157
def build_tutor_prompt(mode: str, language_mode: str, question: str, context: str) -> str:
    """Assemble the grounded-tutoring prompt used by all non-quiz modes.

    The style line varies with *mode*; unknown modes fall back to the
    friendly-tutor default. The rules pin the model to the retrieved context
    and to the exact refusal phrase "Not found in the course material."
    """
    style = {
        "Explain": "Explain clearly like a friendly tutor using simple language and short headings.",
        "Detailed": "Give a detailed explanation with key points and clinical relevance only when supported by context.",
        "Short Notes": "Write concise revision notes using bullet points.",
        "Flashcards": "Create 6 flashcards in Q/A format.",
        "Case-Based": "Create a short clinical scenario (2–4 lines) and then explain the concept using the context.",
    }.get(mode, "Explain clearly like a friendly tutor.")

    # The prompt body is part of the runtime string sent to the model.
    return f"""
You are BrainChat, an interactive neurology and neuroanatomy tutor.

Rules:
- Use ONLY the provided context from the books.
- If the answer is not supported by the context, say exactly:
Not found in the course material.
- Do not invent facts outside the context.
- {language_instruction(language_mode)}

Style:
{style}

Context:
{context}

Student question:
{question}
""".strip()
185
+
186
+
187
def build_quiz_generation_prompt(language_mode: str, topic: str, context: str, n_questions: int) -> str:
    """Prompt asking the model for a quiz as JSON.

    Expected schema: {"title": str, "questions": [{"q": str, "answer_key": str}, ...]}.
    Consumed by respond() via oai_json(), which enforces JSON-object output.
    """
    # Doubled braces render literal { } inside the f-string schema example.
    return f"""
You are BrainChat, an interactive tutor.

Rules:
- Use ONLY the provided context.
- Create exactly {n_questions} questions.
- Provide a short answer key for each question.
- Keep answers short (1–2 lines).
- Return VALID JSON only.
- {language_instruction(language_mode)}

Return JSON in this schema:
{{
"title": "short quiz title",
"questions": [
{{"q": "question 1", "answer_key": "expected short answer"}},
{{"q": "question 2", "answer_key": "expected short answer"}}
]
}}

Context:
{context}

Topic:
{topic}
""".strip()
214
+
215
+
216
def build_quiz_eval_prompt(language_mode: str, quiz_data: dict, user_answers: str) -> str:
    """Prompt asking the model to grade a student's answer sheet against the quiz.

    *quiz_data* is the JSON produced by the generation prompt (questions plus
    answer keys); the result schema below is what respond() reads back out.
    """
    quiz_json = json.dumps(quiz_data, ensure_ascii=False)
    # Doubled braces render literal { } inside the f-string schema example.
    return f"""
You are BrainChat, an interactive tutor.

Evaluate the student's answers fairly using the answer keys.
Be lenient for paraphrases that are semantically correct.

Return VALID JSON only.

Return JSON in this schema:
{{
"score_obtained": 0,
"score_total": 0,
"summary": "short overall feedback",
"results": [
{{
"question": "question text",
"answer_key": "expected short answer",
"student_answer": "student answer",
"result": "Correct / Partially Correct / Incorrect",
"feedback": "short explanation"
}}
],
"improvement_tip": "one short study suggestion"
}}

Quiz:
{quiz_json}

Student answers:
{user_answers}

Language:
{language_instruction(language_mode)}
""".strip()
252
+
253
+
254
+ # ============================================================
255
+ # OpenAI helpers (Chat Completions)
256
+ # ============================================================
257
def oai_text(prompt: str) -> str:
    """Send *prompt* to the chat model and return the stripped reply text."""
    ensure_loaded()
    conversation = [
        {"role": "system", "content": "You are a helpful educational assistant."},
        {"role": "user", "content": prompt},
    ]
    completion = CLIENT.chat.completions.create(
        model=OPENAI_MODEL,
        temperature=0.2,
        messages=conversation,
    )
    return completion.choices[0].message.content.strip()
268
+
269
+
270
def oai_json(prompt: str) -> dict:
    """Send *prompt* to the chat model in JSON mode and return the parsed object."""
    ensure_loaded()
    conversation = [
        {"role": "system", "content": "Return only valid JSON."},
        {"role": "user", "content": prompt},
    ]
    completion = CLIENT.chat.completions.create(
        model=OPENAI_MODEL,
        temperature=0.2,
        response_format={"type": "json_object"},
        messages=conversation,
    )
    return json.loads(completion.choices[0].message.content)
282
+
283
+
284
+ # ============================================================
285
+ # Logo
286
+ # ============================================================
287
def find_logo_filename():
    """Return the first LOGO_CANDIDATES entry that exists on disk, else None."""
    return next((candidate for candidate in LOGO_CANDIDATES if os.path.exists(candidate)), None)
292
+
293
+
294
def logo_url():
    """Gradio static-file URL for the detected logo, or None when no logo exists."""
    filename = find_logo_filename()
    return f"/gradio_api/file={quote(filename)}" if filename else None
299
+
300
+
301
def phone_logo_html():
    """HTML for the phone header: an <img> when a logo file exists, a text badge otherwise."""
    url = logo_url()
    if not url:
        return '<div class="bc-logo-fallback">BRAIN<br/>CHAT</div>'
    return f'<img class="bc-logo-img" src="{url}" alt="BrainChat logo">'
306
+
307
+
308
+ # ============================================================
309
+ # Chat logic
310
+ # ============================================================
311
def respond(user_msg, history, mode, language_mode, quiz_count_mode, show_sources, quiz_state):
    """Main chat handler wired to both the textbox submit and the send button.

    Routes the message to one of three paths:
      1. quiz evaluation (when a quiz is active, the message is the answer sheet),
      2. quiz generation (mode == "Quiz Me"),
      3. grounded tutoring (all other modes).

    Returns (cleared_textbox_value, updated_history, updated_quiz_state).
    History uses the Gradio "messages" format (role/content dicts).
    """
    history = history or []
    quiz_state = quiz_state or {"active": False, "quiz_data": None, "language_mode": "Auto"}

    text = (user_msg or "").strip()
    if not text:
        # Ignore empty submissions; leave history and state untouched.
        return "", history, quiz_state

    try:
        history = history + [{"role": "user", "content": text}]

        # If quiz is active, treat this message as the answer sheet and evaluate.
        if quiz_state.get("active", False):
            # Grade in the language the quiz was generated in, not the
            # (possibly since-changed) current dropdown value.
            evaluation = oai_json(build_quiz_eval_prompt(
                quiz_state.get("language_mode", language_mode),
                quiz_state.get("quiz_data", {}),
                text
            ))

            lines = []
            lines.append(f"**Score:** {evaluation.get('score_obtained', 0)}/{evaluation.get('score_total', 0)}")
            if evaluation.get("summary"):
                lines.append(f"\n**Overall:** {evaluation['summary']}")
            if evaluation.get("improvement_tip"):
                lines.append(f"\n**Tip:** {evaluation['improvement_tip']}\n")

            results = evaluation.get("results", [])
            if results:
                lines.append("**Question-wise feedback:**")
                for item in results:
                    lines.append("")
                    lines.append(f"**Q:** {item.get('question','')}")
                    lines.append(f"**Your answer:** {item.get('student_answer','')}")
                    lines.append(f"**Expected:** {item.get('answer_key','')}")
                    lines.append(f"**Result:** {item.get('result','')}")
                    lines.append(f"**Feedback:** {item.get('feedback','')}")

            history = history + [{"role": "assistant", "content": "\n".join(lines).strip()}]
            # The quiz round is over: deactivate so the next message is a new query.
            quiz_state = {"active": False, "quiz_data": None, "language_mode": language_mode}
            return "", history, quiz_state

        # Normal retrieval
        records = search_hybrid(text, shortlist_k=30, final_k=5)
        context = build_context(records)

        # Quiz generation
        if mode == "Quiz Me":
            n_questions = choose_quiz_count(text, quiz_count_mode)
            quiz_data = oai_json(build_quiz_generation_prompt(language_mode, text, context, n_questions))

            lines = []
            lines.append(f"**{quiz_data.get('title','Quiz')}**")
            lines.append(f"\n**Total questions:** {len(quiz_data.get('questions', []))}\n")
            lines.append("Reply in ONE message with numbered answers (e.g., `1. ... 2. ...`).\n")
            for i, q in enumerate(quiz_data.get("questions", []), start=1):
                lines.append(f"**Q{i}.** {q.get('q','')}")

            if show_sources:
                lines.append("\n\n**Sources used to create this quiz:**")
                lines.append(make_sources(records))

            history = history + [{"role": "assistant", "content": "\n".join(lines).strip()}]
            # Arm the quiz: the user's next message will be treated as answers.
            quiz_state = {"active": True, "quiz_data": quiz_data, "language_mode": language_mode}
            return "", history, quiz_state

        # Tutor modes
        answer = oai_text(build_tutor_prompt(mode, language_mode, text, context))
        if show_sources:
            answer = answer.strip() + "\n\n**Sources:**\n" + make_sources(records)

        history = history + [{"role": "assistant", "content": answer.strip()}]
        return "", history, quiz_state

    except Exception as e:
        # Top-level boundary: surface the error in-chat instead of crashing the
        # Gradio event handler, and reset any in-flight quiz.
        history = history + [{"role": "assistant", "content": f"Error: {str(e)}"}]
        quiz_state = {"active": False, "quiz_data": None, "language_mode": language_mode}
        return "", history, quiz_state
388
+
389
+
390
def clear_all():
    """Reset the textbox, chat history, and quiz state to their initial values."""
    fresh_quiz_state = {"active": False, "quiz_data": None, "language_mode": "Auto"}
    return "", [], fresh_quiz_state
392
+
393
+
394
# ============================================================
# CSS: High-contrast palette + phone UI
# Note: Custom CSS selectors may need updates across Gradio versions.
# Gradio explicitly warns DOM query selectors may not be stable. (See Change log.)
# Selectors that touch Gradio internals are fenced between the
# BC_DOM_SELECTORS_START / BC_DOM_SELECTORS_END markers below.
# ============================================================

CSS = r"""
:root{
  --bc-page-bg: #DCDCDC;

  --bc-grad-top: #E8C7D4;
  --bc-grad-mid: #7D4579;
  --bc-grad-bot: #2B0C46;

  --bc-ink: #141414;
  --bc-ink-muted: #3A3A3A;
  --bc-ink-inverse: #FFFFFF;

  --bc-accent: #FEF24A;

  --bc-bubble-user: #FFFFFF;
  --bc-bubble-bot: #F7F3B0;

  --bc-input-bg: #FFFCE6;
  --bc-btn-bg: #FEF24A;
  --bc-btn-ink: #2B0C46;

  --bc-shadow: rgba(0,0,0,.18);
}

body, .gradio-container{
  background: var(--bc-page-bg) !important;
  font-family: ui-sans-serif, system-ui, -apple-system, Segoe UI, Roboto, Helvetica, Arial;
}
footer{ display:none !important; }

#bc_settings{
  max-width: 980px;
  margin: 12px auto 10px auto;
}

#bc_phone{
  max-width: 420px;
  margin: 0 auto 0 auto;
  border-radius: 40px;
  background: linear-gradient(180deg, var(--bc-grad-top) 0%, var(--bc-grad-mid) 48%, var(--bc-grad-bot) 100%);
  box-shadow: 0 18px 40px var(--bc-shadow);
  border: 1px solid rgba(255,255,255,.22);
  padding: 14px 14px 12px 14px;
  position: relative;
}

#bc_phone_logo{
  position: absolute;
  top: 12px;
  left: 50%;
  transform: translateX(-50%);
  z-index: 10;
}

.bc-logo-wrap{
  width: 96px;
  height: 96px;
  border-radius: 999px;
  background: var(--bc-accent);
  display:flex;
  align-items:center;
  justify-content:center;
  box-shadow: 0 10px 22px rgba(0,0,0,.20);
}

.bc-logo-img{
  width: 84px;
  height: 84px;
  object-fit: contain;
  display:block;
}

.bc-logo-fallback{
  width: 84px;
  height: 84px;
  border-radius: 999px;
  display:flex;
  align-items:center;
  justify-content:center;
  font-weight: 900;
  color: var(--bc-btn-ink);
  text-align:center;
  font-size: 12px;
  line-height: 1.05;
  background: rgba(255,255,255,.35);
}

#bc_chatbot{
  margin-top: 96px;
}

#bc_chatbot, #bc_chatbot > div{
  background: transparent !important;
  border: none !important;
  box-shadow: none !important;
}

#bc_chatbot .toolbar{ display:none !important; }

/* ============================================================
   BC_DOM_SELECTORS_START
   If Gradio updates internal DOM, update selectors here.
   ============================================================ */
#bc_chatbot [data-testid="user"],
#bc_chatbot [data-testid="bot"],
#bc_chatbot [data-testid="assistant"]{
  max-width: 82%;
  border-radius: 18px !important;
  padding: 12px 14px !important;
  line-height: 1.35;
  font-size: 14px;
  color: var(--bc-ink) !important;
  border: 0 !important;
  box-shadow: 0 8px 18px rgba(0,0,0,.10);
  position: relative;
}

#bc_chatbot [data-testid="user"]{
  background: var(--bc-bubble-user) !important;
}
#bc_chatbot [data-testid="bot"],
#bc_chatbot [data-testid="assistant"]{
  background: var(--bc-bubble-bot) !important;
}

/* Tails */
#bc_chatbot [data-testid="user"]::after{
  content:"";
  position:absolute;
  right:-7px;
  bottom: 12px;
  width:0; height:0;
  border-left: 10px solid var(--bc-bubble-user);
  border-top: 8px solid transparent;
  border-bottom: 8px solid transparent;
}
#bc_chatbot [data-testid="bot"]::before,
#bc_chatbot [data-testid="assistant"]::before{
  content:"";
  position:absolute;
  left:-7px;
  bottom: 12px;
  width:0; height:0;
  border-right: 10px solid var(--bc-bubble-bot);
  border-top: 8px solid transparent;
  border-bottom: 8px solid transparent;
}
/* ============================================================
   BC_DOM_SELECTORS_END
   ============================================================ */

#bc_input_row{
  margin-top: 10px;
  background: rgba(254,242,74,.96);
  border-radius: 999px;
  padding: 10px 10px;
  box-shadow: 0 10px 22px rgba(0,0,0,.14);
  align-items: center;
}

#bc_plus{
  width: 34px;
  height: 34px;
  border-radius: 999px;
  display:flex;
  align-items:center;
  justify-content:center;
  font-weight: 900;
  color: var(--bc-btn-ink);
  background: rgba(255,255,255,.35);
  user-select: none;
}

#bc_msg textarea{
  background: rgba(255,255,255,.40) !important;
  border-radius: 999px !important;
  border: none !important;
  padding: 10px 12px !important;
  color: var(--bc-btn-ink) !important;
  box-shadow: none !important;
}
#bc_msg textarea::placeholder{
  color: rgba(43,12,70,.75) !important;
}

#bc_send{
  min-width: 42px !important;
  height: 38px !important;
  border-radius: 999px !important;
  border: none !important;
  background: rgba(255,255,255,.35) !important;
  color: var(--bc-btn-ink) !important;
  font-size: 18px !important;
  font-weight: 900 !important;
}
#bc_send:hover{ background: rgba(255,255,255,.55) !important; }

#bc_clear{
  max-width: 420px;
  margin: 10px auto 0 auto;
  border-radius: 14px !important;
}

@media (max-width: 480px){
  #bc_phone{ max-width: 95vw; }
  #bc_chatbot [data-testid="user"],
  #bc_chatbot [data-testid="bot"],
  #bc_chatbot [data-testid="assistant"]{
    max-width: 88%;
    font-size: 14px;
  }
}
"""
613
+
614
+
615
+ # ============================================================
616
+ # UI
617
+ # ============================================================
618
def make_chatbot():
    """Build the chat widget, tolerating API drift across Gradio versions.

    Compatibility shim:
    - Some Gradio versions expose type="messages" explicitly.
    - Newer versions default to the messages format and may reject `type`,
      so a TypeError falls back to constructing without it.
    """
    base_kwargs = dict(
        value=[],
        elem_id="bc_chatbot",
        height=560,
        layout="bubble",
        container=False,
        show_label=False,
        autoscroll=True,
        buttons=[],
        group_consecutive_messages=False,
        placeholder="Ask a question, or start a quiz…",
    )
    try:
        return gr.Chatbot(type="messages", **base_kwargs)
    except TypeError:
        # Gradio 6 may not accept `type`, but still expects messages-format values.
        return gr.Chatbot(**base_kwargs)
642
+
643
+
644
# ---- Application UI (module level; builds the Blocks graph at import time) ----
with gr.Blocks() as demo:
    # Per-session quiz state; see respond() for the activate/evaluate lifecycle.
    quiz_state = gr.State({"active": False, "quiz_data": None, "language_mode": "Auto"})

    with gr.Accordion("Settings", open=False, elem_id="bc_settings"):
        mode = gr.Dropdown(
            choices=["Explain", "Detailed", "Short Notes", "Flashcards", "Case-Based", "Quiz Me"],
            value="Explain",
            label="Tutor Mode",
        )
        language_mode = gr.Dropdown(
            choices=["Auto", "English", "Spanish", "Bilingual"],
            value="Auto",
            label="Answer Language",
        )
        quiz_count_mode = gr.Dropdown(
            choices=["Auto", "3", "5", "7"],
            value="Auto",
            label="Quiz Questions",
        )
        show_sources = gr.Checkbox(value=True, label="Show Sources")
        gr.Markdown(
            "Tip: Choose **Quiz Me** and type a topic (e.g., `cranial nerves`). "
            "Your next message will be evaluated automatically."
        )

    # Phone-shaped card holding the logo badge, the chat area, and the input pill.
    with gr.Group(elem_id="bc_phone"):
        gr.HTML(f'<div class="bc-logo-wrap">{phone_logo_html()}</div>', elem_id="bc_phone_logo")

        chatbot = make_chatbot()

        with gr.Row(elem_id="bc_input_row"):
            gr.HTML("<div>+</div>", elem_id="bc_plus")
            msg = gr.Textbox(
                placeholder="Type a message…",
                show_label=False,
                container=False,
                scale=8,
                elem_id="bc_msg",
            )
            send_btn = gr.Button("➤", elem_id="bc_send", scale=1)

    clear_btn = gr.Button("Clear chat", elem_id="bc_clear")

    # Enter key and the send button share the same handler and outputs.
    msg.submit(
        respond,
        inputs=[msg, chatbot, mode, language_mode, quiz_count_mode, show_sources, quiz_state],
        outputs=[msg, chatbot, quiz_state],
    )
    send_btn.click(
        respond,
        inputs=[msg, chatbot, mode, language_mode, quiz_count_mode, show_sources, quiz_state],
        outputs=[msg, chatbot, quiz_state],
    )
    clear_btn.click(
        clear_all,
        inputs=None,
        outputs=[msg, chatbot, quiz_state],
        queue=False,  # clearing should feel instant, so it bypasses the queue
    )
703
+
704
if __name__ == "__main__":
    demo.queue()
    # Bug fix: Blocks.launch() has no `css` parameter -- custom CSS is a
    # gr.Blocks(css=...) constructor argument, so `demo.launch(css=CSS)` raised
    # TypeError at startup. Attach the stylesheet to the already-built Blocks
    # instance before serving instead.
    demo.css = CSS
    demo.launch()