import os, re, json, datetime
import pdfplumber
import requests
import pandas as pd
import gradio as gr
from huggingface_hub import InferenceClient
from dataclasses import dataclass
from typing import Dict, Optional
from pptx import Presentation
from docx import Document

# Optional OCR stack: checked for None below so extraction degrades gracefully
# instead of crashing at import time when OCR is not installed.
try:
    from PIL import Image
    import pytesseract
except ImportError:
    Image = pytesseract = None

# =====================================================
# CORE CONFIG ****** CUSTOMIZE ******
# =====================================================

MODEL_NAME = "deepseek-ai/DeepSeek-R1"
HF_API_KEY = os.environ.get("HF_API_KEY")
if not HF_API_KEY:
    raise ValueError("HF_API_KEY env var is missing.")

client = InferenceClient(provider="novita", api_key=HF_API_KEY)

LOG_DIR = "logs"
os.makedirs(LOG_DIR, exist_ok=True)
LOG_PATH = os.path.join(LOG_DIR, "conversations.jsonl")

# =====================================================
# KNOWLEDGE BASE (CourseContent / CaseStudyContent)
# =====================================================

BASE_DIR = os.path.dirname(os.path.abspath(__file__))

COURSE_DIR = os.path.join(BASE_DIR, "CourseContent")
CASE_DIR = os.path.join(BASE_DIR, "CaseStudyContent")

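# Two KB layouts are supported: folder mode (default) reads the CourseContent/
# and CaseStudyContent/ directories; prefix mode instead scans the working
# directory for files named CourseContent__* / CaseStudyContent__*.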
USE_PREFIX_MODE = False
COURSE_PREFIX = "CourseContent__"
CASE_PREFIX = "CaseStudyContent__"

def _is_placeholder_name(name: str) -> bool:
    return "placeholder" in (name or "").lower()

# =====================================================
# MODES (single customization surface) ****** CUSTOMIZE ******
# =====================================================

@dataclass(frozen=True)
class ModeSpec:
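    """One assistant persona: its system prompt, generation settings, and KB scope."""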
    system_instructions: str
    gen: dict
    web_enabled: bool = False
    kb_scope: str = "course"   # "course" or "case"
    mode_id: Optional[str] = None  # stable internal id if you want it

DEFAULT_GEN_CONFIG = {"temperature": 0.2, "top_p": 0.8, "max_tokens": 900}

MODE_REGISTRY: Dict[str, ModeSpec] = {
    "Course Detailer": ModeSpec(
        system_instructions="""
ROLE
You are the Course Detailer (Operations & Logistics) for a university course at ESCP.

PRIMARY GOAL
Provide ONLY operational/logistical course details:
- schedule structure (sessions, timing, format)
- deliverables (what, when, how submitted)
- grading scheme (weights, rubrics at a high level)
- policies (attendance, late work, academic integrity, AI use rules)
- tooling (platforms, required software/accounts)
- office hours / contact / escalation
Refer students to "Socratic Reviewer" and "Reviewer" if they want to review course content, and to "Stakeholder" if they want to tackle the case study.

SOURCE POLICY
1) If the CourseContent folder has any file other than "PlaceHolder", treat it as authoritative and quote/reflect it faithfully.
2) If the CourseContent folder has only the "PlaceHolder" file or no file, enter DEMO MODE:
   - Explicitly state: "Demo mode: no official course material loaded." in the first interaction.
   - Then provide just the assumed demo-mode course name.
   - Present yourself as the Course Detailer and explain what you can do.
   - For any other questions, invent content based on the demo-mode details.

CONSTRAINTS
- Do not invent details that contradict loaded course materials.
- If a requested detail is not available, say what is missing and which file to upload (e.g., syllabus PDF).

STYLE
- Use short, operational bullets; no long pedagogical explanations.
""".strip(),
        gen={**DEFAULT_GEN_CONFIG, "temperature": 0.35, "top_p": 0.9, "max_tokens": 2000},
        web_enabled=False,
        kb_scope="course",
        mode_id="course_detailer",
    ),

    "Reviewer": ModeSpec(
        system_instructions="""
ROLE
You are a course content reviewer.

PRIMARY GOAL
You summarize the slides provided to you and generate questions from them to help students prepare for the final exam.
Only if asked, refer students to "Socratic Reviewer" if they want a more quest-like review of the course content, to "Course Detailer" if they have questions related to scheduling and logistics, and to "Stakeholder" if they want to tackle the case study.

SOURCE POLICY
- If the CourseContent folder has any file other than "PlaceHolder", base your answers on it.
- If the CourseContent folder has only the "PlaceHolder" file or no file, proceed in DEMO MODE:
    - Explicitly state: "Demo mode: no official course material loaded." in the first interaction.
    - Then provide just the assumed demo-mode course name.
    - Present yourself as the Reviewer and explain what you can do.
    - For any other questions, invent content based on the demo-mode details, staying aligned with all other assistant responses.

CONSTRAINTS
- Base your answers only on content in CourseContent folder unless in demo mode.
""".strip(),
        gen={**DEFAULT_GEN_CONFIG, "temperature": 0.15, "top_p": 0.8, "max_tokens": 2000},
        web_enabled=False,
        kb_scope="course",
        mode_id="reviewer",
    ),

    "Socratic Reviewer": ModeSpec(
        system_instructions="""
ROLE
You are a Socratic course content Reviewer.

PRIMARY GOAL
You ask Socratic questions to help students understand the slides and the overall purpose of the course.
Only if asked, refer students to "Reviewer" if they want a more systematic review of the course content in preparation for the final exam, to "Course Detailer" if they have questions related to scheduling and logistics, and to "Stakeholder" if they want to tackle the case study.

SOURCE POLICY
- If the CourseContent folder has any file other than "PlaceHolder", base your answers on it.
- If the CourseContent folder has only the "PlaceHolder" file or no file, proceed in DEMO MODE:
    - Explicitly state: "Demo mode: no official course material loaded." in the first interaction.
    - Then provide just the assumed demo-mode course name.
    - Present yourself as the Socratic Reviewer and explain what you can do.
    - For any other questions, invent content based on the demo-mode details, staying aligned with all other assistant responses.

CONSTRAINTS
- Questions only unless the user explicitly asks for an example.
- Do not rewrite the user's text.
""".strip(),
        gen={**DEFAULT_GEN_CONFIG, "temperature": 0.10, "top_p": 0.8, "max_tokens": 2000},
        web_enabled=False,
        kb_scope="course",
        mode_id="socratic_reviewer",
    ),

    "Stakeholder": ModeSpec(
        system_instructions="""
ROLE
You are a realistic key stakeholder of a case.

PRIMARY GOAL
Assume the persona described in the source and react exactly as that persona would, knowing everything available to that persona, and never going out of character.
Only if asked, refer students to "Reviewer" if they want a more systematic review of the course content in preparation for the final exam, to "Socratic Reviewer" if they want a more quest-like review of the course content, and to "Course Detailer" if they have questions related to scheduling and logistics.

SOURCE POLICY
1) If the CaseStudyContent folder has any file other than "PlaceHolder", use it as the main reference to build your persona.
2) If the CaseStudyContent folder has only the "PlaceHolder" file or no file, assume a plausible case setup and proceed in character.

TONE
Match how the persona described in the source would behave in a realistic business setting. Be a bit theatrical.
""".strip(),
        gen={**DEFAULT_GEN_CONFIG, "temperature": 0.45, "top_p": 0.95, "max_tokens": 2000},
        web_enabled=False,
        kb_scope="case",
        mode_id="stakeholder",
    ),
}

STUDENT_MODES = list(MODE_REGISTRY.keys())
DEFAULT_MODE = STUDENT_MODES[0] if STUDENT_MODES else "Course Detailer"

# =====================================================
# OPTIONAL WEB SEARCH (OFF by default) ****** CUSTOMIZE ******
# =====================================================

WEB_SEARCH_ENABLED_GLOBAL = False  # master switch
SEARCH_API_KEY = os.environ.get("SEARCH_API_KEY", "")

def web_search_snippets(query: str, max_chars: int = 1500) -> str:
    # Placeholder: wire a real search API here and return short snippets.
    if not (WEB_SEARCH_ENABLED_GLOBAL and SEARCH_API_KEY and requests):
        return ""
    return ""

# =====================================================
# MEMORY + LOGGING
# =====================================================

def store_memory(memory_state: dict, text: str) -> dict:
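    """Append a turn summary to session memory (a single store shared by all modes)."""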
    memory_state["session"].append(text)
    return memory_state

def retrieve_memory(memory_state: dict) -> str:
    return "\n".join(memory_state.get("session", []))

def log_turn(mode: str, user_msg: str, assistant_msg: str, upload_meta=None) -> None:
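    """Append one JSONL record per chat turn to logs/conversations.jsonl."""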
    rec = {
        "ts": datetime.datetime.utcnow().isoformat() + "Z",
        "mode": mode,
        "user": user_msg,
        "assistant": assistant_msg,
        "uploads": upload_meta or [],
    }
    with open(LOG_PATH, "a", encoding="utf-8") as f:
        f.write(json.dumps(rec, ensure_ascii=False) + "\n")

# =====================================================
# OUTPUT CLEANING
# =====================================================

_THINK_RE = re.compile(r"<think>.*?</think>", flags=re.DOTALL | re.IGNORECASE)

def strip_think(text: str) -> str:
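    """Remove <think>...</think> blocks that reasoning models like DeepSeek-R1 emit."""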
    if not text:
        return ""
    cleaned = _THINK_RE.sub("", text).strip()
    return cleaned if cleaned else "(No visible answer returned. Please re-try.)"

# =====================================================
# FILE EXTRACTION
# =====================================================

def read_text_file(path: str, max_chars: int = 2000) -> str:
    try:
        with open(path, "r", encoding="utf-8", errors="ignore") as f:
            return f.read(max_chars)
    except Exception:
        return ""

def docx_to_text(path: str, max_chars: int = 2500) -> str:
    doc = Document(path)
    chunks = [p.text.strip() for p in doc.paragraphs if len(p.text.strip()) > 20]
    return "\n".join(chunks)[:max_chars]

def pptx_to_text(path: str, max_chars: int = 2500) -> str:
    prs = Presentation(path)
    chunks = []
    for slide in prs.slides:
        for shape in slide.shapes:
            if hasattr(shape, "text"):
                txt = (shape.text or "").strip()
                if len(txt) > 20:
                    chunks.append(txt)
    return "\n".join(chunks)[:max_chars]

def image_to_text(path: str, max_chars: int = 2000) -> str:
    if Image is None or pytesseract is None:
        return "[Image attached, but OCR is not installed in this Space. Install pytesseract + tesseract-ocr to extract text.]"
    try:
        img = Image.open(path)
        txt = pytesseract.image_to_string(img)
        txt = (txt or "").strip()
        return txt[:max_chars] if txt else "[Image attached, but OCR returned no text.]"
    except Exception as e:
        return f"[Image attached, OCR failed: {type(e).__name__}: {e}]"

def pdf_to_text(path: str, max_lines: int = 120, ocr_fallback: bool = True) -> str:
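    """Extract up to max_lines of text lines; for scanned PDFs, OCR falls back to page 1 only."""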
    chunks = []
    try:
        with pdfplumber.open(path) as pdf:
            for p in pdf.pages:
                txt = p.extract_text() or ""
                for line in txt.split("\n"):
                    line = line.strip()
                    if len(line) > 20:
                        chunks.append(line)
                    if len(chunks) >= max_lines:
                        break
                if len(chunks) >= max_lines:
                    break

            extracted = "\n".join(chunks).strip()
            if extracted:
                return extracted

            if not ocr_fallback:
                return ""

            if Image is None or pytesseract is None:
                return "[PDF appears scanned (no extractable text). OCR is not installed in this Space.]"

            try:
                first = pdf.pages[0]
                pil_img = first.to_image(resolution=200).original
                txt = pytesseract.image_to_string(pil_img)
                txt = (txt or "").strip()
                return txt[:2000] if txt else "[Scanned PDF attached; OCR returned no text.]"
            except Exception as e:
                return f"[Scanned PDF attached; OCR failed: {type(e).__name__}: {e}]"

    except Exception as e:
        return f"[PDF read failed: {type(e).__name__}: {e}]"

def _normalize_paths(upload_paths):
    if upload_paths is None:
        return []
    if isinstance(upload_paths, str):
        return [upload_paths]
    if isinstance(upload_paths, list):
        return [p for p in upload_paths if isinstance(p, str) and p]
    return []

def _single_file_to_text(path: str, max_chars_per_file: int = 2000) -> tuple[str, dict]:
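    """Dispatch on file extension; return (extracted_text, metadata) and never raise."""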
    meta = {"path": path}
    if not path:
        meta["status"] = "no_file"
        return "", meta
    if not os.path.exists(path):
        meta["status"] = "missing_path"
        return "", meta

    name = os.path.basename(path)
    lower = name.lower()
    meta["name"] = name

    try:
        if lower.endswith((".png", ".jpg", ".jpeg")):
            meta["type"] = "image"
            txt = image_to_text(path, max_chars=max_chars_per_file)
            return txt, meta

        if lower.endswith(".pdf"):
            meta["type"] = "pdf"
            txt = pdf_to_text(path, max_lines=120, ocr_fallback=True)
            return txt[:max_chars_per_file], meta

        if lower.endswith(".docx"):
            meta["type"] = "docx"
            return docx_to_text(path, max_chars=max_chars_per_file), meta

        if lower.endswith(".pptx"):
            meta["type"] = "pptx"
            return pptx_to_text(path, max_chars=max_chars_per_file), meta

        if lower.endswith(".csv"):
            meta["type"] = "csv"
            df = pd.read_csv(path)
            return df.to_string(index=False)[:max_chars_per_file], meta

        if lower.endswith(".xlsx"):
            meta["type"] = "xlsx"
            df = pd.read_excel(path)
            return df.to_string(index=False)[:max_chars_per_file], meta

        if lower.endswith(".json"):
            meta["type"] = "json"
            try:
                df = pd.read_json(path)
                return df.to_string(index=False)[:max_chars_per_file], meta
            except Exception:
                return read_text_file(path, max_chars=max_chars_per_file), meta

        if any(lower.endswith(ext) for ext in [".txt", ".md", ".py", ".html", ".css", ".js", ".jsonl"]):
            meta["type"] = "text"
            return read_text_file(path, max_chars=max_chars_per_file), meta

        meta["status"] = "unsupported_type"
        return "", meta

    except Exception as e:
        meta["status"] = f"parse_error:{type(e).__name__}"
        return "", meta

def files_to_text(upload_paths) -> tuple[str, list[dict], str]:
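    """Merge up to MAX_FILES uploads into one capped text block, plus per-file metadata and a UI badge."""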
    paths = _normalize_paths(upload_paths)
    if not paths:
        return "", [], ""

    MAX_FILES = 3
    MAX_CHARS_PER_FILE = 1600
    MAX_TOTAL_CHARS = 4500

    meta_list = []
    parts = []
    names = []

    for path in paths[:MAX_FILES]:
        text, meta = _single_file_to_text(path, max_chars_per_file=MAX_CHARS_PER_FILE)
        meta["extracted_chars"] = len(text or "")
        meta_list.append(meta)

        if meta.get("name"):
            names.append(meta["name"])

        if text:
            parts.append(f"--- FILE: {meta.get('name','(unknown)')} ---")
            parts.append(text)

    combined = "\n".join(parts).strip()
    combined = combined[:MAX_TOTAL_CHARS]

    badge = f"Attached: {', '.join(names[:MAX_FILES])}" if names else ""
    return combined, meta_list, badge

# =====================================================
# STATIC KB INGESTION
# =====================================================

def _list_kb_files():
    course_paths, case_paths = [], []

    if USE_PREFIX_MODE:
        for fn in os.listdir("."):
            if fn.startswith(COURSE_PREFIX):
                course_paths.append(os.path.abspath(fn))
            elif fn.startswith(CASE_PREFIX):
                case_paths.append(os.path.abspath(fn))
        return course_paths, case_paths

    if os.path.isdir(COURSE_DIR):
        for fn in os.listdir(COURSE_DIR):
            course_paths.append(os.path.join(COURSE_DIR, fn))
    if os.path.isdir(CASE_DIR):
        for fn in os.listdir(CASE_DIR):
            case_paths.append(os.path.join(CASE_DIR, fn))

    return course_paths, case_paths

def _kb_extract(paths, label: str) -> tuple[str, bool]:
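    """Concatenate KB files into one capped text block and flag whether only placeholders exist."""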
    if not paths:
        return "", True

    names = [os.path.basename(p) for p in paths]
    placeholder_only = all(_is_placeholder_name(n) for n in names)

    MAX_KB_FILES = 6
    MAX_TOTAL_CHARS = 12000
    MAX_CHARS_PER_FILE = 2000

    parts = [f"[{label} Knowledge Base]"]
    used = 0

    for p in paths[:MAX_KB_FILES]:
        name = os.path.basename(p)
        txt, _meta = _single_file_to_text(p, max_chars_per_file=MAX_CHARS_PER_FILE)
        if not txt:
            continue
        block = f"\n--- KB FILE: {name} ---\n{txt}\n"
        parts.append(block)
        used += len(block)
        if used >= MAX_TOTAL_CHARS:
            break

    kb_text = "\n".join(parts).strip()
    kb_text = kb_text[:MAX_TOTAL_CHARS]
    return kb_text, placeholder_only

COURSE_PATHS, CASE_PATHS = _list_kb_files()
COURSE_KB_TEXT, COURSE_PLACEHOLDER_ONLY = _kb_extract(COURSE_PATHS, "CourseContent")
CASE_KB_TEXT, CASE_PLACEHOLDER_ONLY = _kb_extract(CASE_PATHS, "CaseStudyContent")

# =====================================================
# DEMO ASSUMPTIONS
# =====================================================

DEMO_COURSE_NAME = "AI in Business Analytics & Digital Transformation"

DEMO_CANON = f"""
Demo course canon (use consistently across ALL modes; do not contradict it):

Course: {DEMO_COURSE_NAME} (ESCP, graduate level)
Format: 5 sessions × 3 hours (in-person) + final assessment block
Platforms: Blackboard (materials), Teams (announcements), Google Colab (coding)

Demo classroom plan (invented for demo consistency):
- Session 1 (IS foundations + assessment): Paris Campus, Room P-121
- Session 2 (Data mining + descriptive analytics): Paris Campus, Room P-214
- Session 3 (Adoption & integration + prompt basics): Paris Campus, Room P-305
- Session 4 (Privacy/security + governance): Paris Campus, Room P-118
- Session 5 (Current trends: GenAI/Big Data/Cloud + wrap-up): Paris Campus, Room P-220
- Office hours (demo): Wednesdays 14:00–15:00, Paris Campus, Faculty Office Area (or via Teams)

Grading (demo):
- Group assignment (case + prompt reliability): 30%
- Individual quiz (privacy/security basics): 15%
- Participation / in-class activities: 15%
- Final (short applied questions + interpretation): 40%
""".strip()

ASSUMED_COURSE_CONTEXT = f"""Demo mode: no official course material loaded.
Assumption (for demonstration only): {DEMO_COURSE_NAME}.
I will use the same demo course canon throughout this session.
"""

ASSUMED_CASE_CONTEXT = """Assumption (Demo Mode):
I will assume a business case about deploying an AI assistant in a regulated service context (e.g., airline or financial services),
with constraints on reliability, hallucinations, and policy compliance. The stakeholder cares about risk, KPIs, governance,
and rollout, and is panicked because there has been a data leak.
"""

def _is_course_mode(mode: str) -> bool:
    spec = MODE_REGISTRY.get(mode)
    return bool(spec and spec.kb_scope == "course")

def _is_case_mode(mode: str) -> bool:
    spec = MODE_REGISTRY.get(mode)
    return bool(spec and spec.kb_scope == "case")

def demo_intro_text(mode: str, flags_state: dict) -> tuple[str, dict]:
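    """Return the demo-mode banner once per KB scope per session, then stay silent."""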
    if _is_course_mode(mode):
        if COURSE_PLACEHOLDER_ONLY and not flags_state.get("demo_course_intro_shown", False):
            flags_state["demo_course_intro_shown"] = True
            return ASSUMED_COURSE_CONTEXT, flags_state
        return "", flags_state  # IMPORTANT: no reminders

    if _is_case_mode(mode):
        if CASE_PLACEHOLDER_ONLY and not flags_state.get("demo_case_intro_shown", False):
            flags_state["demo_case_intro_shown"] = True
            return ASSUMED_CASE_CONTEXT, flags_state
        return "", flags_state

    return "", flags_state

def kb_block_for_mode(mode: str) -> str:
    if _is_course_mode(mode):
        return COURSE_KB_TEXT if not COURSE_PLACEHOLDER_ONLY else "[CourseContent Knowledge Base]\n(placeholder / none loaded)"
    if _is_case_mode(mode):
        return CASE_KB_TEXT if not CASE_PLACEHOLDER_ONLY else "[CaseStudyContent Knowledge Base]\n(placeholder / none loaded)"
    # fallback
    return "[Knowledge Base]\n(none)"

# =====================================================
# LLM COMPLETION
# =====================================================

def complete_llm(messages, mode: str) -> str:
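    """Run a non-streaming chat completion using the selected mode's generation config."""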
    spec = MODE_REGISTRY.get(mode) or MODE_REGISTRY.get(DEFAULT_MODE)
    cfg = (spec.gen if spec else DEFAULT_GEN_CONFIG)

    r = client.chat_completion(
        model=MODEL_NAME,
        messages=messages,
        temperature=cfg["temperature"],
        max_tokens=cfg["max_tokens"],
        top_p=cfg["top_p"],
        stream=False,
    )
    if isinstance(r, dict):
        return r["choices"][0]["message"]["content"]
    return r.choices[0].message.content

# =====================================================
# PER-MODE CHAT HISTORY SUPPORT
# =====================================================

def load_history_for_mode(selected_mode, histories):
    return histories.get(selected_mode, [])

# =====================================================
# MAIN CHAT HANDLER
# =====================================================

def chat_user(message, visible_history, mode, upload_paths, histories, memory_state, flags_state):
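    """Handle one chat turn: assemble the system prompt (mode instructions,
    demo canon, memory, KB, uploads), call the LLM, and persist per-mode history."""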
    if not message or not message.strip():
        return visible_history, "", upload_paths, histories, memory_state, flags_state

    user_msg = message.strip()

    # Use the real history for this mode (not whatever is currently displayed)
    history = histories.get(mode, [])

    file_text, upload_meta, badge_line = files_to_text(upload_paths)
    display_user_msg = f"{badge_line}\n{user_msg}" if badge_line else user_msg

    memory = retrieve_memory(memory_state)
    kb_block = kb_block_for_mode(mode)

    demo_canon_block = ""
    if COURSE_PLACEHOLDER_ONLY and _is_course_mode(mode):
        demo_canon_block = f"\n\n[Demo Canon]\n{DEMO_CANON}"

    elif CASE_PLACEHOLDER_ONLY and _is_case_mode(mode):
        demo_canon_block = f"\n\n[Demo Canon]\n{DEMO_CANON}\n\n{ASSUMED_CASE_CONTEXT}"

    demo_intro, flags_state = demo_intro_text(mode, flags_state)
    demo_block = f"\n\n[Demo Assumption]\n{demo_intro}" if demo_intro else ""

    spec = MODE_REGISTRY.get(mode) or MODE_REGISTRY.get(DEFAULT_MODE)
    mode_instructions = (spec.system_instructions if spec else "")

    web_block = ""
    web_allowed = bool(WEB_SEARCH_ENABLED_GLOBAL and spec and spec.web_enabled)
    if web_allowed:
        snippets = web_search_snippets(user_msg)
        if snippets:
            web_block = f"\n\n[Web Search Snippets]\n{snippets}"

    system_prompt = (
        mode_instructions
        + demo_block
        + demo_canon_block
        + "\n\n[IMPORTANT RULE]\nIf demo mode intro has already been shown once in this session, DO NOT repeat the phrase 'Demo mode' again. Continue as if the demo canon is the course reality."
        + f"\n\n[Memory]\n{memory}"
        + f"\n\n[Knowledge Base]\n{kb_block}"
        + (f"\n\n[Uploaded Material]\n{file_text}" if file_text else "\n\n[Uploaded Material]\n(none)")
        + web_block
    )

    llm_msgs = [{"role": "system", "content": system_prompt}]
    for m in history:
        llm_msgs.append({"role": m["role"], "content": m["content"]})
    llm_msgs.append({"role": "user", "content": user_msg})

    new_history = history + [{"role": "user", "content": display_user_msg}]

    try:
        raw = complete_llm(llm_msgs, mode=mode)
        final = strip_think(raw)
    except Exception as e:
        final = f"[ERROR] LLM call failed: {type(e).__name__}: {e}"

    new_history.append({"role": "assistant", "content": final})

    # Persist per-mode history
    histories[mode] = new_history

    memory_state = store_memory(memory_state, f"User: {user_msg}\nAssistant: {final}")
    log_turn(mode=mode, user_msg=user_msg, assistant_msg=final, upload_meta=upload_meta)

    # Clear textbox AND upload after sending
    return new_history, "", None, histories, memory_state, flags_state

# =====================================================
# EXPORT & RESET
# =====================================================

def export_chat(history):
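    """Write the visible chat history to conversation.txt and return the path for download."""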
    path = "conversation.txt"
    with open(path, "w", encoding="utf-8") as f:
        for m in history:
            f.write(f"{m['role'].upper()}:\n{m['content']}\n\n")
    return path

def reset_chat(histories, memory_state, flags_state):
    memory_state["session"] = []
    flags_state["demo_course_intro_shown"] = False
    flags_state["demo_case_intro_shown"] = False
    for k in list(histories.keys()):
        histories[k] = []
    return [], None, "", histories, memory_state, flags_state

# =====================================================
# UI
# =====================================================

def load_css(path="style.css") -> str:
    try:
        with open(path, "r", encoding="utf-8") as f:
            return f.read()
    except FileNotFoundError:
        return ""

with gr.Blocks(css=load_css()) as demo:
    gr.Markdown(
        "<h1>AI Teaching Assistant Prototype (Student-Facing Version)</h1>",
        elem_id="escp_title",
    )

    mode = gr.Dropdown(
        label="Which TA?",
        choices=STUDENT_MODES,
        value=DEFAULT_MODE,  # auto-updates if someone renames/reorders modes
    )

    histories_state = gr.State({m: [] for m in STUDENT_MODES})
    memory_state = gr.State({"session": []})
    flags_state = gr.State({"demo_course_intro_shown": False, "demo_case_intro_shown": False})

    with gr.Row(equal_height=True):
        with gr.Column(scale=4, min_width=760):
            chatbot = gr.Chatbot(label="Chat Box", type="messages")

        with gr.Column(scale=2, min_width=420):
            upload = gr.File(
                label="Attach Files (optional, max. 5)",
                type="filepath",
                file_count="multiple",
            )

            message = gr.Textbox(
                label=" ",
                lines=1,
                placeholder="Type your question/response, attach your files, and press Enter",
            )

            reset_btn = gr.Button("Reset Conversation")
            export_btn = gr.DownloadButton("Export Conversation")

    mode.change(
        load_history_for_mode,
        inputs=[mode, histories_state],
        outputs=[chatbot],
    )

    message.submit(
        chat_user,
        inputs=[message, chatbot, mode, upload, histories_state, memory_state, flags_state],
        outputs=[chatbot, message, upload, histories_state, memory_state, flags_state],
    )

    reset_btn.click(
        reset_chat,
        inputs=[histories_state, memory_state, flags_state],
        outputs=[chatbot, upload, message, histories_state, memory_state, flags_state],
    )

    export_btn.click(export_chat, inputs=chatbot, outputs=export_btn)

demo.launch()