SamCChauhan committed on
Commit
b789cc9
·
unverified ·
1 Parent(s): d0d751d

Add files via upload

Browse files
Files changed (1) hide show
  1. local_app.py +390 -0
local_app.py ADDED
@@ -0,0 +1,390 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from nicegui import ui, app, events
2
+ from PIL import Image
3
+ from huggingface_hub import hf_hub_download
4
+ from llama_cpp import Llama
5
+ import datetime
6
+ import os
7
+ import asyncio
8
+ import io
9
+ import base64
10
+ import concurrent.futures
11
+
12
# --- 1. SETUP & MODEL LOADING ---
# Optional Hugging Face token, used only to authenticate the model download.
hf_token = os.environ.get('HF_TOKEN')

print("Initializing Local AI Model...")

if hf_token:
    print(f"✅ HF_TOKEN detected (starts with: {hf_token[:4]}...)")
else:
    print("💡 Tip: Set HF_TOKEN environment variable to enable faster downloads and avoid rate limits.")

print("If this is your first run, it will download the ~2GB GGUF model file.")

try:
    # Download (or reuse the locally cached copy of) the quantized GGUF weights.
    model_path = hf_hub_download(
        repo_id="bartowski/Llama-3.2-3B-Instruct-GGUF",
        filename="Llama-3.2-3B-Instruct-Q4_K_M.gguf",
        token=hf_token
    )

    # Load the model globally so every page/session shares one instance.
    llm = Llama(
        model_path=model_path,
        n_ctx=4096,           # context window size in tokens
        n_threads=4,          # CPU threads used for inference
        n_gpu_layers=-1,      # offload all layers to GPU when one is available
        verbose=False,
        chat_format="llama-3"
    )
    print("Model loaded successfully!")
except Exception as e:
    # NOTE(review): if loading fails, `llm` is left undefined and the first
    # chat request will raise NameError — consider exiting here instead.
    print(f"❌ Error loading model: {e}")
    print("Ensure you have a stable internet connection for the initial download.")
44
+
45
# --- AGGRESSIVE SYSTEM PROMPTS FOR SMALL MODELS ---
# Persona template; {language} and {course} are filled in by build_system_prompt().
BASE_PERSONA = """ROLE: You are 'Code Mentor,' a coding tutor for high-school students learning {language} in {course}.
You are text-based. You cannot see images. Treat errors as puzzles."""

# Constraints appended to every prompt regardless of teaching mode.
CODE_AWARENESS = """CONSTRAINTS: Avoid professional jargon. Explain errors in plain English."""

# Small models need extremely direct, numbered, capitalized rules.
# Socratic mode: never give code, stay brief, always end with one question.
PEDAGOGY_SOCRATIC = """*** STRICT SOCRATIC MODE RULES ***
1. NO CODE: You must NEVER write, fix, or provide direct code solutions.
2. BE BRIEF: Your entire response MUST be under 3 sentences. Do NOT be long-winded.
3. ASK: You MUST end your response with exactly ONE guiding question.
4. REFUSE: If the user asks you to write the code, politely decline and ask them a conceptual question instead.
VIOLATION OF THESE RULES IS STRICTLY FORBIDDEN."""

# Direct mode: explanations and small snippets allowed, full solutions are not.
PEDAGOGY_DIRECT = """*** DIRECT INSTRUCTION MODE ***
1. EXPLAIN: Provide direct explanations of syntax and logic.
2. SMALL SNIPPETS: You may provide small code examples (maximum 5 lines).
3. NO FULL SOLUTIONS: Do not write their entire assignment. Only show the specific concept they are stuck on."""
63
+
64
def build_system_prompt(mode, language, course):
    """Assemble the full system prompt for the tutor model.

    Empty/None ``language`` or ``course`` fall back to generic labels.
    The pedagogy rules are placed at the very end of the prompt because
    small models pay the most attention to the end of the system prompt.
    """
    sections = [
        BASE_PERSONA.format(
            course=course if course else "General Computer Science",
            language=language if language else "General Programming",
        ),
        CODE_AWARENESS,
        PEDAGOGY_SOCRATIC if mode == "Socratic" else PEDAGOGY_DIRECT,
    ]
    return "\n\n".join(sections)
81
+
82
# --- STATE MANAGEMENT ---
# NOTE(review): these are process-global, so every connected client shares one
# chat — acceptable for a single-user native desktop app, not for multi-user use.
chat_history = []     # current conversation: dicts with role/name/text/raw_text/images
session_storage = {}  # archived conversations keyed by a human-readable label
pending_uploads = []  # attachments staged in the composer before the next send
# Single worker so blocking llama.cpp calls run serialized, off the UI event loop.
executor = concurrent.futures.ThreadPoolExecutor(max_workers=1)
87
+
88
def get_logo(width=400, height=100):
    """Return the DACodeX neon SVG logo wrapped in a centering div.

    ``width``/``height`` set the rendered size; the viewBox is fixed at
    400x100 so the artwork scales proportionally.
    """
    markup = """
    <div style="display: flex; justify-content: center; align-items: center; padding: 20px 0;">
        <svg width="{width}" height="{height}" viewBox="0 0 400 100" fill="none" xmlns="http://www.w3.org/2000/svg">
            <defs>
                <filter id="neonRed" x="-20%" y="-20%" width="140%" height="140%">
                    <feGaussianBlur stdDeviation="3" result="blur" />
                    <feDropShadow dx="0" dy="0" stdDeviation="5" flood-color="#dc2626" />
                    <feComposite in="SourceGraphic" in2="blur" operator="over" />
                </filter>
            </defs>
            <path d="M40 30L20 50L40 70" stroke="#dc2626" stroke-width="5" stroke-linecap="round" filter="url(#neonRed)"/>
            <path d="M70 30L90 50L70 70" stroke="#dc2626" stroke-width="5" stroke-linecap="round" filter="url(#neonRed)"/>
            <text x="100" y="65" fill="#ffffff" style="font-family:'JetBrains Mono', monospace; font-weight:800; font-size:45px;">DA</text>
            <text x="165" y="65" fill="#dc2626" style="font-family:'JetBrains Mono', monospace; font-weight:800; font-size:45px;" filter="url(#neonRed)">CODE</text>
            <text x="285" y="65" fill="#ffffff" style="font-family:'JetBrains Mono', monospace; font-weight:200; font-size:45px;">X</text>
            <rect x="100" y="75" width="230" height="2" fill="#dc2626" fill-opacity="0.5"/>
        </svg>
    </div>
    """
    return markup.format(width=width, height=height)
108
+
109
@ui.page('/')
def main_page():
    """Build the single-page UI: landing screen, settings drawer, and chat view.

    All widgets and the async send/stream logic live in closures here so they
    share the page's element handles and the module-level state lists.
    """
    # Global dark-neon theme: landing gradient, chat bubbles, code blocks,
    # copy buttons, and the settings drawer.
    ui.add_css("""
    @import url('https://fonts.googleapis.com/css2?family=JetBrains+Mono:wght@400;800&display=swap');
    body { background-color: #09090b; color: #e4e4e7; font-family: 'JetBrains Mono', monospace; }
    .landing-container { height: 100vh; background: radial-gradient(circle at center, #1e1b4b 0%, #09090b 100%); }
    .start-btn { border: 1px solid #dc2626 !important; box-shadow: 0 0 15px rgba(220, 38, 38, 0.4); letter-spacing: 2px; transition: all 0.3s ease !important; }
    .start-btn:hover { box-shadow: 0 0 30px rgba(220, 38, 38, 0.8); transform: scale(1.05) !important; }
    .q-message-text { background-color: #121217 !important; border: 1px solid #27272a; position: relative; }
    .q-message-text--sent { background-color: #dc2626 !important; border: none; }
    .q-message-name { color: #D1D5DB !important; }
    .q-message-text-content { color: #ffffff !important; }
    .q-message-text-content pre { background-color: #09090b !important; border: 1px solid #27272a; padding: 12px; border-radius: 8px; overflow-x: auto; margin: 0.5em 0; }
    .copy-btn { position: absolute; top: 5px; right: 5px; padding: 4px 8px; background: #27272a; color: #e4e4e7; border: 1px solid #dc2626; border-radius: 4px; font-size: 10px; cursor: pointer; z-index: 10; opacity: 0.6; transition: opacity 0.2s; }
    .copy-btn:hover { opacity: 1; background: #dc2626; }
    .drawer-bg { background-color: #121217 !important; border-left: 1px solid #27272a; }
    """)
    ui.colors(primary='#dc2626', secondary='#121217', accent='#ef4444')

    # Client-side script: a MutationObserver attaches a "COPY" button to every
    # <pre> code block as chat messages are (re-)rendered.
    ui.add_head_html("""
    <script>
    function copyCode(btn) {
        const pre = btn.parentElement;
        const code = pre.querySelector('code').innerText;
        navigator.clipboard.writeText(code).then(() => {
            const oldText = btn.innerText;
            btn.innerText = 'COPIED!';
            setTimeout(() => { btn.innerText = oldText; }, 2000);
        });
    }
    const observer = new MutationObserver((mutations) => {
        document.querySelectorAll('pre:not(.has-copy-btn)').forEach((pre) => {
            pre.classList.add('has-copy-btn');
            const btn = document.createElement('button');
            btn.className = 'copy-btn';
            btn.innerText = 'COPY';
            btn.onclick = function() { copyCode(this); };
            pre.appendChild(btn);
        });
    });
    document.addEventListener('DOMContentLoaded', () => {
        observer.observe(document.body, { childList: true, subtree: true });
    });
    </script>
    """)

    # --- Landing screen (shown first; hidden once the user starts) ---
    with ui.column().classes('w-full items-center justify-center landing-container') as landing_view:
        ui.html(get_logo(width=600, height=150))
        ui.markdown("### // SYSTEM STATUS: ONLINE\n// ACADEMIC CORE: READY").classes('text-center')
        start_btn = ui.button("INITIALIZE INTERFACE").classes('start-btn mt-4 px-8 py-4 text-lg font-bold rounded text-white')

    # --- Settings drawer: teaching mode, course, language, archives, export ---
    with ui.right_drawer(value=False).classes('drawer-bg p-4') as drawer:
        ui.html(get_logo(width=200, height=60)).classes('mb-4')

        mode_select = ui.select(["Socratic", "Direct"], value="Socratic", label="Teaching Protocol").classes('w-full mt-2 text-white')
        course_select = ui.select(["AP CS A", "AP CSP", "C++ Fundamentals", "Web Development 101", "Intro to Python", "AP Cybersecurity", "Other"], value="Intro to Python", label="Course Curriculum").classes('w-full mt-2 text-white')
        language_select = ui.select(["Java", "Python", "JavaScript", "C++", "C#", "SQL"], value="Python", label="Target Language").classes('w-full mt-2 text-white')

        ui.separator().classes('my-4')
        ui.label("Session Archives").classes('text-lg font-bold text-gray-300')

        history_dropdown = ui.select([], label="Previous Chats").classes('w-full mt-2 text-white')

        def archive_session():
            # Snapshot the current chat into session_storage and start fresh.
            if not chat_history: return
            timestamp = datetime.datetime.now().strftime("%H:%M:%S")
            label = f"Session {timestamp} ({len(chat_history)} msgs)"
            session_storage[label] = chat_history.copy()
            history_dropdown.options = list(session_storage.keys())
            history_dropdown.update()
            chat_history.clear()
            render_messages.refresh()

        ui.button("Archive Current Session", on_click=archive_session).props('outline rounded').classes('w-full mt-2 text-white')

        def load_session(e):
            # Restore an archived chat in place (replaces the current one).
            if e.value in session_storage:
                chat_history.clear()
                chat_history.extend(session_storage[e.value])
                render_messages.refresh()
        history_dropdown.on_value_change(load_session)

        ui.separator().classes('my-4')

        def download_transcript():
            # Write a plain-text transcript to ~/Downloads (cwd as fallback).
            if not chat_history: return
            transcript_text = "DACODEX MENTOR SESSION\n" + "="*30 + "\n\n"
            for msg in chat_history:
                prefix = "STUDENT" if msg["role"] == "user" else "MENTOR"
                transcript_text += f"{prefix}:\n{msg['raw_text']}\n\n"

            filename = f"DACodeX_Transcript_{datetime.datetime.now().strftime('%Y%m%d_%H%M')}.txt"
            try:
                downloads_path = os.path.join(os.path.expanduser('~'), 'Downloads')
                if not os.path.exists(downloads_path): downloads_path = os.getcwd()
                full_path = os.path.join(downloads_path, filename)
                with open(full_path, "w", encoding="utf-8") as f: f.write(transcript_text)
                ui.notify(f"Transcript saved to: {full_path}", type='positive')
            except Exception as e:
                ui.notify(f"Failed to save: {str(e)}", color='negative')

        ui.button("Download Text File", on_click=download_transcript).classes('w-full mt-2 start-btn text-white')

    # --- Main chat view (hidden until the landing button is clicked) ---
    with ui.column().classes('w-full h-screen relative') as main_chat_view:
        main_chat_view.set_visibility(False)

        # Header bar with title and drawer toggle.
        with ui.row().classes('w-full p-4 border-b border-[#27272a] bg-[#121217] items-center justify-between z-10'):
            ui.label('DACodeX - Coding Assistant').classes('text-xl font-bold ml-2 text-white')
            ui.button(icon='menu', on_click=drawer.toggle).props('flat round dense color=white')

        # Scrollable message list; re-rendered in full on every refresh.
        with ui.scroll_area().classes('flex-grow w-full p-4 pb-40') as scroll_area:
            @ui.refreshable
            def render_messages():
                for index, msg in enumerate(chat_history):
                    with ui.chat_message(name=msg['name'], sent=msg['sent']):
                        ui.markdown(msg['text'], extras=['fenced-code-blocks', 'tables', 'cuddled-lists', 'breaks'])
                        for img_html in msg.get('images', []):
                            ui.html(img_html).classes('max-w-xs rounded mt-2')

            render_messages()

        # --- Composer: attachment picker, previews, text input, send ---
        with ui.column().classes('absolute bottom-0 w-full p-4 bg-[#09090b] border-t border-[#27272a] z-10'):

            async def handle_native_upload():
                # Open the native (pywebview) file dialog and stage selections
                # in pending_uploads: images as PIL objects, everything else
                # decoded as UTF-8 text.
                try:
                    if not app.native.main_window: return
                    # NOTE(review): dialog_type=10 is a magic constant for the
                    # pywebview dialog kind — confirm it maps to an open dialog.
                    file_paths = await app.native.main_window.create_file_dialog(
                        dialog_type=10,
                        allow_multiple=True,
                        file_types=('Supported Files (*.png;*.jpg;*.jpeg;*.gif;*.webp;*.py;*.txt;*.md;*.js;*.html;*.css)', 'All Files (*.*)')
                    )
                    if not file_paths: return

                    for filepath in file_paths:
                        if not os.path.exists(filepath): continue
                        filename = os.path.basename(filepath)
                        ext = filename.split('.')[-1].lower() if '.' in filename else ''
                        try:
                            with open(filepath, 'rb') as f: content_bytes = f.read()
                            if ext in ['png', 'jpg', 'jpeg', 'webp', 'gif']:
                                img = Image.open(io.BytesIO(content_bytes))
                                pending_uploads.append({'type': 'image', 'data': img, 'name': filename})
                            else:
                                # NOTE(review): the literal "(unknown)" below looks
                                # like a scrubbed placeholder — presumably this was
                                # meant to interpolate {filename}; confirm upstream.
                                text_content = content_bytes.decode('utf-8', errors='ignore')
                                pending_uploads.append({'type': 'text', 'data': f"--- Uploaded File: (unknown) ---\n{text_content}", 'name': filename})
                        except Exception as ex:
                            # NOTE(review): same "(unknown)" placeholder here.
                            ui.notify(f"Could not read file (unknown): {ex}", color='negative')
                    render_previews.refresh()
                except Exception as e:
                    ui.notify(f"Upload failed: {e}", color="negative")

            with ui.column().classes('w-full bg-[#121217] border border-[#27272a] rounded-xl p-1 gap-0'):

                @ui.refreshable
                def render_previews():
                    # Thumbnail strip of staged attachments; each card gets a
                    # small close button that removes it by index.
                    if pending_uploads:
                        with ui.row().classes('w-full gap-3 px-3 pt-3 pb-1 overflow-x-auto no-wrap'):
                            for idx, item in enumerate(pending_uploads):
                                with ui.card().classes('w-16 h-16 p-0 bg-[#09090b] border border-[#3f3f46] rounded-lg relative shadow-none flex-shrink-0 flex items-center justify-center'):
                                    if item['type'] == 'image':
                                        # Re-encode the PIL image as base64 PNG for inline display.
                                        buffered = io.BytesIO()
                                        item['data'].save(buffered, format="PNG")
                                        img_str = base64.b64encode(buffered.getvalue()).decode()
                                        ui.html(f'<img src="data:image/png;base64,{img_str}" style="width: 100%; height: 100%; object-fit: cover; border-radius: 6px;" />')
                                    else:
                                        ui.label('📄').classes('text-2xl')

                                    # idx is bound as a default arg to avoid the late-binding closure pitfall.
                                    ui.button(icon='close', on_click=lambda i=idx: (pending_uploads.pop(i), render_previews.refresh())).props('flat round dense size=xs color=white').classes('absolute -top-2 -right-2 bg-[#dc2626] rounded-full z-10 w-5 h-5 min-h-0 min-w-0 p-0 shadow')

                render_previews()

                with ui.row().classes('w-full items-center no-wrap px-1 pb-1'):
                    ui.button(icon='attach_file', on_click=handle_native_upload).props('flat round dense color=white')
                    text_input = ui.input(placeholder="Type your message...").classes('flex-grow px-2').props('borderless dark')
                    ui.button(icon='send', on_click=lambda: asyncio.create_task(send_message())).props('flat round dense color=primary')

            async def send_message():
                """Append the user turn (with attachments), then stream the model reply."""
                user_text = text_input.value.strip()
                if not user_text and not pending_uploads: return

                images_for_ui = []
                raw_text_record = user_text

                # Fold attachments into the raw text sent to the model; images
                # are display-only (the model is text-only) and get a note.
                for item in pending_uploads:
                    if item['type'] == 'image':
                        raw_text_record += f"\n\n[Note to AI: User attached an image named '{item['name']}', but since you are text-only, you cannot view it.]"
                        buffered = io.BytesIO()
                        item['data'].save(buffered, format="PNG")
                        img_str = base64.b64encode(buffered.getvalue()).decode()
                        images_for_ui.append(f'<img src="data:image/png;base64,{img_str}" />')
                    elif item['type'] == 'text':
                        raw_text_record += f"\n\n{item['data']}"

                chat_history.append({
                    'text': user_text if user_text else "📎 *(Attachments sent)*",
                    'user_input_only': user_text,
                    'name': 'Student',
                    'sent': True,
                    'role': 'user',
                    'raw_text': raw_text_record,
                    'images': images_for_ui
                })

                # Reset the composer and show the new user message.
                text_input.value = ""
                pending_uploads.clear()
                render_previews.refresh()
                render_messages.refresh()
                scroll_area.scroll_to(percent=1)

                # Rebuild the system prompt from the current drawer settings.
                current_instruction = build_system_prompt(mode_select.value, language_select.value, course_select.value)
                llama_messages = [{"role": "system", "content": current_instruction}]

                # Limit history to last 6 messages to keep the context window focused on the instructions
                for msg in chat_history[-6:]:
                    role = "assistant" if msg['role'] == "model" else msg['role']
                    llama_messages.append({"role": role, "content": msg['raw_text']})

                try:
                    # Placeholder assistant message, filled in as tokens stream.
                    chat_history.append({'text': '', 'name': 'DACodeX', 'sent': False, 'role': 'model', 'raw_text': ''})
                    render_messages.refresh()
                    scroll_area.scroll_to(percent=1)

                    # --- PHYSICAL TOKENS & CREATIVITY CAPS ---
                    # In Socratic mode, we physically cut the model off at 150 tokens (~3-4 sentences)
                    # so it literally cannot write long-winded replies or huge blocks of code.
                    is_socratic = mode_select.value == "Socratic"
                    max_toks = 150 if is_socratic else 800
                    temp = 0.3 if is_socratic else 0.4  # Even lower temperature makes it strict

                    def generate():
                        # Blocking llama.cpp call; executed on the worker thread.
                        return llm.create_chat_completion(
                            messages=llama_messages,
                            stream=True,
                            temperature=temp,
                            max_tokens=max_toks,
                            repeat_penalty=1.15
                        )

                    stream = await asyncio.get_event_loop().run_in_executor(executor, generate)
                    full_response = ""
                    displayed_text = ""

                    # Pull chunks off the blocking iterator via the executor so
                    # the UI event loop stays responsive; display in 5-char
                    # steps for a typewriter effect.
                    while True:
                        def get_next_chunk():
                            try: return next(stream)
                            except StopIteration: return None

                        chunk = await asyncio.get_event_loop().run_in_executor(executor, get_next_chunk)
                        if chunk is None: break

                        delta = chunk["choices"][0].get("delta", {})
                        if "content" in delta:
                            full_response += delta["content"]
                            while len(displayed_text) < len(full_response):
                                chars_to_add = min(len(full_response) - len(displayed_text), 5)
                                displayed_text += full_response[len(displayed_text):len(displayed_text) + chars_to_add]
                                chat_history[-1]['text'] = displayed_text
                                chat_history[-1]['raw_text'] = full_response
                                render_messages.refresh()
                                scroll_area.scroll_to(percent=1)
                                await asyncio.sleep(0.01)

                except Exception as e:
                    ui.notify(f"🤖 Technical Hiccup: {str(e)}", color='negative')

            text_input.on('keydown.enter', send_message)

    def start_interface():
        # Swap the landing screen for the chat view and open the drawer.
        landing_view.set_visibility(False)
        main_chat_view.set_visibility(True)
        drawer.value = True

    start_btn.on_click(start_interface)
382
+
383
# Entry point: both "__main__" and "__mp_main__" are checked because NiceGUI
# may re-import the module in a multiprocessing child. Runs as a native
# desktop window; reload is disabled since the LLM is loaded at import time.
if __name__ in {"__main__", "__mp_main__"}:
    ui.run(
        title="DACodeX - Academic Core",
        dark=True,
        native=True,
        window_size=(1200, 800),
        reload=False
    )