Fix Windows encoding errors and add packaging support

Changed file: local_app.py (+12 additions, -9 deletions)
|
@@ -8,6 +8,13 @@ import asyncio
|
|
| 8 |
import io
|
| 9 |
import base64
|
| 10 |
import concurrent.futures
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 11 |
|
| 12 |
# --- 1. SETUP & MODEL LOADING ---
|
| 13 |
hf_token = os.environ.get('HF_TOKEN')
|
|
@@ -48,7 +55,6 @@ You are text-based. You cannot see images. Treat errors as puzzles."""
|
|
| 48 |
|
| 49 |
CODE_AWARENESS = """CONSTRAINTS: Avoid professional jargon. Explain errors in plain English."""
|
| 50 |
|
| 51 |
-
# Small models need extremely direct, numbered, capitalized rules.
|
| 52 |
PEDAGOGY_SOCRATIC = """*** STRICT SOCRATIC MODE RULES ***
|
| 53 |
1. NO CODE: You must NEVER write, fix, or provide direct code solutions.
|
| 54 |
2. BE BRIEF: Your entire response MUST be under 3 sentences. Do NOT be long-winded.
|
|
@@ -65,8 +71,6 @@ def build_system_prompt(mode, language, course):
|
|
| 65 |
lang_label = language if language else "General Programming"
|
| 66 |
course_label = course if course else "General Computer Science"
|
| 67 |
|
| 68 |
-
# We build the prompt so the MODE rules are at the VERY BOTTOM.
|
| 69 |
-
# Small models pay the most attention to the end of the system prompt.
|
| 70 |
prompt_parts = [
|
| 71 |
BASE_PERSONA.format(course=course_label, language=lang_label),
|
| 72 |
CODE_AWARENESS
|
|
@@ -318,7 +322,6 @@ def main_page():
|
|
| 318 |
current_instruction = build_system_prompt(mode_select.value, language_select.value, course_select.value)
|
| 319 |
llama_messages = [{"role": "system", "content": current_instruction}]
|
| 320 |
|
| 321 |
-
# Limit history to last 6 messages to keep the context window focused on the instructions
|
| 322 |
for msg in chat_history[-6:]:
|
| 323 |
role = "assistant" if msg['role'] == "model" else msg['role']
|
| 324 |
llama_messages.append({"role": role, "content": msg['raw_text']})
|
|
@@ -328,12 +331,9 @@ def main_page():
|
|
| 328 |
render_messages.refresh()
|
| 329 |
scroll_area.scroll_to(percent=1)
|
| 330 |
|
| 331 |
-
# --- PHYSICAL TOKENS & CREATIVITY CAPS ---
|
| 332 |
-
# In Socratic mode, we physically cut the model off at 150 tokens (~3-4 sentences)
|
| 333 |
-
# so it literally cannot write long-winded replies or huge blocks of code.
|
| 334 |
is_socratic = mode_select.value == "Socratic"
|
| 335 |
max_toks = 150 if is_socratic else 800
|
| 336 |
-
temp = 0.3 if is_socratic else 0.4
|
| 337 |
|
| 338 |
def generate():
|
| 339 |
return llm.create_chat_completion(
|
|
@@ -381,10 +381,13 @@ def main_page():
|
|
| 381 |
start_btn.on_click(start_interface)
|
| 382 |
|
| 383 |
if __name__ in {"__main__", "__mp_main__"}:
|
|
|
|
|
|
|
|
|
|
| 384 |
ui.run(
|
| 385 |
title="DACodeX - Academic Core",
|
| 386 |
dark=True,
|
| 387 |
native=True,
|
| 388 |
window_size=(1200, 800),
|
| 389 |
reload=False
|
| 390 |
-
)
|
|
|
|
| 8 |
import io
|
| 9 |
import base64
|
| 10 |
import concurrent.futures
|
| 11 |
+
import multiprocessing # Added for Windows packaging support
|
| 12 |
+
import sys # Added for Windows encoding fix
|
| 13 |
+
|
| 14 |
+
# --- FIX FOR WINDOWS ENCODING ERRORS ---
# The default Windows console codec (cp1252) cannot encode emojis, so any
# print containing them crashes with UnicodeEncodeError; force UTF-8 instead.
# NOTE: in a frozen/windowed build (pythonw, or a PyInstaller EXE with no
# console — exactly the packaging this app targets) sys.stdout/sys.stderr can
# be None, and replaced streams may lack reconfigure(); guard both cases so
# the startup path cannot raise AttributeError. stderr is reconfigured too so
# tracebacks that contain emojis do not crash either.
if sys.platform == 'win32':
    for _stream in (sys.stdout, sys.stderr):
        if _stream is not None and hasattr(_stream, 'reconfigure'):
            _stream.reconfigure(encoding='utf-8')
|
| 18 |
|
| 19 |
# --- 1. SETUP & MODEL LOADING ---
|
| 20 |
hf_token = os.environ.get('HF_TOKEN')
|
|
|
|
| 55 |
|
| 56 |
CODE_AWARENESS = """CONSTRAINTS: Avoid professional jargon. Explain errors in plain English."""
|
| 57 |
|
|
|
|
| 58 |
PEDAGOGY_SOCRATIC = """*** STRICT SOCRATIC MODE RULES ***
|
| 59 |
1. NO CODE: You must NEVER write, fix, or provide direct code solutions.
|
| 60 |
2. BE BRIEF: Your entire response MUST be under 3 sentences. Do NOT be long-winded.
|
|
|
|
| 71 |
lang_label = language if language else "General Programming"
|
| 72 |
course_label = course if course else "General Computer Science"
|
| 73 |
|
|
|
|
|
|
|
| 74 |
prompt_parts = [
|
| 75 |
BASE_PERSONA.format(course=course_label, language=lang_label),
|
| 76 |
CODE_AWARENESS
|
|
|
|
| 322 |
current_instruction = build_system_prompt(mode_select.value, language_select.value, course_select.value)
|
| 323 |
llama_messages = [{"role": "system", "content": current_instruction}]
|
| 324 |
|
|
|
|
| 325 |
for msg in chat_history[-6:]:
|
| 326 |
role = "assistant" if msg['role'] == "model" else msg['role']
|
| 327 |
llama_messages.append({"role": role, "content": msg['raw_text']})
|
|
|
|
| 331 |
render_messages.refresh()
|
| 332 |
scroll_area.scroll_to(percent=1)
|
| 333 |
|
|
|
|
|
|
|
|
|
|
| 334 |
is_socratic = mode_select.value == "Socratic"
|
| 335 |
max_toks = 150 if is_socratic else 800
|
| 336 |
+
temp = 0.3 if is_socratic else 0.4
|
| 337 |
|
| 338 |
def generate():
|
| 339 |
return llm.create_chat_completion(
|
|
|
|
| 381 |
start_btn.on_click(start_interface)
|
| 382 |
|
| 383 |
if __name__ in {"__main__", "__mp_main__"}:
    # MANDATORY FOR WINDOWS EXE: without freeze_support(), a frozen
    # executable re-imports and re-launches the whole app whenever
    # multiprocessing spawns a child process.
    multiprocessing.freeze_support()

    # Launch the NiceGUI desktop window (native mode, fixed size,
    # auto-reload disabled for the packaged build).
    window_options = {
        "title": "DACodeX - Academic Core",
        "dark": True,
        "native": True,
        "window_size": (1200, 800),
        "reload": False,
    }
    ui.run(**window_options)
|