# api/config.py
"""Central configuration for the Clare teaching assistant.

Reads all settings from environment variables, builds the shared HTTP /
OpenAI clients, and declares the course outline, learning modes, and
system prompt used across the API.
"""

import os
from typing import List, Dict

import httpx
from openai import OpenAI
from langchain_openai import ChatOpenAI, OpenAIEmbeddings


def _env(name: str, default: str = "") -> str:
    """Return the stripped value of env var *name*.

    Falls back to *default* when the variable is unset OR set to an
    empty/whitespace-only string, so every setting below shares one
    fallback rule (the original numeric settings crashed with
    ``ValueError`` on an empty-string env var).
    """
    return (os.getenv(name) or "").strip() or default


# ============================
# Environment & Core Settings
# ============================
OPENAI_API_KEY = _env("OPENAI_API_KEY")
if not OPENAI_API_KEY:
    # Fail fast at import time: nothing below works without a key.
    raise RuntimeError(
        "OPENAI_API_KEY is not set. Please go to Settings → Secrets and add it."
    )

# Optional: allow overriding base URL (useful for gateways / proxies)
OPENAI_BASE_URL = _env("OPENAI_BASE_URL") or None

# Models
DEFAULT_MODEL = _env("CLARE_DEFAULT_MODEL", "gpt-4.1-mini")
EMBEDDING_MODEL = _env("CLARE_EMBEDDING_MODEL", "text-embedding-3-small")

# Timeout (seconds) - single source of truth
OPENAI_TIMEOUT_SECONDS = float(_env("CLARE_OPENAI_TIMEOUT_SECONDS", "20"))

# Connection pooling / keep-alive
HTTP_MAX_CONNECTIONS = int(_env("CLARE_HTTP_MAX_CONNECTIONS", "20"))
HTTP_MAX_KEEPALIVE = int(_env("CLARE_HTTP_MAX_KEEPALIVE", "10"))
HTTP_KEEPALIVE_EXPIRY = float(_env("CLARE_HTTP_KEEPALIVE_EXPIRY", "30"))

# Network retries (transport-level)
HTTP_RETRIES = int(_env("CLARE_HTTP_RETRIES", "2"))
# ============================
# Shared HTTP client (singleton)
# ============================
# One timeout budget drives all four httpx phases; connection setup and
# pool acquisition are capped at 10s so a dead endpoint fails fast even
# when the overall budget is larger.
_total_budget = OPENAI_TIMEOUT_SECONDS
_setup_budget = min(10.0, _total_budget)

_httpx_timeout = httpx.Timeout(
    timeout=_total_budget,
    connect=_setup_budget,
    read=_total_budget,
    write=_total_budget,
    pool=_setup_budget,
)

# Pool sizing comes straight from the env-derived settings above.
_limits = httpx.Limits(
    max_connections=HTTP_MAX_CONNECTIONS,
    max_keepalive_connections=HTTP_MAX_KEEPALIVE,
    keepalive_expiry=HTTP_KEEPALIVE_EXPIRY,
)

# A single httpx client reused across the process.
_http_client = httpx.Client(
    timeout=_httpx_timeout,
    limits=_limits,
    # "Connection: keep-alive" helps some proxies; safe default.
    headers={"Connection": "keep-alive"},
    follow_redirects=True,
)
# ============================
# OpenAI SDK client (singleton)
# ============================
# Keep the name `client` so existing call sites are untouched.
_openai_kwargs = dict(
    api_key=OPENAI_API_KEY,
    base_url=OPENAI_BASE_URL,
    http_client=_http_client,  # reuse the pooled httpx client above
    max_retries=HTTP_RETRIES,
)
client = OpenAI(**_openai_kwargs)
# ============================
# LangChain wrappers (optional)
# ============================
# If you use LangChain later, reuse the same timeout policy.
llm_default = ChatOpenAI(
    model=DEFAULT_MODEL,
    temperature=0.5,
    timeout=OPENAI_TIMEOUT_SECONDS,
    # Note: LangChain uses its own http stack; keep it simple.
)

# Consistency fix: the embeddings client previously ignored the shared
# timeout policy even though the comment above promises it; apply the
# same budget here (`timeout` is the alias for `request_timeout`).
embedding_client = OpenAIEmbeddings(
    model=EMBEDDING_MODEL,
    timeout=OPENAI_TIMEOUT_SECONDS,
)
# ============================
# Default course outline
# ============================
# One entry per week (Week 0 through Week 10).
# NOTE(review): the source text was mojibake-damaged ("β" where a dash
# belonged); restored as en dashes — confirm against the original syllabus.
DEFAULT_COURSE_TOPICS: List[str] = [
    "Week 0 – Welcome & What is Generative AI; course outcomes LO1–LO5.",
    "Week 1 – Foundations of GenAI: LLMs, Transformer & self-attention, perplexity.",
    "Week 2 – Foundation Models & multimodal models; data scale, bias & risks.",
    "Week 3 – Choosing Pre-trained Models; open-source vs proprietary; cost vs quality.",
    "Week 4 – Prompt Engineering: core principles; zero/few-shot; CoT; ReAct.",
    "Week 5 – Building a Simple Chatbot; memory (short vs long term); LangChain & UI.",
    "Week 6 – Review Week; cross-module consolidation & self-check prompts.",
    "Week 7 – Retrieval-Augmented Generation (RAG); embeddings; hybrid retrieval.",
    "Week 8 – Agents & Agentic RAG; planning, tools, knowledge augmentation.",
    "Week 9 – Evaluating GenAI Apps; hallucination, bias/fairness, metrics.",
    "Week 10 – Responsible AI; risks, governance, EU AI Act-style ideas.",
]
# ============================
# Learning modes
# ============================
# Maps each mode name to the behavioral instruction injected into the
# tutor prompt for that mode.
LEARNING_MODE_INSTRUCTIONS: Dict[str, str] = {
    "Concept Explainer": (
        "Explain concepts step by step. Use clear definitions, key formulas or structures, "
        "and one or two simple examples. Focus on clarity over depth. Regularly check if "
        "the student is following."
    ),
    "Socratic Tutor": (
        "Use a Socratic style. Ask the student ONE short question at a time, guide them to "
        "reason step by step, and only give full explanations after they try. Prioritize "
        "questions and hints over long lectures."
    ),
    "Exam Prep / Quiz": (
        "Behave like an exam prep coach. Often propose short quiz-style questions "
        "(multiple choice or short answer), then explain the solutions clearly. Emphasize "
        "common traps and how to avoid them."
    ),
    "Assignment Helper": (
        "Help with assignments WITHOUT giving full final solutions. Clarify requirements, "
        "break tasks into smaller steps, and provide hints, partial examples, or pseudo-code "
        "instead of complete code or final answers. Encourage the student to attempt each "
        "step before revealing more."
    ),
    "Quick Summary": (
        "Provide concise, bullet-point style summaries and cheat-sheet style notes. "
        "Focus on key ideas and avoid long paragraphs."
    ),
}

# UI ordering of the modes, derived from the dict's insertion order so the
# list and the instruction table can never drift apart.
LEARNING_MODES: List[str] = list(LEARNING_MODE_INSTRUCTIONS)
# ============================
# Upload doc types
# ============================
# Document categories a student can tag an upload with.
DOC_TYPES: List[str] = [
    "Syllabus",
    "Lecture Slides / PPT",
    "Literature Review / Paper",
    "Other Course Document",  # catch-all for anything not covered above
]
# ============================
# Clare system prompt
# ============================
# Base system prompt for every Clare conversation.
# Fix: the source was mojibake-damaged ("donβt"); restored the apostrophe.
CLARE_SYSTEM_PROMPT = """
You are Clare, an AI teaching assistant for Hanbridge University.
Core identity:
- You are patient, encouraging, and structured like a very good TA.
- Your UI and responses should be in ENGLISH by default.
- However, you can understand BOTH English and Chinese, and you may reply in Chinese
if the student clearly prefers Chinese or asks you to.
General responsibilities:
1. Help students understand course concepts step by step.
2. Ask short check-up questions to confirm understanding instead of giving huge long lectures.
3. When the student seems confused, break content into smaller chunks and use simple language first.
4. When the student is advanced, you can switch to more technical explanations.
Safety and honesty:
- If you don't know, say you are not sure and suggest how to verify.
- Do not fabricate references, exam answers, or grades.
"""