"""Minimal single-file llama.cpp chat API served with FastAPI.

Exposes a liveness endpoint (`GET /`) and a bearer-key-protected,
single-turn completion endpoint (`POST /v1/chat`).
"""
import hmac
import os

from fastapi import FastAPI, Request, HTTPException, Depends
from fastapi.middleware.cors import CORSMiddleware
from llama_cpp import Llama
import uvicorn

app = FastAPI()

# --- CORS SETTINGS (Crucial for GitHub Pages) ---
# NOTE(review): wildcard origins combined with allow_credentials=True makes
# Starlette echo back the requesting Origin header. If cookies/credentials
# are ever actually used, pin allow_origins to the real GitHub Pages origin.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# --- CONFIGURATION ---
# Prefer an environment-supplied key; the literal default keeps existing
# deployments working but should not ship to production.
MY_API_KEY = os.environ.get("API_KEY", "my-secret-key-456")

# Optimized for HF Free Tier CPU: small context, 4 threads, no GPU layers.
llm = Llama(model_path="./model.gguf", n_ctx=2048, n_threads=4, n_gpu_layers=0)


def verify_key(request: Request) -> None:
    """FastAPI dependency: reject requests lacking the correct bearer key.

    Raises:
        HTTPException: 403 when the Authorization header is missing or wrong.
    """
    # Default to "" so a missing header fails the comparison cleanly.
    auth = request.headers.get("Authorization") or ""
    expected = f"Bearer {MY_API_KEY}"
    # Constant-time comparison avoids leaking the key via response timing.
    if not hmac.compare_digest(auth, expected):
        raise HTTPException(status_code=403, detail="Unauthorized")


@app.get("/")
def home():
    """Liveness probe / landing endpoint."""
    return {"status": "Online", "branding": "ChatMPT Team"}


@app.post("/v1/chat")
async def chat(request: Request, _=Depends(verify_key)):
    """Single-turn chat completion.

    Expects a JSON object body ``{"prompt": "<user text>"}`` and returns
    ``{"reply": "<model text>"}``.

    Raises:
        HTTPException: 400 on malformed JSON, a non-object body, or an
            empty prompt; 403 (via dependency) on a bad API key.
    """
    try:
        body = await request.json()
    except Exception:
        # Malformed JSON previously surfaced as an unhandled 500.
        raise HTTPException(status_code=400, detail="Body must be valid JSON") from None
    if not isinstance(body, dict):
        # A JSON array/string body would crash the old `.get` call.
        raise HTTPException(status_code=400, detail="Body must be a JSON object")

    user_input = str(body.get("prompt", "")).strip()
    if not user_input:
        raise HTTPException(status_code=400, detail="'prompt' must be a non-empty string")

    # Instruction-based prompt to fix the identity.
    prompt = (
        "Assistant is a polite AI named ChatMPT, created by the ChatMPT Team.\n"
        f"User: {user_input}\n"
        "Assistant:"
    )

    # NOTE: "\n" in the stop list deliberately limits the model to a
    # single-line reply; drop it if multi-line answers are wanted.
    response = llm(
        prompt,
        max_tokens=500,
        stop=["User:", "\n", "Assistant:"],
        temperature=0.7,
    )
    raw_reply = response["choices"][0]["text"].strip()

    # Anti-Typo Safety Filter: the model sometimes misspells its own name.
    final_reply = raw_reply
    for typo in ("ChatPBT", "ChatPP"):
        final_reply = final_reply.replace(typo, "ChatMPT")
    return {"reply": final_reply}


if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=7860)