"""DevAssist AI backend.

FastAPI service that wraps a HuggingFace text-generation pipeline with
Spitch speech-to-text / translation and a retriever-backed (RAG) SME
website generator.
"""

import os
import tempfile
import traceback

from fastapi import Body, FastAPI, File, Header, HTTPException, UploadFile
from fastapi.middleware.cors import CORSMiddleware
from langdetect import DetectorFactory, detect
from pydantic import BaseModel
from spitch import Spitch
from transformers import pipeline

from smebuilder_vector import retriever

# ----------------- CONFIG -----------------
DetectorFactory.seed = 0  # make langdetect deterministic across requests

SPITCH_API_KEY = os.getenv("SPITCH_API_KEY")
HF_MODEL = os.getenv("HF_MODEL", "deepseek-ai/deepseek-coder-1.3b-instruct")
FRONTEND_ORIGIN = os.getenv("ALLOWED_ORIGIN", "*")
PROJECT_API_KEY = os.getenv("PROJECT_API_KEY")  # optional; auth disabled when unset

if not SPITCH_API_KEY:
    raise RuntimeError("Set SPITCH_API_KEY in environment before starting.")
# Spitch() reads its key from the environment, so re-export it explicitly.
os.environ["SPITCH_API_KEY"] = SPITCH_API_KEY

spitch_client = Spitch()

# ----------------- HUGGINGFACE PIPELINE -----------------
llm_pipeline = pipeline(
    task="text-generation",
    model=HF_MODEL,
    temperature=0.7,
    top_p=0.9,
    do_sample=True,
    repetition_penalty=1.1,
    max_new_tokens=2048,
)

# ----------------- FASTAPI -----------------
app = FastAPI(title="DevAssist AI Backend (FastAPI + HuggingFace Pipeline)")

app.add_middleware(
    CORSMiddleware,
    allow_origins=[FRONTEND_ORIGIN] if FRONTEND_ORIGIN != "*" else ["*"],
    allow_credentials=True,
    allow_methods=["GET", "POST", "OPTIONS"],
    allow_headers=["Authorization", "Content-Type"],
)

# ----------------- PROMPTS -----------------
chat_template = """You are DevAssist, an AI coding assistant.

Guidelines:
- Always format responses in Markdown.
- Use section headers: Explanation:, Steps:, Fixed Code:
- Use bullet points for steps.
- Use fenced code blocks for code.
- Be friendly yet professional.

Question: {question}

Answer:
"""

stt_chat_template = """You are DevAssist, an AI coding assistant.
The input is transcribed speech. Interpret it as a developer question.
Provide clear answers with code examples. If unclear, ask for clarification.

Spoken Question: {speech}

Answer:
"""

autodoc_template = """You are DevAssist DocBot.
Read the code and produce professional documentation in markdown.

Code:
{code}

Documentation:
"""

sme_template = """
You are a senior full-stack engineer specializing in modern front-end development.
Your job is to generate **production-ready code** for websites and apps.

Guidelines:
- Always return three separate files: index.html, styles.css, and script.js
- HTML must be semantic, responsive, and mobile-first
- CSS should use Flexbox/Grid with hover/transition effects
- JavaScript must add interactivity (animations, toggles, button actions)
- Include hero, feature grid, testimonials, and footer
- Use realistic content (no lorem ipsum, no placeholders)

Prompt: {user_prompt}

Context: {context}

Output:
"""


# ----------------- REQUEST MODELS -----------------
class ChatRequest(BaseModel):
    # The developer question to answer.
    question: str


class AutoDocRequest(BaseModel):
    # Source code to document.
    code: str


# ----------------- AUTH -----------------
def check_auth(authorization: str | None) -> None:
    """Validate the ``Authorization: Bearer <token>`` header.

    No-op when PROJECT_API_KEY is unset (auth disabled). Raises
    HTTPException 401 for a missing/malformed header and 403 for a
    token mismatch.
    """
    if not PROJECT_API_KEY:
        return
    if not authorization or not authorization.startswith("Bearer "):
        raise HTTPException(status_code=401, detail="Missing bearer token")
    token = authorization.split(" ", 1)[1]
    if token != PROJECT_API_KEY:
        raise HTTPException(status_code=403, detail="Invalid token")


# ----------------- DEBUG LOGGING -----------------
DEBUG_LOG_FILE = "llm_debug.log"


def _log_debug(prompt_text: str, label: str, body: str) -> None:
    """Append one prompt/response (or prompt/traceback) pair to the debug log."""
    with open(DEBUG_LOG_FILE, "a", encoding="utf-8") as f:
        f.write("=== PROMPT START ===\n")
        f.write(prompt_text + "\n")
        f.write(f"--- {label} ---\n")
        f.write(body + "\n")
        f.write("=== PROMPT END ===\n\n")


def run_pipeline(prompt_text: str):
    """Generate a completion for *prompt_text*.

    Returns the generated text (str) on success, or an error dict of the
    form ``{"success": False, "error": ..., ...}`` on failure — callers
    distinguish the two with isinstance().
    """
    try:
        output_list = llm_pipeline(prompt_text, max_new_tokens=2048, do_sample=True)
        text = output_list[0]["generated_text"]
        # HF text-generation pipelines include the prompt at the start of
        # generated_text by default; strip it so clients only receive the
        # completion, not the echoed prompt template.
        text = text.removeprefix(prompt_text).strip()

        _log_debug(prompt_text, "MODEL OUTPUT", text)

        if not text:
            return {"success": False, "error": "⚠️ LLM returned empty output", "prompt": prompt_text}
        return text
    except Exception:
        _log_debug(prompt_text, "EXCEPTION", traceback.format_exc())
        return {
            "success": False,
            "error": "⚠️ LLM error",
            "details": traceback.format_exc(),
            "prompt": prompt_text,
        }


# ----------------- AUDIO PROCESSING -----------------
async def process_audio(file: UploadFile, lang_hint: str | None = None):
    """Transcribe an uploaded audio file and translate the text to English.

    Returns ``(transcription, detected_lang, translation)``. Transcription
    retries with an explicit English hint on failure; translation falls
    back to the raw transcription on failure.
    """
    # Read the upload directly. (A previous revision spooled it through a
    # NamedTemporaryFile(delete=False) that was never removed — a temp-file
    # leak on every request — only to read the identical bytes back.)
    audio_bytes = await file.read()

    try:
        if lang_hint:
            resp = spitch_client.speech.transcribe(language=lang_hint, content=audio_bytes)
        else:
            resp = spitch_client.speech.transcribe(content=audio_bytes)
    except Exception:
        # Last-resort retry with an explicit English hint.
        resp = spitch_client.speech.transcribe(language="en", content=audio_bytes)

    # The Spitch SDK may return an object with .text or a plain dict.
    transcription = getattr(resp, "text", "") or (resp.get("text", "") if isinstance(resp, dict) else "")

    detected_lang = "en"
    try:
        detected_lang = detect(transcription) if transcription.strip() else "en"
    except Exception:
        pass  # best effort: keep the "en" default

    translation = transcription
    if detected_lang != "en":
        try:
            translation_resp = spitch_client.text.translate(
                text=transcription, source=detected_lang, target="en"
            )
            translation = getattr(translation_resp, "text", "") or translation_resp.get("text", "")
        except Exception:
            translation = transcription  # best effort: fall back to the original text

    return transcription, detected_lang, translation


# ----------------- ENDPOINTS -----------------
@app.get("/")
def root():
    """Liveness check."""
    return {"status": "✅ DevAssist AI Backend running"}


@app.post("/chat")
def chat(req: ChatRequest, authorization: str | None = Header(None)):
    """Answer a developer question in Markdown."""
    check_auth(authorization)
    result = run_pipeline(chat_template.format(question=req.question))
    # Error dicts pass through unchanged; success wraps the text in "reply".
    return result if isinstance(result, dict) else {"reply": result}


@app.post("/stt")
async def stt_audio(
    file: UploadFile = File(...),
    lang_hint: str | None = None,
    authorization: str | None = Header(None),
):
    """Transcribe spoken audio, translate it to English, and answer it."""
    check_auth(authorization)
    transcription, detected_lang, translation = await process_audio(file, lang_hint)
    result = run_pipeline(stt_chat_template.format(speech=translation))
    return {
        "transcription": transcription,
        "detected_language": detected_lang,
        "translation": translation,
        # On failure, surface the error message rather than an empty reply
        # (the error dict never carries a "reply" key).
        "reply": result if isinstance(result, str) else result.get("error", ""),
    }


@app.post("/autodoc")
def autodoc(req: AutoDocRequest, authorization: str | None = Header(None)):
    """Produce Markdown documentation for submitted code."""
    check_auth(authorization)
    result = run_pipeline(autodoc_template.format(code=req.code))
    return result if isinstance(result, dict) else {"documentation": result}


@app.post("/sme/generate")
async def sme_generate(payload: dict = Body(...), authorization: str | None = Header(None)):
    """Generate a three-file website (HTML/CSS/JS) from a text prompt with RAG context."""
    check_auth(authorization)
    try:
        user_prompt = payload.get("user_prompt", "")
        context_docs = retriever.get_relevant_documents(user_prompt)
        context = (
            "\n".join(doc.page_content for doc in context_docs) if context_docs else "No extra context"
        )
        result = run_pipeline(sme_template.format(user_prompt=user_prompt, context=context))
        if isinstance(result, dict):
            # run_pipeline already built a {"success": False, ...} payload;
            # don't mislabel it as a successful (but empty) generation.
            return result
        return {"success": True, "data": result}
    except Exception:
        return {"success": False, "error": "⚠️ LLM error", "details": traceback.format_exc()}


@app.post("/sme/speech-generate")
async def sme_speech_generate(
    file: UploadFile = File(...),
    lang_hint: str | None = None,
    authorization: str | None = Header(None),
):
    """Generate a website from a spoken prompt (STT → translate → RAG → LLM)."""
    check_auth(authorization)
    transcription, detected_lang, translation = await process_audio(file, lang_hint)
    try:
        context_docs = retriever.get_relevant_documents(translation)
        context = (
            "\n".join(doc.page_content for doc in context_docs) if context_docs else "No extra context"
        )
        result = run_pipeline(sme_template.format(user_prompt=translation, context=context))
        if isinstance(result, dict):
            # Propagate the failure payload instead of success-with-empty-site.
            return result
        return {
            "success": True,
            "transcription": transcription,
            "detected_language": detected_lang,
            "translation": translation,
            "sme_site": result,
        }
    except Exception:
        return {"success": False, "error": "⚠️ LLM error", "details": traceback.format_exc()}


# ----------------- MAIN -----------------
if __name__ == "__main__":
    import uvicorn

    uvicorn.run("main:app", host="0.0.0.0", port=7860, reload=False)