# NOTE: The lines below were scraped from a Hugging Face Spaces page
# (Space status: "Sleeping"); they are page residue, not program code.
import logging
import os

import torch
from fastapi import FastAPI, Header, HTTPException
from langchain.chains import LLMChain
from langchain.llms import HuggingFacePipeline
from langchain.memory import ConversationBufferMemory
from langchain.prompts import PromptTemplate
from pydantic import BaseModel
from transformers import pipeline
# ===============================================
# CONFIGURATION
# ===============================================
# Read the API key from the environment so the secret need not live in
# source control; the literal default preserves the previous behavior.
# NOTE(security): rotate this default and set API_SECRET in the deploy env.
API_SECRET = os.environ.get("API_SECRET", "techdisciplesai404")
MODEL_NAME = "google/flan-t5-large"
# transformers pipeline device convention: 0 = first CUDA GPU, -1 = CPU.
DEVICE = 0 if torch.cuda.is_available() else -1

# ===============================================
# LOGGING SETUP
# ===============================================
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("TechDisciplesAI")

# ===============================================
# FASTAPI APP
# ===============================================
app = FastAPI(title="Tech Disciples AI (LangChain Conversational)", version="3.0")
# ===============================================
# LOAD MODEL USING PIPELINE + LANGCHAIN
# ===============================================
# Build the Hugging Face text2text pipeline and wrap it for LangChain.
# On any failure we log the error and leave llm as None; the chat route
# then reports a 500 instead of the whole app failing to start.
try:
    logger.info(f"🚀 Loading model: {MODEL_NAME}")
    generation_config = dict(
        model=MODEL_NAME,
        device=DEVICE,
        max_new_tokens=256,   # cap on generated length
        temperature=0.3,      # low temperature -> mostly focused answers
        do_sample=True,
        top_p=0.9,            # nucleus sampling cutoff
    )
    hf_pipeline = pipeline("text2text-generation", **generation_config)
    llm = HuggingFacePipeline(pipeline=hf_pipeline)
    logger.info("✅ Model loaded successfully.")
except Exception as e:
    logger.error(f"❌ Failed to load model: {e}")
    llm = None
# ===============================================
# MEMORY SYSTEM
# ===============================================
# One process-wide buffer: every caller shares a single conversation
# history (session_id from the request is not consulted here).
# memory_key must match the {conversation_history} slot in the prompt.
memory = ConversationBufferMemory(memory_key="conversation_history")
# ===============================================
# PROMPT TEMPLATE
# ===============================================
# Persona plus the two template slots the chain fills in:
# {conversation_history} (from memory) and {query} (from the request).
prompt_template = """
You are Tech Disciples AI — a spiritually aware, intelligent, and kind conversational assistant.
You offer thoughtful, biblical, and insightful answers with grace, empathy, and calm intelligence.
Conversation so far:
{conversation_history}
User: {query}
Tech Disciples AI:
"""

# from_template infers the input variables ("conversation_history",
# "query") directly from the braces in the template string.
prompt = PromptTemplate.from_template(prompt_template)
# ===============================================
# LLM CHAIN (with memory)
# ===============================================
# Guard: if model loading failed above, llm is None and constructing
# LLMChain would raise here at import time, crashing the whole app.
# Defer the failure to the chat route, which returns a clean 500.
chain = LLMChain(prompt=prompt, llm=llm, memory=memory) if llm else None
# ===============================================
# REQUEST MODEL
# ===============================================
class QueryInput(BaseModel):
    """Request body accepted by the chat endpoint."""

    # The user's message to the assistant.
    query: str
    # Reserved for per-user/session memory; currently not used by the chain.
    session_id: str | None = "default"
# ===============================================
# ROUTES
# ===============================================
# Fix: the handler was never registered with FastAPI — without the
# decorator, GET / returned 404 and this function was unreachable.
@app.get("/")
async def root():
    """Health-check endpoint confirming the service is running."""
    return {"message": "✅ Tech Disciples AI (LangChain Memory) is running."}
# Fix: the handler was never registered with FastAPI — without the
# decorator, the chat endpoint did not exist and was unreachable.
@app.post("/chat")
async def ai_chat(data: QueryInput, x_api_key: str = Header(None)):
    """Authenticated chat endpoint.

    Validates the X-API-Key header, then runs the user's query through
    the memory-backed LLM chain and returns the model's reply.

    Raises:
        HTTPException: 403 on a bad API key, 500 if the model is not
            loaded or fails to respond.
    """
    # --- Authentication ---
    if x_api_key != API_SECRET:
        raise HTTPException(status_code=403, detail="Forbidden: Invalid API key")
    if not llm:
        raise HTTPException(status_code=500, detail="Model not initialized")
    # --- Process Query ---
    try:
        response = chain.run(query=data.query.strip())
        return {"reply": response.strip()}
    except Exception as e:
        logger.error(f"⚠️ Model error: {e}")
        # Chain the cause so the original traceback is preserved in logs.
        raise HTTPException(status_code=500, detail="Model failed to respond") from e