alaselababatunde committed on
Commit
b353ea6
·
1 Parent(s): a85851b
Files changed (1) hide show
  1. main.py +51 -70
main.py CHANGED
@@ -1,7 +1,3 @@
1
- # ===============================================
2
- # TechDisciples AI Backend — Stable + LangChain Memory
3
- # ===============================================
4
-
5
  from fastapi import FastAPI, HTTPException, Header
6
  from pydantic import BaseModel
7
  import torch
@@ -11,49 +7,36 @@ import logging
11
  from langchain.chains import LLMChain
12
  from langchain.prompts import PromptTemplate
13
  from langchain.memory import ConversationBufferMemory
14
- from langchain.llms.base import LLM
15
 
16
  # Transformers pipeline
17
  from transformers import pipeline
18
 
19
- # ==============================
20
- # Logging Setup
21
- # ==============================
22
- logging.basicConfig(level=logging.INFO)
23
- logger = logging.getLogger("TechDisciplesAI")
24
-
25
- # ==============================
26
- # FastAPI App Init
27
- # ==============================
28
- app = FastAPI(title="TechDisciples AI Backend")
29
 
30
- @app.get("/")
31
- async def root():
32
- return {"status": "✅ TechDisciples AI Backend is running and stable."}
33
-
34
- # ==============================
35
- # Auth Config
36
- # ==============================
37
  API_SECRET = "techdisciplesai404"
38
-
39
- def check_auth(x_api_key: str | None):
40
- if not x_api_key or x_api_key != API_SECRET:
41
- raise HTTPException(status_code=403, detail="Forbidden: Invalid API key")
42
-
43
- # ==============================
44
- # Request Schema
45
- # ==============================
46
- class QueryInput(BaseModel):
47
- query: str
48
-
49
- # ==============================
50
- # Hugging Face Pipeline
51
- # ==============================
52
  MODEL_NAME = "google/flan-t5-large"
53
  DEVICE = 0 if torch.cuda.is_available() else -1
54
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
55
  try:
56
  logger.info(f"🚀 Loading model: {MODEL_NAME}")
 
57
  hf_pipeline = pipeline(
58
  "text2text-generation",
59
  model=MODEL_NAME,
@@ -63,40 +46,23 @@ try:
63
  do_sample=True,
64
  top_p=0.9
65
  )
 
 
 
66
  logger.info("✅ Model loaded successfully.")
 
67
  except Exception as e:
68
  logger.error(f"❌ Failed to load model: {e}")
69
- hf_pipeline = None
70
-
71
- # ==============================
72
- # Minimal LangChain LLM Wrapper
73
- # ==============================
74
- class HuggingFaceLLM(LLM):
75
- """Wraps a Hugging Face pipeline for LangChain compatibility."""
76
- def __init__(self, pipeline):
77
- self.pipeline = pipeline
78
-
79
- @property
80
- def _llm_type(self):
81
- return "huggingface_pipeline"
82
-
83
- def _call(self, prompt: str, stop=None):
84
- try:
85
- output = self.pipeline(prompt)
86
- if isinstance(output, list) and len(output) > 0:
87
- return output[0].get("generated_text", "")
88
- return str(output)
89
- except Exception as e:
90
- logger.error(f"⚠️ LLM wrapper error: {e}")
91
- return f"⚠️ Model error: {str(e)}"
92
-
93
- llm = HuggingFaceLLM(hf_pipeline) if hf_pipeline else None
94
-
95
- # ==============================
96
- # LangChain Memory + Prompt
97
- # ==============================
98
  memory = ConversationBufferMemory(memory_key="conversation_history")
99
 
 
 
 
100
  prompt_template = """
101
  You are Tech Disciples AI — a spiritually aware, intelligent, and kind conversational assistant.
102
  You offer thoughtful, biblical, and insightful answers with grace, empathy, and calm intelligence.
@@ -113,18 +79,33 @@ prompt = PromptTemplate(
113
  input_variables=["conversation_history", "query"]
114
  )
115
 
 
 
 
116
  chain = LLMChain(
117
- llm=llm,
118
  prompt=prompt,
 
119
  memory=memory
120
  )
121
 
122
- # ==============================
123
- # Endpoint
124
- # ==============================
 
 
 
 
 
 
 
 
 
 
 
125
  @app.post("/ai-chat")
126
  async def ai_chat(data: QueryInput, x_api_key: str = Header(None)):
127
- check_auth(x_api_key)
 
128
 
129
  if not llm:
130
  raise HTTPException(status_code=500, detail="Model not initialized")
 
 
 
 
 
1
  from fastapi import FastAPI, HTTPException, Header
2
  from pydantic import BaseModel
3
  import torch
 
7
  from langchain.chains import LLMChain
8
  from langchain.prompts import PromptTemplate
9
  from langchain.memory import ConversationBufferMemory
10
+ from langchain.llms.base import LLM # For custom LLM wrappers
11
 
12
  # Transformers pipeline
13
  from transformers import pipeline
14
 
 
 
 
 
 
 
 
 
 
 
15
 
16
+ # ===============================================
17
+ # CONFIGURATION
18
+ # ===============================================
 
 
 
 
19
  API_SECRET = "techdisciplesai404"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
20
  MODEL_NAME = "google/flan-t5-large"
21
  DEVICE = 0 if torch.cuda.is_available() else -1
22
 
23
+ # ===============================================
24
+ # LOGGING SETUP
25
+ # ===============================================
26
+ logging.basicConfig(level=logging.INFO)
27
+ logger = logging.getLogger("TechDisciplesAI")
28
+
29
+ # ===============================================
30
+ # FASTAPI APP
31
+ # ===============================================
32
+ app = FastAPI(title="Tech Disciples AI (LangChain Conversational)", version="3.0")
33
+
34
+ # ===============================================
35
+ # LOAD MODEL USING PIPELINE + LANGCHAIN
36
+ # ===============================================
37
  try:
38
  logger.info(f"🚀 Loading model: {MODEL_NAME}")
39
+
40
  hf_pipeline = pipeline(
41
  "text2text-generation",
42
  model=MODEL_NAME,
 
46
  do_sample=True,
47
  top_p=0.9
48
  )
49
+
50
+ # You can later wrap hf_pipeline in a custom LLM class compatible with LLMChain
51
+ llm = hf_pipeline
52
  logger.info("✅ Model loaded successfully.")
53
+
54
  except Exception as e:
55
  logger.error(f"❌ Failed to load model: {e}")
56
+ llm = None
57
+
58
+ # ===============================================
59
+ # MEMORY SYSTEM
60
+ # ===============================================
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
61
  memory = ConversationBufferMemory(memory_key="conversation_history")
62
 
63
+ # ===============================================
64
+ # PROMPT TEMPLATE
65
+ # ===============================================
66
  prompt_template = """
67
  You are Tech Disciples AI — a spiritually aware, intelligent, and kind conversational assistant.
68
  You offer thoughtful, biblical, and insightful answers with grace, empathy, and calm intelligence.
 
79
  input_variables=["conversation_history", "query"]
80
  )
81
 
82
+ # ===============================================
83
+ # LLM CHAIN (with memory)
84
+ # ===============================================
85
  chain = LLMChain(
 
86
  prompt=prompt,
87
+ llm=llm,
88
  memory=memory
89
  )
90
 
91
+ # ===============================================
92
+ # REQUEST MODEL
93
+ # ===============================================
94
+ class QueryInput(BaseModel):
95
+ query: str
96
+ session_id: str | None = "default"
97
+
98
+ # ===============================================
99
+ # ROUTES
100
+ # ===============================================
101
+ @app.get("/")
102
+ async def root():
103
+ return {"message": "✅ Tech Disciples AI (LangChain Memory) is running."}
104
+
105
  @app.post("/ai-chat")
106
  async def ai_chat(data: QueryInput, x_api_key: str = Header(None)):
107
+ if x_api_key != API_SECRET:
108
+ raise HTTPException(status_code=403, detail="Forbidden: Invalid API key")
109
 
110
  if not llm:
111
  raise HTTPException(status_code=500, detail="Model not initialized")