alaselababatunde committed on
Commit
88128e6
·
1 Parent(s): 306f49b
Files changed (1) hide show
  1. main.py +52 -60
main.py CHANGED
@@ -1,15 +1,17 @@
1
- # ===============================================
2
- # Tech Disciples AI Backend — LangChain ≥1.0
3
- # ===============================================
4
 
5
  from fastapi import FastAPI, HTTPException, Header
6
  from pydantic import BaseModel
7
  import torch
8
  import logging
9
  import os
 
 
10
  from huggingface_hub import login
11
 
12
- # LangChain imports (modern ≥1.0)
13
  from langchain.llms.huggingface_pipeline import HuggingFacePipeline
14
  from langchain.chains import LLMChain
15
  from langchain.prompts.prompt import PromptTemplate
@@ -18,80 +20,75 @@ from langchain.memory import ConversationBufferMemory
18
  # Transformers pipeline
19
  from transformers import pipeline
20
 
21
- # ===============================================
22
  # CONFIGURATION
23
- # ===============================================
24
  API_SECRET = "techdisciplesai404"
25
  MODEL_NAME = "meta-llama/Llama-3.1-8B"
26
  DEVICE = 0 if torch.cuda.is_available() else -1
27
 
28
- # ===============================================
29
- # LOGGING SETUP
30
- # ===============================================
31
  logging.basicConfig(level=logging.INFO)
32
  logger = logging.getLogger("TechDisciplesAI")
33
 
34
- # ===============================================
35
  # FASTAPI APP
36
- # ===============================================
37
- app = FastAPI(title="Tech Disciples AI", version="3.0")
38
-
39
- # ===============================================
40
- # LOGIN TO HUGGINGFACE HUB
41
- # ===============================================
42
- hf_token = os.getenv("HUGGINGFACEHUB_API_TOKEN")
43
- if hf_token:
44
- try:
45
- login(token=hf_token)
46
- logger.info("✅ Logged into Hugging Face Hub successfully.")
47
- except Exception as e:
48
- logger.error(f"⚠️ Hugging Face login failed: {e}")
49
- else:
50
- logger.warning("⚠️ No HUGGINGFACEHUB_API_TOKEN found in environment.")
51
 
52
- # ===============================================
53
- # LOAD MODEL USING PIPELINE + LANGCHAIN
54
- # ===============================================
55
  try:
56
  logger.info(f"🚀 Loading model: {MODEL_NAME}")
57
 
 
 
 
 
 
 
 
 
58
  hf_pipeline = pipeline(
59
  "text-generation",
60
  model=MODEL_NAME,
61
  device=DEVICE,
62
  max_new_tokens=1024,
63
  temperature=0.4,
64
- do_sample=True,
65
  top_p=0.9,
66
- repetition_penalty=1.2,
 
 
67
  )
68
 
69
  llm = HuggingFacePipeline(pipeline=hf_pipeline)
70
- logger.info("✅ Model loaded successfully.")
71
 
72
  except Exception as e:
73
- logger.error(f"❌ Failed to load model: {e}")
74
  llm = None
75
 
76
- # ===============================================
77
- # MEMORY SYSTEM
78
- # ===============================================
79
  memory = ConversationBufferMemory(memory_key="conversation_history")
80
 
81
- # ===============================================
82
- # PROMPT TEMPLATE
83
- # ===============================================
84
  prompt_template = """
85
- You are Tech Disciples AI — a spiritually aware, friendly, pastor-like talk buddy.
86
- You respond warmly, naturally, and thoughtfully like a caring church member.
87
- Always provide a clear, detailed answer with biblical insights or Bible verses.
88
- Do not give vague responses. If a question is not related to Christianity or the Bible, politely inform the user.
89
 
90
  Conversation so far:
91
  {conversation_history}
92
 
93
  User: {query}
94
- Tech Disciples AI (respond fully with examples, guidance, or Bible references):
95
  """
96
 
97
  prompt = PromptTemplate(
@@ -99,41 +96,36 @@ prompt = PromptTemplate(
99
  input_variables=["conversation_history", "query"]
100
  )
101
 
102
- # ===============================================
103
- # LLM CHAIN (with memory)
104
- # ===============================================
105
- chain = LLMChain(
106
- prompt=prompt,
107
- llm=llm,
108
- memory=memory
109
- )
110
 
111
- # ===============================================
112
  # REQUEST MODEL
113
- # ===============================================
114
  class QueryInput(BaseModel):
115
  query: str
116
  session_id: str | None = "default"
117
 
118
- # ===============================================
119
  # ROUTES
120
- # ===============================================
121
  @app.get("/")
122
  async def root():
123
- return {"message": "✅ Tech Disciples AI is running."}
124
 
125
  @app.post("/ai-chat")
126
  async def ai_chat(data: QueryInput, x_api_key: str = Header(None)):
127
  if x_api_key != API_SECRET:
128
  raise HTTPException(status_code=403, detail="Forbidden: Invalid API key")
129
 
130
- if not llm:
131
- raise HTTPException(status_code=500, detail="Model not initialized")
132
 
133
  try:
134
  response = chain.run(query=data.query.strip())
135
  return {"reply": response.strip()}
136
  except Exception as e:
137
- logger.error(f"⚠️ Model error: {e}")
138
  raise HTTPException(status_code=500, detail="Model failed to respond")
139
-
 
1
+ # =====================================================
2
+ # Tech Disciples AI Backend — Llama 3.1 (8B) Version
3
+ # =====================================================
4
 
5
  from fastapi import FastAPI, HTTPException, Header
6
  from pydantic import BaseModel
7
  import torch
8
  import logging
9
  import os
10
+
11
+ # Hugging Face Hub
12
  from huggingface_hub import login
13
 
14
+ # LangChain
15
  from langchain.llms.huggingface_pipeline import HuggingFacePipeline
16
  from langchain.chains import LLMChain
17
  from langchain.prompts.prompt import PromptTemplate
 
20
  # Transformers pipeline
21
  from transformers import pipeline
22
 
23
# =====================================================
# CONFIGURATION
# =====================================================
# FIX: avoid a secret hard-coded in source control. Read the key from the
# environment when set; fall back to the previous literal so existing
# deployments keep working unchanged.
# NOTE(review): a secret that was ever committed should be rotated.
API_SECRET = os.getenv("API_SECRET", "techdisciplesai404")
MODEL_NAME = "meta-llama/Llama-3.1-8B"
# transformers pipeline device index: 0 = first CUDA GPU, -1 = CPU.
DEVICE = 0 if torch.cuda.is_available() else -1

# =====================================================
# LOGGING
# =====================================================
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("TechDisciplesAI")
35
 
36
# =====================================================
# FASTAPI APP
# =====================================================
app = FastAPI(title="Tech Disciples AI", version="3.1")

# =====================================================
# MODEL LOAD
# =====================================================
# Start in the "not loaded" state; the try-block below overwrites this
# on a successful model load, and the /ai-chat route guards on it.
llm = None
 
 
 
 
 
 
 
 
45
 
 
 
 
46
# Load the Llama 3.1 (8B) text-generation pipeline and wrap it for LangChain.
# On any failure `llm` stays None so the API can report "not initialized".
try:
    logger.info(f"🚀 Loading model: {MODEL_NAME}")

    # Authenticate with the Hub first — the Llama checkpoint is gated.
    hf_token = os.getenv("HUGGINGFACEHUB_API_TOKEN")
    if hf_token:
        login(token=hf_token)
        logger.info("🔐 Hugging Face authentication successful.")
    else:
        logger.warning("⚠️ HUGGINGFACEHUB_API_TOKEN not found — gated models may fail.")

    # Load text generation pipeline
    hf_pipeline = pipeline(
        "text-generation",
        model=MODEL_NAME,
        device=DEVICE,
        max_new_tokens=1024,
        temperature=0.4,
        top_p=0.9,
        repetition_penalty=1.15,
        do_sample=True,
        # FIX: `use_auth_token` is deprecated (and removed in recent
        # transformers releases); pass the token explicitly instead.
        # None is fine here — the Hub then falls back to the stored login.
        token=hf_token,
    )

    llm = HuggingFacePipeline(pipeline=hf_pipeline)
    logger.info("✅ Model loaded successfully (Llama 3.1 - 8B).")

except Exception as e:
    logger.error(f"❌ Model load failed: {e}")
    llm = None
75
 
76
# =====================================================
# MEMORY + PROMPT
# =====================================================
# Conversation history accumulates under the key the prompt interpolates.
memory = ConversationBufferMemory(memory_key="conversation_history")

prompt_template = """
You are Tech Disciples AI — a warm, spiritual, and knowledgeable conversational AI built
to give Biblical guidance and Christian-based reflections. You speak with empathy, wisdom,
and natural tone never robotic. Always connect your points to scripture or Christian principles
when relevant.

Conversation so far:
{conversation_history}

User: {query}
Tech Disciples AI (respond with warmth, depth, and Biblical understanding):
"""

prompt = PromptTemplate(
    template=prompt_template,
    input_variables=["conversation_history", "query"],
)

# Only wire up the chain when the model actually loaded; the /ai-chat
# route checks `chain` before calling it.
chain = LLMChain(prompt=prompt, llm=llm, memory=memory) if llm else None
 
 
 
 
103
 
104
# =====================================================
# REQUEST MODEL
# =====================================================
class QueryInput(BaseModel):
    """Request body for the /ai-chat endpoint."""

    # The user's chat message.
    query: str
    # Client-supplied session identifier; not referenced by the visible
    # /ai-chat handler (the chain keeps a single shared memory).
    session_id: str | None = "default"
110
 
111
# =====================================================
# ROUTES
# =====================================================
@app.get("/")
async def root():
    """Simple health-check / liveness endpoint."""
    return {"message": "✅ Tech Disciples AI (Llama 3.1) is running."}
117
 
118
@app.post("/ai-chat")
async def ai_chat(data: QueryInput, x_api_key: str = Header(None)):
    """Generate a chat reply for the given query.

    Requires the shared secret in the ``x-api-key`` header.
    Raises 403 on a bad key, 500 when the model never initialized or the
    chain fails at generation time.
    """
    if x_api_key != API_SECRET:
        raise HTTPException(status_code=403, detail="Forbidden: Invalid API key")

    if not chain:
        raise HTTPException(status_code=500, detail="Model not initialized or failed to load")

    try:
        response = chain.run(query=data.query.strip())
        return {"reply": response.strip()}
    except Exception as e:
        # FIX: logger.exception records the full traceback (logger.error with
        # an f-string loses it), and `from e` preserves the causal chain on
        # the re-raised HTTP error for easier debugging.
        logger.exception("⚠️ Error generating response")
        raise HTTPException(status_code=500, detail="Model failed to respond") from e