alaselababatunde committed on
Commit
a0dcbe2
·
1 Parent(s): 8f057f3
Files changed (2) hide show
  1. Dockerfile +3 -3
  2. app.py +88 -34
Dockerfile CHANGED
@@ -3,7 +3,7 @@
3
  # ==============================================================
4
 
5
  # Use lightweight official Python image
6
- FROM python:3.11-slim
7
 
8
  # Set environment variables
9
  ENV PYTHONDONTWRITEBYTECODE=1 \
@@ -28,7 +28,7 @@ RUN pip install --no-cache-dir -r requirements.txt
28
  COPY . .
29
 
30
  # Expose port for FastAPI
31
- EXPOSE 8000
32
 
33
  # Run the FastAPI app with Uvicorn
34
- CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8000"]
 
3
# ==============================================================

# Use lightweight official Python image
FROM python:3.10-slim

# Set environment variables
# (the remaining ENV lines and the `pip install -r requirements.txt`
# layer are elided in this diff hunk — this is a fragment, not the
# whole Dockerfile)
ENV PYTHONDONTWRITEBYTECODE=1 \

COPY . .

# Expose port for FastAPI — 7860 is the port Hugging Face Spaces
# expects a Dockerized app to listen on.
EXPOSE 7860

# Run the FastAPI app with Uvicorn
# NOTE(review): this commit edits app.py, but Uvicorn targets "main:app" —
# confirm a main.py exists, or the target should likely be "app:app".
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]
app.py CHANGED
@@ -1,5 +1,5 @@
1
  # ==============================================================
2
- # Tech Disciples AI Backend — Optimized Stable Release
3
  # ==============================================================
4
 
5
  import os
@@ -10,24 +10,27 @@ from fastapi.responses import JSONResponse
10
  from pydantic import BaseModel
11
  from transformers import pipeline
12
 
13
- # ==============================================================
14
  # Logging Setup
15
- # ==============================================================
16
- logging.basicConfig(level=logging.INFO, format="%(asctime)s [%(levelname)s] %(message)s")
 
 
 
17
  logger = logging.getLogger("Tech Disciples AI")
18
 
19
- # ==============================================================
20
  # FastAPI App Initialization
21
- # ==============================================================
22
  app = FastAPI(title="Tech Disciples AI")
23
 
24
  @app.get("/")
25
  async def root():
26
- return {"status": "Tech Disciples AI Backend is running and stable."}
27
 
28
- # ==============================================================
29
- # Authentication Configuration
30
- # ==============================================================
31
  PROJECT_API_KEY = os.getenv("PROJECT_API_KEY", "techdisciplesai404")
32
 
33
  def check_auth(authorization: str | None):
@@ -40,23 +43,24 @@ def check_auth(authorization: str | None):
40
  if token != PROJECT_API_KEY:
41
  raise HTTPException(status_code=403, detail="Invalid token")
42
 
43
- # ==============================================================
44
  # Global Exception Handler
45
- # ==============================================================
46
  @app.exception_handler(Exception)
47
  async def global_exception_handler(request: Request, exc: Exception):
48
  logger.error(f"Unhandled error: {exc}")
49
  return JSONResponse(status_code=500, content={"error": str(exc)})
50
 
51
- # ==============================================================
52
- # Request Models
53
- # ==============================================================
54
  class ChatRequest(BaseModel):
55
  query: str
 
56
 
57
- # ==============================================================
58
  # Hugging Face Configuration
59
- # ==============================================================
60
  HF_TOKEN = os.getenv("HUGGINGFACEHUB_API_TOKEN")
61
 
62
  if not HF_TOKEN:
@@ -64,13 +68,13 @@ if not HF_TOKEN:
64
  else:
65
  logger.info("✅ Hugging Face token detected.")
66
 
67
- # Device selection
68
  device = 0 if torch.cuda.is_available() else -1
69
  logger.info(f"🧠 Using device: {'GPU' if device == 0 else 'CPU'}")
70
 
71
- # ==============================================================
72
  # Model Pipeline Initialization
73
- # ==============================================================
74
  try:
75
  chat_pipe = pipeline(
76
  "text-generation",
@@ -83,32 +87,82 @@ except Exception as e:
83
  chat_pipe = None
84
  logger.error(f"❌ Failed to load model pipeline: {e}")
85
 
86
- # ==============================================================
87
- # Helper Functions
88
- # ==============================================================
89
- def run_conversational(pipe, prompt: str) -> str:
90
- """Executes text generation safely and returns formatted output."""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
91
  if not pipe:
92
  return "⚠️ Model pipeline not initialized."
93
 
 
 
 
 
 
 
 
 
 
94
  try:
95
- output = pipe(prompt, max_new_tokens=200, temperature=0.3, do_sample=True)
96
- if isinstance(output, list) and output:
97
- return output[0].get("generated_text", str(output))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
98
  return str(output)
 
99
  except Exception as e:
100
  logger.error(f"Conversational pipeline error: {e}")
101
- return f"⚠️ Model error: {e}"
102
 
103
- # ==============================================================
104
  # API Endpoints
105
- # ==============================================================
106
  @app.post("/ai-chat")
107
  async def ai_chat(req: ChatRequest, authorization: str | None = Header(None)):
108
- """Handles conversational AI chat requests."""
 
 
 
109
  check_auth(authorization)
110
- reply = run_conversational(chat_pipe, req.query)
111
- return {"reply": reply}
112
 
113
  # ==============================================================
114
  # END OF FILE
 
1
# ==============================================================
# Tech Disciples AI Backend — Character-Controlled Release
# ==============================================================

import os

# NOTE(review): `import logging` and the FastAPI imports (FastAPI,
# Request, Header, HTTPException, JSONResponse) live in a region
# elided from this diff hunk — verify against the full file.
from pydantic import BaseModel
from transformers import pipeline

# --------------------------------------------------------------
# Logging Setup
# --------------------------------------------------------------
# Process-wide config: INFO level, timestamped records.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s [%(levelname)s] %(message)s"
)
# Named logger used throughout this module.
logger = logging.getLogger("Tech Disciples AI")
21
 
22
# --------------------------------------------------------------
# FastAPI App Initialization
# --------------------------------------------------------------
app = FastAPI(title="Tech Disciples AI")

@app.get("/")
async def root():
    """Liveness probe: confirms the backend container is up and serving."""
    return {"status": "Tech Disciples AI Backend is running and stable."}
30
 
31
# --------------------------------------------------------------
# Authentication
# --------------------------------------------------------------
# Shared-secret bearer token checked by check_auth on POST endpoints.
# NOTE(review): the hardcoded fallback key is a security smell — in
# production, require PROJECT_API_KEY to be set in the environment.
PROJECT_API_KEY = os.getenv("PROJECT_API_KEY", "techdisciplesai404")
35
 
36
  def check_auth(authorization: str | None):
 
43
  if token != PROJECT_API_KEY:
44
  raise HTTPException(status_code=403, detail="Invalid token")
45
 
46
# --------------------------------------------------------------
# Global Exception Handler
# --------------------------------------------------------------
@app.exception_handler(Exception)
async def global_exception_handler(request: Request, exc: Exception):
    """Catch-all for unhandled exceptions: log with traceback, return 500 JSON.

    NOTE(review): echoing str(exc) back to clients can leak internals —
    consider a generic message for production deployments.
    """
    # Lazy %s formatting + exc_info so the full traceback is recorded,
    # not just the exception's string form.
    logger.error("Unhandled error: %s", exc, exc_info=exc)
    return JSONResponse(status_code=500, content={"error": str(exc)})
53
 
54
# --------------------------------------------------------------
# Request Model
# --------------------------------------------------------------
class ChatRequest(BaseModel):
    # Free-form user query, inserted verbatim into the model prompt.
    query: str
    # Optional persona tone key (e.g. "friendly", "pastoral").
    # NOTE(review): typed `str | None`, so a client can send null —
    # downstream code must tolerate a None tone.
    tone: str | None = "neutral"  # Optional tone parameter
60
 
61
# --------------------------------------------------------------
# Hugging Face Configuration
# --------------------------------------------------------------
# Token `transformers` uses to download gated/private models; may be
# absent (the code below only logs its presence).
HF_TOKEN = os.getenv("HUGGINGFACEHUB_API_TOKEN")
65
 
66
  if not HF_TOKEN:
 
68
  else:
69
  logger.info("✅ Hugging Face token detected.")
70
 
71
# Device setup: transformers' pipeline() takes 0 for the first CUDA
# device and -1 for CPU.
if torch.cuda.is_available():
    device = 0
else:
    device = -1
logger.info(f"🧠 Using device: {'GPU' if device == 0 else 'CPU'}")
74
 
75
+ # --------------------------------------------------------------
76
  # Model Pipeline Initialization
77
+ # --------------------------------------------------------------
78
  try:
79
  chat_pipe = pipeline(
80
  "text-generation",
 
87
  chat_pipe = None
88
  logger.error(f"❌ Failed to load model pipeline: {e}")
89
 
90
+ # --------------------------------------------------------------
91
+ # System / Character Template
92
+ # --------------------------------------------------------------
93
+ SYSTEM_TEMPLATE = """
94
+ You are Tech Disciples AI — an intelligent, respectful, and spiritually aware assistant
95
+ created to provide biblical insight, wisdom, and intelligent guidance.
96
+
97
+ Your core personality:
98
+ - Speak with humility and warmth.
99
+ - Use clear, simple, and sincere language.
100
+ - When asked about faith, respond with biblical understanding and gentleness.
101
+ - When asked about general or technical topics, respond with calm intelligence.
102
+ - Avoid arguments, controversy, or judgmental language.
103
+ - Always sound like a wise, grounded, caring mentor.
104
+ """
105
+
106
+ # --------------------------------------------------------------
107
+ # Helper Function
108
+ # --------------------------------------------------------------
109
+ def run_conversational(pipe, user_input: str, tone: str = "neutral") -> str:
110
+ """Executes text generation safely and returns formatted output with personality control."""
111
  if not pipe:
112
  return "⚠️ Model pipeline not initialized."
113
 
114
+ # Define tone modifiers
115
+ tone_instruction = {
116
+ "neutral": "Keep tone calm, balanced, and thoughtful.",
117
+ "friendly": "Use a warm, conversational tone with kindness and empathy.",
118
+ "motivational": "Speak with encouragement, confidence, and faith-driven energy.",
119
+ "scholarly": "Respond with logical, scripture-supported, and analytical reasoning.",
120
+ "pastoral": "Speak gently as a guide offering spiritual comfort and care.",
121
+ }.get(tone.lower(), "Keep tone calm and thoughtful.")
122
+
123
  try:
124
+ # Combine the system and tone template with the user input
125
+ prompt = (
126
+ f"{SYSTEM_TEMPLATE}\n"
127
+ f"{tone_instruction}\n\n"
128
+ f"User: {user_input}\n"
129
+ f"Tech Disciples AI:"
130
+ )
131
+
132
+ output = pipe(
133
+ prompt,
134
+ max_new_tokens=220,
135
+ temperature=0.4,
136
+ top_p=0.9,
137
+ repetition_penalty=1.1,
138
+ do_sample=True,
139
+ )
140
+
141
+ # Parse generated text
142
+ if isinstance(output, list) and len(output) > 0:
143
+ text = output[0].get("generated_text", str(output))
144
+ if "Tech Disciples AI:" in text:
145
+ text = text.split("Tech Disciples AI:")[-1].strip()
146
+ return text
147
+
148
  return str(output)
149
+
150
  except Exception as e:
151
  logger.error(f"Conversational pipeline error: {e}")
152
+ return f"⚠️ Model error: {str(e)}"
153
 
154
# --------------------------------------------------------------
# API Endpoints
# --------------------------------------------------------------
@app.post("/ai-chat")
async def ai_chat(req: ChatRequest, authorization: str | None = Header(None)):
    """
    Main conversational endpoint for Tech Disciples AI.
    Accepts 'query' and optional 'tone' parameters.

    Raises HTTPException via check_auth (403 for an invalid token).

    NOTE(review): req.tone may be None (nullable field) while
    run_conversational calls tone.lower() — verify a null tone does
    not produce a 500 via the global exception handler.
    """
    check_auth(authorization)
    reply = run_conversational(chat_pipe, req.query, req.tone)
    return {"reply": reply, "tone_used": req.tone}
166
 
167
  # ==============================================================
168
  # END OF FILE