alaselababatunde committed on
Commit
9b1d484
·
1 Parent(s): 4a789b6
Files changed (2) hide show
  1. Dockerfile +7 -33
  2. main.py +24 -12
Dockerfile CHANGED
@@ -1,59 +1,33 @@
1
  # ==============================================================
2
- # Tech Disciples AI Backend — Dockerfile (Authenticated Build)
3
  # ==============================================================
4
 
5
- # Use lightweight official Python image
6
  FROM python:3.10-slim
7
 
8
- # Set environment variables
9
  ENV PYTHONDONTWRITEBYTECODE=1 \
10
  PYTHONUNBUFFERED=1 \
11
  APP_HOME=/app
12
 
13
- # Set work directory
14
  WORKDIR $APP_HOME
15
 
16
- # --------------------------------------------------------------
17
- # System dependencies
18
- # --------------------------------------------------------------
19
  RUN apt-get update && apt-get install -y \
20
  git \
21
  && rm -rf /var/lib/apt/lists/*
22
 
23
- # --------------------------------------------------------------
24
- # Copy requirement file first (for caching)
25
- # --------------------------------------------------------------
26
- COPY requirements.txt .
27
-
28
- # --------------------------------------------------------------
29
  # Install Python dependencies
30
- # --------------------------------------------------------------
31
  RUN pip install --no-cache-dir -r requirements.txt
32
 
33
- # --------------------------------------------------------------
34
- # Copy application files
35
- # --------------------------------------------------------------
36
  COPY . .
37
 
38
- # --------------------------------------------------------------
39
- # Set up Hugging Face token (must be provided at runtime)
40
- # --------------------------------------------------------------
41
- # This expects the token to be passed in during container run:
42
- # docker run -e HUGGINGFACEHUB_API_TOKEN=hf_xxx -p 7860:7860 techdisciplesai
43
  ENV HUGGINGFACEHUB_API_TOKEN=""
44
 
45
- # --------------------------------------------------------------
46
- # Authenticate to Hugging Face Hub (non-interactive)
47
- # --------------------------------------------------------------
48
- RUN python -c "from huggingface_hub import login; import os; token=os.getenv('HUGGINGFACEHUB_API_TOKEN'); \
49
- print('⚠️ No HF token set during build!' if not token else '✅ Hugging Face token configured.')"
50
-
51
- # --------------------------------------------------------------
52
  # Expose port for FastAPI
53
- # --------------------------------------------------------------
54
  EXPOSE 7860
55
 
56
- # --------------------------------------------------------------
57
- # Run the FastAPI app with Uvicorn
58
- # --------------------------------------------------------------
59
  CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]
 
1
  # ==============================================================
2
+ # Tech Disciples AI Backend — Dockerfile (for Hugging Face Spaces)
3
  # ==============================================================
4
 
 
5
  FROM python:3.10-slim
6
 
7
+ # Environment setup
8
  ENV PYTHONDONTWRITEBYTECODE=1 \
9
  PYTHONUNBUFFERED=1 \
10
  APP_HOME=/app
11
 
 
12
  WORKDIR $APP_HOME
13
 
14
+ # Install system dependencies
 
 
15
  RUN apt-get update && apt-get install -y \
16
  git \
17
  && rm -rf /var/lib/apt/lists/*
18
 
 
 
 
 
 
 
19
  # Install Python dependencies
20
+ COPY requirements.txt .
21
  RUN pip install --no-cache-dir -r requirements.txt
22
 
23
+ # Copy all source files
 
 
24
  COPY . .
25
 
26
+ # Define Hugging Face token env var (Spaces injects the secret automatically)
 
 
 
 
27
  ENV HUGGINGFACEHUB_API_TOKEN=""
28
 
 
 
 
 
 
 
 
29
  # Expose port for FastAPI
 
30
  EXPOSE 7860
31
 
32
+ # Launch the app
 
 
33
  CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]
main.py CHANGED
@@ -1,13 +1,15 @@
1
  # ===============================================
2
- # Tech Disciples AI Backend — Updated for LangChain ≥1.0
3
  # ===============================================
4
 
5
  from fastapi import FastAPI, HTTPException, Header
6
  from pydantic import BaseModel
7
  import torch
8
  import logging
 
 
9
 
10
- # LangChain imports (modern >=1.0)
11
  from langchain.llms.huggingface_pipeline import HuggingFacePipeline
12
  from langchain.chains import LLMChain
13
  from langchain.prompts.prompt import PromptTemplate
@@ -34,24 +36,36 @@ logger = logging.getLogger("TechDisciplesAI")
34
  # ===============================================
35
  app = FastAPI(title="Tech Disciples AI", version="3.0")
36
 
 
 
 
 
 
 
 
 
 
 
 
 
 
37
  # ===============================================
38
  # LOAD MODEL USING PIPELINE + LANGCHAIN
39
  # ===============================================
40
  try:
41
  logger.info(f"🚀 Loading model: {MODEL_NAME}")
42
 
43
-
44
  hf_pipeline = pipeline(
45
  "text-generation",
46
  model=MODEL_NAME,
47
  device=DEVICE,
48
- max_new_tokens=1024,
49
- temperature=0.7,
50
- do_sample=True,
 
 
51
  )
52
 
53
-
54
-
55
  llm = HuggingFacePipeline(pipeline=hf_pipeline)
56
  logger.info("✅ Model loaded successfully.")
57
 
@@ -80,7 +94,6 @@ User: {query}
80
  Tech Disciples AI (respond fully with examples, guidance, or Bible references):
81
  """
82
 
83
-
84
  prompt = PromptTemplate(
85
  template=prompt_template,
86
  input_variables=["conversation_history", "query"]
@@ -100,7 +113,7 @@ chain = LLMChain(
100
  # ===============================================
101
  class QueryInput(BaseModel):
102
  query: str
103
- session_id: str | None = "default" # optional: could be user/session-based
104
 
105
  # ===============================================
106
  # ROUTES
@@ -111,17 +124,16 @@ async def root():
111
 
112
  @app.post("/ai-chat")
113
  async def ai_chat(data: QueryInput, x_api_key: str = Header(None)):
114
- # --- Authentication ---
115
  if x_api_key != API_SECRET:
116
  raise HTTPException(status_code=403, detail="Forbidden: Invalid API key")
117
 
118
  if not llm:
119
  raise HTTPException(status_code=500, detail="Model not initialized")
120
 
121
- # --- Process Query ---
122
  try:
123
  response = chain.run(query=data.query.strip())
124
  return {"reply": response.strip()}
125
  except Exception as e:
126
  logger.error(f"⚠️ Model error: {e}")
127
  raise HTTPException(status_code=500, detail="Model failed to respond")
 
 
1
  # ===============================================
2
+ # Tech Disciples AI Backend — LangChain ≥1.0
3
  # ===============================================
4
 
5
  from fastapi import FastAPI, HTTPException, Header
6
  from pydantic import BaseModel
7
  import torch
8
  import logging
9
+ import os
10
+ from huggingface_hub import login
11
 
12
+ # LangChain imports (modern 1.0)
13
  from langchain.llms.huggingface_pipeline import HuggingFacePipeline
14
  from langchain.chains import LLMChain
15
  from langchain.prompts.prompt import PromptTemplate
 
36
  # ===============================================
37
  app = FastAPI(title="Tech Disciples AI", version="3.0")
38
 
39
# ===============================================
# LOGIN TO HUGGINGFACE HUB
# ===============================================
# Authenticate against the Hub when a token is present. A missing or
# invalid token is only logged, never fatal, so the app can still start.
hf_token = os.getenv("HUGGINGFACEHUB_API_TOKEN")
if not hf_token:
    logger.warning("⚠️ No HUGGINGFACEHUB_API_TOKEN found in environment.")
else:
    try:
        login(token=hf_token)
    except Exception as e:
        logger.error(f"⚠️ Hugging Face login failed: {e}")
    else:
        logger.info("✅ Logged into Hugging Face Hub successfully.")
52
  # ===============================================
53
  # LOAD MODEL USING PIPELINE + LANGCHAIN
54
  # ===============================================
55
  try:
56
  logger.info(f"🚀 Loading model: {MODEL_NAME}")
57
 
 
58
  hf_pipeline = pipeline(
59
  "text-generation",
60
  model=MODEL_NAME,
61
  device=DEVICE,
62
+ max_new_tokens=1024,
63
+ temperature=0.4,
64
+ do_sample=True,
65
+ top_p=0.9,
66
+ repetition_penalty=1.2,
67
  )
68
 
 
 
69
  llm = HuggingFacePipeline(pipeline=hf_pipeline)
70
  logger.info("✅ Model loaded successfully.")
71
 
 
94
  Tech Disciples AI (respond fully with examples, guidance, or Bible references):
95
  """
96
 
 
97
  prompt = PromptTemplate(
98
  template=prompt_template,
99
  input_variables=["conversation_history", "query"]
 
113
  # ===============================================
114
class QueryInput(BaseModel):
    """Request payload for the /ai-chat endpoint."""
    # The user's chat message; stripped before being passed to the chain.
    query: str
    # Optional per-user/session identifier — not visibly consumed in this
    # chunk (chain.run only receives `query`); TODO confirm downstream use.
    session_id: str | None = "default"
117
 
118
  # ===============================================
119
  # ROUTES
 
124
 
125
@app.post("/ai-chat")
async def ai_chat(data: QueryInput, x_api_key: str = Header(None)):
    """Authenticated chat endpoint.

    Validates the ``X-API-Key`` header, runs the (stripped) user query
    through the LangChain ``chain``, and returns the model's reply.

    Raises:
        HTTPException 403: wrong or missing API key.
        HTTPException 500: model not initialized, or inference failed.
    """
    # --- Authentication ---
    if x_api_key != API_SECRET:
        raise HTTPException(status_code=403, detail="Forbidden: Invalid API key")

    # llm is left falsy when startup model loading failed.
    if not llm:
        raise HTTPException(status_code=500, detail="Model not initialized")

    # --- Process query ---
    try:
        response = chain.run(query=data.query.strip())
        return {"reply": response.strip()}
    except Exception as e:
        # logger.exception records the full traceback, not just the message;
        # `from e` preserves the cause chain for upstream error handlers.
        logger.exception("⚠️ Model error: %s", e)
        raise HTTPException(status_code=500, detail="Model failed to respond") from e
139
+