Commit 4ea2add · Parent(s): 8507fe1
main.py CHANGED

@@ -1,5 +1,5 @@
 # ==============================================================
-# Tech Disciples AI Backend — Free-Response Edition
+# Tech Disciples AI Backend — Free-Response Edition (Flan-T5 Fixed)
 # ==============================================================
 
 import os
@@ -8,7 +8,7 @@ import torch
 from fastapi import FastAPI, Header, HTTPException
 from fastapi.responses import JSONResponse
 from pydantic import BaseModel
-from transformers import AutoTokenizer,
+from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
 
 # --------------------------------------------------------------
 # Logging Setup
@@ -57,7 +57,7 @@ class ChatRequest(BaseModel):
     query: str
 
 # --------------------------------------------------------------
-# System Prompt
+# System Prompt
 # --------------------------------------------------------------
 SYSTEM_TEMPLATE = """You are Tech Disciples AI — a spiritually aware, intelligent, and kind conversational assistant.
 You respond clearly and truthfully, offering thoughtful, biblically grounded, and intelligent answers.
@@ -77,7 +77,7 @@ chat_pipe = None
 try:
     logger.info(f"🚀 Loading model: {MODEL_NAME}")
     tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, token=HF_TOKEN if HF_TOKEN else None)
-    model =
+    model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_NAME, token=HF_TOKEN if HF_TOKEN else None)
 
     chat_pipe = pipeline(
         "text2text-generation",
@@ -103,7 +103,7 @@ def generate_reply(pipe, user_input: str) -> str:
     result = pipe(
         prompt,
         do_sample=True,
-        temperature=0.3,
+        temperature=0.3,
         top_p=0.9,
         repetition_penalty=1.05
     )
@@ -133,4 +133,3 @@ async def ai_chat(req: ChatRequest, authorization: str | None = Header(None)):
 # ==============================================================
 # END OF FILE
 # ==============================================================
-
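The substance of this commit sits in the import and model-loading hunks: the truncated transformers import and the unfinished `model =` assignment are completed so the Space can build a working seq2seq pipeline. As a rough, self-contained sketch of that loading path (the default checkpoint name and the device selection below are assumptions; only the import line, the two from_pretrained calls, and the "text2text-generation" task string appear in the diff, and "google/flan-t5-base" is just a stand-in for the Space's actual MODEL_NAME):

import os
import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline

MODEL_NAME = os.getenv("MODEL_NAME", "google/flan-t5-base")  # assumed default
HF_TOKEN = os.getenv("HF_TOKEN")

# These two calls mirror the + lines in the diff above.
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, token=HF_TOKEN if HF_TOKEN else None)
model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_NAME, token=HF_TOKEN if HF_TOKEN else None)

# The diff shows pipeline("text2text-generation", ...); the model/tokenizer
# wiring and the device argument here are assumed, not visible in the hunks.
chat_pipe = pipeline(
    "text2text-generation",
    model=model,
    tokenizer=tokenizer,
    device=0 if torch.cuda.is_available() else -1,
)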
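The generation hunk keeps conservative sampling settings (do_sample=True, temperature=0.3, top_p=0.9, repetition_penalty=1.05). A hedged sketch of a generate_reply built around those values follows, assuming SYSTEM_TEMPLATE from the file is in scope; the prompt layout and max_new_tokens are guesses, since the diff only shows the pipe(...) call:

def generate_reply(pipe, user_input: str) -> str:
    # Prompt layout is an assumption; only the pipe(...) call appears in the diff.
    prompt = f"{SYSTEM_TEMPLATE}\n\nUser: {user_input}\nAssistant:"
    result = pipe(
        prompt,
        do_sample=True,
        temperature=0.3,        # values taken from the hunk above
        top_p=0.9,
        repetition_penalty=1.05,
        max_new_tokens=256,     # assumed; not shown in the diff
    )
    # text2text-generation pipelines return a list of {"generated_text": ...} dicts.
    return result[0]["generated_text"].strip()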
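The final hunk header exposes the endpoint signature, async def ai_chat(req: ChatRequest, authorization: str | None = Header(None)). How it is wired up is not part of this diff, but a minimal sketch under assumed conventions (a /chat POST route and a bearer-token check, neither of which is shown) could look like this, reusing chat_pipe and generate_reply from the sketches above:

from fastapi import FastAPI, Header, HTTPException
from fastapi.responses import JSONResponse
from pydantic import BaseModel

app = FastAPI()
API_KEY = "change-me"  # placeholder; real key handling is not part of this diff

class ChatRequest(BaseModel):
    query: str  # matches the field shown at line 57 of the file

@app.post("/chat")  # route path is an assumption, not shown in the hunks
async def ai_chat(req: ChatRequest, authorization: str | None = Header(None)):
    # Bearer-token check is an assumed pattern; the diff only shows the signature.
    if authorization != f"Bearer {API_KEY}":
        raise HTTPException(status_code=401, detail="Unauthorized")
    reply = generate_reply(chat_pipe, req.query)  # pipeline and helper from the sketches above
    return JSONResponse({"reply": reply})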