Spaces:
Sleeping
Sleeping
File size: 6,657 Bytes
3ae437a b7d05f4 3ae437a b7d05f4 e03f122 b7d05f4 a67fd6f 8f057f3 3ae437a b7d05f4 3ae437a 88128e6 b353ea6 88128e6 b7d05f4 3ae437a b7d05f4 a67fd6f 88128e6 3ae437a 21cb694 3ae437a b7d05f4 3ae437a b7d05f4 3ae437a 21cb694 3ae437a b353ea6 88128e6 b7d05f4 88128e6 8806dd4 b7d05f4 46395ea 3ae437a b7d05f4 8f057f3 8806dd4 b7d05f4 75aa43a b7d05f4 13d2b5e 2f34bac b7d05f4 2f34bac b7d05f4 3ae437a b7d05f4 2f34bac 88128e6 b7d05f4 88128e6 b7d05f4 e03f122 b7d05f4 e03f122 b7d05f4 e03f122 b7d05f4 b7b4178 3ae437a b7d05f4 3ae437a b7d05f4 b7b4178 b7d05f4 3ae437a b7d05f4 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 | import os
import uuid
import logging
import markdown
from typing import List, Dict, Optional
from fastapi import FastAPI, Request, Response, HTTPException
from fastapi.responses import HTMLResponse, JSONResponse
from fastapi.staticfiles import StaticFiles
from fastapi.templating import Jinja2Templates
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
from dotenv import load_dotenv
from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate
from langchain_community.tools import DuckDuckGoSearchRun
# =====================================================
# CONFIGURATION
# =====================================================
# Load environment variables from a local .env file (no-op if absent).
load_dotenv()

# OpenRouter credentials and the model served through it.
OPENROUTER_API_KEY = os.getenv("THEO_OPENROUTER_MODEL_KEY")
MODEL_NAME = "openai/gpt-oss-120b:free"

# Fail loudly in the logs (but keep the app importable) when the key is missing.
if not OPENROUTER_API_KEY:
    logging.error("THEO_OPENROUTER_MODEL_KEY not found in environment")
# =====================================================
# LLM & TOOLS
# =====================================================
# Headers identify this app to OpenRouter (used by their dashboard/rankings).
_openrouter_headers = {
    "HTTP-Referer": "https://techdisciples1-theo.hf.space/",
    "X-Title": "Theo AI Support",
}

# Chat model accessed via OpenRouter's OpenAI-compatible API.
llm = ChatOpenAI(
    model=MODEL_NAME,
    openai_api_key=OPENROUTER_API_KEY,
    openai_api_base="https://openrouter.ai/api/v1",
    default_headers=_openrouter_headers,
)

# Web-search tool, used as a fallback when the model admits ignorance.
search = DuckDuckGoSearchRun()
# =====================================================
# SYSTEM PROMPT
# =====================================================
# Persona and formatting rules injected into every request. The template
# carries two variables: {reviews} (optional context) and {question}.
SYSTEM_PROMPT = """You are Theo — a warm, friendly, and deeply insightful Christian companion created by TechDisciples CLCC.
TONE & STYLE:
1. CHATTY & WARM: Speak like a loving friend or a caring elder. Use a conversational, "chatty" tone that feels personal and welcoming. Use phrases like "Shalom, Beloved" or "My dear friend".
2. PARAGRAPHS: NEVER send a single wall of text. Break your thoughts into 2-4 short, readable paragraphs.
3. NIGERIAN HEARTBEAT: Use simple, relatable English with a gentle Nigerian inflection.
CORE GUIDELINES:
1. BIBLICAL FOCUS: Root your wisdom and encouragement in the Holy Bible. Use scripture to offer hope.
2. FORMATTING: Use Markdown (bolding for emphasis, short lists if needed).
3. UNKNOWN TOPICS: If you lack specific knowledge, search for facts, then wrap that information in your warm Christian perspective.
Identity: Theo
Creators: TechDisciples CLCC
Logo: [CLCC logo](/static/assets/logo.png)
Context provided for current query:
{reviews}
User question: {question}
"""

# Compiled once at import time; reused by every call to get_ai_response().
prompt_template = ChatPromptTemplate.from_template(SYSTEM_PROMPT)
# =====================================================
# FASTAPI APP SETUP
# =====================================================
app = FastAPI(title="Theo AI")

# Fully permissive CORS: the chat UI may be embedded on a different origin.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Static assets (logo, CSS, JS) and the Jinja2 page templates.
app.mount("/static", StaticFiles(directory="static"), name="static")
templates = Jinja2Templates(directory="templates")

# In-memory chat history keyed by the per-browser "device_id" cookie.
# NOTE(review): entries are never evicted, so memory grows with each new
# session; fine for a demo Space, not for long-running production use.
chat_sessions: Dict[str, List[Dict[str, str]]] = {}
# =====================================================
# LOGIC
# =====================================================
class ChatRequest(BaseModel):
    """Request body for POST /ask."""

    # The user's message; the handler strips surrounding whitespace.
    question: str
def get_ai_response(question: str) -> str:
    """Generate Theo's answer for ``question``.

    Runs the main prompt through the LLM first. If the reply looks like an
    "I don't know" answer, performs a DuckDuckGo search and re-asks the model
    with the search results as context.

    Returns the response text, or a friendly apology string if any step
    raises — errors are logged with a traceback, never propagated to callers.
    """
    try:
        # No retrieval context in this simple flow, but the template still
        # expects a {reviews} variable, so supply a placeholder.
        reviews = "No specific context provided."
        chain = prompt_template | llm
        response = chain.invoke({"reviews": reviews, "question": question})
        content = response.content

        # Heuristic fallback: if the model admits ignorance, search the web.
        fallback_phrases = (
            "i don't know",
            "i am not sure",
            "i couldn't find",
            "sorry, i can't",
            "i do not have information",
        )
        # Lowercase once, not once per phrase inside the any() generator.
        lowered = content.lower()
        if any(phrase in lowered for phrase in fallback_phrases):
            # Lazy %-style args avoid string formatting when INFO is disabled.
            logging.info("Triggering web search for: %s", question)
            search_results = search.run(question)
            search_prompt = ChatPromptTemplate.from_template(
                "You are Theo (Christian companion). The user asked: {question}\n"
                "I found this information on the web: {search_results}\n"
                "Provide a warm, Christian response using this information, following Theo's guidelines."
            )
            search_chain = search_prompt | llm
            response = search_chain.invoke({"question": question, "search_results": search_results})
            content = response.content
        return content
    except Exception:
        # logging.exception records the full traceback; the original
        # logging.error(f"...{e}") discarded it.
        logging.exception("AI Response failed")
        return "I apologize, beloved, but I am having trouble connecting to the heavenly signals (our servers) right now. Please try again in a moment!"
@app.get("/", response_class=HTMLResponse)
async def index(request: Request):
    """Serve the chat page, starting a brand-new session on every page load."""
    # A fresh UUID per refresh intentionally discards any previous history.
    device_id = str(uuid.uuid4())
    chat_sessions.setdefault(device_id, [])

    page = templates.TemplateResponse(
        "index.html",
        {"request": request, "chat_history": chat_sessions[device_id]},
    )
    # Session-scoped cookie (no max_age): dropped when the browser closes.
    page.set_cookie(key="device_id", value=device_id, httponly=True)
    return page
@app.post("/ask")
async def ask(request: Request, data: ChatRequest):
    """Answer a chat question and append the exchange to the session history."""
    # Recover the session id from the cookie, minting a new one if absent.
    device_id = request.cookies.get("device_id") or str(uuid.uuid4())
    chat_sessions.setdefault(device_id, [])

    question = data.question.strip()
    if not question:
        return JSONResponse({"error": "No question provided"}, status_code=400)

    raw_response = get_ai_response(question)
    # Render the model's Markdown reply to HTML for the chat UI.
    # NOTE(review): markdown.markdown does NOT sanitize embedded raw HTML —
    # confirm the model output is trusted or add an HTML sanitizer.
    formatted_response = markdown.markdown(raw_response, extensions=['extra', 'tables'])

    chat_sessions[device_id].append({"user": question, "bot": formatted_response})

    reply = JSONResponse({"response": formatted_response})
    reply.set_cookie(key="device_id", value=device_id)
    return reply
@app.post("/clear")
async def clear_chat(request: Request):
    """Empty the caller's chat history, identified by the device_id cookie."""
    device_id = request.cookies.get("device_id")
    if device_id and device_id in chat_sessions:
        chat_sessions[device_id] = []
    response = JSONResponse({"status": "cleared"})
    # Bug fix: the original called set_cookie unconditionally, so when no
    # cookie existed it sent value=None, which the cookie layer stringifies
    # to the literal "None". Only refresh the cookie when we actually have one.
    if device_id:
        response.set_cookie(key="device_id", value=device_id)
    return response
if __name__ == "__main__":
    # Local / Hugging Face Spaces entry point: bind all interfaces, port 7860.
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=7860)