# app.py
from fastapi import FastAPI
from pydantic import BaseModel
from fastapi.middleware.cors import CORSMiddleware
from bot_instance import gemini_bot, llama_bot  # singleton ErrorBot instances (Gemini- and Llama-backed)
from typing import List, Optional

app = FastAPI(title="ErrorBot API")
# ✅ Allow all origins (adjust in production)
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
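
# A tighter production setup would pin origins instead of the wildcard above;
# note that browsers reject allow_origins=["*"] combined with
# allow_credentials=True. Illustrative sketch only; the origin URL below is a
# placeholder, not from this repo:
#
# app.add_middleware(
#     CORSMiddleware,
#     allow_origins=["https://your-frontend.example.com"],  # hypothetical origin
#     allow_credentials=True,
#     allow_methods=["GET", "POST"],
#     allow_headers=["*"],
# )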
# ---------------- Request Models ----------------
class MessageItem(BaseModel):
    role: str  # "user" or "bot"
    content: str

class ChatRequest(BaseModel):
    message: str
    history: Optional[List[MessageItem]] = []  # optional conversation history
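
# Example request body for the chat endpoints (illustrative values only):
# {
#     "message": "What does this build error mean?",
#     "history": [
#         {"role": "user", "content": "Hi"},
#         {"role": "bot", "content": "Hello! Paste the error you are seeing."}
#     ]
# }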
# ---------------- Endpoints ----------------
@app.get("/")
def root():
    return {"status": "ok"}
# Legacy single-bot endpoint, kept for reference. It calls a `bot` singleton
# that is no longer imported; the gemini_bot / llama_bot endpoints below
# appear to have superseded it.
# @app.post("/chat")
# def chat(request: ChatRequest):
#     """
#     Main chat endpoint:
#     - Accepts a message and optional conversation history
#     - Uses ErrorBot with RAG + LLM
#     """
#     history_list = [
#         {"role": msg.role, "content": msg.content} for msg in request.history
#     ]
#     # Ask bot with history
#     answer = bot.ask(request.message, history=history_list)
#     return {"reply": answer}
@app.post("/gemini/chat")
def gemini_chat(request: ChatRequest):
history_list = [{"role": msg.role, "content": msg.content} for msg in request.history]
answer = gemini_bot.ask(request.message, history=history_list)
return {"reply": answer}
@app.post("/llama/chat")
def llama_chat(request: ChatRequest):
history_list = [{"role": msg.role, "content": msg.content} for msg in request.history]
answer = llama_bot.ask(request.message, history=history_list)
return {"reply": answer}