"""FastAPI backend for ErrorBot: chat endpoints backed by Gemini (Google) and Llama (Groq)."""

from typing import Any, List, Optional
import os

from dotenv import load_dotenv
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel

from bot_instance import gemini_bot, llama_bot
from util import ErrorBot

app = FastAPI(title="ErrorBot API")
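
# Allow cross-origin requests from any origin so a separately hosted frontend can call the API.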
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
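

# Request/response schemas for the chat endpoints.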
class MessageItem(BaseModel):
    role: str
    content: str


class ChatRequest(BaseModel):
    message: str
    history: Optional[List[MessageItem]] = []
    lastContext: Optional[List[Any]] = None


@app.get("/")
def root():
    return {"status": "ok"}
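

# Configuration: API keys are read from the environment (optionally loaded from a .env file).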
load_dotenv()

GOOGLE_API_KEY = os.environ.get("GOOGLE_API_KEY")
GROQ_API_KEY = os.getenv("GROQ_API_KEY")

EMBEDDING_MODEL = "BAAI/bge-base-en-v1.5"
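

# Chat endpoints: each request builds an ErrorBot for its provider and returns the reply plus
# the bot's last context, which the client can send back as lastContext on the next request.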
@app.post("/gemini/chat")
def gemini_chat(request: ChatRequest):
    """Answer a chat message with the Gemini-backed ErrorBot."""
    history_list = [{"role": msg.role, "content": msg.content} for msg in (request.history or [])]
    gemini_bot = ErrorBot(
        embedding_model_name=EMBEDDING_MODEL,
        llm_model_name="gemini-2.5-flash",
        google_api_key=GOOGLE_API_KEY,
        llm_provider="gemini",
        last_context=request.lastContext,
    )
    print("In App.py")
    print(request.lastContext)
    answer, last_context = gemini_bot.ask(request.message, history=history_list)
    print(answer)
    print(last_context)
    return {"reply": answer, "last_context": last_context}


@app.post("/llama/chat")
def llama_chat(request: ChatRequest):
    """Answer a chat message with the Groq-hosted Llama ErrorBot."""
    history_list = [{"role": msg.role, "content": msg.content} for msg in (request.history or [])]
    llama_bot = ErrorBot(
        embedding_model_name=EMBEDDING_MODEL,
        llm_model_name="llama-3.3-70b-versatile",
        groq_api_key=GROQ_API_KEY,
        llm_provider="groq",
        last_context=request.lastContext,
    )
    answer, last_context = llama_bot.ask(request.message, history=history_list)
    print(answer)
    print(last_context)
    return {"reply": answer, "last_context": last_context}