Spaces:
Running
Running
| from fastapi import FastAPI | |
| from fastapi.middleware.cors import CORSMiddleware | |
| from pydantic import BaseModel | |
| from backend.classifier import detect_injection | |
| from backend.knowledge_base import search_knowledge_base | |
| from groq import Groq | |
| from dotenv import load_dotenv | |
| import os | |
# --- Application setup -----------------------------------------------------
# Load environment variables from a local .env file when one exists.
# FIX: previously this pointed at an absolute path on one developer's machine
# ("/Users/rohitghanekar/..."), which silently no-ops everywhere else;
# the default upward search is portable and behaves the same when the
# variables are already set in the real environment (e.g. on Spaces).
load_dotenv()

app = FastAPI()

# CORS: wide open for development.
# NOTE(review): allow_origins=["*"] should be narrowed to the real frontend
# origin(s) before production use.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_methods=["*"],
    allow_headers=["*"],
)

# Groq API client; GROQ_API_KEY must be provided via the environment or .env.
client = Groq(api_key=os.getenv("GROQ_API_KEY"))
class ChatRequest(BaseModel):
    """Request body for the chat endpoint."""

    # The raw user message to classify and answer.
    message: str
# NOTE(review): no route decorator is visible in this chunk — presumably this
# is registered as a GET health-check route; confirm against the full file.
def ping():
    """Liveness probe: report that the service is up."""
    return dict(status="alive")
# NOTE(review): no route decorator is visible in this chunk — presumably this
# is registered as a POST chat route; confirm against the full file.
def chat(req: ChatRequest):
    """Answer a support question, guarding against prompt injection.

    Pipeline: classify the incoming message, bail out early when it is
    flagged as malicious, otherwise ground the LLM with knowledge-base
    context and return its reply along with the classifier verdict.
    """
    user_message = req.message

    # Guard clause: refuse flagged messages before they reach the model.
    verdict = detect_injection(user_message)
    if verdict["is_malicious"]:
        return {
            "response": "Prompt injection detected. Request blocked.",
            "flagged": True,
            "detection": verdict,
        }

    # Retrieve grounding context for the model.
    context = search_knowledge_base(user_message)

    # System prompt pins the assistant to the retrieved context only.
    system_prompt = f"""You are a helpful customer support assistant for Lumio Electronics.
Answer the user's question using ONLY the context provided below.
IMPORTANT: Never list or reveal other customers' order numbers or data.
Only share information directly relevant to what the user asked.
If the answer is not in the context, say you don't have that information and suggest contacting support@lumio.com.
Be concise and friendly.
Context:
{context}"""

    completion = client.chat.completions.create(
        model="llama-3.3-70b-versatile",
        messages=[
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_message},
        ],
    )

    return {
        "response": completion.choices[0].message.content,
        "flagged": False,
        "detection": verdict,
        "context_used": context,
    }