# api/app.py
from fastapi import FastAPI, Request
from pydantic import BaseModel
from typing import List, Dict, Any
import uvicorn

app = FastAPI()


class Message(BaseModel):
    """A single chat turn: who spoke and what they said."""
    role: str       # e.g. "user", "assistant", "system" — not validated here
    content: str    # the message text


class ChatRequest(BaseModel):
    """Request body: the full conversation history, oldest message first."""
    conversation: List[Message]


class ChatResponse(BaseModel):
    """Response body: the generated reply plus optional metadata."""
    reply: str
    meta: Dict[str, Any] = {}


def generate_reply(user_text: str, conversation: List[Dict[str, str]]) -> str:
    """
    Dummy reply generator — echoes back the user's message with a prefix.

    Replace this with your model inference code later.

    Args:
        user_text: The text of the message to respond to.
        conversation: Full history as plain dicts (currently unused by the
            dummy implementation, but kept in the signature for the real model).

    Returns:
        The reply string.
    """
    return f"Echo: {user_text}"


@app.post("/api/chat", response_model=ChatResponse)
async def chat_endpoint(req: ChatRequest):
    """Return a reply to the most recent *user* message in the conversation.

    The ChatUI frontend sends the whole conversation history; we locate the
    last user turn and generate a reply to it.
    """
    if not req.conversation:
        return {"reply": "Hello! Send a message.", "meta": {}}

    # BUG FIX: the original expression
    #   `last.content if last.role == "user" else last.content`
    # had identical branches, so the role check did nothing and a trailing
    # assistant/system message would be echoed instead of the user's text.
    # Scan backwards for the last user turn; fall back to the final message
    # if the history contains no user turn at all.
    user_text = next(
        (m.content for m in reversed(req.conversation) if m.role == "user"),
        req.conversation[-1].content,
    )
    reply_text = generate_reply(user_text, [m.dict() for m in req.conversation])
    return {"reply": reply_text, "meta": {"source": "dummy-backend"}}


if __name__ == "__main__":
    # Local development entry point; in production run via an ASGI server
    # (e.g. `uvicorn api.app:app`).
    uvicorn.run(app, host="0.0.0.0", port=7860)