# api/app.py
from fastapi import FastAPI
from pydantic import BaseModel
from typing import List, Dict, Any
import uvicorn


app = FastAPI()


class Message(BaseModel):
    role: str
    content: str


class ChatRequest(BaseModel):
    conversation: List[Message]


class ChatResponse(BaseModel):
    reply: str
    meta: Dict[str, Any] = {}




def generate_reply(user_text: str, conversation: List[Dict[str, str]]) -> str:
    """
    Dummy reply generator: echoes the user's message back with a prefix.
    Replace this with your model inference code later.
    """
    return f"Echo: {user_text}"
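

# A minimal sketch of plugging in real inference (an illustration, not part of the
# dummy backend): here a Hugging Face `transformers` text-generation pipeline.
# The helper name and model are placeholders, and `transformers` is assumed to be installed.
def generate_reply_with_model(user_text: str, conversation: List[Dict[str, str]]) -> str:
    # Imported lazily so the dummy backend still runs without transformers installed.
    from transformers import pipeline

    # In a real service, build the pipeline once at startup rather than per request.
    generator = pipeline("text-generation", model="distilgpt2")
    out = generator(user_text, max_new_tokens=64)
    return out[0]["generated_text"]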




@app.post("/api/chat", response_model=ChatResponse)
async def chat_endpoint(req: ChatRequest):
    # The ChatUI frontend typically sends the full conversation history.
    # We take the last user message and return a reply to it.
    if not req.conversation:
        return {"reply": "Hello! Send a message."}

    # Pick the most recent message with role "user"; fall back to the final message.
    user_messages = [m for m in req.conversation if m.role == "user"]
    last = user_messages[-1] if user_messages else req.conversation[-1]
    user_text = last.content

    reply_text = generate_reply(user_text, [m.dict() for m in req.conversation])

    return {"reply": reply_text, "meta": {"source": "dummy-backend"}}
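

# Example request against this endpoint (assuming the server is running locally on
# port 7860, as in the __main__ block below):
#
#   curl -X POST http://localhost:7860/api/chat \
#     -H "Content-Type: application/json" \
#     -d '{"conversation": [{"role": "user", "content": "Hello"}]}'
#
# which should return {"reply": "Echo: Hello", "meta": {"source": "dummy-backend"}}.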




# When running locally for testing
if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=7860)
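

# To launch from the repo root (assuming this file lives at api/app.py, as the
# header comment says), either of the following works:
#
#   python api/app.py
#   uvicorn api.app:app --host 0.0.0.0 --port 7860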