# schat/engine-ddo — main.py
# (uploaded by VeuReu, "Upload 12 files", commit ba54b37 verified)
import asyncio
import contextlib
import os
import tempfile
from typing import List

from fastapi import FastAPI, UploadFile, File, Depends, HTTPException, Form
from fastapi.middleware.cors import CORSMiddleware
from sqlalchemy.orm import Session

from db import Base, engine, get_db
from models import Product, CustomerProfile
from schemas import (
    ProductOut, CustomerOut, CustomerUpdate,
    ChatRequest, ChatResponse, ConversationOut
)
from services import extract_and_upsert_products_from_llm, ensure_default_customers, get_or_create_conversation, add_message, get_history
# FastAPI application; openapi_url fixes the schema path consumed by the UI Space.
app = FastAPI(title="engine-ddo", openapi_url="/openapi.json")
# CORS for Streamlit UI Space
# NOTE(review): wildcard origins together with allow_credentials=True is very
# permissive — fine for a demo Space, but lock down origins before production.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
# Initialize DB
# Creates all ORM tables registered on Base at import time (no migration tool).
Base.metadata.create_all(bind=engine)
@app.get("/health")
def health():
    """Liveness probe used by the hosting platform; always reports OK."""
    payload = {"status": "ok"}
    return payload
# -------- PRODUCTS --------
@app.post("/products/ingest", response_model=List[ProductOut])
async def ingest_products(public_offering: UploadFile = File(...), private_notes: UploadFile = File(...), db: Session = Depends(get_db)):
    """Ingest two uploaded PDFs (public offering + private notes) via the LLM extractor.

    The uploads are spooled to temporary files because the extraction service
    takes filesystem paths. Both temp files are always removed, even when
    writing the second file or running the extractor fails (the original code
    leaked the first temp file if the second write raised).

    Returns all products currently in the database, ordered by name.
    """
    public_path = None
    notes_path = None
    try:
        with tempfile.NamedTemporaryFile(delete=False, suffix=".pdf") as f1:
            f1.write(await public_offering.read())
            public_path = f1.name
        with tempfile.NamedTemporaryFile(delete=False, suffix=".pdf") as f2:
            f2.write(await private_notes.read())
            notes_path = f2.name
        # Call the service to process the PDFs with an LLM and upsert products.
        extract_and_upsert_products_from_llm(db, public_path, notes_path)
    finally:
        # Best-effort cleanup of whichever temp files were actually created.
        for path in (public_path, notes_path):
            if path:
                with contextlib.suppress(OSError):
                    os.remove(path)
    # Return all products from the database
    rows = db.query(Product).order_by(Product.name.asc()).all()
    return rows
@app.get("/products/list", response_model=List[ProductOut])
def list_products(db: Session = Depends(get_db)):
    """Return every stored product, sorted alphabetically by name."""
    query = db.query(Product).order_by(Product.name.asc())
    return query.all()
# -------- CUSTOMERS --------
@app.get("/customers/list", response_model=List[CustomerOut])
def list_customers(db: Session = Depends(get_db)):
    """Seed the default customer profiles if absent, then list all of them by name."""
    ensure_default_customers(db)
    return db.query(CustomerProfile).order_by(CustomerProfile.name.asc()).all()
@app.post("/customers/update", response_model=CustomerOut)
def update_customer(payload: CustomerUpdate, db: Session = Depends(get_db)):
    """Create-or-update a customer profile keyed by name.

    Only the fields the caller actually supplied (non-None) are written; the
    rest of the row is left untouched.
    """
    profile = db.query(CustomerProfile).filter_by(name=payload.name).first()
    if profile is None:
        # Unknown name: start a fresh profile row.
        profile = CustomerProfile(name=payload.name)
        db.add(profile)
    # Copy over exactly the optional fields that were provided.
    for field in ("attributes", "wcltv", "n"):
        value = getattr(payload, field)
        if value is not None:
            setattr(profile, field, value)
    db.commit()
    db.refresh(profile)
    return profile
# -------- INTERACTIONS (chat) --------
# Configuration for the optional external LLM backend (env-driven).
OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY")
MODEL = os.environ.get("LLM_MODEL", "gpt-4o-mini")


async def llm_reply(system_prompt: str, history: list, user_text: str) -> str:
    """Return a reply from an external LLM if OPENAI_API_KEY set, else a rule-based stub.

    Args:
        system_prompt: System message prepended to the conversation.
        history: Prior turns as OpenAI-style {"role", "content"} dicts.
        user_text: The customer's latest message.

    Returns:
        The model's reply, an error-fallback sentence if the API call fails,
        or a fixed canned reply when no API key is configured.
    """
    if OPENAI_API_KEY:
        try:
            from openai import OpenAI
            client = OpenAI(api_key=OPENAI_API_KEY)
            messages = [{"role": "system", "content": system_prompt}] + history + [{"role": "user", "content": user_text}]
            # The OpenAI SDK call is synchronous/blocking; run it in a worker
            # thread so the event loop keeps serving other requests meanwhile.
            resp = await asyncio.to_thread(
                client.chat.completions.create, model=MODEL, messages=messages
            )
            # message.content can be None (e.g. tool-call responses) — guard
            # before stripping instead of letting AttributeError hit the
            # error-fallback path.
            content = resp.choices[0].message.content
            return (content or "").strip()
        except Exception as e:
            # Deliberate best-effort: chat must degrade gracefully, not 500.
            return f"[LLM error fallback] I couldn't reach the model ({e}). Let's continue anyway."
    # Fallback deterministic reply for demo
    return "Thanks for the details! Could you share your main need, budget, and timeline? I can match a product for you."
@app.post("/interactions/chat", response_model=ChatResponse)
async def chat(req: ChatRequest, db: Session = Depends(get_db)):
    """Store the customer's message, obtain an agent reply from the LLM, store and return it."""
    convo = get_or_create_conversation(db, req.profile_name or "random")
    add_message(db, convo.id, sender="customer", text=req.user_text)
    # Translate persisted turns into OpenAI-style chat messages for the LLM.
    hist = [
        {
            "role": "user" if turn["sender"] == "customer" else "assistant",
            "content": turn["text"],
        }
        for turn in get_history(db, convo.id)
    ]
    system_prompt = (
        "You are a helpful sales assistant. Keep answers short, ask clarifying questions, and reference products generically."
    )
    reply = await llm_reply(system_prompt, hist, req.user_text)
    add_message(db, convo.id, sender="agent", text=reply)
    return {"reply": reply, "conversation_id": convo.id}
@app.get("/interactions/history", response_model=ConversationOut)
async def history(profile_name: str, db: Session = Depends(get_db)):
    """Return the full message history for the named profile's conversation (created if new)."""
    convo = get_or_create_conversation(db, profile_name)
    return {
        "id": convo.id,
        "profile_name": profile_name,
        "history": get_history(db, convo.id),
    }