Spaces:
Sleeping
Sleeping
Update main.py
Browse files
main.py
CHANGED
|
@@ -1,15 +1,16 @@
|
|
| 1 |
from flask import Flask, request, jsonify
|
| 2 |
from flask_cors import CORS
|
| 3 |
import json
|
|
|
|
|
|
|
| 4 |
|
| 5 |
from firestore_client import get_firestore_client
|
| 6 |
from openai_client import ask_gpt
|
| 7 |
from prompt_instructions import build_system_message
|
| 8 |
-
from role_access import get_allowed_collections
|
| 9 |
from data_fetcher import fetch_data_from_firestore
|
| 10 |
from data_planner import determine_data_requirements # π§ Gemini planner
|
| 11 |
from resolver import resolve_user_context
|
| 12 |
-
from typing import Optional, Dict, Any
|
| 13 |
from schema_utils import has_field, resolve_field
|
| 14 |
|
| 15 |
app = Flask(__name__)
|
|
@@ -17,6 +18,28 @@ CORS(app)
|
|
| 17 |
|
| 18 |
db = get_firestore_client()
|
| 19 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 20 |
# 🧠 Normalize Gemini plan into proper Firestore fetch format
|
| 21 |
def normalize_plan(plan: dict, token_map: Optional[Dict[str, Any]] = None) -> dict:
|
| 22 |
token_map = token_map or {}
|
|
@@ -24,29 +47,34 @@ def normalize_plan(plan: dict, token_map: Optional[Dict[str, Any]] = None) -> di
|
|
| 24 |
planned_cols = plan.get("collections", []) or []
|
| 25 |
|
| 26 |
def canonical_value(key, val):
|
|
|
|
| 27 |
if isinstance(val, str) and val in token_map:
|
| 28 |
-
val = token_map[val]
|
|
|
|
| 29 |
if key == "status" and val == "running":
|
| 30 |
return "active"
|
| 31 |
return val
|
| 32 |
|
| 33 |
collections_out = []
|
| 34 |
for col in planned_cols:
|
|
|
|
| 35 |
name = col["name"] if isinstance(col, dict) else col
|
| 36 |
col_filters = []
|
| 37 |
for k, v in filters.items():
|
| 38 |
canon_key = resolve_field(name, k)
|
|
|
|
| 39 |
if has_field(name, canon_key):
|
| 40 |
col_filters.append({
|
| 41 |
"field": canon_key,
|
| 42 |
"op": "==",
|
| 43 |
-
"value": canonical_value(k, v)
|
| 44 |
})
|
| 45 |
# else: skip invalid field for this collection
|
| 46 |
collections_out.append({"name": name, "filters": col_filters, "limit": 50})
|
| 47 |
|
| 48 |
return {"collections": collections_out}
|
| 49 |
|
|
|
|
| 50 |
|
| 51 |
@app.route('/chat', methods=['POST'])
|
| 52 |
def chat():
|
|
@@ -59,7 +87,7 @@ def chat():
|
|
| 59 |
if not role or not user_input or not company_code or not user_id:
|
| 60 |
return jsonify({"error": "Missing role, message, companyCode, or userId"}), 400
|
| 61 |
|
| 62 |
-
# 🔍 Resolve current user's email + participantId
|
| 63 |
ctx = resolve_user_context(user_id, company_code)
|
| 64 |
token_map = {
|
| 65 |
"{{participantId}}": ctx.get("participantId"),
|
|
@@ -76,22 +104,26 @@ def chat():
|
|
| 76 |
if "error" in planning_result:
|
| 77 |
return jsonify({"reply": f"β οΈ Planning error: {planning_result['error']}"})
|
| 78 |
|
| 79 |
-
# 🛠️ Normalize & replace tokens
|
| 80 |
normalized_plan = normalize_plan(planning_result, token_map)
|
| 81 |
|
| 82 |
# 📥 Fetch
|
| 83 |
firestore_data = fetch_data_from_firestore(normalized_plan)
|
| 84 |
|
| 85 |
-
# 🧩
|
| 86 |
system_msg = build_system_message(company_code)
|
|
|
|
|
|
|
|
|
|
| 87 |
data_msg = {
|
| 88 |
"role": "system",
|
| 89 |
"content": (
|
| 90 |
-
|
| 91 |
-
|
| 92 |
)
|
| 93 |
}
|
| 94 |
user_msg = {"role": "user", "content": user_input}
|
|
|
|
| 95 |
final_response = ask_gpt([system_msg, data_msg, user_msg])
|
| 96 |
return jsonify({"reply": final_response})
|
| 97 |
|
|
|
|
| 1 |
from flask import Flask, request, jsonify
|
| 2 |
from flask_cors import CORS
|
| 3 |
import json
|
| 4 |
+
from datetime import datetime
|
| 5 |
+
from typing import Optional, Dict, Any
|
| 6 |
|
| 7 |
from firestore_client import get_firestore_client
|
| 8 |
from openai_client import ask_gpt
|
| 9 |
from prompt_instructions import build_system_message
|
| 10 |
+
from role_access import get_allowed_collections # (currently unused but kept)
|
| 11 |
from data_fetcher import fetch_data_from_firestore
|
| 12 |
from data_planner import determine_data_requirements # π§ Gemini planner
|
| 13 |
from resolver import resolve_user_context
|
|
|
|
| 14 |
from schema_utils import has_field, resolve_field
|
| 15 |
|
| 16 |
app = Flask(__name__)
|
|
|
|
| 18 |
|
| 19 |
db = get_firestore_client()
|
| 20 |
|
| 21 |
+
# -- helpers --------------------------------------------------------------
|
| 22 |
+
|
| 23 |
+
def to_jsonable(obj):
    """Recursively convert Firestore types (e.g., DatetimeWithNanoseconds) to JSON-safe values.

    Datetime(-like) values are rendered with isoformat(); dicts, lists, and
    tuples are walked recursively (tuples come back as lists, which JSON
    encodes identically). Anything else is returned unchanged.
    """
    # Firestore timestamp types come through as DatetimeWithNanoseconds,
    # which subclasses datetime — convert to an ISO-8601 string.
    if isinstance(obj, datetime):
        return obj.isoformat()

    # Some Firestore SDKs expose Timestamp-like objects without subclassing
    # datetime. Fallback: duck-type on a callable isoformat().
    iso = getattr(obj, "isoformat", None)
    if callable(iso):
        try:
            return iso()
        except Exception:
            pass  # best-effort: fall through and return the object as-is

    if isinstance(obj, dict):
        return {k: to_jsonable(v) for k, v in obj.items()}
    # Recurse into tuples as well as lists: a datetime nested inside a tuple
    # previously escaped conversion and made json.dumps() raise TypeError.
    if isinstance(obj, (list, tuple)):
        return [to_jsonable(v) for v in obj]
    return obj
|
| 42 |
+
|
| 43 |
# 🧠 Normalize Gemini plan into proper Firestore fetch format
|
| 44 |
def normalize_plan(plan: dict, token_map: Optional[Dict[str, Any]] = None) -> dict:
|
| 45 |
token_map = token_map or {}
|
|
|
|
| 47 |
planned_cols = plan.get("collections", []) or []
|
| 48 |
|
| 49 |
def canonical_value(key, val):
|
| 50 |
+
# replace tokens like {{participantId}}
|
| 51 |
if isinstance(val, str) and val in token_map:
|
| 52 |
+
val = token_map[val]
|
| 53 |
+
# special-case status normalization
|
| 54 |
if key == "status" and val == "running":
|
| 55 |
return "active"
|
| 56 |
return val
|
| 57 |
|
| 58 |
collections_out = []
|
| 59 |
for col in planned_cols:
|
| 60 |
+
# Allow both strings ("participants") and objects ({"name":"participants","fields":[...]}).
|
| 61 |
name = col["name"] if isinstance(col, dict) else col
|
| 62 |
col_filters = []
|
| 63 |
for k, v in filters.items():
|
| 64 |
canon_key = resolve_field(name, k)
|
| 65 |
+
# only add filters that exist for this collection (schema-aware)
|
| 66 |
if has_field(name, canon_key):
|
| 67 |
col_filters.append({
|
| 68 |
"field": canon_key,
|
| 69 |
"op": "==",
|
| 70 |
+
"value": canonical_value(k, v),
|
| 71 |
})
|
| 72 |
# else: skip invalid field for this collection
|
| 73 |
collections_out.append({"name": name, "filters": col_filters, "limit": 50})
|
| 74 |
|
| 75 |
return {"collections": collections_out}
|
| 76 |
|
| 77 |
+
# -- route ---------------------------------------------------------------
|
| 78 |
|
| 79 |
@app.route('/chat', methods=['POST'])
|
| 80 |
def chat():
|
|
|
|
| 87 |
if not role or not user_input or not company_code or not user_id:
|
| 88 |
return jsonify({"error": "Missing role, message, companyCode, or userId"}), 400
|
| 89 |
|
| 90 |
+
# 🔍 Resolve current user's email + participantId (participants has no companyCode; resolve via applications)
|
| 91 |
ctx = resolve_user_context(user_id, company_code)
|
| 92 |
token_map = {
|
| 93 |
"{{participantId}}": ctx.get("participantId"),
|
|
|
|
| 104 |
if "error" in planning_result:
|
| 105 |
return jsonify({"reply": f"β οΈ Planning error: {planning_result['error']}"})
|
| 106 |
|
| 107 |
+
# 🛠️ Normalize & replace tokens (schema-aware)
|
| 108 |
normalized_plan = normalize_plan(planning_result, token_map)
|
| 109 |
|
| 110 |
# 📥 Fetch
|
| 111 |
firestore_data = fetch_data_from_firestore(normalized_plan)
|
| 112 |
|
| 113 |
+
# 🧩 Build messages — convert Firestore payload to JSON-safe types
|
| 114 |
system_msg = build_system_message(company_code)
|
| 115 |
+
safe_ctx = to_jsonable(ctx)
|
| 116 |
+
safe_data = to_jsonable(firestore_data)
|
| 117 |
+
|
| 118 |
data_msg = {
|
| 119 |
"role": "system",
|
| 120 |
"content": (
|
| 121 |
+
f"CurrentUserContext: {json.dumps(safe_ctx)}\n"
|
| 122 |
+
f"Here is the data from Firestore:\n{json.dumps(safe_data)}"
|
| 123 |
)
|
| 124 |
}
|
| 125 |
user_msg = {"role": "user", "content": user_input}
|
| 126 |
+
|
| 127 |
final_response = ask_gpt([system_msg, data_msg, user_msg])
|
| 128 |
return jsonify({"reply": final_response})
|
| 129 |
|