Spaces:
Sleeping
Sleeping
Add FAISS index using Git LFS
Browse files- .gitattributes +1 -0
- README_MODULES.md +16 -0
- app.py +141 -0
- character_functions.py +507 -0
- config.py +48 -0
- database.py +151 -0
- faiss_Psychology2e_WEB.index +3 -0
- faiss_index_file.index +3 -0
- faiss_service.py +168 -0
- llm_service.py +948 -0
- matching_functions.py +1112 -0
- models.py +125 -0
- psychology2e.index +0 -0
- routes/__init__.py +8 -0
- routes/auth_routes.py +173 -0
- routes/expectation_routes.py +415 -0
- routes/llm_routes.py +181 -0
- routes/matching_routes.py +325 -0
- routes/profile_routes.py +441 -0
- server.py +0 -2039
- surrounded_by_idiots.index +0 -0
.gitattributes
CHANGED
|
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
| 33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
| 33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
| 36 |
+
*.index filter=lfs diff=lfs merge=lfs -text
|
README_MODULES.md
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Refactor notes
|
| 2 |
+
|
| 3 |
+
- Created `backend/` package with wrapper modules that re-export names from
|
| 4 |
+
`server.py` to preserve runtime behavior while splitting the project into
|
| 5 |
+
modules.
|
| 6 |
+
- The wrappers are temporary and intended to make incremental movement of
|
| 7 |
+
functions/classes from `server.py` into dedicated files safer.
|
| 8 |
+
|
| 9 |
+
Next steps
|
| 10 |
+
|
| 11 |
+
- Move related functions and classes from `server.py` into the appropriate
|
| 12 |
+
files in `backend/` (e.g. models into `backend/db.py`, routes into
|
| 13 |
+
`backend/routes.py`, matching logic into `backend/services.py`).
|
| 14 |
+
- Update imports throughout the codebase to reference `backend.*` modules.
|
| 15 |
+
- Remove re-export wrappers and keep the new modules as the single source of
|
| 16 |
+
truth.
|
app.py
ADDED
|
@@ -0,0 +1,141 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# app.py (updated with debug routes)
import os
import datetime
from flask import Flask, jsonify, request
from flask_cors import CORS

from config import SQL_DRIVER, SQL_SERVER, SQL_DB, SQL_TRUSTED, SQL_USER, SQL_PASSWORD, SQL_PORT, SQL_ENCRYPT, SQL_TRUSTCERT, IS_HUGGING_FACE
from models import db


def create_app():
    """Application factory.

    Builds the Flask app with permissive CORS, request-logging middleware,
    best-effort database initialisation, blueprint registration, debug/health
    /home endpoints and JSON error handlers.

    Returns:
        Flask: the fully configured application instance.
    """
    app = Flask(__name__)
    CORS(app, resources={r"/*": {"origins": "*"}})

    # Debug: Print all imports
    print(f"🚀 DEBUG: Starting app creation...")
    print(f"🚀 DEBUG: SQL_SERVER = {SQL_SERVER}")
    print(f"🚀 DEBUG: SQL_DB = {SQL_DB}")

    # Add request logging middleware
    @app.before_request
    def log_request_info():
        # Console-only tracing of every incoming request (debug aid).
        print(f"\n{'='*60}")
        print(f"📥 INCOMING REQUEST:")
        print(f"   Time: {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
        print(f"   Method: {request.method}")
        print(f"   Path: {request.path}")
        print(f"   URL: {request.url}")
        print(f"   Remote Address: {request.remote_addr}")
        if request.user_agent:
            print(f"   User Agent: {request.user_agent.string[:50]}...")
        print(f"   Referrer: {request.referrer}")
        print(f"{'='*60}")

    # Initialize database (best-effort: app still starts if the DB is down)
    try:
        from database import init_database
        init_database(app)
        print("✅ DEBUG: Database initialized successfully")
    except Exception as e:
        print(f"❌ DEBUG: Failed to initialize database: {e}")

    # Try to import and register blueprints
    try:
        import routes
        print("✅ DEBUG: Imported routes module")
        print(f"✅ DEBUG: Available blueprints: {[bp.name for bp in [routes.auth_bp, routes.profiles_bp, routes.expectations_bp, routes.matching_bp, routes.llm_bp]]}")

        # Register blueprints with url_prefix
        app.register_blueprint(routes.auth_bp, url_prefix='/api')
        app.register_blueprint(routes.profiles_bp)
        app.register_blueprint(routes.expectations_bp)
        app.register_blueprint(routes.matching_bp, url_prefix='/api')
        app.register_blueprint(routes.llm_bp)
        print("✅ DEBUG: All blueprints registered")

    except ImportError as e:
        print(f"❌ DEBUG: Failed to import routes: {e}")
    except Exception as e:
        print(f"❌ DEBUG: Failed to register blueprints: {e}")
        import traceback
        traceback.print_exc()

    # Add debug endpoint to list all routes
    @app.route('/debug/routes')
    def debug_routes():
        routes_list = []
        for rule in app.url_map.iter_rules():
            routes_list.append({
                'endpoint': rule.endpoint,
                'methods': list(rule.methods),
                'rule': str(rule)
            })
        return jsonify({'routes': routes_list})

    # Health endpoint
    @app.get("/health")
    def health():
        # Probe the optional FAISS / LLM services without hard dependencies.
        # FIX: the original reported faiss_chunks via a fragile
        # `'TEXT_CHUNKS' in locals()` check; track an explicit counter instead.
        faiss_chunks = 0
        try:
            from faiss_service import knowledge, TEXT_CHUNKS
            from llm_service import CHAIN_BATCH
            faiss_loaded = bool(TEXT_CHUNKS)
            faiss_chunks = len(TEXT_CHUNKS) if TEXT_CHUNKS else 0
            knowledge_loaded = knowledge is not None and hasattr(knowledge, 'indices') and len(knowledge.indices) > 0
        except ImportError:
            faiss_loaded = False
            knowledge_loaded = False
            CHAIN_BATCH = None

        return {
            "status": "ok",
            "llm": ("openai" if CHAIN_BATCH is not None else "offline-fallback"),
            "has_openai_key": bool(os.getenv("OPENAI_API_KEY")),
            "db": {"server": SQL_SERVER, "database": SQL_DB},
            "faiss_loaded": faiss_loaded,
            "faiss_chunks": faiss_chunks,
            "knowledge_base_loaded": knowledge_loaded,
        }

    # Home endpoint
    @app.get("/")
    def home():
        return {
            "message": "Unified Py-Match Service (FAISS-enabled)",
            "try": [
                "POST /api/signup",
                "POST /api/questions/select-role",
                "GET /api/questions/<role>",
                "POST /api/questions/submit-answers/<role>",
                "POST /llm/start (body: { user_id, role, n_questions, batch_size })",
                "POST /llm/next (body: { session_id, selected_color })",
                "GET /api/match/<user_id> (query: ?role=<role>&limit=<num>)",
                "GET /debug/routes (to see all registered routes)",
            ]
        }

    # Error handlers (JSON responses instead of Flask's HTML pages)
    @app.errorhandler(404)
    def not_found(error):
        return jsonify({"error": "Endpoint not found", "path": request.path}), 404

    @app.errorhandler(405)
    def method_not_allowed(error):
        return jsonify({
            "error": "Method not allowed",
            "message": f"Method {request.method} not allowed for {request.path}",
            "allowed_methods": error.valid_methods if hasattr(error, 'valid_methods') else []
        }), 405

    @app.errorhandler(500)
    def internal_error(error):
        return jsonify({"error": "Internal server error"}), 500

    return app


if __name__ == "__main__":
    app = create_app()

    print(f"\n{'='*60}")
    print("🚀 Flask server starting...")
    print(f"{'='*60}")

    app.run(host="0.0.0.0", port=5000, debug=True)
|
character_functions.py
ADDED
|
@@ -0,0 +1,507 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# character_functions.py
|
| 2 |
+
import json
|
| 3 |
+
import numpy as np
|
| 4 |
+
from typing import Dict, List, Optional
|
| 5 |
+
import os
|
| 6 |
+
|
| 7 |
+
from config import COLOR_KEYS
|
| 8 |
+
from models import LLMGeneratedQuestions, Users, Marriage
|
| 9 |
+
|
| 10 |
+
# Try importing LLM libraries
|
| 11 |
+
try:
|
| 12 |
+
from pydantic import BaseModel, Field
|
| 13 |
+
from langchain_core.prompts import ChatPromptTemplate
|
| 14 |
+
from langchain_core.output_parsers import PydanticOutputParser, StrOutputParser
|
| 15 |
+
from langchain_openai import ChatOpenAI
|
| 16 |
+
HAS_LLM_STACK = True
|
| 17 |
+
HAS_LLM = True
|
| 18 |
+
except Exception:
|
| 19 |
+
HAS_LLM_STACK = False
|
| 20 |
+
HAS_LLM = False
|
| 21 |
+
|
| 22 |
+
def cosine_sim(a: np.ndarray, b: np.ndarray) -> float:
    """Cosine similarity between two vectors; 0.0 when either has zero norm."""
    norm_a = np.linalg.norm(a)
    norm_b = np.linalg.norm(b)
    if norm_a == 0.0 or norm_b == 0.0:
        return 0.0
    return float(np.dot(a, b) / (norm_a * norm_b))
|
| 26 |
+
|
| 27 |
+
def calculate_character_similarity(b, g, y, r, *, ideals=(0.4, 0.3, 0.2, 0.1)):
    """Score how closely a Blue/Green/Yellow/Red distribution matches an ideal mix.

    The four inputs are coerced to floats (None → 0.0); on any conversion
    failure all four are treated as 0.0, matching the original behavior.

    Args:
        b, g, y, r: raw color scores (numbers, numeric strings, or None).
        ideals: keyword-only target proportions for (blue, green, yellow, red);
            defaults to the original hard-coded 40/30/20/10 split, so existing
            callers are unaffected.

    Returns:
        float in [0.0, 1.0]; 0.0 when the total score is non-positive.
    """
    # 🚨 Convert all inputs to numbers (None counts as zero).
    try:
        scores = [float(v) if v is not None else 0.0 for v in (b, g, y, r)]
    except (ValueError, TypeError) as e:
        print(f"❌ Error converting character scores to numbers: {e}")
        scores = [0.0, 0.0, 0.0, 0.0]

    total = sum(scores)
    if total <= 0:
        return 0.0

    # Mean absolute deviation from the ideal mix, each color weighted 0.25.
    deviation = sum(
        abs(value / total - ideal) * 0.25
        for value, ideal in zip(scores, ideals)
    )

    # Clamp to [0, 1] for safety against pathological ideal vectors.
    return max(0.0, min(1.0, 1.0 - deviation))
|
| 66 |
+
|
| 67 |
+
def get_user_background(user_id: int) -> Dict:
    """Get comprehensive user background for LLM analysis"""
    details: Dict = {}

    # Basic account information.
    user = Users.query.filter_by(user_id=user_id).first()
    if user is not None:
        details["name"] = user.name or "Unknown"
        details["email"] = user.email or ""

    # Marriage-profile details, when the user has one.
    profile = Marriage.query.filter_by(user_id=user_id).first()
    if profile is not None:
        details["current_location"] = profile.current_city or ""
        details["education"] = profile.education_level or ""
        details["employment"] = profile.employment_status or ""
        details["hobbies"] = profile.hobbies_interests or ""
        details["conflict_style"] = profile.conflict_approach or ""
        details["financial_style"] = profile.financial_style or ""
        details["family_type"] = profile.family_type or ""

    return details
|
| 93 |
+
|
| 94 |
+
def generate_character_llm_explanation(u_vec, v_vec):
    """Character explanation using FAISS + LLM - NO FALLBACK.

    Builds a 3-section compatibility analysis (Character Strengths, Character
    Risks, Sacrifices Needed) from two 4-color personality vectors via
    gpt-4o-mini, optionally grounded in FAISS book context.

    Args:
        u_vec, v_vec: sequences of 4 numeric scores (Blue, Green, Yellow, Red).

    Returns:
        list[str]: up to 15 cleaned response lines.

    Raises:
        Exception: when the LLM stack is unavailable or the call fails
            (callers rely on catching plain Exception).
    """

    print("🟢 Starting LLM character explanation...")

    if not HAS_LLM:
        raise Exception("LLM service is currently unavailable. Please try again later.")

    # Import inside function to avoid circular imports
    try:
        from faiss_service import get_faiss_context
        context = get_faiss_context(3)
    except ImportError:
        context = ""

    data = {
        "User1": [float(u_vec[0]), float(u_vec[1]), float(u_vec[2]), float(u_vec[3])],
        "User2": [float(v_vec[0]), float(v_vec[1]), float(v_vec[2]), float(v_vec[3])]
    }

    json_data = json.dumps(data, indent=2)

    prompt = ChatPromptTemplate.from_messages([
        ("system", """
You are a personality and relationship compatibility expert.

Generate CHARACTER compatibility in EXACTLY 3 groups with these EXACT section headers:

1. Character Strengths
2. Character Risks
3. Sacrifices Needed

CRITICAL RULES:
- Use ONLY these exact section headers: "Character Strengths", "Character Risks", "Sacrifices Needed"
- NO markdown formatting
- Each section should have 1-5 points based on actual needs
- Write only the points that are truly necessary
- If only one point is needed, write only one point
- If no points are needed in a section, write "None" for that section
- Maximum 5 points per section for very low compatibility cases
- Each point should be a complete sentence starting with a capital letter
- Separate sections with a blank line
- No color names, no trait labels, no percentages
- Use simple English that anyone can understand
- Don't use "User1" or "User2" - refer to them as "the two people" or "both persons"
- BE TRUTHFUL: Write only real strengths, risks, and sacrifices based on their actual compatibility
"""),
        ("human", """
### PERSONALITY DATA
{json_data}

### BOOK CONTEXT
{context}

Generate the character analysis in the exact format specified above.
Write only the points that are truly needed - no filler content.
Use simple language that everyone can understand.
""")
    ])

    try:
        # FIX: the original passed both timeout=60 and the deprecated
        # request_timeout=60 alias; keep only the supported parameter.
        llm = ChatOpenAI(
            model="gpt-4o-mini",
            temperature=0.65,
            api_key=os.getenv("OPENAI_API_KEY"),
            timeout=60,
            max_retries=2
        )

        chain = prompt | llm | StrOutputParser()

        print("⏳ Generating AI-powered character analysis...")
        res = chain.invoke({"json_data": json_data, "context": context})

        print(f"🔍 DEBUG: Raw LLM response = {res}")

        if not res or len(res.strip()) < 10:
            raise Exception("AI analysis returned insufficient response")

        # Process the response into lines
        lines = []
        for line in res.split('\n'):
            line = line.strip()
            if line and not line.startswith('###'):  # Remove markdown headers
                lines.append(line)

        # Ensure we have all three sections
        if len(lines) < 3:  # At least headers for all three sections
            print(f"⚠️ LLM returned insufficient lines: {len(lines)}")
            # Fallback: use the original response but clean it up
            lines = [line for line in res.split('\n') if line.strip() and not line.startswith('###')]

        print(f"✅ AI character analysis completed with {len(lines)} lines")
        return lines[:15]  # Increased limit for flexible points

    except Exception as e:
        print(f"🔴 LLM character explanation failed: {e}")
        raise Exception(f"AI analysis failed: {str(e)}. Please try again.")
|
| 193 |
+
|
| 194 |
+
def generate_character_fallback_explanation(u_vec, v_vec):
    """Generate structured fallback explanation when LLM is unavailable"""
    score = cosine_sim(u_vec, v_vec)

    # Dominant color of each vector drives the canned section content.
    palette = ['Blue', 'Green', 'Yellow', 'Red']
    first_dominant = palette[np.argmax(u_vec)]
    second_dominant = palette[np.argmax(v_vec)]

    sections = [
        ("Character Strengths",
         generate_fallback_strengths(first_dominant, second_dominant)),
        ("Character Risks",
         generate_fallback_risks(first_dominant, second_dominant)),
        ("Sacrifices Needed From Both Partners",
         generate_fallback_sacrifices(first_dominant, second_dominant)),
    ]

    # Assemble the same line layout the LLM path produces.
    lines = [f"Character Score: {round(score * 100, 1)}%"]
    for header, points in sections:
        lines.append("")
        lines.append(header)
        lines.extend(f"• {point}" for point in points)

    return lines
|
| 227 |
+
|
| 228 |
+
def generate_fallback_strengths(color1, color2):
    """Generate strengths based on color combination"""
    # Canonicalize the pair so lookup order doesn't matter.
    pair = (min(color1, color2), max(color1, color2))

    by_pair = {
        ('Blue', 'Red'): [
            "Analytical thinking complements decisive action",
            "Thorough planning balances quick decision-making",
            "Data-driven approach supports confident leadership"
        ],
        ('Green', 'Yellow'): [
            "Structured organization grounds creative ideas",
            "Process-oriented approach gives vision practical form",
            "Reliability provides stability for innovation"
        ],
        ('Blue', 'Green'): [
            "Detailed analysis combines with systematic execution",
            "Methodical approach ensures thorough implementation",
            "Precision and organization create reliable outcomes"
        ],
        ('Red', 'Yellow'): [
            "Action-oriented drive brings creative ideas to life",
            "Bold decisions support visionary thinking",
            "Energy and enthusiasm fuel innovative projects"
        ],
        ('Blue', 'Yellow'): [
            "Analytical depth enhances creative problem-solving",
            "Thorough research supports innovative approaches",
            "Logical thinking balances imaginative ideas"
        ],
        ('Green', 'Red'): [
            "Organized planning directs decisive action",
            "Systematic approach channels energetic drive",
            "Process efficiency supports quick implementation"
        ]
    }

    generic = [
        "Complementary personality traits create balance",
        "Different approaches bring diverse perspectives",
        "Varied strengths cover multiple relationship aspects"
    ]
    return by_pair.get(pair, generic)
|
| 269 |
+
|
| 270 |
+
def generate_fallback_risks(color1, color2):
    """Generate risks based on color combination"""
    # Canonicalize the pair so lookup order doesn't matter.
    pair = (min(color1, color2), max(color1, color2))

    by_pair = {
        ('Blue', 'Red'): [
            "Over-analysis may frustrate action-oriented partner",
            "Quick decisions might overlook important details",
            "Direct communication may clash with thoughtful processing"
        ],
        ('Green', 'Yellow'): [
            "Rigid routines may limit spontaneous creativity",
            "Unstructured ideas may disrupt organized systems",
            "Process focus might slow down innovative thinking"
        ],
        ('Blue', 'Green'): [
            "Excessive planning may delay actual progress",
            "Over-caution might prevent necessary risks",
            "Analysis paralysis in decision-making situations"
        ],
        ('Red', 'Yellow'): [
            "Impulsive actions may lack long-term vision",
            "Big ideas might overlook practical implementation",
            "Enthusiasm may override careful consideration"
        ],
        ('Blue', 'Yellow'): [
            "Over-thinking may dampen spontaneous creativity",
            "Abstract ideas might lack practical grounding",
            "Detail focus could miss the bigger picture"
        ],
        ('Green', 'Red'): [
            "Bureaucratic processes may frustrate quick action",
            "Impulsive decisions could disrupt careful planning",
            "Directness may overwhelm methodical approach"
        ]
    }

    generic = [
        "Different communication styles may cause misunderstandings",
        "Varying energy levels could lead to timing conflicts",
        "Contrasting approaches to problems may create tension"
    ]
    return by_pair.get(pair, generic)
|
| 311 |
+
|
| 312 |
+
def generate_fallback_sacrifices(color1, color2):
    """Generate sacrifices based on color combination"""
    # Canonicalize the pair so lookup order doesn't matter.
    pair = (min(color1, color2), max(color1, color2))

    by_pair = {
        ('Blue', 'Red'): [
            "Analytical partner must accept quicker decisions sometimes",
            "Action-oriented partner needs to allow time for reflection",
            "Both must find middle ground between speed and thoroughness"
        ],
        ('Green', 'Yellow'): [
            "Organized partner should embrace some spontaneity",
            "Creative partner needs to respect established routines",
            "Both must balance structure with flexibility"
        ],
        ('Blue', 'Green'): [
            "Need to move from planning to action more quickly",
            "Must embrace some uncertainty in decision-making",
            "Both should practice more direct communication"
        ],
        ('Red', 'Yellow'): [
            "Need to ground big ideas with practical steps",
            "Must balance enthusiasm with realistic planning",
            "Both should develop more patience in execution"
        ],
        ('Blue', 'Yellow'): [
            "Analytical thinker should embrace intuitive leaps",
            "Creative partner needs to consider practical constraints",
            "Both must balance imagination with reality checks"
        ],
        ('Green', 'Red'): [
            "Structured partner should allow faster execution sometimes",
            "Action-oriented partner needs to follow established processes",
            "Both must compromise between speed and quality"
        ]
    }

    generic = [
        "Both partners need to understand different communication styles",
        "Compromise between individual preferences and shared needs",
        "Balance personal approaches with relationship harmony"
    ]
    return by_pair.get(pair, generic)
|
| 353 |
+
|
| 354 |
+
def detailed_explanation(user1_id: int, user2_id: int, u_vec: np.ndarray, v_vec: np.ndarray) -> List[str]:
    """Main function to generate detailed explanations"""
    # Thin façade: delegates to the dynamic (LLM-backed, rule-based fallback)
    # generator, keeping the public entry-point name stable for callers.
    return generate_dynamic_explanation(
        user1_id,
        user2_id,
        u_vec,
        v_vec,
    )
|
| 357 |
+
|
| 358 |
+
def generate_dynamic_explanation(user1_id: int, user2_id: int, user1_vec: np.ndarray, user2_vec: np.ndarray) -> List[str]:
    """Generate dynamic explanation using LLM and knowledge base.

    Pipeline: fetch both users' backgrounds, query the FAISS knowledge base
    for psychological context, then ask gpt-4o-mini for an analysis; on any
    LLM failure (or missing API key / LLM stack) fall back to the
    rule-based generator.  Returns a list of explanation strings.
    """

    # Get user backgrounds
    user1_bg = get_user_background(user1_id)
    user2_bg = get_user_background(user2_id)

    # Create query for knowledge base
    query = f"compatibility between personality types: {user1_bg.get('conflict_style', '')} and {user2_bg.get('conflict_style', '')}"

    # Import knowledge inside the function to avoid circular import
    try:
        from faiss_service import knowledge
        context_chunks = knowledge.get_relevant_context(query, topk=2) if knowledge else []
    except ImportError:
        context_chunks = []

    context = "\n".join(context_chunks) if context_chunks else "No specific psychological context available."
    # Try LLM first if available
    if HAS_LLM and os.getenv("OPENAI_API_KEY"):
        try:
            llm = ChatOpenAI(
                model="gpt-4o-mini",
                temperature=0.7,
                max_retries=2,
                timeout=30
            )

            prompt_template = ChatPromptTemplate.from_messages([
                ("system", """You are a relationship compatibility expert specializing in personality color analysis (Blue, Green, Yellow, Red).
Based on the personality profiles, relevant psychological context, and user profiles, provide a detailed compatibility analysis.

CRITICAL GUIDELINES:
1. Focus on practical relationship dynamics, not just theoretical compatibility
2. Use insights from the provided psychological context when relevant
3. Be specific about strengths and potential challenges
4. Provide actionable advice for the couple
5. Keep explanations natural and conversational, not robotic
6. Reference specific personality traits and how they interact
7. Consider cultural and personal background when relevant
8. Balance positivity with realistic expectations

Structure your response with:
- Compatibility overview (1-2 sentences)
- Key strengths of this pairing
- Potential challenges to be aware of
- Practical advice for success
- Daily life compatibility"""),
                ("human", """Personality Profiles:
User 1 ({user1_name}, {user1_gender}): Blue {user1_blue}%, Green {user1_green}%, Yellow {user1_yellow}%, Red {user1_red}%
User 2 ({user2_name}, {user2_gender}): Blue {user2_blue}%, Green {user2_green}%, Yellow {user2_yellow}%, Red {user2_red}%

User 1 Background: {user1_background}
User 2 Background: {user2_background}

Relevant Psychological Context:
{context}

Please provide a comprehensive compatibility analysis:""")
            ])

            chain = prompt_template | llm | StrOutputParser()

            # Vectors are assumed to be 4-element fractions in [0, 1]
            # (Blue, Green, Yellow, Red), scaled here to percentages.
            # NOTE(review): get_user_background never populates "gender", so
            # both gender fields always fall back to "Not specified" — confirm
            # whether gender should be added to the background dict.
            response = chain.invoke({
                "user1_name": user1_bg.get("name", "User 1"),
                "user1_gender": user1_bg.get("gender", "Not specified"),
                "user1_blue": round(user1_vec[0] * 100, 1),
                "user1_green": round(user1_vec[1] * 100, 1),
                "user1_yellow": round(user1_vec[2] * 100, 1),
                "user1_red": round(user1_vec[3] * 100, 1),
                "user2_name": user2_bg.get("name", "User 2"),
                "user2_gender": user2_bg.get("gender", "Not specified"),
                "user2_blue": round(user2_vec[0] * 100, 1),
                "user2_green": round(user2_vec[1] * 100, 1),
                "user2_yellow": round(user2_vec[2] * 100, 1),
                "user2_red": round(user2_vec[3] * 100, 1),
                "user1_background": json.dumps(user1_bg, indent=2),
                "user2_background": json.dumps(user2_bg, indent=2),
                "context": context
            })

            # Parse LLM response into structured points:
            # keep bullet lines (stripped of their marker) and any other
            # substantive line, dropping the structural section headers.
            points = []
            lines = response.split('\n')
            for line in lines:
                line = line.strip()
                if line and not line.startswith(('- Compatibility', '- Key', '- Potential', '- Practical', '- Daily')):
                    if line.startswith('•') or line.startswith('-'):
                        points.append(line[1:].strip())
                    elif len(line) > 20:  # Substantive lines
                        points.append(line)

            if points:
                return points[:5]  # Return top 5 most relevant points
        except Exception as e:
            # Swallow LLM errors so the fallback below always runs.
            print(f"LLM explanation failed: {e}")

    # Fallback to rule-based explanations
    return generate_rule_based_explanation(user1_vec, user2_vec, user1_bg, user2_bg)
|
| 457 |
+
|
| 458 |
+
def generate_rule_based_explanation(user1_vec: np.ndarray, user2_vec: np.ndarray, user1_bg: Dict, user2_bg: Dict) -> List[str]:
    """Deterministic fallback used when the LLM explanation fails.

    Derives up to four compatibility points from the two users' color
    vectors (order: Blue, Green, Yellow, Red) and their background dicts.
    """
    palette = ["Blue", "Green", "Yellow", "Red"]
    dom1 = palette[int(np.argmax(user1_vec))]
    dom2 = palette[int(np.argmax(user2_vec))]

    points: List[str] = []

    # Compare the two dominant colors first.
    if dom1 == dom2:
        points.append(f"Both share {dom1} dominance: Strong alignment in core approach and values.")
    else:
        points.append(f"{dom1}-{dom2} pairing: Complementary strengths create balanced dynamics.")

    # Flag any per-color gap larger than 15 percentage points.
    for color, delta in zip(palette, user2_vec - user1_vec):
        if abs(delta) <= 0.15:
            continue
        if delta > 0:
            points.append(f"Higher {color} influence brings {get_color_strength(color)} to the relationship.")
        else:
            points.append(f"Lower {color} presence allows for more {get_color_balance(color)} in dynamics.")

    # Background-driven notes (only when both sides supplied the field).
    if user1_bg.get("hobbies") and user2_bg.get("hobbies"):
        points.append("Shared interests and hobbies create strong bonding opportunities.")
    if user1_bg.get("conflict_style") and user2_bg.get("conflict_style"):
        points.append("Complementary conflict styles can lead to effective problem-solving.")

    # Cap the output at four points.
    return points[:4]
|
| 490 |
+
|
| 491 |
+
def get_color_strength(color: str) -> str:
    """Map a color name to the positive qualities it contributes.

    Unknown colors fall back to a generic phrase.
    """
    qualities = {
        "Blue": "analytical precision and structured thinking",
        "Green": "emotional stability and patient understanding",
        "Yellow": "creative energy and social connection",
        "Red": "decisive action and goal orientation",
    }
    if color in qualities:
        return qualities[color]
    return "unique strengths"
|
| 499 |
+
|
| 500 |
+
def get_color_balance(color: str) -> str:
    """Map a color name to the qualities its ABSENCE allows in a pairing.

    Unknown colors fall back to a generic phrase.
    """
    counterweights = {
        "Blue": "flexibility and spontaneity",
        "Green": "directness and assertiveness",
        "Yellow": "focus and routine",
        "Red": "collaboration and patience",
    }
    if color in counterweights:
        return counterweights[color]
    return "balanced approaches"
|
config.py
ADDED
|
@@ -0,0 +1,48 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# config.py
import os
from dotenv import load_dotenv

# --- load .env so OPENAI_API_KEY (and others) are available ---
BASE_DIR = os.path.dirname(os.path.abspath(__file__))

# Load environment variables - priority: Hugging Face secrets > .env file
IS_HUGGING_FACE = os.environ.get('HUGGINGFACE_SPACES') == 'true' or os.environ.get('SPACE_ID') is not None
if not IS_HUGGING_FACE:
    # Only load from .env file when running locally
    load_dotenv(os.path.join(BASE_DIR, ".env"))
else:
    # On Hugging Face, secrets are automatically available as environment variables
    print("Running on Hugging Face Spaces - using secrets from environment variables")

if IS_HUGGING_FACE:
    # Hugging Face Spaces configuration
    DEFAULT_SQL_SERVER = "pykara-sqlserver.c5aosm6ie5j3.eu-north-1.rds.amazonaws.com,1433"
    DEFAULT_SQL_DB = "PyMatch"
    # NOTE(review): the comment below says "SQL authentication" but "yes"
    # selects a trusted (Windows) connection in database.py -- confirm this
    # should not be "no" when authenticating to AWS RDS with SQL_USER/PWD.
    DEFAULT_SQL_TRUSTED = "yes"  # Use SQL authentication on Hugging Face
else:
    # Local development configuration
    # Raw string: "\s" in a plain literal is an invalid escape sequence
    # (SyntaxWarning on modern Python); the backslash is part of the
    # SQL Server instance name and must survive literally.
    DEFAULT_SQL_SERVER = r"localhost\sqlexpress"
    DEFAULT_SQL_DB = "Py_Match"
    DEFAULT_SQL_TRUSTED = "yes"  # Use Windows authentication locally

# Connection settings, each overridable via a PYMATCH_* environment variable.
SQL_DRIVER = os.getenv("PYMATCH_SQL_DRIVER", "ODBC Driver 17 for SQL Server")
SQL_SERVER = os.getenv("PYMATCH_SQL_SERVER", DEFAULT_SQL_SERVER)
SQL_DB = os.getenv("PYMATCH_SQL_DB", DEFAULT_SQL_DB)
SQL_TRUSTED = os.getenv("PYMATCH_SQL_TRUSTED", DEFAULT_SQL_TRUSTED)  # yes/no
SQL_USER = os.getenv("PYMATCH_SQL_USER", "")
SQL_PASSWORD = os.getenv("PYMATCH_SQL_PASSWORD", "")
SQL_PORT = os.getenv("PYMATCH_SQL_PORT", "")
SQL_ENCRYPT = os.getenv("PYMATCH_SQL_ENCRYPT", "no").lower().strip()
SQL_TRUSTCERT = os.getenv("PYMATCH_SQL_TRUST_CERT", "yes").lower().strip()

# Question-generation knobs.
PROGRESS_TBL = os.getenv("PYMATCH_PROGRESS_TABLE", "LLMGeneratedQuestions")
DEFAULT_N_QUESTIONS = int(os.getenv("PYMATCH_DEFAULT_N_QUESTIONS", "20"))
DEFAULT_BATCH_SIZE = int(os.getenv("PYMATCH_DEFAULT_BATCH_SIZE", "10"))
MAX_QUESTIONS = int(os.getenv("PYMATCH_MAX_QUESTIONS", "50"))

# Some constants used across the app
COLOR_KEYS = ["blue", "green", "red", "yellow"]
DOMAINS = ["marriage", "interview", "partnership", "general"]

# Faiss index / chunks defaults - user should update FAISS_INDEX_PATH or provide companion chunks file
# (reuses BASE_DIR instead of recomputing the directory from __file__)
FAISS_INDEX_PATH = os.path.join(BASE_DIR, "faiss_index_file.index")
|
database.py
ADDED
|
@@ -0,0 +1,151 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# database.py
|
| 2 |
+
import pyodbc
|
| 3 |
+
import urllib.parse
|
| 4 |
+
import hashlib
|
| 5 |
+
import json
|
| 6 |
+
import pickle
|
| 7 |
+
import random
|
| 8 |
+
from typing import Dict, List
|
| 9 |
+
from flask import Flask
|
| 10 |
+
from config import SQL_DRIVER, SQL_SERVER, SQL_DB, SQL_TRUSTED, SQL_USER, SQL_PASSWORD, SQL_PORT, SQL_ENCRYPT, SQL_TRUSTCERT
|
| 11 |
+
from models import db
|
| 12 |
+
|
| 13 |
+
def get_db_connection():
    """Get a raw pyodbc connection.

    Builds the ODBC connection string the same way init_database() does so
    both code paths agree: Windows auth when SQL_TRUSTED == "yes", otherwise
    SQL auth with SQL_USER/SQL_PASSWORD, plus optional port and encryption
    keywords. (The previous version always sent Trusted_Connection and
    ignored credentials, which broke SQL-auth deployments.)
    """
    # Append the port with a comma, the SQL Server ODBC convention.
    server = f"{SQL_SERVER},{SQL_PORT}" if SQL_PORT else SQL_SERVER
    conn_str = (
        f"DRIVER={{{SQL_DRIVER}}};"
        f"SERVER={server};"
        f"DATABASE={SQL_DB};"
    )
    if SQL_TRUSTED == "yes":
        conn_str += "Trusted_Connection=yes;"
    else:
        conn_str += f"UID={SQL_USER};PWD={SQL_PASSWORD};"
    if SQL_ENCRYPT == "yes":
        conn_str += "Encrypt=yes;"
    if SQL_TRUSTCERT == "yes":
        conn_str += "TrustServerCertificate=yes;"
    return pyodbc.connect(conn_str)
|
| 21 |
+
|
| 22 |
+
def row_to_dict(cursor, row) -> Dict:
    """Map one fetched row onto the column names from the cursor description.

    Returns {} for a None row (e.g. an empty fetchone()).
    """
    if row is None:
        return {}
    names = [desc[0] for desc in cursor.description]
    return {name: row[i] for i, name in enumerate(names)}
|
| 28 |
+
|
| 29 |
+
def hash_password(password: str) -> str:
    """Return the hex-encoded SHA-256 digest of *password*.

    NOTE(review): unsalted SHA-256 is weak for password storage; a KDF such
    as hashlib.pbkdf2_hmac or scrypt would be stronger -- kept as-is here to
    preserve compatibility with hashes already stored in the database.
    """
    digest = hashlib.sha256()
    digest.update(password.encode("utf-8"))
    return digest.hexdigest()
|
| 32 |
+
|
| 33 |
+
def init_database(app: Flask):
    """Wire SQLAlchemy (mssql+pyodbc) into *app* and return the shared db handle."""
    # Append the port with a comma, per SQL Server ODBC convention.
    target = f"{SQL_SERVER},{SQL_PORT}" if SQL_PORT else SQL_SERVER

    # Assemble the raw ODBC connection string piece by piece.
    pieces = [
        f"DRIVER={{{SQL_DRIVER}}};",
        f"SERVER={target};",
        f"DATABASE={SQL_DB};",
    ]
    if SQL_TRUSTED == "yes":
        pieces.append("Trusted_Connection=yes;")  # Windows authentication
    else:
        pieces.append(f"UID={SQL_USER};PWD={SQL_PASSWORD};")  # SQL authentication
    if SQL_ENCRYPT == "yes":
        pieces.append("Encrypt=yes;")
    if SQL_TRUSTCERT == "yes":
        pieces.append("TrustServerCertificate=yes;")

    # The entire ODBC string rides inside the SQLAlchemy URL, URL-encoded.
    encoded = urllib.parse.quote_plus("".join(pieces))
    app.config["SQLALCHEMY_DATABASE_URI"] = f"mssql+pyodbc:///?odbc_connect={encoded}"
    app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False

    db.init_app(app)
    return db
|
| 68 |
+
|
| 69 |
+
def fetch_profile_for_role(user_id: str, role: str) -> Dict:
    """Fetch the newest profile row for *user_id* from the table matching *role*.

    role is one of marriage/interview/partnership (case-insensitive); any
    other value returns {}. hobbies_interests is normalized into a list
    (the column stores either JSON or comma-separated text). Returns {}
    on a miss or database error.

    Fixes: `conn` was unbound in the finally block when the connection
    itself failed (previously masked by a bare `except:`), and the bare
    excepts are narrowed.
    """
    # Whitelist maps role -> table name, so the f-string SQL below is not
    # an injection vector; user_id is passed as a bound parameter.
    table = {
        "marriage": "Marriage",
        "interview": "Interview",
        "partnership": "Partnership"
    }.get(role.lower())

    if not table:
        return {}

    conn = None
    try:
        conn = get_db_connection()
        cur = conn.cursor()
        cur.execute(f"""
            SELECT TOP 1 *
            FROM {table}
            WHERE user_id = ?
            ORDER BY created_at DESC
        """, (user_id,))
        row = cur.fetchone()
        if row is None:
            return {}
        prof = row_to_dict(cur, row)
        # Normalize hobbies_interests if it exists
        if "hobbies_interests" in prof and isinstance(prof["hobbies_interests"], str):
            raw = prof["hobbies_interests"]
            if raw.strip().startswith("["):
                try:
                    prof["hobbies_interests"] = json.loads(raw)
                except Exception:
                    # Malformed JSON: fall back to comma splitting.
                    prof["hobbies_interests"] = [s.strip() for s in raw.split(",") if s.strip()]
            else:
                prof["hobbies_interests"] = [s.strip() for s in raw.split(",") if s.strip()]
        prof["user_id"] = str(user_id)
        return prof
    except pyodbc.Error as e:
        print("Profile fetch error:", e)
        return {}
    finally:
        if conn is not None:
            try:
                conn.close()
            except Exception:
                pass
|
| 110 |
+
|
| 111 |
+
def fetch_expectation_data(user_id: str) -> Dict:
    """Fetch the most recent ExpectationResponse row for *user_id*.

    Returns {} when no row exists or the query fails.

    Fixes: `conn` was unbound in the finally block when the connection
    itself failed (previously masked by a bare `except:`).
    """
    conn = None
    try:
        conn = get_db_connection()
        cur = conn.cursor()
        cur.execute("""
            SELECT * FROM ExpectationResponse
            WHERE user_id = ?
            ORDER BY created_at DESC
        """, (user_id,))
        row = cur.fetchone()
        if row is None:
            return {}
        return row_to_dict(cur, row)
    except Exception as e:
        print(f"Error fetching expectation data: {e}")
        return {}
    finally:
        if conn is not None:
            try:
                conn.close()
            except Exception:
                pass
|
| 131 |
+
|
| 132 |
+
def fetch_marriage_profile_data(user_id: str) -> Dict:
    """Fetch the most recent Marriage profile row for *user_id* (raw columns).

    Returns {} when no row exists or the query fails.

    Fixes: `conn` was unbound in the finally block when the connection
    itself failed (previously masked by a bare `except:`).
    """
    conn = None
    try:
        conn = get_db_connection()
        cur = conn.cursor()
        cur.execute("""
            SELECT * FROM Marriage
            WHERE user_id = ?
            ORDER BY created_at DESC
        """, (user_id,))
        row = cur.fetchone()
        if row is None:
            return {}
        return row_to_dict(cur, row)
    except Exception as e:
        print(f"Error fetching marriage profile data: {e}")
        return {}
    finally:
        if conn is not None:
            try:
                conn.close()
            except Exception:
                pass
|
faiss_Psychology2e_WEB.index
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:c87e897737e6288e24e3c7bd7497b1fe320701f4c4e2671573a5126201b8cde9
|
| 3 |
+
size 1536045
|
faiss_index_file.index
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:d9e53182220a69e6dc00240a4a57e89d4bb8bbce525289677486b81ac800a755
|
| 3 |
+
size 3575853
|
faiss_service.py
ADDED
|
@@ -0,0 +1,168 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# faiss_service.py
|
| 2 |
+
import os
|
| 3 |
+
import json
|
| 4 |
+
import pickle
|
| 5 |
+
import random
|
| 6 |
+
from typing import Dict, List, Tuple, Optional
|
| 7 |
+
|
| 8 |
+
# Try importing faiss
|
| 9 |
+
try:
|
| 10 |
+
import faiss
|
| 11 |
+
HAS_FAISS = True
|
| 12 |
+
except Exception as e:
|
| 13 |
+
print("faiss import failed:", e)
|
| 14 |
+
HAS_FAISS = False
|
| 15 |
+
|
| 16 |
+
# Try importing sentence-transformers
|
| 17 |
+
try:
|
| 18 |
+
from sentence_transformers import SentenceTransformer # type: ignore
|
| 19 |
+
HAS_EMBEDDER = True
|
| 20 |
+
except Exception:
|
| 21 |
+
SentenceTransformer = None
|
| 22 |
+
HAS_EMBEDDER = False
|
| 23 |
+
|
| 24 |
+
from config import BASE_DIR, FAISS_INDEX_PATH
|
| 25 |
+
|
| 26 |
+
# Per-book (FAISS index, pickled chunk metadata) pairs searched by
# KnowledgeSource. Paths resolve relative to the application root.
BOOKS = [
    {
        "index": os.path.join(BASE_DIR, "psychology2e.index"),
        "meta": os.path.join(BASE_DIR, "psychology2e_meta.pkl"),
        "name": "Psychology 2e",
    },
    {
        "index": os.path.join(BASE_DIR, "surrounded_by_idiots.index"),
        "meta": os.path.join(BASE_DIR, "surrounded_by_idiots_meta.pkl"),
        "name": "Surrounded by Idiots",
    },
]

# Module-level state filled in by load_faiss_index(): the single "main"
# index (loaded from FAISS_INDEX_PATH) and its parallel list of text chunks.
FAISS_INDEX = None
TEXT_CHUNKS: List[str] = []
|
| 41 |
+
|
| 42 |
+
class KnowledgeSource:
    """Loads the per-book FAISS indices listed in BOOKS plus their pickled
    metadata and answers similarity queries against all of them at once.

    Degrades gracefully: when FAISS or the sentence-transformers model is
    unavailable, instances simply return no context.
    """

    def __init__(self):
        # Parallel lists: indices[i] searches over metas[i].
        self.indices: List = []
        self.metas: List[List[Dict]] = []
        self.embedder = None
        if not HAS_FAISS:
            return
        try:
            if SentenceTransformer:
                # MiniLM sentence encoder; downloads weights on first use.
                self.embedder = SentenceTransformer("all-MiniLM-L6-v2")
        except Exception:
            self.embedder = None
        for b in BOOKS:
            try:
                idx_path = b["index"]
                meta_path = b["meta"]
                # Only load books whose index AND metadata files both exist.
                if os.path.exists(idx_path) and os.path.exists(meta_path):
                    index = faiss.read_index(idx_path)
                    with open(meta_path, "rb") as f:
                        meta = pickle.load(f)
                    self.indices.append(index)
                    self.metas.append(meta)
            except Exception:
                # A broken book is skipped, not fatal.
                continue

    def get_relevant_context(self, query: str, topk: int = 3) -> List[str]:
        """Get relevant context from knowledge base for relationship matching.

        Searches every loaded book for *topk* nearest chunks, then keeps the
        *topk* best across all books. Returns [] when no index/embedder is
        available or the search fails.
        """
        if not self.indices or not self.embedder:
            return []
        try:
            vec = self.embedder.encode([query]).astype("float32")
            results: List[Tuple[float, str]] = []
            for index, meta in zip(self.indices, self.metas):
                # D = distances, I = chunk indices into this book's metadata.
                D, I = index.search(vec, topk)
                for d, i in zip(D[0], I[0]):
                    if 0 <= i < len(meta):
                        # assumes meta entries are dicts with a "text" field -- TODO confirm
                        txt = meta[i].get("text", "")[:500]  # Increased length for better context
                        results.append((float(d), txt))
            # Ascending sort: assumes an L2-style index where smaller
            # distance means more relevant -- TODO confirm index metric.
            results.sort(key=lambda x: x[0])
            return [t for _, t in results[:topk]]
        except Exception:
            return []
|
| 84 |
+
|
| 85 |
+
def try_load_chunks_from_disk(index_path: str) -> List[str]:
    """Look for a companion chunks file next to the index and load it.

    Checks <base>.chunks.json, <base>_chunks.json, <base>.chunks.pkl and
    <base>_chunks.pkl in that order; returns the first list found, else [].
    """
    stem = os.path.splitext(index_path)[0]
    for suffix in (".chunks.json", "_chunks.json", ".chunks.pkl", "_chunks.pkl"):
        path = stem + suffix
        if not os.path.exists(path):
            continue
        try:
            if path.endswith(".json"):
                with open(path, "r", encoding="utf-8") as fh:
                    payload = json.load(fh)
                # Expecting a bare list of strings...
                if isinstance(payload, list):
                    return payload
                # ...but some exporters wrap it as {"chunks": [...]}.
                if isinstance(payload, dict) and "chunks" in payload:
                    return payload["chunks"]
            else:
                with open(path, "rb") as fh:
                    payload = pickle.load(fh)
                if isinstance(payload, list):
                    return payload
        except Exception as e:
            print(f"Failed to load chunks from {path}:", e)
    return []
|
| 110 |
+
|
| 111 |
+
def load_faiss_index(index_path: str):
    """Load the global FAISS index (and its companion text chunks) from disk.

    Populates the module globals FAISS_INDEX and TEXT_CHUNKS. No-ops with a
    message when FAISS is unavailable or the path is missing; on a read
    failure FAISS_INDEX is reset to None.
    """
    global FAISS_INDEX, TEXT_CHUNKS
    if not HAS_FAISS:
        print("FAISS not installed. Skipping index load.")
        return
    if not os.path.exists(index_path):
        print("Faiss index path does not exist:", index_path)
        return
    try:
        FAISS_INDEX = faiss.read_index(index_path)
        # try to load chunks from companion files
        TEXT_CHUNKS = try_load_chunks_from_disk(index_path)
        if not TEXT_CHUNKS:
            # Index without chunks can still be searched, but the hits
            # cannot be mapped back to text.
            print("Warning: Faiss index loaded but no companion text chunks found.")
            print("Provide a companion .chunks.json or .chunks.pkl file with a list of text chunks.")
    except Exception as e:
        print("Failed to load faiss index:", e)
        FAISS_INDEX = None
|
| 129 |
+
|
| 130 |
+
def get_nearest_context(query_emb: Optional[List[float]] = None, k: int = 5, query_vector: Optional[List[float]] = None) -> str:
    """Return concatenated top-k chunks for a query.

    NOTE(review): *query_emb* is accepted but never used -- only
    *query_vector* drives the search; confirm callers before removing it.
    Returns "" when no index is loaded, no vector is supplied, or the
    search fails.
    """
    if FAISS_INDEX is None or not HAS_FAISS:
        return ""
    try:
        # ... rest of the existing code remains the same
        if query_vector is None:
            # no embedding generation in this script: rely on an external embedding or LLM to provide a context id
            return ""
        import numpy as np
        vec = np.array([query_vector], dtype='float32')
        # D: distances, I: chunk indices into TEXT_CHUNKS.
        D, I = FAISS_INDEX.search(vec, k)
        idxs = I[0].tolist()
        texts = []
        for idx in idxs:
            # Guard against ids outside the loaded chunk list.
            if 0 <= idx < len(TEXT_CHUNKS):
                texts.append(TEXT_CHUNKS[idx])
        return "\n\n".join(texts)
    except Exception as e:
        print("Faiss search failed:", e)
        return ""
|
| 151 |
+
|
| 152 |
+
def get_faiss_context(k=3):
    """Sample up to *k* random text chunks, newline-joined; "" when unavailable."""
    if not (HAS_FAISS and TEXT_CHUNKS):
        return ""
    sample_size = min(k, len(TEXT_CHUNKS))
    picked = random.sample(TEXT_CHUNKS, sample_size)
    return "\n".join(picked)
|
| 157 |
+
|
| 158 |
+
# Initialize knowledge base only if FAISS is available
knowledge = None
if HAS_FAISS:
    knowledge = KnowledgeSource()
else:
    print("FAISS not available - KnowledgeSource disabled")
    # Create a dummy knowledge object with empty methods
    class DummyKnowledge:
        """Stand-in for KnowledgeSource when FAISS is missing: same lookup
        method, always returns no context."""
        def get_relevant_context(self, *args, **kwargs):
            # Mirrors KnowledgeSource.get_relevant_context's empty result.
            return []
    knowledge = DummyKnowledge()
|
llm_service.py
ADDED
|
@@ -0,0 +1,948 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# llm_service.py
|
| 2 |
+
import pyodbc
|
| 3 |
+
import os
|
| 4 |
+
import json
|
| 5 |
+
import uuid
|
| 6 |
+
import random
|
| 7 |
+
import threading
|
| 8 |
+
from typing import Dict, List, Optional
|
| 9 |
+
from datetime import datetime
|
| 10 |
+
|
| 11 |
+
from config import DEFAULT_N_QUESTIONS, DEFAULT_BATCH_SIZE, MAX_QUESTIONS, COLOR_KEYS, DOMAINS
|
| 12 |
+
|
| 13 |
+
# Try importing LLM libraries
|
| 14 |
+
try:
|
| 15 |
+
from pydantic import BaseModel, Field
|
| 16 |
+
from langchain_core.prompts import ChatPromptTemplate
|
| 17 |
+
from langchain_core.output_parsers import PydanticOutputParser, StrOutputParser
|
| 18 |
+
from langchain_openai import ChatOpenAI
|
| 19 |
+
HAS_LLM_STACK = True
|
| 20 |
+
HAS_LLM = True
|
| 21 |
+
except Exception:
|
| 22 |
+
HAS_LLM_STACK = False
|
| 23 |
+
HAS_LLM = False
|
| 24 |
+
|
| 25 |
+
class Option(BaseModel):
    """One answer option: display text plus the personality color it encodes."""
    # Option wording shown to the user.
    text: str
    # Expected to be one of blue/green/red/yellow -- TODO confirm where validated.
    color: str

class QAItem(BaseModel):
    """A single generated question with exactly four color-coded options."""
    question: str
    # NOTE(review): min_items/max_items are the pydantic v1 spellings; v2
    # renamed them to min_length/max_length -- confirm the installed version.
    options: List[Option] = Field(min_items=4, max_items=4)

class BatchQA(BaseModel):
    """Envelope for a batch of generated questions (at least one item)."""
    items: List[QAItem] = Field(..., min_items=1)
|
| 35 |
+
SYSTEM_PROMPT = (
|
| 36 |
+
"You write marriage compatibility assessment questions that reveal four personality colors through forced choices:\n"
|
| 37 |
+
"- blue=analytical, fact-based (positive: thorough, precise | negative: overly critical, data-obsessed)\n"
|
| 38 |
+
"- green=organized, process-oriented (positive: systematic, reliable | negative: rigid, bureaucratic)\n"
|
| 39 |
+
"- red=decisive, action-oriented (positive: direct, results-driven | negative: impulsive, controlling)\n"
|
| 40 |
+
"- yellow=creative, big-picture (positive: innovative, visionary | negative: unrealistic, scattered)\n"
|
| 41 |
+
"\n"
|
| 42 |
+
"DISTRIBUTION REQUIREMENT:\n"
|
| 43 |
+
"For a 20-question assessment, you MUST generate:\n"
|
| 44 |
+
"1) 5 PROFILE-BASED questions (25%): Using ONLY user's current background (education, employment, hobbies, family)\n"
|
| 45 |
+
"2) 5 EXPECTATION-BASED questions (25%): Using ONLY user's relationship expectations (conflict style, financial preferences, values)\n"
|
| 46 |
+
"3) 10 CHARACTER-BASED questions (50%): About CURRENT behavior in various life situations\n"
|
| 47 |
+
"\n"
|
| 48 |
+
"CRITICAL RULES:\n"
|
| 49 |
+
"1) NO PREFIXES: Do NOT use phrases like 'Based on your profile' or 'Given your expectations' - embed naturally\n"
|
| 50 |
+
"2) Each option MUST clearly represent one color's typical behavior (include both positive and negative aspects)\n"
|
| 51 |
+
"3) Questions should force a choice that reveals CORE PERSONALITY TRAITS, not future plans\n"
|
| 52 |
+
"4) Use simple, everyday language but maintain situational specificity\n"
|
| 53 |
+
"5) Options should be practical actions someone would actually take (show both strengths and weaknesses)\n"
|
| 54 |
+
"6) Focus on CURRENT CHARACTER ANALYSIS, not future hypotheticals\n"
|
| 55 |
+
"7) Ensure each color option is distinctly different from others\n"
|
| 56 |
+
"8) Each option MUST be maximum 15 words - keep them concise and clear\n"
|
| 57 |
+
"9) AVOID future-focused questions about children, family planning, or long-term hypotheticals\n"
|
| 58 |
+
"10) Include some options that reveal potential negative/shadow aspects of each color\n"
|
| 59 |
+
"\n"
|
| 60 |
+
"QUESTION STYLE GUIDELINES:\n"
|
| 61 |
+
"- Profile questions: Embed background naturally (e.g., 'When learning something new, how do you approach it?' not 'Given your education...')\n"
|
| 62 |
+
"- Expectation questions: Embed values in scenarios (e.g., 'When managing shared expenses, what's your approach?')\n"
|
| 63 |
+
"- Character questions: Use everyday situations (e.g., 'When faced with an unexpected problem at work, what do you do first?')\n"
|
| 64 |
+
)
|
| 65 |
+
|
| 66 |
+
USER_PROMPT_BATCH = (
|
| 67 |
+
"Context (from Surrounded by Idiots or other corpus):\n{context}\n\n"
|
| 68 |
+
"Question Type: {question_type}\n\n" # Add this line
|
| 69 |
+
|
| 70 |
+
"User Profile (Current Background):\n"
|
| 71 |
+
"- Education: {education}\n"
|
| 72 |
+
"- Employment: {employment}\n"
|
| 73 |
+
"- Hobbies: {hobbies}\n"
|
| 74 |
+
"- Family Background: {family_type}\n"
|
| 75 |
+
"- Current Lifestyle: {current_lifestyle}\n"
|
| 76 |
+
"\n"
|
| 77 |
+
"User Relationship Expectations:\n"
|
| 78 |
+
"- Conflict Style: {conflict_style}\n"
|
| 79 |
+
"- Financial Style: {financial_style}\n"
|
| 80 |
+
"- Income Expectations: {income_range}\n"
|
| 81 |
+
"- Career Mobility: {relocation_willingness}\n"
|
| 82 |
+
"- Family Values: {family_values}\n"
|
| 83 |
+
"- Core Values: {core_values}\n"
|
| 84 |
+
"- Work-Life Balance: {lifestyle_pref}\n"
|
| 85 |
+
"- Social Preference: {social_pref}\n"
|
| 86 |
+
"- Ambition Level: {ambition_pref}\n"
|
| 87 |
+
"- Deal Breakers: {deal_breakers}\n"
|
| 88 |
+
"\n"
|
| 89 |
+
"Themes (array of short strings): {themes_json}\n"
|
| 90 |
+
"Previously asked questions: {previous_questions}\n\n"
|
| 91 |
+
"{format_instructions}\n\n"
|
| 92 |
+
|
| 93 |
+
"Generate {question_type} questions:\n"
|
| 94 |
+
"- If QUESTION TYPE is 'profile': Generate 5 questions using ONLY profile data (education, employment, hobbies, family background, current lifestyle)\n"
|
| 95 |
+
"- If QUESTION TYPE is 'expectation': Generate 5 questions using ONLY expectation data (conflict style, financial preferences, values, deal breakers)\n"
|
| 96 |
+
"- If QUESTION TYPE is 'character': Generate 10 questions about CURRENT behavior in various life situations\n\n"
|
| 97 |
+
|
| 98 |
+
"CRITICAL RULES:\n"
|
| 99 |
+
"1) DO NOT use prefixes like 'Based on your profile' or 'Considering your expectations'\n"
|
| 100 |
+
"2) Questions should be natural and flow conversationally\n"
|
| 101 |
+
"3) Focus on CURRENT traits and behaviors, not future plans\n"
|
| 102 |
+
"4) Each option must represent a clear personality color (blue=analytical, green=organized, red=decisive, yellow=creative)\n"
|
| 103 |
+
"5) Include both positive and negative aspects in options\n"
|
| 104 |
+
"6) Maximum 15 words per option\n"
|
| 105 |
+
"7) For profile questions: Reference background naturally without explicit labels\n"
|
| 106 |
+
"8) For expectation questions: Embed values naturally in the scenario\n"
|
| 107 |
+
"9) For character questions: Use everyday situations that reveal core personality\n"
|
| 108 |
+
)
|
| 109 |
+
|
| 110 |
+
# LangChain pipeline for batch question generation. Both stay None when the
# LLM stack or the OpenAI key is unavailable; callers must check for that.
PARSER_BATCH = None
CHAIN_BATCH = None

if HAS_LLM_STACK and os.getenv("OPENAI_API_KEY"):
    try:
        # Parses the model's JSON reply into a validated BatchQA object.
        PARSER_BATCH = PydanticOutputParser(pydantic_object=BatchQA)

        def build_batch_chain():
            """Assemble prompt -> gpt-4o-mini (JSON mode) -> BatchQA parser."""
            llm = ChatOpenAI(
                model="gpt-4o-mini",
                temperature=0.7,
                max_retries=2,
                timeout=30,
                # Force the model to emit a single JSON object.
                model_kwargs={"response_format": {"type": "json_object"}},
            )
            prompt = ChatPromptTemplate.from_messages([
                ("system", SYSTEM_PROMPT),
                ("user", USER_PROMPT_BATCH),
            ])
            return prompt | llm | PARSER_BATCH

        CHAIN_BATCH = build_batch_chain()
    except Exception as e:
        print("Failed to build CHAIN_BATCH:", e)
        CHAIN_BATCH = None
|
| 135 |
+
|
| 136 |
+
def ensure_valid_colors(options: List[Dict]) -> List[Dict]:
    """Normalize answer options to exactly one option per personality color.

    Keeps the first non-empty option for each recognized color (text capped
    at 80 characters), then backfills any color missing from the input with
    a stock default. At most four options are returned, in the order kept
    options came first followed by backfilled colors in COLOR_KEYS order.
    """
    fallback_texts = {
        "blue": "Verify facts and numbers",
        "green": "Outline a clear process",
        "red": "Coordinate people and act",
        "yellow": "Propose a fresh idea",
    }
    normalized: List[Dict] = []
    used_colors = set()
    for option in options:
        color = str(option.get("color", "")).lower()
        text = str(option.get("text", "")).strip()
        if text and color in COLOR_KEYS and color not in used_colors:
            used_colors.add(color)
            normalized.append({"text": text[:80], "color": color})
    # Backfill every color the input omitted so all four are always present.
    normalized.extend(
        {"text": fallback_texts[color], "color": color}
        for color in COLOR_KEYS
        if color not in used_colors
    )
    return normalized[:4]
|
| 153 |
+
|
| 154 |
+
def summarize_profile(profile: Dict) -> Dict:
    """Condense a Marriage-table row into the fields used as LLM context.

    Copies every known Marriage column whose value is present — i.e. not
    None, not an empty string and not an empty list — into a fresh dict,
    preserving column order.
    """
    known_columns = (
        "user_id", "full_name", "gender", "current_city", "marital_status",
        "education_level", "employment_status", "number_of_siblings", "family_type",
        "hobbies_interests", "conflict_approach", "financial_style", "income_range",
        "relocation_willingness", "height", "skin_tone", "languages_spoken", "country",
        "blood_group", "religion", "dual_citizenship", "siblings_position",
        "parents_living_status", "live_with_parents", "support_parents_financially",
        "family_communication_frequency", "food_preference", "smoking_habit",
        "alcohol_habit", "daily_routine", "fitness_level", "own_pets", "travel_preference",
        "relaxation_mode", "job_role", "work_experience_years", "career_aspirations",
        "field_of_study", "remark", "children_timeline", "open_to_adoption",
        "deal_breakers", "other_non_negotiables", "health_constraints", "live_with_inlaws",
    )
    return {
        column: profile.get(column)
        for column in known_columns
        if profile.get(column) not in (None, "", [])
    }
|
| 179 |
+
|
| 180 |
+
def offline_generate_batch(themes: List[str], state: Dict, context: str = "") -> List[Dict]:
    """Template fallback used when the LLM chain is unavailable.

    Builds one question per theme, flavored with whatever profile hints are
    present (conflict style, financial style, one randomly chosen hobby),
    and attaches the standard four color-coded options in shuffled order.

    Args:
        themes: theme strings; text after " around " becomes the topic.
        state: question-flow state; only state["profile"] is read.
        context: optional free text; the first 120 chars are appended as a note.

    Returns:
        One {"question", "options", "source"} dict per theme.
    """
    prof = state.get("profile", {}) or {}
    name = prof.get("full_name") or "Partner"
    conflict = (prof.get("conflict_approach") or "").lower()
    money = (prof.get("financial_style") or "").lower()
    hobby = None
    if isinstance(prof.get("hobbies_interests"), list) and prof["hobbies_interests"]:
        hobby = random.choice(prof["hobbies_interests"])

    def flavor():
        # Human-readable summary of whichever profile hints exist.
        bits = []
        if conflict:
            bits.append(f"{conflict} style")
        if money:
            bits.append(f"{money} finances")
        if hobby:
            bits.append(f"likes {hobby}")
        return ", ".join(bits)

    items = []
    for theme in themes:
        short = theme.split(" around ")[-1].strip()

        # BUG FIX: `tail` was previously computed but never used; append the
        # profile flavor to the question as the conditional clearly intended.
        tail = (", " + flavor()) if flavor() else ""
        q = f"{name}, what would you do about {short}{tail}?".strip()

        # incorporate small bit from context if available (first 120 chars)
        if context:
            ctx_snip = context.replace('\n', ' ')[:120]
            q = f"{q} (Note: {ctx_snip})"

        # Keep questions concise (max 20 words).
        if len(q.split()) > 20:
            q = " ".join(q.split()[:20])

        opts = [
            {"text": "Check data and facts", "color": "blue"},
            {"text": "Draft a step-by-step plan", "color": "green"},
            {"text": "Align people and act", "color": "red"},
            {"text": "Brainstorm bold ideas", "color": "yellow"},
        ]
        random.shuffle(opts)
        items.append({"question": q, "options": opts, "source": "fallback"})
    return items
|
| 225 |
+
|
| 226 |
+
|
| 227 |
+
def generate_category_specific_options(question_type: str, question_text: str, profile_data: Dict = None, expectation_data: Dict = None) -> List[Dict]:
    """Build four color-coded answer options tailored to a question.

    One option is produced per color in COLOR_KEYS. The base text depends on
    `question_type` ("profile" / "expectation" / "character"); if the question
    text mentions a known theme (learning, problem, conflict, money, family)
    the themed variant replaces the base text. With 30% probability per
    option, a "shadow" (negative-aspect) suffix is appended when it fits the
    15-word limit; each text is finally truncated to 80 characters.

    NOTE(review): profile_data and expectation_data are currently unused —
    kept for interface compatibility with callers that pass them.
    """
    question_lower = question_text.lower()

    # Extract key themes from the question text for better contextualization.
    themes_in_question = []
    for theme in ["learning", "problem", "conflict", "money", "family", "work", "social", "stress", "decision", "plan"]:
        if theme in question_lower:
            themes_in_question.append(theme)

    # Default option templates for each color, keyed by question category.
    base_options = {
        "blue": {
            "profile": "Research thoroughly and analyze all available data",
            "expectation": "Gather detailed information before forming an opinion",
            "character": "Analyze the situation carefully with facts and logic"
        },
        "green": {
            "profile": "Follow a structured, step-by-step approach",
            "expectation": "Establish clear rules and procedures",
            "character": "Create an organized plan and stick to it"
        },
        "red": {
            "profile": "Take decisive action to address the situation",
            "expectation": "Take charge and make things happen quickly",
            "character": "Act immediately and coordinate people involved"
        },
        "yellow": {
            "profile": "Explore creative possibilities and new approaches",
            "expectation": "Consider innovative solutions and future potential",
            "character": "Brainstorm creative ideas and possibilities"
        }
    }

    # Theme-specific overrides; only themes in this dict get variants
    # (the remaining detectable themes — work, social, stress, decision,
    # plan — fall through to the base text).
    contextual_variations = {
        "learning": {
            "blue": "Study methodically and verify all information",
            "green": "Follow the curriculum in an organized manner",
            "red": "Jump into practical application immediately",
            "yellow": "Explore unconventional learning methods"
        },
        "problem": {
            "blue": "Analyze root causes with data",
            "green": "Systematically troubleshoot each component",
            "red": "Take immediate corrective action",
            "yellow": "Find innovative workarounds"
        },
        "conflict": {
            "blue": "Analyze perspectives logically",
            "green": "Establish fair mediation process",
            "red": "Address it directly and decisively",
            "yellow": "Find creative compromise"
        },
        "money": {
            "blue": "Analyze financial data thoroughly",
            "green": "Budget systematically and track expenses",
            "red": "Make decisive investment choices",
            "yellow": "Explore unconventional earning opportunities"
        },
        "family": {
            "blue": "Analyze family dynamics logically",
            "green": "Maintain family traditions and routines",
            "red": "Take leadership in family matters",
            "yellow": "Introduce new family activities"
        }
    }

    # Build one option per color in COLOR_KEYS order (order matters for the
    # RNG call sequence below).
    options = []
    for color in COLOR_KEYS:
        base_text = base_options[color][question_type]

        # Replace the base text with a themed variant if one matches.
        # Iteration order of contextual_variations decides theme priority.
        for theme, variations in contextual_variations.items():
            if theme in themes_in_question:
                base_text = variations[color]
                break

        # Shadow/negative-aspect suffixes for realism.
        # NOTE(review): rebuilt on every loop iteration; could be hoisted,
        # left in place here to keep the block byte-identical.
        shadow_aspects = {
            "blue": {
                "profile": " (but can get stuck in analysis)",
                "expectation": " (but may overanalyze)",
                "character": " (but can be overly critical)"
            },
            "green": {
                "profile": " (but can be too rigid)",
                "expectation": " (but may create bureaucracy)",
                "character": " (but can resist change)"
            },
            "red": {
                "profile": " (but can be impulsive)",
                "expectation": " (but may be controlling)",
                "character": " (but can overlook details)"
            },
            "yellow": {
                "profile": " (but can be unrealistic)",
                "expectation": " (but may lack follow-through)",
                "character": " (but can be scattered)"
            }
        }

        # Only add shadow aspects occasionally (30% chance) for variety.
        if random.random() < 0.3:
            shadow = shadow_aspects[color][question_type]
            # Ensure we don't exceed the 15-word limit.
            if len(base_text.split()) + len(shadow.split()) <= 15:
                base_text += shadow

        options.append({
            "text": base_text[:80],  # Limit length
            "color": color
        })

    return options
|
| 345 |
+
|
| 346 |
+
|
| 347 |
+
|
| 348 |
+
|
| 349 |
+
|
| 350 |
+
|
| 351 |
+
def generate_batch_questions(themes: List[str], state: Dict, context: str = "", previous_questions: List[str] = None) -> List[Dict]:
    """Generate the full 20-question batch: 5 profile, 5 expectation, 10 character.

    Uses the module-level LLM chain (CHAIN_BATCH) when it was successfully
    built at import time; otherwise — or on any LLM error — falls back to
    template generation that enforces the same 5/5/10 distribution.

    Args:
        themes: character-question themes; only the first 10 are used.
        state: question-flow state; state["profile"] supplies Marriage-table data.
        context: extra free text for the character prompt (truncated to 2000 chars).
        previous_questions: already-asked question texts, forwarded to the LLM
            so it avoids repeats.

    Returns:
        Up to 20 dicts with keys "question", "options", "source",
        "question_type".
    """
    # Extract ALL data from Marriage table
    profile = state.get("profile", {})
    user_id = profile.get("user_id")

    try:
        from database import fetch_expectation_data
        # NOTE(review): assumes this returns a dict keyed by
        # ExpectationResponse column names — confirm against database.py.
        expectation_data = fetch_expectation_data(user_id) if user_id else {}
    except ImportError:
        expectation_data = {}

    # Extract ALL profile data from Marriage table, defaulting every missing
    # field to the literal "Not specified" so the prompts never show None.
    profile_data = {
        # Personal Information
        "full_name": profile.get("full_name", "Not specified"),

        "gender": profile.get("gender", "Not specified"),
        "current_city": profile.get("current_city", "Not specified"),
        "country": profile.get("country", "Not specified"),
        "marital_status": profile.get("marital_status", "Not specified"),

        # Education & Career
        "education_level": profile.get("education_level", "Not specified"),
        "employment_status": profile.get("employment_status", "Not specified"),
        "job_role": profile.get("job_role", "Not specified"),
        "work_experience_years": profile.get("work_experience_years", "Not specified"),
        "career_aspirations": profile.get("career_aspirations", "Not specified"),
        "field_of_study": profile.get("field_of_study", "Not specified"),
        "income_range": profile.get("income_range", "Not specified"),

        # Family & Background
        "number_of_siblings": profile.get("number_of_siblings", "Not specified"),
        "family_type": profile.get("family_type", "Not specified"),
        "siblings_position": profile.get("siblings_position", "Not specified"),
        "parents_living_status": profile.get("parents_living_status", "Not specified"),
        "live_with_parents": profile.get("live_with_parents", "Not specified"),
        "support_parents_financially": profile.get("support_parents_financially", "Not specified"),
        "family_communication_frequency": profile.get("family_communication_frequency", "Not specified"),

        # Physical & Health
        "height": profile.get("height", "Not specified"),
        "skin_tone": profile.get("skin_tone", "Not specified"),
        "blood_group": profile.get("blood_group", "Not specified"),
        "health_constraints": profile.get("health_constraints", "Not specified"),
        "fitness_level": profile.get("fitness_level", "Not specified"),

        # Lifestyle & Habits (hobbies may be a list; stringified for the prompt)
        "hobbies_interests": str(profile.get("hobbies_interests", "Not specified")),
        "conflict_approach": profile.get("conflict_approach", "Not specified"),
        "financial_style": profile.get("financial_style", "Not specified"),
        "food_preference": profile.get("food_preference", "Not specified"),
        "smoking_habit": profile.get("smoking_habit", "Not specified"),
        "alcohol_habit": profile.get("alcohol_habit", "Not specified"),
        "daily_routine": profile.get("daily_routine", "Not specified"),
        "own_pets": profile.get("own_pets", "Not specified"),
        "travel_preference": profile.get("travel_preference", "Not specified"),
        "relaxation_mode": profile.get("relaxation_mode", "Not specified"),

        # Languages & Relocation
        "languages_spoken": profile.get("languages_spoken", "Not specified"),
        "relocation_willingness": profile.get("relocation_willingness", "Not specified"),

        # Religion & Citizenship
        "religion": profile.get("religion", "Not specified"),
        "dual_citizenship": profile.get("dual_citizenship", "Not specified"),

        # Relationship Preferences
        "children_timeline": profile.get("children_timeline", "Not specified"),
        "open_to_adoption": profile.get("open_to_adoption", "Not specified"),
        "deal_breakers": profile.get("deal_breakers", "Not specified"),
        "other_non_negotiables": profile.get("other_non_negotiables", "Not specified"),
        "live_with_inlaws": profile.get("live_with_inlaws", "Not specified"),

        # Additional Info
        "remark": profile.get("remark", "Not specified"),
    }

    # Extract ALL expectation data from ExpectationResponse table,
    # with the same "Not specified" defaulting.
    expectation_data_dict = {
        # Basic Preferences
        "pref_age_range": expectation_data.get("pref_age_range", "Not specified"),
        "pref_height_range": expectation_data.get("pref_height_range", "Not specified"),
        "pref_current_city": expectation_data.get("pref_current_city", "Not specified"),
        "pref_countries": expectation_data.get("pref_countries", "Not specified"),
        "pref_languages": expectation_data.get("pref_languages", "Not specified"),
        "pref_education_level": expectation_data.get("pref_education_level", "Not specified"),
        "pref_employment_status": expectation_data.get("pref_employment_status", "Not specified"),

        # Health & Lifestyle
        "health_constraints": expectation_data.get("health_constraints", "Not specified"),
        "pref_diet": expectation_data.get("pref_diet", "Not specified"),
        "accept_smoking": expectation_data.get("accept_smoking", "Not specified"),
        "accept_alcohol": expectation_data.get("accept_alcohol", "Not specified"),
        "pref_fitness": expectation_data.get("pref_fitness", "Not specified"),

        # Family & Living
        "pref_family_type": expectation_data.get("pref_family_type", "Not specified"),
        "live_with_inlaws": expectation_data.get("live_with_inlaws", "Not specified"),
        "children_timeline": expectation_data.get("children_timeline", "Not specified"),
        "open_to_adoption": expectation_data.get("open_to_adoption", "Not specified"),
        "pref_live_with_parents": expectation_data.get("pref_live_with_parents", "Not specified"),
        "financial_support_to_parents": expectation_data.get("financial_support_to_parents", "Not specified"),

        # Conflict & Finance
        "pref_conflict_approach": expectation_data.get("pref_conflict_approach", "Not specified"),
        "pref_financial_style": expectation_data.get("pref_financial_style", "Not specified"),
        "pref_income_range": expectation_data.get("pref_income_range", "Not specified"),

        # Values & Compatibility
        "religion_alignment": expectation_data.get("religion_alignment", "Not specified"),
        "pref_shared_hobbies": expectation_data.get("pref_shared_hobbies", "Not specified"),
        "travel_pref": expectation_data.get("travel_pref", "Not specified"),
        "pet_pref": expectation_data.get("pet_pref", "Not specified"),

        # Career & Relocation
        "pref_partner_relocation": expectation_data.get("pref_partner_relocation", "Not specified"),
        "pref_career_aspirations": expectation_data.get("pref_career_aspirations", "Not specified"),

        # Additional Preferences
        "marital_status": expectation_data.get("marital_status", "Not specified"),
        "skin_tone": expectation_data.get("skin_tone", "Not specified"),
        "daily_routine": expectation_data.get("daily_routine", "Not specified"),
        "family_communication_frequency": expectation_data.get("family_communication_frequency", "Not specified"),
        "relaxation_mode": expectation_data.get("relaxation_mode", "Not specified"),

        # Non-negotiables
        "deal_breakers": expectation_data.get("deal_breakers", "Not specified"),
        "other_non_negotiables": expectation_data.get("other_non_negotiables", "Not specified"),

        # Summary
        "expectation_summary": expectation_data.get("expectation_summary", "Not specified"),
        "_mandatory_fields": expectation_data.get("_mandatory_fields", "Not specified"),
    }

    if CHAIN_BATCH is not None and PARSER_BATCH is not None:
        try:
            items: List[Dict] = []

            # 1. PROFILE-BASED QUESTIONS (5 questions) - Using ALL Marriage table columns
            profile_prompt = {
                "state": json.dumps(state, ensure_ascii=False),
                "themes_json": json.dumps(["profile"] * 5, ensure_ascii=False),
                "previous_questions": json.dumps(previous_questions or [], ensure_ascii=False),
                "format_instructions": PARSER_BATCH.get_format_instructions(),
                "context": "Generate 5 PROFILE-BASED questions using ALL available user background information.",
                "question_type": "profile",

                # Use ALL profile data
                "education": f"{profile_data['education_level']} | {profile_data['field_of_study']}",
                "employment": f"{profile_data['employment_status']} | {profile_data['job_role']} ({profile_data['work_experience_years']} years)",
                "hobbies": profile_data['hobbies_interests'],
                "family_type": f"{profile_data['family_type']} | Siblings: {profile_data['number_of_siblings']} | Position: {profile_data['siblings_position']}",
                "current_lifestyle": f"City: {profile_data['current_city']}, {profile_data['country']} | Height: {profile_data['height']} | Languages: {profile_data['languages_spoken']} | Religion: {profile_data['religion']} | Fitness: {profile_data['fitness_level']} | Diet: {profile_data['food_preference']} | Habits: Smoking: {profile_data['smoking_habit']}, Alcohol: {profile_data['alcohol_habit']}",

                # Expectation data marked as irrelevant so the model ignores it
                "conflict_style": "IRRELEVANT_FOR_PROFILE_QUESTIONS",
                "financial_style": "IRRELEVANT_FOR_PROFILE_QUESTIONS",
                "income_range": "IRRELEVANT_FOR_PROFILE_QUESTIONS",
                "relocation_willingness": "IRRELEVANT_FOR_PROFILE_QUESTIONS",
                "family_values": "IRRELEVANT_FOR_PROFILE_QUESTIONS",
                "core_values": "IRRELEVANT_FOR_PROFILE_QUESTIONS",
                "lifestyle_pref": "IRRELEVANT_FOR_PROFILE_QUESTIONS",
                "social_pref": "IRRELEVANT_FOR_PROFILE_QUESTIONS",
                "ambition_pref": "IRRELEVANT_FOR_PROFILE_QUESTIONS",
                "deal_breakers": "IRRELEVANT_FOR_PROFILE_QUESTIONS",
            }

            result = CHAIN_BATCH.invoke(profile_prompt)
            profile_items = get_items_from_result(result)

            # LLM supplies the question text; options are generated locally
            # so the color mapping stays deterministic.
            for qa in profile_items[:5]:
                out = qa.dict() if hasattr(qa, "dict") else dict(qa)
                out["options"] = generate_category_specific_options(
                    "profile",
                    out.get("question", ""),
                    profile_data,
                    None
                )
                out["source"] = "llm_profile"
                out["question_type"] = "profile"
                random.shuffle(out["options"])
                items.append(out)

            # 2. EXPECTATION-BASED QUESTIONS (5 questions) - Using ALL ExpectationResponse columns
            expectation_prompt = {
                "state": json.dumps(state, ensure_ascii=False),
                "themes_json": json.dumps(["expectation"] * 5, ensure_ascii=False),
                "previous_questions": json.dumps([q["question"] for q in items] + (previous_questions or []), ensure_ascii=False),
                "format_instructions": PARSER_BATCH.get_format_instructions(),
                "context": "Generate 5 EXPECTATION-BASED questions using ALL relationship preferences and expectations.",
                "question_type": "expectation",

                # Minimal profile context
                "education": "Background context only",
                "employment": "Background context only",
                "hobbies": "Background context only",
                "family_type": "Background context only",
                "current_lifestyle": "General context",

                # Use ALL expectation data
                "conflict_style": f"{expectation_data_dict['pref_conflict_approach']}",
                "financial_style": f"{expectation_data_dict['pref_financial_style']} | Income: {expectation_data_dict['pref_income_range']}",
                "income_range": expectation_data_dict['pref_income_range'],
                "relocation_willingness": f"{expectation_data_dict['pref_partner_relocation']}",
                "family_values": f"{expectation_data_dict['pref_family_type']} | Live with in-laws: {expectation_data_dict['live_with_inlaws']} | Children timeline: {expectation_data_dict['children_timeline']}",
                "core_values": f"Religion: {expectation_data_dict['religion_alignment']} | Deal breakers: {expectation_data_dict['deal_breakers']}",
                "lifestyle_pref": f"Fitness: {expectation_data_dict['pref_fitness']} | Diet: {expectation_data_dict['pref_diet']} | Daily routine: {expectation_data_dict['daily_routine']}",
                "social_pref": f"Hobbies: {expectation_data_dict['pref_shared_hobbies']} | Travel: {expectation_data_dict['travel_pref']} | Pets: {expectation_data_dict['pet_pref']}",
                "ambition_pref": f"Career: {expectation_data_dict['pref_career_aspirations']} | Education: {expectation_data_dict['pref_education_level']}",
                "deal_breakers": f"{expectation_data_dict['deal_breakers']} | Other non-negotiables: {expectation_data_dict['other_non_negotiables']}",
            }

            result = CHAIN_BATCH.invoke(expectation_prompt)
            expectation_items = get_items_from_result(result)

            for qa in expectation_items[:5]:
                out = qa.dict() if hasattr(qa, "dict") else dict(qa)
                out["options"] = generate_category_specific_options(
                    "expectation",
                    out.get("question", ""),
                    None,
                    expectation_data_dict
                )
                out["source"] = "llm_expectation"
                out["question_type"] = "expectation"
                random.shuffle(out["options"])
                items.append(out)

            # 3. CHARACTER-BASED QUESTIONS (10 questions) - Using data from BOTH tables
            character_prompt = {
                "state": json.dumps(state, ensure_ascii=False),
                "themes_json": json.dumps(themes[:10], ensure_ascii=False),
                "previous_questions": json.dumps([q["question"] for q in items] + (previous_questions or []), ensure_ascii=False),
                "format_instructions": PARSER_BATCH.get_format_instructions(),
                "context": context[:2000] + "\n\nGenerate 10 CHARACTER-BASED questions using ALL available data.",
                "question_type": "character",

                # All data from Marriage table
                "education": profile_data['education_level'],
                "employment": profile_data['employment_status'],
                "hobbies": profile_data['hobbies_interests'],
                "family_type": profile_data['family_type'],
                "current_lifestyle": f"{profile_data['current_city']}, {profile_data['country']} | {profile_data['daily_routine']} | Relaxation: {profile_data['relaxation_mode']}",

                # All data from ExpectationResponse table
                "conflict_style": expectation_data_dict['pref_conflict_approach'],
                "financial_style": expectation_data_dict['pref_financial_style'],
                "income_range": expectation_data_dict['pref_income_range'],
                "relocation_willingness": expectation_data_dict['pref_partner_relocation'],
                "family_values": expectation_data_dict['pref_family_type'],
                "core_values": expectation_data_dict['religion_alignment'],
                "lifestyle_pref": expectation_data_dict['pref_fitness'],
                "social_pref": expectation_data_dict['pref_shared_hobbies'],
                "ambition_pref": expectation_data_dict['pref_career_aspirations'],
                "deal_breakers": expectation_data_dict['deal_breakers'],
            }

            result = CHAIN_BATCH.invoke(character_prompt)
            character_items = get_items_from_result(result)

            for qa in character_items[:10]:
                out = qa.dict() if hasattr(qa, "dict") else dict(qa)
                out["options"] = generate_category_specific_options(
                    "character",
                    out.get("question", ""),
                    profile_data,
                    expectation_data_dict
                )
                out["source"] = "llm_character"
                out["question_type"] = "character"
                random.shuffle(out["options"])
                items.append(out)

            # Verify we have exactly 20 questions
            if len(items) == 20:
                return items[:20]
            else:
                # If LLM didn't generate enough, fill with fallback
                return fill_missing_questions(items, themes, state, profile_data, expectation_data_dict, context)

        except Exception as e:
            # Any LLM/parsing failure degrades to the template generator.
            print("LLM batch generation failed:", e)
            return generate_fallback_with_distribution(themes, state, profile_data, expectation_data_dict, context)
    else:
        return generate_fallback_with_distribution(themes, state, profile_data, expectation_data_dict, context)
|
| 636 |
+
|
| 637 |
+
|
| 638 |
+
|
| 639 |
+
|
| 640 |
+
def get_items_from_result(result):
    """Extract the list of question items from an LLM chain result.

    Accepts either a parsed pydantic object with an ``items`` field or a
    plain dict with an ``"items"`` key; anything else yields an empty list.

    BUG FIX: the previous implementation checked ``hasattr(result, "items")``
    first, which is true for every dict (``dict.items`` is a method), so a
    dict result returned a bound method instead of the item list.
    """
    if isinstance(result, dict):
        return result.get("items", [])
    items = getattr(result, "items", None)
    # Guard against other dict-like objects whose `.items` is a method.
    if items is not None and not callable(items):
        return items
    return []
|
| 648 |
+
|
| 649 |
+
def fill_missing_questions(current_items: List[Dict], themes: List[str], state: Dict,
                           profile_data: Dict, expectation_data: Dict, context: str = "") -> List[Dict]:
    """Top up a question list to the 5/5/10 profile/expectation/character split.

    Appends fallback-generated questions per category until each quota is
    met, then truncates to 20 items. `context` is accepted for signature
    parity with the LLM path but is not used here.
    """
    topped_up = current_items.copy()

    def count_of(kind: str) -> int:
        return sum(1 for q in topped_up if q.get("question_type") == kind)

    # Profile and expectation quotas: 5 each.
    for _ in range(5 - count_of("profile")):
        topped_up.append(generate_profile_question(state, profile_data))
    for _ in range(5 - count_of("expectation")):
        topped_up.append(generate_expectation_question(state, expectation_data))

    # Character quota: 10, cycling through the supplied themes.
    n_char = count_of("character")
    while n_char < 10:
        theme = themes[n_char % len(themes)] if themes else "daily situation"
        topped_up.append(generate_character_question(theme, state))
        n_char += 1

    return topped_up[:20]
|
| 679 |
+
|
| 680 |
+
def generate_profile_question(state: Dict, profile_data: Dict) -> Dict:
    """Fallback generator for a single profile-based question.

    Picks a topic from a fixed rotation based on how many profile questions
    already appear in state["history"], then builds shuffled color-coded
    options for it.

    Fixes vs. previous version: removed the unused `prof`/`name` locals, and
    topics now cycle with modulo instead of snapping back to topic 0 forever
    once the index ran past the list.

    Args:
        state: question-flow state; state["history"] is read to pick the topic.
        profile_data: summarized Marriage-table data passed to the option builder.

    Returns:
        A {"question", "options", "source", "question_type"} dict.
    """
    profile_topics = [
        ("education", "How does your educational background shape how you approach complex information?"),
        ("employment", "What methods from your professional life do you apply to personal challenges?"),
        ("hobbies", "How do your personal interests influence your approach to new experiences?"),
        ("family", "What communication patterns from your family background feel most natural to you?"),
        ("background", "How does your personal history affect your current decision-making style?"),
    ]

    # Rotate through topics so repeated calls don't all ask the same thing.
    asked = sum(1 for q in state.get("history", []) if q.get("question_type") == "profile")
    _topic, question = profile_topics[asked % len(profile_topics)]

    # Generate contextual options and randomize their order.
    options = generate_category_specific_options("profile", question, profile_data, None)
    random.shuffle(options)

    return {
        "question": question,
        "options": options,
        "source": "fallback_profile",
        "question_type": "profile",
    }
|
| 709 |
+
|
| 710 |
+
def generate_expectation_question(state: Dict, expectation_data: Dict) -> Dict:
    """Fallback generator for a single expectation-based question.

    Selects a topic by counting expectation questions already present in
    state["history"]; once past the end of the topic list, selection
    restarts at the first topic. Options are built contextually and shuffled.
    """
    expectation_topics = [
        ("conflict", "When tensions arise, what's your instinctive approach to resolution?"),
        ("values", "How do your core principles guide your everyday choices?"),
        ("finance", "What mindset drives your approach to shared financial decisions?"),
        ("balance", "How do you navigate between personal needs and relationship commitments?"),
        ("dealbreakers", "What boundaries are non-negotiable for you in close relationships?"),
    ]

    asked_so_far = sum(
        1 for entry in state.get("history", [])
        if entry.get("question_type") == "expectation"
    )
    # Past the end of the topic list, restart at the first topic.
    idx = asked_so_far if asked_so_far < len(expectation_topics) else 0
    _topic, question = expectation_topics[idx]

    options = generate_category_specific_options("expectation", question, None, expectation_data)
    random.shuffle(options)

    return {
        "question": question,
        "options": options,
        "source": "fallback_expectation",
        "question_type": "expectation",
    }
|
| 736 |
+
|
| 737 |
+
|
| 738 |
+
def generate_character_question(theme: str, state: Dict) -> Dict:
    """Generate a single character-type question derived from *theme*.

    Args:
        theme: Free-text theme; only the portion after the last
            ``" around "`` is used, truncated to 50 characters.
        state: Session state. Kept for signature compatibility with the
            other fallback generators; the previous implementation read
            the profile name from it but never used the value.

    Returns:
        Dict with ``question``, shuffled ``options``, ``source`` and
        ``question_type`` keys.
    """
    # Condense the theme into a short phrase for the question template.
    # (Removed unused `prof`/`name` locals from the original.)
    short_theme = theme.split(" around ")[-1].strip()[:50]
    question = f"When {short_theme}, what's your typical response?"

    # Contextual options, shuffled so color order carries no signal.
    options = generate_category_specific_options("character", question, None, None)
    random.shuffle(options)

    return {
        "question": question,
        "options": options,
        "source": "fallback_character",
        "question_type": "character",
    }
|
| 756 |
+
|
| 757 |
+
|
| 758 |
+
|
| 759 |
+
|
| 760 |
+
def generate_fallback_with_distribution(themes: List[str], state: Dict, profile_data: Dict, expectation_data: Dict, context: str = "") -> List[Dict]:
    """Fallback question generator that enforces the 5-5-10 distribution.

    Produces exactly 20 questions: 5 profile-based, 5 expectation-based,
    and 10 character-based (one per theme). Every question carries the
    four color-archetype options, shuffled.

    Args:
        themes: Candidate character themes; only the first 10 are used.
        state: Session state; kept for call compatibility (the original
            read the profile name from it but never used it).
        profile_data: Profile fields (education, employment, hobbies,
            family_type, current_city). Missing keys now fall back to
            generic wording instead of raising ``KeyError``.
        expectation_data: Expectation fields (conflict_style,
            financial_style, family_values, lifestyle_pref, deal_breakers).
        context: Unused; kept for signature compatibility.

    Returns:
        List of at most 20 question dicts.
    """

    def _shuffled(bank: List[Dict]) -> List[Dict]:
        # Copy so each question gets an independently shuffled option list.
        opts = [dict(o) for o in bank]
        random.shuffle(opts)
        return opts

    # One option bank per question category (text differs; colors fixed).
    profile_opts = [
        {"text": "Analyze data and research thoroughly before deciding", "color": "blue"},
        {"text": "Create a structured plan and follow established procedures", "color": "green"},
        {"text": "Take immediate action and coordinate with people involved", "color": "red"},
        {"text": "Brainstorm creative approaches and explore possibilities", "color": "yellow"},
    ]
    expectation_opts = [
        {"text": "Gather all relevant information and analyze carefully", "color": "blue"},
        {"text": "Follow a systematic process to evaluate options", "color": "green"},
        {"text": "Make a quick decision and implement immediately", "color": "red"},
        {"text": "Consider innovative solutions and future possibilities", "color": "yellow"},
    ]
    character_opts = [
        {"text": "Research facts and analyze details before acting", "color": "blue"},
        {"text": "Develop a step-by-step plan and follow it", "color": "green"},
        {"text": "Take charge and coordinate people to solve it", "color": "red"},
        {"text": "Explore creative ideas and unconventional solutions", "color": "yellow"},
    ]

    items: List[Dict] = []

    # ROBUSTNESS: .get() with generic fallbacks — the original indexed the
    # dicts directly and raised KeyError when a field was missing.
    education = profile_data.get("education", "educational")
    employment = profile_data.get("employment", "a professional")
    hobbies = profile_data.get("hobbies", "your hobbies")
    family_type = profile_data.get("family_type", "close-knit")
    current_city = profile_data.get("current_city", "your city")

    # 1. Five PROFILE-BASED questions.
    profile_sources = [
        ("education", f"How does your {education} background influence your approach to learning new things?"),
        ("employment", f"Given your work as {employment}, what problem-solving methods do you typically use?"),
        ("hobbies", f"When engaging in {hobbies}, how do you typically organize your activity?"),
        ("family_type", f"Growing up in a {family_type} family, what communication patterns feel most natural to you?"),
        ("current_city", f"Living in {current_city}, how do you adapt to your daily environment?"),
    ]
    for _source, question in profile_sources:
        items.append({
            "question": question,
            "options": _shuffled(profile_opts),
            "source": "fallback_profile",
            "question_type": "profile",
        })

    # 2. Five EXPECTATION-BASED questions.
    conflict_style = expectation_data.get("conflict_style", "your style")
    financial_style = expectation_data.get("financial_style", "your approach")
    family_values = expectation_data.get("family_values", "your values")
    lifestyle_pref = expectation_data.get("lifestyle_pref", "your preference")
    deal_breakers = (expectation_data.get("deal_breakers") or "certain boundaries")[:50]

    expectation_sources = [
        ("conflict_style", f"When facing disagreement ({conflict_style}), how do you typically respond?"),
        ("financial_style", f"Regarding money matters ({financial_style}), what's your immediate reaction to financial decisions?"),
        ("family_values", f"Considering your family values ({family_values}), how do you approach family-related decisions?"),
        ("work_life", f"With your work-life preference ({lifestyle_pref}), how do you manage daily priorities?"),
        ("deal_breakers", f"Given your deal breakers ({deal_breakers}), how do you establish personal limits?"),
    ]
    for _source, question in expectation_sources:
        items.append({
            "question": question,
            "options": _shuffled(expectation_opts),
            "source": "fallback_expectation",
            "question_type": "expectation",
        })

    # 3. Ten CHARACTER-BASED questions from the first 10 themes.
    for theme in themes[:10]:
        short = theme.split(" around ")[-1].strip()
        items.append({
            "question": f"When dealing with {short}, what is your typical approach?",
            "options": _shuffled(character_opts),
            "source": "fallback_character",
            "question_type": "character",
        })

    # Ensure we return at most 20 questions.
    return items[:20]
|
| 837 |
+
|
| 838 |
+
|
| 839 |
+
class SessionState:
    """In-memory state for one question-generation session.

    Tracks requested question counts, per-color answer tallies, the
    question queue/history, and the user's domain/role/profile context.
    """

    def __init__(self, n_questions: int, batch_size: int, domain: str = "general", role: Optional[str] = None, profile: Optional[Dict] = None):
        """Initialize a session.

        Args:
            n_questions: Requested question count; clamped to
                [1, MAX_QUESTIONS].
            batch_size: Questions per batch; clamped to >= 1.
            domain: Session domain; falls back to ``role`` then
                ``"general"``, and is validated against DOMAINS.
            role: Optional role label; defaults to the resolved domain.
            profile: Optional user profile dict.
        """
        domain = (domain or role or "general").lower()
        self.domain = domain if domain in DOMAINS else "general"
        self.role = (role or self.domain)
        self.profile = profile or {}
        # Clamp requested counts to sane bounds.
        self.n_questions = max(1, min(n_questions, MAX_QUESTIONS))
        self.batch_size = max(1, batch_size)
        self.asked = 0
        self.color_counts = {c: 0 for c in COLOR_KEYS}
        self.history: List[Dict] = []
        self.queue: List[Dict] = []
        self.finished = False
        self.used_topics: List[str] = []
        self.history_of_questions: List[str] = []  # texts of already-asked questions

    def to_min_state(self) -> Dict:
        """Return a compact snapshot of the session for prompting/serialization."""
        raw_total = sum(self.color_counts.values())
        total = raw_total or 1  # avoid division by zero in the percentage math
        mix_percentages = {k: round((v / total) * 100, 2) for k, v in self.color_counts.items()}
        # BUGFIX: the original coerced `total` to >= 1 *before* the guard,
        # so `if total` was always true and max() reported an arbitrary
        # color for an empty session. Guard on the raw sum instead so
        # `dominant` is None until at least one answer exists.
        dominant = max(self.color_counts, key=self.color_counts.get) if raw_total else None
        return {
            "asked": self.asked,
            "dominant": dominant,
            "mix": mix_percentages,
            "domain": self.domain,
            "role": self.role,
            "profile": summarize_profile(self.profile),
        }

    def remaining(self) -> int:
        """Number of questions still to ask in this session."""
        return self.n_questions - self.asked
|
| 870 |
+
|
| 871 |
+
# Path of the on-disk session snapshot; overridable via environment variable.
SESSIONS_FILE = os.getenv("PYMATCH_SESSIONS_FILE", "sessions.json")
# Guards concurrent access to SESSIONS while serializing to disk.
_sessions_lock = threading.Lock()
# All live sessions, keyed by session id.
SESSIONS: Dict[str, SessionState] = {}
|
| 874 |
+
|
| 875 |
+
def save_sessions():
    """Persist all in-memory sessions to SESSIONS_FILE (best effort).

    Writes to a temporary file first and atomically swaps it in via
    ``os.replace`` so a crash mid-write never leaves a truncated
    sessions file. Failures are logged, never raised.
    """
    try:
        with _sessions_lock:
            snapshot = {key: session.__dict__ for key, session in SESSIONS.items()}
            temp_path = SESSIONS_FILE + ".tmp"
            with open(temp_path, "w", encoding="utf-8") as handle:
                json.dump(snapshot, handle, ensure_ascii=False, indent=2, default=str)
            os.replace(temp_path, SESSIONS_FILE)
    except Exception as exc:
        print("Failed to save sessions:", exc)
|
| 885 |
+
|
| 886 |
+
def persist_final_progress(user_id: Optional[str], role: str, mix: Dict[str, float]) -> bool:
    """Persist a session's final color mix to the progress table.

    First attempts an insert with an explicit ``llm_id``; if the column
    turns out to be an IDENTITY column (SQL Server error 544), retries
    without it so the database assigns the id.

    Args:
        user_id: Owning user id; stringified for storage, may be None.
        role: Session role/domain label.
        mix: Color -> percentage mapping; missing colors default to 0.0.

    Returns:
        True on successful insert, False on any failure (errors are
        printed, never raised).
    """
    from database import get_db_connection
    from config import PROGRESS_TBL

    llm_id = str(uuid.uuid4())
    blue = float(mix.get("blue", 0.0))
    green = float(mix.get("green", 0.0))
    yellow = float(mix.get("yellow", 0.0))
    red = float(mix.get("red", 0.0))
    uid = str(user_id) if user_id is not None else None

    # BUGFIX: initialize conn so the finally clause can't hit an unbound
    # name when get_db_connection() itself raises.
    conn = None
    try:
        conn = get_db_connection()
        cur = conn.cursor()
        # First attempt: include llm_id explicitly.
        try:
            cur.execute(f"""
                INSERT INTO [dbo].[{PROGRESS_TBL}]
                ([llm_id],[user_id],[role],[blue],[green],[yellow],[red],[created_at])
                VALUES (?,?,?,?,?,?,?,SYSUTCDATETIME())
            """, (llm_id, uid, role, blue, green, yellow, red))
            conn.commit()
            return True
        except pyodbc.Error as e:
            # IDENTITY column: let the database assign the id instead.
            if "IDENTITY_INSERT" in str(e) or "(544)" in str(e):
                cur.execute(f"""
                    INSERT INTO [dbo].[{PROGRESS_TBL}]
                    ([user_id],[role],[blue],[green],[yellow],[red],[created_at])
                    VALUES (?,?,?,?,?,?,SYSUTCDATETIME())
                """, (uid, role, blue, green, yellow, red))
                conn.commit()
                return True
            print("Persist failed:", e)
            return False
    except Exception as ex:
        print("Persist final progress failed:", ex)
        return False
    finally:
        if conn is not None:
            try:
                conn.close()
            except Exception:
                # Best-effort cleanup; a close failure must not mask the result.
                pass
|
| 925 |
+
|
| 926 |
+
def choose_themes(sess, k: int) -> List[str]:
    """Pick up to *k* themes for question generation.

    Prefers raw text chunks retrieved from the FAISS-indexed document
    (the chunks themselves serve as context "themes"); when the FAISS
    service is unavailable, samples from a generic theme bank instead.
    """
    try:
        from faiss_service import HAS_FAISS, FAISS_INDEX, TEXT_CHUNKS

        if HAS_FAISS and FAISS_INDEX is not None and TEXT_CHUNKS:
            # Random document chunks double as themes/context snippets.
            return random.sample(TEXT_CHUNKS, min(k, len(TEXT_CHUNKS)))
    except ImportError:
        pass

    # Generic fallback theme bank.
    generic_bank = [
        "communication style", "conflict resolution", "decision making",
        "problem solving", "team collaboration", "personal values",
        "work habits", "social interaction", "stress management",
        "goal setting", "time management", "relationship dynamics",
    ]
    sample_size = min(k, len(generic_bank))
    return random.sample(generic_bank, sample_size)
|
matching_functions.py
ADDED
|
@@ -0,0 +1,1112 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# matching_functions.py
|
| 2 |
+
import json
|
| 3 |
+
import random
|
| 4 |
+
import pandas as pd
|
| 5 |
+
import numpy as np
|
| 6 |
+
from datetime import date, datetime
|
| 7 |
+
from difflib import SequenceMatcher
|
| 8 |
+
from typing import Dict, List, Optional, Tuple
|
| 9 |
+
from sqlalchemy import func
|
| 10 |
+
from models import ExpectationResponse, Marriage, LLMGeneratedQuestions, Users, db
|
| 11 |
+
from character_functions import calculate_character_similarity
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def values_match(expect_value, profile_value, field_name):
|
| 15 |
+
"""Check if expectation value matches profile value with special handling for all fields"""
|
| 16 |
+
# Handle None values before using pd.isna
|
| 17 |
+
if expect_value is None or (hasattr(expect_value, 'isna') and pd.isna(expect_value)) or expect_value in ['', 'No preference', 'Any', 'All']:
|
| 18 |
+
return True # No preference means match with any value
|
| 19 |
+
|
| 20 |
+
# Convert to string safely
|
| 21 |
+
expect_str = str(expect_value).lower().strip() if expect_value is not None else ""
|
| 22 |
+
profile_str = str(profile_value).lower().strip() if profile_value is not None else ""
|
| 23 |
+
|
| 24 |
+
# Handle empty profile values
|
| 25 |
+
if profile_value is None or (hasattr(profile_value, 'isna') and pd.isna(profile_value)) or profile_str in ['', 'none', 'null']:
|
| 26 |
+
return False
|
| 27 |
+
|
| 28 |
+
# 🚨 FIELD-SPECIFIC MATCHING LOGIC
|
| 29 |
+
|
| 30 |
+
if field_name == 'pref_age_range':
|
| 31 |
+
try:
|
| 32 |
+
if '-' in expect_str and profile_value:
|
| 33 |
+
min_age, max_age = map(int, expect_str.split('-'))
|
| 34 |
+
from datetime import date, datetime
|
| 35 |
+
|
| 36 |
+
# 🧠 Handle both string and datetime.date types
|
| 37 |
+
if isinstance(profile_value, date):
|
| 38 |
+
birth_date = profile_value
|
| 39 |
+
elif isinstance(profile_value, str):
|
| 40 |
+
# Try common formats
|
| 41 |
+
try:
|
| 42 |
+
birth_date = datetime.strptime(profile_value, "%Y-%m-%d").date()
|
| 43 |
+
except ValueError:
|
| 44 |
+
birth_date = datetime.strptime(profile_value, "%d-%m-%Y").date()
|
| 45 |
+
else:
|
| 46 |
+
print(f"⚠️ Unsupported date type: {type(profile_value)}")
|
| 47 |
+
return False
|
| 48 |
+
|
| 49 |
+
# ✅ Calculate age accurately
|
| 50 |
+
today = date.today()
|
| 51 |
+
age = today.year - birth_date.year - (
|
| 52 |
+
(today.month, today.day) < (birth_date.month, birth_date.day)
|
| 53 |
+
)
|
| 54 |
+
|
| 55 |
+
# ✅ Inclusive range with ±1 tolerance
|
| 56 |
+
if (min_age - 1) <= age <= (max_age + 1):
|
| 57 |
+
return True
|
| 58 |
+
else:
|
| 59 |
+
return False
|
| 60 |
+
return True
|
| 61 |
+
except Exception as e:
|
| 62 |
+
print(f"⚠️ Age parsing error: {e} for {profile_value}")
|
| 63 |
+
return False
|
| 64 |
+
|
| 65 |
+
elif field_name == 'pref_height_range':
|
| 66 |
+
try:
|
| 67 |
+
cleaned = expect_str.replace('cm', '').replace(' ', '').lower()
|
| 68 |
+
profile_height = int(profile_str.replace('cm', '').replace(' ', ''))
|
| 69 |
+
|
| 70 |
+
# 190+
|
| 71 |
+
if cleaned.endswith('+'):
|
| 72 |
+
base = int(cleaned.replace('+', ''))
|
| 73 |
+
return profile_height >= base
|
| 74 |
+
|
| 75 |
+
# 181-189
|
| 76 |
+
if '-' in cleaned:
|
| 77 |
+
min_h, max_h = map(int, cleaned.split('-'))
|
| 78 |
+
return min_h <= profile_height <= max_h
|
| 79 |
+
|
| 80 |
+
# Single value
|
| 81 |
+
return profile_height == int(cleaned)
|
| 82 |
+
|
| 83 |
+
except:
|
| 84 |
+
return False
|
| 85 |
+
|
| 86 |
+
# 3. City matching (pref_current_city vs current_city)
|
| 87 |
+
elif field_name == 'pref_current_city':
|
| 88 |
+
pref_cities = [city.strip().lower() for city in expect_str.split(',')]
|
| 89 |
+
return profile_str in pref_cities
|
| 90 |
+
|
| 91 |
+
# 4. Country matching (pref_countries vs country)
|
| 92 |
+
elif field_name == 'pref_countries':
|
| 93 |
+
try:
|
| 94 |
+
# Handle None/empty values
|
| 95 |
+
if not expect_str or not profile_str:
|
| 96 |
+
return False
|
| 97 |
+
|
| 98 |
+
# Normalise expectation values
|
| 99 |
+
pref_countries = [c.strip().lower() for c in str(expect_str).split(',') if c.strip()]
|
| 100 |
+
|
| 101 |
+
# If user selected No Preference → auto match
|
| 102 |
+
if 'no preference' in pref_countries:
|
| 103 |
+
return True
|
| 104 |
+
|
| 105 |
+
# Normalise profile value
|
| 106 |
+
profile_country = str(profile_str).lower().strip()
|
| 107 |
+
|
| 108 |
+
return profile_country in pref_countries
|
| 109 |
+
|
| 110 |
+
except Exception as e:
|
| 111 |
+
print(f"Error in country matching: {e}")
|
| 112 |
+
return False
|
| 113 |
+
|
| 114 |
+
# 5. Languages matching (pref_languages vs languages_spoken)
|
| 115 |
+
elif field_name == 'pref_languages':
|
| 116 |
+
pref_langs = [lang.strip().lower() for lang in expect_str.split(',')]
|
| 117 |
+
profile_langs = [lang.strip().lower() for lang in profile_str.split(',')]
|
| 118 |
+
return any(lang in profile_langs for lang in pref_langs)
|
| 119 |
+
|
| 120 |
+
# 6. Health Constraints matching
|
| 121 |
+
elif field_name == 'health_constraints':
|
| 122 |
+
health_mapping = {
|
| 123 |
+
'healthy': ['none', 'healthy'],
|
| 124 |
+
'minor': ['minor'],
|
| 125 |
+
'chronic': ['chronic'],
|
| 126 |
+
'allergies': ['allergies']
|
| 127 |
+
}
|
| 128 |
+
for exp_health, profile_options in health_mapping.items():
|
| 129 |
+
if expect_str == exp_health:
|
| 130 |
+
return profile_str in profile_options
|
| 131 |
+
return expect_str == profile_str
|
| 132 |
+
|
| 133 |
+
# 7. Diet matching - STRICT EXACT MATCHING
|
| 134 |
+
elif field_name == 'pref_diet':
|
| 135 |
+
diet_options = {
|
| 136 |
+
'vegetarian': ['vegetarian'],
|
| 137 |
+
'non-vegetarian': ['non-vegetarian'],
|
| 138 |
+
'eggetarian': ['eggetarian']
|
| 139 |
+
}
|
| 140 |
+
|
| 141 |
+
expect_clean = expect_str.replace('-', '').replace(' ', '')
|
| 142 |
+
profile_clean = profile_str.replace('-', '').replace(' ', '')
|
| 143 |
+
|
| 144 |
+
if expect_clean == profile_clean:
|
| 145 |
+
return True
|
| 146 |
+
|
| 147 |
+
for diet_type, variations in diet_options.items():
|
| 148 |
+
expect_variations = [v.replace('-', '').replace(' ', '') for v in variations]
|
| 149 |
+
profile_variations = [v.replace('-', '').replace(' ', '') for v in variations]
|
| 150 |
+
|
| 151 |
+
if expect_clean in expect_variations:
|
| 152 |
+
return profile_clean in profile_variations
|
| 153 |
+
|
| 154 |
+
return False
|
| 155 |
+
|
| 156 |
+
# 8. Smoking matching
|
| 157 |
+
elif field_name == 'accept_smoking':
|
| 158 |
+
smoking_mapping = {
|
| 159 |
+
'never': ['no'],
|
| 160 |
+
'no preference': ['yes', 'no', 'occasionally'],
|
| 161 |
+
'occasionally': ['occasionally', 'yes']
|
| 162 |
+
}
|
| 163 |
+
for exp_option, profile_options in smoking_mapping.items():
|
| 164 |
+
if expect_str == exp_option:
|
| 165 |
+
return profile_str in profile_options
|
| 166 |
+
return expect_str == profile_str
|
| 167 |
+
|
| 168 |
+
# 9. Alcohol matching
|
| 169 |
+
elif field_name == 'accept_alcohol':
|
| 170 |
+
alcohol_mapping = {
|
| 171 |
+
'never': ['no'],
|
| 172 |
+
'no preference': ['yes', 'no', 'occasionally'],
|
| 173 |
+
'occasionally': ['occasionally', 'yes']
|
| 174 |
+
}
|
| 175 |
+
for exp_option, profile_options in alcohol_mapping.items():
|
| 176 |
+
if expect_str == exp_option:
|
| 177 |
+
return profile_str in profile_options
|
| 178 |
+
return expect_str == profile_str
|
| 179 |
+
|
| 180 |
+
# 10. Fitness matching
|
| 181 |
+
elif field_name == 'pref_fitness':
|
| 182 |
+
fitness_mapping = {
|
| 183 |
+
'low': ['low'],
|
| 184 |
+
'moderate': ['moderate'],
|
| 185 |
+
'high': ['high'],
|
| 186 |
+
'no preference': ['low', 'moderate', 'high']
|
| 187 |
+
}
|
| 188 |
+
for exp_level, profile_options in fitness_mapping.items():
|
| 189 |
+
if expect_str == exp_level:
|
| 190 |
+
return profile_str in profile_options
|
| 191 |
+
return expect_str == profile_str
|
| 192 |
+
|
| 193 |
+
# 11. Family Type matching
|
| 194 |
+
elif field_name == 'pref_family_type':
|
| 195 |
+
family_mapping = {
|
| 196 |
+
'nuclear': ['nuclear'],
|
| 197 |
+
'joint': ['joint'],
|
| 198 |
+
'extended': ['extended'],
|
| 199 |
+
'no preference': ['nuclear', 'joint', 'extended']
|
| 200 |
+
}
|
| 201 |
+
for exp_type, profile_options in family_mapping.items():
|
| 202 |
+
if expect_str == exp_type:
|
| 203 |
+
return profile_str in profile_options
|
| 204 |
+
return expect_str == profile_str
|
| 205 |
+
|
| 206 |
+
# 12. Live with In-laws matching
|
| 207 |
+
elif field_name == 'live_with_inlaws':
|
| 208 |
+
inlaw_mapping = {
|
| 209 |
+
'yes': ['yes'],
|
| 210 |
+
'no': ['no'],
|
| 211 |
+
'maybe': ['maybe'],
|
| 212 |
+
'no preference': ['yes', 'no', 'maybe']
|
| 213 |
+
}
|
| 214 |
+
for exp_option, profile_options in inlaw_mapping.items():
|
| 215 |
+
if expect_str == exp_option:
|
| 216 |
+
return profile_str in profile_options
|
| 217 |
+
return expect_str == profile_str
|
| 218 |
+
|
| 219 |
+
# 13. Children Timeline matching
|
| 220 |
+
elif field_name == 'children_timeline':
|
| 221 |
+
timeline_mapping = {
|
| 222 |
+
'within 1 year': ['within 1 year'],
|
| 223 |
+
'1-3 years': ['1-3 years'],
|
| 224 |
+
'after 3 years': ['after 3 years'],
|
| 225 |
+
'not planning': ['not planning', 'no preference'],
|
| 226 |
+
'no preference': ['within 1 year', '1-3 years', 'after 3 years', 'not planning', 'no preference']
|
| 227 |
+
}
|
| 228 |
+
for exp_timeline, profile_options in timeline_mapping.items():
|
| 229 |
+
if expect_str == exp_timeline:
|
| 230 |
+
return profile_str in profile_options
|
| 231 |
+
return expect_str == profile_str
|
| 232 |
+
|
| 233 |
+
# 14. Open to Adoption matching
|
| 234 |
+
elif field_name == 'open_to_adoption':
|
| 235 |
+
adoption_mapping = {
|
| 236 |
+
'yes': ['yes'],
|
| 237 |
+
'no': ['no'],
|
| 238 |
+
'maybe': ['maybe'],
|
| 239 |
+
'no preference': ['yes', 'no', 'maybe']
|
| 240 |
+
}
|
| 241 |
+
for exp_option, profile_options in adoption_mapping.items():
|
| 242 |
+
if expect_str == exp_option:
|
| 243 |
+
return profile_str in profile_options
|
| 244 |
+
return expect_str == profile_str
|
| 245 |
+
|
| 246 |
+
# 15. Conflict Approach matching
|
| 247 |
+
elif field_name == 'pref_conflict_approach':
|
| 248 |
+
conflict_mapping = {
|
| 249 |
+
'discuss calmly': ['discuss calmly'],
|
| 250 |
+
'problem-solving': ['problem-solving'],
|
| 251 |
+
'compromise': ['compromise'],
|
| 252 |
+
'avoid': ['avoid'],
|
| 253 |
+
'decide fast': ['decide fast'],
|
| 254 |
+
'no preference': ['discuss calmly', 'problem-solving', 'compromise', 'avoid', 'decide fast']
|
| 255 |
+
}
|
| 256 |
+
for exp_approach, profile_options in conflict_mapping.items():
|
| 257 |
+
if expect_str == exp_approach:
|
| 258 |
+
return profile_str in profile_options
|
| 259 |
+
return expect_str == profile_str
|
| 260 |
+
|
| 261 |
+
# 16. Financial Style matching
|
| 262 |
+
elif field_name == 'pref_financial_style':
|
| 263 |
+
financial_mapping = {
|
| 264 |
+
'budget-oriented': ['budget-oriented'],
|
| 265 |
+
'spend-oriented': ['spend-oriented'],
|
| 266 |
+
'balanced': ['balanced'],
|
| 267 |
+
'no preference': ['budget-oriented', 'spend-oriented', 'balanced']
|
| 268 |
+
}
|
| 269 |
+
for exp_style, profile_options in financial_mapping.items():
|
| 270 |
+
if expect_str == exp_style:
|
| 271 |
+
return profile_str in profile_options
|
| 272 |
+
return expect_str == profile_str
|
| 273 |
+
|
| 274 |
+
# 17. Religion matching - Comprehensive version
|
| 275 |
+
elif field_name in ['pref_religion', 'religion_alignment', 'religion']:
|
| 276 |
+
# Handle "No preference" case
|
| 277 |
+
if expect_str in ['no preference', 'any', 'all']:
|
| 278 |
+
return True
|
| 279 |
+
|
| 280 |
+
# Split expected religions (comma-separated)
|
| 281 |
+
expected_religions = [religion.strip().lower() for religion in expect_str.split(',')]
|
| 282 |
+
profile_religion = profile_str.lower().strip()
|
| 283 |
+
|
| 284 |
+
# Handle cases where profile has multiple religions too
|
| 285 |
+
profile_religions = [religion.strip().lower() for religion in profile_str.split(',')]
|
| 286 |
+
|
| 287 |
+
# Check if any profile religion matches any expected religion
|
| 288 |
+
return any(religion in expected_religions for religion in profile_religions)
|
| 289 |
+
|
| 290 |
+
# 18. Income Range matching
|
| 291 |
+
elif field_name == 'pref_income_range':
|
| 292 |
+
if expect_str.lower() == 'prefer not to say' or profile_str.lower() == 'prefer not to say':
|
| 293 |
+
return True
|
| 294 |
+
if '-' in expect_str and '-' in profile_str:
|
| 295 |
+
try:
|
| 296 |
+
exp_min, exp_max = map(lambda x: int(x.replace('₹', '').replace(',', '').strip()), expect_str.split('-'))
|
| 297 |
+
prof_min, prof_max = map(lambda x: int(x.replace('₹', '').replace(',', '').strip()), profile_str.split('-'))
|
| 298 |
+
# Check if ranges overlap
|
| 299 |
+
return not (prof_max < exp_min or prof_min > exp_max)
|
| 300 |
+
except (ValueError, AttributeError):
|
| 301 |
+
pass
|
| 302 |
+
return True
|
| 303 |
+
|
| 304 |
+
# 19. Education Level matching
|
| 305 |
+
elif field_name == 'pref_education_level':
|
| 306 |
+
education_mapping = {
|
| 307 |
+
'doctorate': ['doctorate', 'phd'],
|
| 308 |
+
'master': ['master', 'masters', 'postgraduate'],
|
| 309 |
+
'bachelor': ['bachelor', 'bachelors', 'undergraduate'],
|
| 310 |
+
'diploma': ['diploma', 'certificate'],
|
| 311 |
+
'school': ['school', 'secondary', 'higher secondary'],
|
| 312 |
+
'no preference': ['doctorate', 'master', 'bachelor', 'diploma', 'school']
|
| 313 |
+
}
|
| 314 |
+
for exp_level, profile_options in education_mapping.items():
|
| 315 |
+
if expect_str == exp_level:
|
| 316 |
+
return any(option in profile_str for option in profile_options)
|
| 317 |
+
return any(option in profile_str for option in education_mapping.get(expect_str, [expect_str]))
|
| 318 |
+
|
| 319 |
+
# 20. Employment Status matching
|
| 320 |
+
elif field_name == 'pref_employment_status':
|
| 321 |
+
employment_mapping = {
|
| 322 |
+
'employed': ['Employed'],
|
| 323 |
+
'self-employed': ['Self-employed'],
|
| 324 |
+
'unemployed': ['Unemployed'],
|
| 325 |
+
'freelancer': ['Freelancer'],
|
| 326 |
+
'government employee': ['Government employee'],
|
| 327 |
+
'no preference': ['Employed', 'Self-employed', 'Unemployed', 'Freelancer', 'Government employee']
|
| 328 |
+
}
|
| 329 |
+
|
| 330 |
+
# Handle "no preference" case
|
| 331 |
+
if expect_str == 'no preference':
|
| 332 |
+
return True
|
| 333 |
+
|
| 334 |
+
# Get expected options
|
| 335 |
+
expected_options = employment_mapping.get(expect_str, [expect_str])
|
| 336 |
+
|
| 337 |
+
# Exact match comparison (case-insensitive)
|
| 338 |
+
profile_clean = profile_str.strip().lower()
|
| 339 |
+
return any(profile_clean == option.lower() for option in expected_options)
|
| 340 |
+
|
| 341 |
+
# 21. Travel Preference matching
|
| 342 |
+
elif field_name == 'travel_pref':
|
| 343 |
+
travel_mapping = {
|
| 344 |
+
'frequent traveler': ['frequent traveler'],
|
| 345 |
+
'occasional traveler': ['occasional traveler'],
|
| 346 |
+
'homebody': ['homebody'],
|
| 347 |
+
'no preference': ['frequent traveler', 'occasional traveler', 'homebody']
|
| 348 |
+
}
|
| 349 |
+
for exp_travel, profile_options in travel_mapping.items():
|
| 350 |
+
if expect_str == exp_travel:
|
| 351 |
+
return profile_str in profile_options
|
| 352 |
+
return expect_str == profile_str
|
| 353 |
+
|
| 354 |
+
# 22. Pet Preference matching
|
| 355 |
+
elif field_name == 'pet_pref':
|
| 356 |
+
pet_mapping = {
|
| 357 |
+
'open to pets': ['yes'],
|
| 358 |
+
'must like pets': ['yes'],
|
| 359 |
+
'no pets': ['no'],
|
| 360 |
+
'no preference': ['yes', 'no']
|
| 361 |
+
}
|
| 362 |
+
for exp_pet, profile_options in pet_mapping.items():
|
| 363 |
+
if expect_str == exp_pet:
|
| 364 |
+
return profile_str in profile_options
|
| 365 |
+
return expect_str == profile_str
|
| 366 |
+
|
| 367 |
+
# 23. Deal Breakers - Complex logic (check if profile has any deal breakers)
|
| 368 |
+
elif field_name == 'deal_breakers':
|
| 369 |
+
if pd.isna(expect_value) or expect_str in ['', 'none']:
|
| 370 |
+
return True
|
| 371 |
+
|
| 372 |
+
# What profiles actually track
|
| 373 |
+
PROFILE_DEAL_BREAKERS = {'smoking', 'different religion', 'alcohol',
|
| 374 |
+
'financial irresponsibility', 'no desire for children'}
|
| 375 |
+
|
| 376 |
+
expect_breakers = {breaker.strip().lower() for breaker in expect_str.split(',')}
|
| 377 |
+
|
| 378 |
+
# If expectation includes untrackable deal breakers → NO MATCH
|
| 379 |
+
if not expect_breakers.issubset(PROFILE_DEAL_BREAKERS):
|
| 380 |
+
return False
|
| 381 |
+
|
| 382 |
+
# Check against actual profile data
|
| 383 |
+
if pd.isna(profile_value) or not str(profile_value).strip():
|
| 384 |
+
profile_breakers = set()
|
| 385 |
+
else:
|
| 386 |
+
profile_breakers = {breaker.strip().lower() for breaker in str(profile_value).split(',')}
|
| 387 |
+
|
| 388 |
+
# No match if profile has any of the expected deal breakers
|
| 389 |
+
return len(expect_breakers.intersection(profile_breakers)) == 0
|
| 390 |
+
|
| 391 |
+
# 24. Daily Routine matching
|
| 392 |
+
elif field_name == 'daily_routine':
|
| 393 |
+
routine_mapping = {
|
| 394 |
+
'early riser': ['early riser'],
|
| 395 |
+
'night owl': ['night owl'],
|
| 396 |
+
'balanced': ['balanced'],
|
| 397 |
+
'no preference': ['early riser', 'night owl', 'balanced']
|
| 398 |
+
}
|
| 399 |
+
for exp_routine, profile_options in routine_mapping.items():
|
| 400 |
+
if expect_str == exp_routine:
|
| 401 |
+
return profile_str in profile_options
|
| 402 |
+
return expect_str == profile_str
|
| 403 |
+
|
| 404 |
+
# 25. Family Communication Frequency matching
|
| 405 |
+
elif field_name == 'family_communication_frequency':
|
| 406 |
+
comm_mapping = {
|
| 407 |
+
'daily': ['daily'],
|
| 408 |
+
'weekly': ['weekly'],
|
| 409 |
+
'monthly': ['monthly'],
|
| 410 |
+
'occasionally': ['occasionally'],
|
| 411 |
+
'no preference': ['daily', 'weekly', 'monthly', 'occasionally']
|
| 412 |
+
}
|
| 413 |
+
for exp_freq, profile_options in comm_mapping.items():
|
| 414 |
+
if expect_str == exp_freq:
|
| 415 |
+
return profile_str in profile_options
|
| 416 |
+
return expect_str == profile_str
|
| 417 |
+
|
| 418 |
+
# 26. pref_shared_hobbies
|
| 419 |
+
elif field_name == "pref_shared_hobbies":
|
| 420 |
+
# Expectation list (split by comma)
|
| 421 |
+
expect_list = [x.strip().lower() for x in expect_str.split(",") if x.strip()]
|
| 422 |
+
|
| 423 |
+
# Profile list
|
| 424 |
+
profile_list = [x.strip().lower() for x in profile_str.split(",") if x.strip()]
|
| 425 |
+
|
| 426 |
+
# ANY overlap → MATCH
|
| 427 |
+
return any(h in profile_list for h in expect_list)
|
| 428 |
+
|
| 429 |
+
# 27. pref_partner_relocation
|
| 430 |
+
elif field_name == 'pref_partner_relocation':
|
| 431 |
+
relocation_mapping = {
|
| 432 |
+
'yes': ['yes'],
|
| 433 |
+
'no': ['no'],
|
| 434 |
+
'maybe': ['maybe'],
|
| 435 |
+
'no preference': ['yes', 'no', 'maybe']
|
| 436 |
+
}
|
| 437 |
+
for exp_option, profile_options in relocation_mapping.items():
|
| 438 |
+
if expect_str == exp_option:
|
| 439 |
+
return profile_str in profile_options
|
| 440 |
+
return expect_str == profile_str
|
| 441 |
+
|
| 442 |
+
# 28. pref_live_with_parents
|
| 443 |
+
elif field_name == 'pref_live_with_parents':
|
| 444 |
+
live_mapping = {
|
| 445 |
+
'yes': ['yes'],
|
| 446 |
+
'no': ['no'],
|
| 447 |
+
'maybe': ['maybe'],
|
| 448 |
+
'no preference': ['yes', 'no', 'maybe']
|
| 449 |
+
}
|
| 450 |
+
for exp_option, profile_options in live_mapping.items():
|
| 451 |
+
if expect_str == exp_option:
|
| 452 |
+
return profile_str in profile_options
|
| 453 |
+
return expect_str == profile_str
|
| 454 |
+
|
| 455 |
+
# 29. financial_support_to_parents
|
| 456 |
+
elif field_name == 'financial_support_to_parents':
|
| 457 |
+
support_mapping = {
|
| 458 |
+
'yes': ['yes'],
|
| 459 |
+
'no': ['no'],
|
| 460 |
+
'no preference': ['yes', 'no']
|
| 461 |
+
}
|
| 462 |
+
for exp_option, profile_options in support_mapping.items():
|
| 463 |
+
if expect_str == exp_option:
|
| 464 |
+
return profile_str in profile_options
|
| 465 |
+
return expect_str == profile_str
|
| 466 |
+
|
| 467 |
+
# 30. other_non_negotiables
|
| 468 |
+
elif field_name == 'other_non_negotiables':
|
| 469 |
+
expect_list = [x.strip().lower() for x in expect_str.split(',') if x.strip()]
|
| 470 |
+
profile_list = [x.strip().lower() for x in profile_str.split(',') if x.strip()]
|
| 471 |
+
|
| 472 |
+
# Match if ANY expected non-negotiable is found in profile
|
| 473 |
+
return any(item in profile_list for item in expect_list)
|
| 474 |
+
|
| 475 |
+
# 31. skin_tone
|
| 476 |
+
elif field_name == 'skin_tone':
|
| 477 |
+
tone_mapping = {
|
| 478 |
+
'fair': ['fair'],
|
| 479 |
+
'medium': ['medium'],
|
| 480 |
+
'dark': ['dark'],
|
| 481 |
+
'no preference': ['fair', 'medium', 'dark']
|
| 482 |
+
}
|
| 483 |
+
for exp_tone, profile_options in tone_mapping.items():
|
| 484 |
+
if expect_str == exp_tone:
|
| 485 |
+
return profile_str in profile_options
|
| 486 |
+
return expect_str == profile_str
|
| 487 |
+
|
| 488 |
+
# 32. marital_status
|
| 489 |
+
elif field_name == 'marital_status':
|
| 490 |
+
status_mapping = {
|
| 491 |
+
'single': ['single'],
|
| 492 |
+
'divorced': ['divorced'],
|
| 493 |
+
'widowed': ['widowed'],
|
| 494 |
+
'no preference': ['single', 'divorced', 'widowed']
|
| 495 |
+
}
|
| 496 |
+
for exp_status, profile_options in status_mapping.items():
|
| 497 |
+
if expect_str == exp_status:
|
| 498 |
+
return profile_str in profile_options
|
| 499 |
+
return expect_str == profile_str
|
| 500 |
+
|
| 501 |
+
# 33. relaxation_mode
|
| 502 |
+
elif field_name == 'relaxation_mode':
|
| 503 |
+
# No preference → always match
|
| 504 |
+
if expect_str in ['no preference', 'any']:
|
| 505 |
+
return True
|
| 506 |
+
|
| 507 |
+
expect_list = [x.strip().lower() for x in expect_str.split(',') if x.strip()]
|
| 508 |
+
profile_list = [x.strip().lower() for x in profile_str.split(',') if x.strip()]
|
| 509 |
+
|
| 510 |
+
# Any overlap = match
|
| 511 |
+
return any(item in profile_list for item in expect_list)
|
| 512 |
+
|
| 513 |
+
elif field_name == 'expectation_summary':
|
| 514 |
+
if not expect_value or str(expect_value).strip().lower() in ['', 'no preference', 'any']:
|
| 515 |
+
return True
|
| 516 |
+
|
| 517 |
+
if not profile_value or str(profile_value).strip().lower() in ['', 'none', 'null']:
|
| 518 |
+
return False
|
| 519 |
+
|
| 520 |
+
return compare_expectation_with_remark(
|
| 521 |
+
str(expect_value).strip(),
|
| 522 |
+
str(profile_value).strip()
|
| 523 |
+
)
|
| 524 |
+
|
| 525 |
+
# 34. Career Aspirations matching
|
| 526 |
+
elif field_name == 'pref_career_aspirations':
|
| 527 |
+
career_mapping = {
|
| 528 |
+
'entrepreneurship': ['entrepreneurship', 'entrepreneur'],
|
| 529 |
+
'leadership': ['leadership'],
|
| 530 |
+
'stable job': ['stable job'],
|
| 531 |
+
'work-life balance': ['work-life balance'],
|
| 532 |
+
'research': ['research'],
|
| 533 |
+
'creativity': ['creativity'],
|
| 534 |
+
'social impact': ['social impact'],
|
| 535 |
+
'no preference': ['entrepreneurship', 'leadership', 'stable job', 'work-life balance', 'research', 'creativity', 'social impact']
|
| 536 |
+
}
|
| 537 |
+
for exp_career, profile_options in career_mapping.items():
|
| 538 |
+
if expect_str == exp_career:
|
| 539 |
+
return any(option in profile_str for option in profile_options)
|
| 540 |
+
return any(option in profile_str for option in career_mapping.get(expect_str, [expect_str]))
|
| 541 |
+
|
| 542 |
+
# Default: Exact match for other fields
|
| 543 |
+
else:
|
| 544 |
+
return expect_str == profile_str
|
| 545 |
+
|
| 546 |
+
def extract_key_concepts(text):
    """Return the set of key concepts mentioned in *text*.

    Every whitespace-separated token is lowercased and stripped of
    punctuation, then resolved in this order:
      * stop words and tokens shorter than 3 characters are discarded;
      * a token found in a concept group yields that group's label
        (the first matching group wins);
      * any other token longer than 4 characters is kept verbatim.
    """
    stop_words = {'the', 'a', 'an', 'and', 'or', 'but', 'in', 'on', 'at', 'to',
                  'for', 'of', 'with', 'by', 'is', 'are', 'was', 'were', 'be', 'been'}

    # Concept mapping - words that mean similar things.  Order matters:
    # a token matching several groups is credited to the first one.
    concept_groups = {
        'family': ['family', 'parents', 'children', 'siblings', 'home', 'domestic'],
        'career': ['career', 'job', 'work', 'profession', 'business', 'entrepreneur'],
        'balance': ['balance', 'equilibrium', 'harmony', 'work-life'],
        'growth': ['growth', 'development', 'improvement', 'learning', 'progress'],
        'values': ['values', 'principles', 'ethics', 'morals', 'beliefs'],
        'communication': ['communication', 'talking', 'discussing', 'expressing'],
        'shared': ['shared', 'together', 'mutual', 'common', 'joint'],
        'happiness': ['happiness', 'joy', 'fulfillment', 'contentment'],
        'respect': ['respect', 'esteem', 'admiration', 'honor'],
        'understanding': ['understanding', 'comprehension', 'insight', 'empathy'],
        'goals': ['goals', 'objectives', 'aims', 'aspirations', 'ambitions'],
        'compatibility': ['compatibility', 'harmony', 'agreement', 'fit'],
        'lifestyle': ['lifestyle', 'way of life', 'routine', 'daily life'],
        'culture': ['culture', 'cultural', 'tradition', 'heritage'],
        'religion': ['religion', 'faith', 'spiritual', 'belief'],
        'partner': ['partner', 'spouse', 'companion', 'mate'],
        'love': ['love', 'affection', 'care', 'fondness'],
        'trust': ['trust', 'confidence', 'reliance', 'faith'],
        'support': ['support', 'encouragement', 'backing', 'assistance'],
        'stability': ['stability', 'security', 'steadiness', 'reliability']
    }

    found = set()
    for raw_token in text.lower().split():
        # Drop punctuation so "trust," and "respect!" still match.
        token = ''.join(ch for ch in raw_token if ch.isalnum())

        if token in stop_words or len(token) < 3:
            continue

        # First concept group containing the token, if any.
        label = next(
            (name for name, synonyms in concept_groups.items() if token in synonyms),
            None,
        )
        if label is not None:
            found.add(label)
        elif len(token) > 4:
            # Unmapped but reasonably long words are kept as-is; longer
            # words are usually more meaningful.
            found.add(token)

    return found
|
| 598 |
+
|
| 599 |
+
def compare_expectation_with_remark(exp_summary, profile_remark):
    """Compare expectation summary with profile remark using multiple strategies.

    Args:
        exp_summary: free-text expectation summary written by the user.
        profile_remark: free-text remark from the candidate profile.

    Returns:
        True when any strategy considers the two texts a match:
        exact phrase containment (short summaries only), >40% key-concept
        overlap, >25% raw text similarity, or >=2 shared important keywords.

    NOTE(review): prints verbose debug output on every call — intended for
    console tracing; consider switching to logging if this runs per-profile.
    """

    exp_summary_lower = exp_summary.lower()
    profile_remark_lower = profile_remark.lower()

    print(f"🔍 Comparing expectation with remark:")
    print(f"   Expectation: '{exp_summary}'")
    print(f"   Remark (first 100 chars): '{profile_remark[:100]}...'")

    # Strategy 1: Check for exact phrase matching (for very specific expectations)
    if len(exp_summary.split()) <= 6:  # Short expectations (6 words or less)
        if exp_summary_lower in profile_remark_lower:
            print(f"   ✅ Exact phrase found in remark")
            return True

    # Strategy 2: Extract and compare key concepts
    exp_concepts = extract_key_concepts(exp_summary)
    remark_concepts = extract_key_concepts(profile_remark)

    print(f"   Expectation concepts: {exp_concepts}")
    print(f"   Remark concepts found: {len(remark_concepts)} total")

    # Count overlapping concepts; overlap ratio is relative to the
    # expectation's concepts (what fraction of the user's wishes appear).
    overlapping = exp_concepts.intersection(remark_concepts)
    if exp_concepts:
        concept_overlap = len(overlapping) / len(exp_concepts)
    else:
        concept_overlap = 0

    print(f"   Concept overlap: {len(overlapping)}/{len(exp_concepts)} = {concept_overlap:.2f}")

    # Strategy 3: Use difflib for text similarity (fallback)
    # NOTE(review): compares the ORIGINAL-case strings, unlike the other
    # strategies which use the lowered copies — presumably intentional,
    # but worth confirming.
    from difflib import SequenceMatcher
    text_similarity = SequenceMatcher(None, exp_summary, profile_remark).ratio()
    print(f"   Text similarity: {text_similarity:.2f}")

    # Strategy 4: Check for important keywords
    important_keywords = ['family', 'career', 'balance', 'growth', 'values',
                          'communication', 'shared', 'respect', 'understanding',
                          'partner', 'love', 'trust', 'support', 'happiness']

    keyword_matches = 0
    for keyword in important_keywords:
        if keyword in exp_summary_lower and keyword in profile_remark_lower:
            keyword_matches += 1

    print(f"   Important keyword matches: {keyword_matches}")

    # Combined decision logic
    # Match if ANY of these conditions are met:
    # 1. Good concept overlap (> 40%)
    # 2. Reasonable text similarity (> 25%)
    # 3. At least 2 important keyword matches
    # 4. Exact phrase match (already handled above)

    result = (concept_overlap > 0.4) or (text_similarity > 0.25) or (keyword_matches >= 2)

    print(f"   Final decision: {'✅ MATCH' if result else '❌ NO MATCH'}")
    print(f"   Reasons: concept_overlap={concept_overlap:.2f}, "
          f"text_similarity={text_similarity:.2f}, "
          f"keyword_matches={keyword_matches}")

    return result
|
| 663 |
+
|
| 664 |
+
def compute_expectation_score(expect, profile, mandatory_fields):
    """Compute expectation match percentage based on satisfied fields.

    Two-pass scoring:
      1. Every field flagged True in ``mandatory_fields`` is checked first.
         A single mismatch — or a missing profile value — rejects the
         profile outright (returns 0).
      2. All remaining expectation fields are compared to accumulate a
         satisfied/total ratio; an optional similarity bonus (up to 5
         percentage points) is added from the free-text expectation summary
         vs. the profile remark.

    Args:
        expect: expectation record; fields are read via getattr — presumably
            an ORM object, verify against caller.
        profile: candidate profile record; fields read via getattr.
        mandatory_fields: dict mapping expectation field name -> bool
            (True = must match).

    Returns:
        Score as a decimal in [0, 1] (percentage / 100); 0 when a mandatory
        field is violated or no expectation field had a value.
    """
    satisfied_fields = 0
    total_fields_checked = 0
    mandatory_violations = 0

    print(f"🔍 COMPUTE_SCORE: Evaluating profile {profile.user_id} ({profile.full_name}) from {profile.current_city}")

    # 🚨 UPDATED FIELD MAPPING - all expectation fields
    # Maps expectation attribute names -> profile attribute names.
    field_mapping = {
        'pref_age_range': 'date_of_birth',
        'pref_height_range': 'height',
        'pref_education_level': 'education_level',
        'pref_employment_status': 'employment_status',
        'expectation_summary': 'remark',  # Map expectation_summary to profile remark

        'pref_current_city': 'current_city',
        'pref_countries': 'country',
        'pref_diet': 'food_preference',
        'pref_fitness': 'fitness_level',
        'pref_family_type': 'family_type',

        'accept_smoking': 'smoking_habit',
        'accept_alcohol': 'alcohol_habit',
        'pref_languages': 'languages_spoken',
        'religion_alignment': 'religion',
        'pref_partner_relocation': 'relocation_willingness',

        'pref_conflict_approach': 'conflict_approach',
        'pref_financial_style': 'financial_style',
        'pref_shared_hobbies': 'hobbies_interests',
        'travel_pref': 'travel_preference',
        'pet_pref': 'own_pets',

        'pref_income_range': 'income_range',
        'live_with_inlaws': 'live_with_inlaws',
        'pref_live_with_parents': 'live_with_parents',
        'financial_support_to_parents': 'support_parents_financially',
        'pref_career_aspirations': 'career_aspirations',

        'children_timeline': 'children_timeline',
        'open_to_adoption': 'open_to_adoption',
        'deal_breakers': 'deal_breakers',
        'other_non_negotiables': 'other_non_negotiables',
        'health_constraints': 'health_constraints',

        'skin_tone': 'skin_tone',
        'marital_status': 'marital_status',
        'daily_routine': 'daily_routine',
        'family_communication_frequency': 'family_communication_frequency',
        'relaxation_mode': 'relaxation_mode'
    }

    # 🚨 DEBUG: Track all field processing
    field_details = []

    # 🚨 CRITICAL FIX: Check ALL mandatory fields FIRST
    print(f"🎯 COMPUTE_SCORE: CHECKING ALL MANDATORY FIELDS: {mandatory_fields}")

    for field_name, is_mandatory in mandatory_fields.items():
        if is_mandatory:
            print(f"🎯 COMPUTE_SCORE: Checking mandatory field: {field_name}")

            # Get expectation value
            expect_value = getattr(expect, field_name, None)

            # Map expectation field to actual profile field
            profile_field_name = field_mapping.get(field_name, field_name)
            profile_value = getattr(profile, profile_field_name, None)

            # Special handling for location field
            if field_name == 'pref_current_city' and not profile_value:
                profile_value = profile.current_city

            print(f"   Expect: '{expect_value}', Profile: '{profile_value}' (mapped to: {profile_field_name})")

            print(
                f"[COMPARE] Expectation Field: {field_name} "
                f"({expect_value}) ↔ Profile Field: {profile_field_name} "
                f"({profile_value})"
            )

            # If expectation has a value for this mandatory field
            if expect_value and str(expect_value).strip():
                total_fields_checked += 1
                # Profile must have a matching value
                if not profile_value or not str(profile_value).strip():
                    print(f"❌ COMPUTE_SCORE: Mandatory violation: {field_name} - Profile missing value")
                    mandatory_violations += 1
                    field_details.append(f"🚫 MANDATORY FAIL: {field_name}: {expect_value} -> MISSING")
                elif not values_match(expect_value, profile_value, field_name):
                    print(f"❌ COMPUTE_SCORE: Mandatory violation: {field_name} - Values don't match")
                    print(f"   Expect: '{expect_value}', Profile: '{profile_value}'")
                    mandatory_violations += 1
                    field_details.append(f"🚫 MANDATORY FAIL: {field_name}: {expect_value} -> {profile_value}")
                else:
                    satisfied_fields += 1
                    print(f"✅ COMPUTE_SCORE: Mandatory match: {field_name} - '{expect_value}'")
                    field_details.append(f"✅ MANDATORY: {field_name}: {expect_value} -> {profile_value}")
            else:
                # Mandatory flag set but the user expressed no preference:
                # field is ignored rather than counted against the profile.
                print(f"ℹ️ COMPUTE_SCORE: Mandatory field {field_name} has no expectation value, skipping")
                field_details.append(f"➖ MANDATORY NO PREF: {field_name}")

    # 🚨 CRITICAL FIX: REJECT if ANY mandatory violations
    if mandatory_violations > 0:
        print(f"🚫 COMPUTE_SCORE: Profile {profile.user_id} REJECTED due to {mandatory_violations} mandatory violations")
        return 0  # Return 0 score to indicate rejection

    print(f"✅ COMPUTE_SCORE: Profile {profile.user_id} passed ALL mandatory checks")

    # 🚨 NOW CHECK ALL EXPECTATION FIELDS for percentage calculation
    # NOTE(review): 'expectation_summary' is in field_mapping but not in this
    # list, so it contributes only via the similarity bonus below (or when
    # flagged mandatory) — confirm that is intended.
    all_expectation_fields = [
        'pref_age_range', 'pref_height_range', 'pref_education_level', 'pref_employment_status',
        'pref_current_city', 'pref_countries', 'pref_diet', 'pref_fitness', 'pref_family_type',
        'accept_smoking', 'accept_alcohol', 'pref_languages', 'religion_alignment',
        'pref_partner_relocation', 'pref_conflict_approach', 'pref_financial_style',
        'pref_shared_hobbies', 'travel_pref', 'pet_pref', 'pref_income_range',
        'live_with_inlaws', 'pref_live_with_parents', 'financial_support_to_parents',
        'pref_career_aspirations', 'children_timeline', 'open_to_adoption',
        'deal_breakers', 'other_non_negotiables', 'health_constraints', 'skin_tone',
        'marital_status', 'daily_routine', 'family_communication_frequency', 'relaxation_mode'
    ]

    # Check ALL expectation fields (both mandatory and optional)
    for field_name in all_expectation_fields:
        # Skip if already processed as mandatory
        if field_name in mandatory_fields and mandatory_fields[field_name]:
            continue

        # Map expectation field to profile field
        profile_field_name = field_mapping.get(field_name, field_name)
        expect_value = getattr(expect, field_name, None)
        profile_value = getattr(profile, profile_field_name, None)

        # Special handling for location field
        if field_name == 'pref_current_city' and not profile_value:
            profile_value = profile.current_city

        # Only count if expectation has a value
        if expect_value and str(expect_value).strip():
            total_fields_checked += 1
            # --- Console Log ---
            print(
                f"[COMPARE] Expectation -> {field_name}: '{expect_value}' "
                f" | Profile -> {profile_field_name}: '{profile_value}'"
            )

            if profile_value and str(profile_value).strip():
                if values_match(expect_value, profile_value, field_name):
                    satisfied_fields += 1
                    print(f"✅ COMPUTE_SCORE: Field match: {field_name}")
                    field_details.append(f"✅ OPTIONAL: {field_name}: {expect_value} -> {profile_value}")
                else:
                    print(f"❌ COMPUTE_SCORE: Field mismatch: {field_name} - Expect: '{expect_value}', Profile: '{profile_value}'")
                    field_details.append(f"❌ OPTIONAL: {field_name}: {expect_value} -> {profile_value}")
            else:
                # Optional field with no profile value still counts in the
                # denominator, lowering the score for incomplete profiles.
                print(f"❌ COMPUTE_SCORE: Field missing: {field_name} - Profile has no value")
                field_details.append(f"⚠️ OPTIONAL: {field_name}: {expect_value} -> MISSING")
        else:
            field_details.append(f"➖ OPTIONAL NO PREF: {field_name}")

    # 🚨 DEBUG: Print detailed field analysis
    print(f"🔍 COMPUTE_SCORE: Field-by-field analysis:")
    for detail in field_details:
        print(f"   {detail}")
    print(f"🔍 COMPUTE_SCORE: Total fields checked: {total_fields_checked}")
    print(f"🔍 COMPUTE_SCORE: Satisfied fields: {satisfied_fields}")

    # 🚨 Calculate percentage based on satisfied fields vs total fields checked
    if total_fields_checked > 0:
        percentage = (satisfied_fields / total_fields_checked) * 100
        print(f"📊 COMPUTE_SCORE: Field Analysis: {satisfied_fields}/{total_fields_checked} fields satisfied = {percentage:.1f}%")

        # Special handling for expectation summary (bonus)
        if hasattr(expect, 'expectation_summary') and expect.expectation_summary and profile.remark:
            from difflib import SequenceMatcher
            exp_summary = str(expect.expectation_summary).lower()
            profile_remark = str(profile.remark).lower()

            sim = SequenceMatcher(None, exp_summary, profile_remark).ratio()
            if sim > 0.3:
                # Add bonus for summary similarity (up to 5%)
                bonus = min(sim * 5, 5)
                percentage = min(100, percentage + bonus)
                print(f"✅ COMPUTE_SCORE: Summary similarity bonus: +{bonus:.1f}% (similarity: {sim:.2f})")

        final_percentage = round(percentage, 2)
        print(f"🎯 COMPUTE_SCORE: Final expectation percentage: {final_percentage}%")
        return final_percentage / 100  # Return as decimal for consistency

    print(f"⚠️ COMPUTE_SCORE: No expectation fields to check for profile {profile.user_id}")
    return 0
|
| 857 |
+
|
| 858 |
+
def match_expectation_with_profiles(user_id):
    """Rank opposite-gender profiles against a user's stored expectations.

    Pipeline:
      1. Load the user's ExpectationResponse and parse its mandatory-field
         flags (stored as a JSON string or dict in ``_mandatory_fields``).
      2. Query opposite-gender Marriage profiles.
      3. Score each via compute_expectation_score; keep only scores > 0
         (mandatory checks passed).
      4. Attach a character score from LLMGeneratedQuestions color data and
         a 70/30 weighted overall score.

    Args:
        user_id: id of the user whose expectations drive the matching.

    Returns:
        List of candidate dicts sorted by expectation_score descending;
        empty list when the user has no expectation or marriage record.
    """
    expectation = ExpectationResponse.query.filter_by(user_id=user_id).first()
    if not expectation:
        print(f"❌ No expectation data found for user {user_id}")
        return []

    # 🚨 CRITICAL FIX: Properly parse mandatory fields from database
    mandatory_fields = {}
    if hasattr(expectation, '_mandatory_fields') and expectation._mandatory_fields:
        try:
            if isinstance(expectation._mandatory_fields, str):
                # Parse JSON string from database
                mandatory_fields = json.loads(expectation._mandatory_fields)
            else:
                mandatory_fields = expectation._mandatory_fields
        except Exception as e:
            # Fall back to no mandatory constraints rather than failing the match.
            print(f"❌ Error parsing mandatory fields: {e}")
            mandatory_fields = {}
    else:
        print("ℹ️ No mandatory fields found or empty")

    print(f"🔍 DEBUG: Mandatory fields for user {user_id}: {mandatory_fields}")

    # Get current user to know gender
    current_user = Marriage.query.filter_by(user_id=user_id).first()
    if not current_user:
        print(f"❌ No marriage profile found for user {user_id}")
        return []

    user_gender = (current_user.gender or "").lower()
    print(f"🔍 DEBUG: Current user gender: {user_gender}")

    # Opposite gender profiles only.
    # startswith() tolerates stored variants like "male " / "males";
    # the fallback branch handles any other gender value.
    if user_gender.startswith('male'):
        opposite_profiles = Marriage.query.filter(func.lower(func.trim(Marriage.gender)) == "female").all()
    elif user_gender.startswith('female'):
        opposite_profiles = Marriage.query.filter(func.lower(func.trim(Marriage.gender)) == "male").all()
    else:
        opposite_profiles = Marriage.query.filter(Marriage.gender != current_user.gender).all()

    print(f"🔍 DEBUG: Found {len(opposite_profiles)} opposite gender profiles")

    # 🚨 FIX: Initialize candidates list here
    candidates = []

    # Evaluate all opposite gender profiles
    for profile in opposite_profiles:
        print(f"\n--- Evaluating Profile {profile.user_id} ---")
        s = compute_expectation_score(expectation, profile, mandatory_fields)
        # Score 0 means a mandatory violation (or nothing to check) — drop it.
        if s > 0:
            candidates.append({
                "user_id": profile.user_id,
                "name": profile.full_name,
                "gender": profile.gender,
                "location": profile.current_city,
                "religion": profile.religion,
                "remark": profile.remark,
                "expectation_score": s,
                "mandatory_matched": True
            })
            print(f"✅ Added candidate {profile.user_id} with score {s}")

    print(f"📈 Total candidates after mandatory filtering: {len(candidates)}")

    # 🚨 FIX: Get character compatibility for ALL candidates
    all_ids = [c["user_id"] for c in candidates]
    llm_data = LLMGeneratedQuestions.query.filter(LLMGeneratedQuestions.user_id.in_(all_ids)).all()
    llm_map = {l.user_id: (l.blue, l.green, l.yellow, l.red) for l in llm_data}

    # 🚨 FIX: Calculate character scores properly
    for c in candidates:
        if c["user_id"] in llm_map:
            b, g, y, r = llm_map[c["user_id"]]
            # Calculate character score as weighted sum of color percentages
            total = b + g + y + r
            if int(total) > 0:
                # Normalize and calculate similarity to ideal distribution

                char_score = calculate_character_similarity(b, g, y, r)
                c["character_score"] = round(char_score, 2)
            else:
                c["character_score"] = 0
        else:
            # No LLM color data for this candidate — neutral character score.
            c["character_score"] = 0

        # Overall score combining both expectation and character
        c["overall_score"] = round(0.7 * c["expectation_score"] + 0.3 * c["character_score"], 2)

    # Return both sorted lists
    # NOTE(review): character_sorted / overall_sorted are computed but unused;
    # only the expectation-sorted list is returned.
    expectation_sorted = sorted(candidates, key=lambda x: x["expectation_score"], reverse=True)
    character_sorted = sorted(candidates, key=lambda x: x["character_score"], reverse=True)
    overall_sorted = sorted(candidates, key=lambda x: x["overall_score"], reverse=True)

    print(f"🎯 Final ranked by expectation: {len(expectation_sorted)}")
    print(f"🎯 Final ranked by character: {len(character_sorted)}")

    # 🚨 FIX: Return the appropriate list based on what the caller expects
    return expectation_sorted
|
| 956 |
+
|
| 957 |
+
def generate_expectation_explanation(expect_user: dict, profile_user: dict) -> list:
    """
    Compare one user's expectations with another user's profile and build a
    clean, field-by-field explanation.

    Args:
        expect_user: dict of expectation fields (e.g. ``pref_diet``,
            ``accept_smoking`` ...), typically an ExpectationResponse row.
        profile_user: dict of profile fields for the candidate (a Marriage row).

    Returns:
        list[str]: markdown-style lines — an overall compatibility summary,
        a "missing profile data" section (if any), then a detailed per-field
        match/difference analysis.
    """
    exact_matches = []
    differences = []
    missing_data = []

    # Values that mean "the user has no preference for this field".
    no_preference = {"", "no preference", "any", "all"}

    # -------------------------------------------
    # Universal safe key lookup
    # -------------------------------------------
    def get_profile_value(profile_dict, key_name):
        """Case-insensitive and alias-safe key lookup; '' when not found."""
        key_name = key_name.lower().strip()

        # Several historical column names all mean "country".
        country_aliases = ["country", "location", "current_country",
                           "residence_country", "live_country"]

        for k, v in profile_dict.items():
            k_clean = k.lower().strip()
            if k_clean == key_name:
                return str(v).strip()
            # Match ANY country-related field when asked for "country".
            if key_name == "country" and k_clean in country_aliases:
                return str(v).strip()
        return ""

    # -------------------------------------------
    # Expectation column -> profile column.
    # Must stay in sync with compute_expectation_score.
    # -------------------------------------------
    field_mapping = {
        'pref_age_range': 'date_of_birth',
        'pref_height_range': 'height',
        'pref_education_level': 'education_level',
        'pref_employment_status': 'employment_status',

        'pref_current_city': 'current_city',
        'pref_countries': 'country',
        'pref_diet': 'food_preference',
        'pref_fitness': 'fitness_level',
        'pref_family_type': 'family_type',

        'accept_smoking': 'smoking_habit',
        'accept_alcohol': 'alcohol_habit',
        'pref_languages': 'languages_spoken',
        'religion_alignment': 'religion',
        'pref_partner_relocation': 'relocation_willingness',

        'pref_conflict_approach': 'conflict_approach',
        'pref_financial_style': 'financial_style',
        'pref_shared_hobbies': 'hobbies_interests',
        'travel_pref': 'travel_preference',
        'pet_pref': 'own_pets',

        'pref_income_range': 'income_range',
        'live_with_inlaws': 'live_with_inlaws',
        'pref_live_with_parents': 'live_with_parents',
        'financial_support_to_parents': 'support_parents_financially',
        'pref_career_aspirations': 'career_aspirations',

        'children_timeline': 'children_timeline',
        'open_to_adoption': 'open_to_adoption',
        'deal_breakers': 'deal_breakers',
        'other_non_negotiables': 'other_non_negotiables',
        'health_constraints': 'health_constraints',

        'skin_tone': 'skin_tone',
        'marital_status': 'marital_status',
        'daily_routine': 'daily_routine',
        'family_communication_frequency': 'family_communication_frequency',
        'relaxation_mode': 'relaxation_mode'
    }

    # -------------------------------------------
    # Field comparison — one pass classifies every preference field as an
    # exact match, a difference, or missing profile data.
    # -------------------------------------------
    for expect_key, profile_key in field_mapping.items():
        label = expect_key.replace("pref_", "").replace("_", " ").title()
        expect_value = str(expect_user.get(expect_key, "") or "").strip()

        # No preference expressed -> the field does not count at all.
        if expect_value.lower() in no_preference:
            continue

        profile_value = get_profile_value(profile_user, profile_key)

        # Real missing profile data only (preference exists, profile blank).
        if profile_value == "":
            missing_data.append((label, expect_value))
            continue

        if values_match(expect_value, profile_value, expect_key):
            exact_matches.append(f"• Profile matches your preference for {label.lower()} ({profile_value})")
        else:
            differences.append(
                f"• Profile differs from your preference for {label.lower()} "
                f"(you want: {expect_value}, they are: {profile_value})"
            )

    # -------------------------------------------
    # Compute compatibility (same semantics as compute_expectation_score).
    # The three buckets above partition the preference fields exactly, so a
    # second scan of the mapping is unnecessary: missing profile data counts
    # against the score, as before.
    # -------------------------------------------
    satisfied_count = len(exact_matches)
    total_pref_fields = satisfied_count + len(differences) + len(missing_data)
    if total_pref_fields > 0:
        percent = round((satisfied_count / total_pref_fields) * 100, 2)
    else:
        percent = 0

    # -------------------------------------------
    # Build explanation output
    # -------------------------------------------
    explanations = []
    explanations.append(f"📊 **Expectation Compatibility**: {percent}%")
    explanations.append(f"• {satisfied_count} matches out of {total_pref_fields} preference fields")

    if len(missing_data) > 0:
        explanations.append(f"• ⚠️ {len(missing_data)} fields missing profile data")
        explanations.append("")
        explanations.append("**⚠️ Missing Profile Data:**")
        for label, expect_val in missing_data:
            explanations.append(f"• {label}: Profile missing (You want: {expect_val})")

    # 🚨 FIX: the detailed section was previously emitted only when at least
    # one exact match existed, which hid every mismatch for fully
    # incompatible profiles. Show it whenever there is anything to report.
    if exact_matches or differences:
        explanations.append("")
        explanations.append("**🔍 Detailed Field Analysis:**")
        explanations.extend(exact_matches)
        explanations.extend(differences)

    return explanations
|
models.py
ADDED
|
@@ -0,0 +1,125 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# models.py
|
| 2 |
+
from datetime import datetime
|
| 3 |
+
from flask_sqlalchemy import SQLAlchemy
|
| 4 |
+
|
| 5 |
+
db = SQLAlchemy()
|
| 6 |
+
|
| 7 |
+
class Users(db.Model):
    """Account table: one row per registered user.

    NOTE(review): the auth routes insert and compare ``password`` as-is,
    so it appears to be stored in plain text — confirm whether hashing
    is intended.
    """
    __tablename__ = "Users"
    user_id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(128), nullable=False)
    email = db.Column(db.String(128))
    password = db.Column(db.String(128))
    created_at = db.Column(db.DateTime)
|
| 14 |
+
|
| 15 |
+
class LLMGeneratedQuestions(db.Model):
    """Per-user personality colour counts (blue/green/yellow/red).

    One row per user (``user_id`` is indexed); the four integer columns
    hold raw counts that :meth:`color_vec` turns into a distribution.
    """
    __tablename__ = "LLMGeneratedQuestions"

    llm_id = db.Column(db.Integer, primary_key=True)
    user_id = db.Column(db.Integer, nullable=False, index=True)
    role = db.Column(db.String(64), nullable=True)
    blue = db.Column(db.Integer, nullable=False, default=0)
    green = db.Column(db.Integer, nullable=False, default=0)
    yellow = db.Column(db.Integer, nullable=False, default=0)
    red = db.Column(db.Integer, nullable=False, default=0)
    created_at = db.Column(db.DateTime, default=datetime.utcnow)

    def color_vec(self):
        """Return [blue, green, yellow, red] as a float32 vector normalized
        to sum to 1, or the raw zero vector when all counts are zero."""
        import numpy as np
        v = np.array([self.blue, self.green, self.yellow, self.red], dtype=np.float32)
        s = float(v.sum())
        return v / s if s > 0 else v
|
| 32 |
+
|
| 33 |
+
class Marriage(db.Model):
    """Matrimonial profile: one row of self-reported attributes per user.

    Most columns are free-text strings coming straight from the profile
    form; they are compared against ExpectationResponse fields by the
    matching logic.
    """
    __tablename__ = "Marriage"
    id = db.Column(db.Integer, primary_key=True)
    user_id = db.Column(db.Integer, nullable=False)
    # --- basic identity ---
    full_name = db.Column(db.String(100))
    date_of_birth = db.Column(db.String(50))
    gender = db.Column(db.String(20))
    current_city = db.Column(db.String(100))
    marital_status = db.Column(db.String(50))
    education_level = db.Column(db.String(100))
    employment_status = db.Column(db.String(100))
    number_of_siblings = db.Column(db.String(50))
    family_type = db.Column(db.String(100))
    hobbies_interests = db.Column(db.Text)
    conflict_approach = db.Column(db.String(100))
    financial_style = db.Column(db.String(100))
    income_range = db.Column(db.String(100))
    relocation_willingness = db.Column(db.String(100))
    created_at = db.Column(db.DateTime, default=datetime.utcnow)
    height = db.Column(db.String(100))  # Changed from height_weight
    skin_tone = db.Column(db.String(50))
    languages_spoken = db.Column(db.String(200))
    country = db.Column(db.String(100))
    blood_group = db.Column(db.String(10))
    religion = db.Column(db.String(100))
    dual_citizenship = db.Column(db.String(50))
    siblings_position = db.Column(db.String(50))
    # --- family situation ---
    parents_living_status = db.Column(db.String(100))
    live_with_parents = db.Column(db.String(50))
    support_parents_financially = db.Column(db.String(50))
    family_communication_frequency = db.Column(db.String(100))
    # --- lifestyle ---
    food_preference = db.Column(db.String(100))
    smoking_habit = db.Column(db.String(50))
    alcohol_habit = db.Column(db.String(50))
    daily_routine = db.Column(db.String(200))
    fitness_level = db.Column(db.String(100))
    own_pets = db.Column(db.String(50))
    travel_preference = db.Column(db.String(100))
    relaxation_mode = db.Column(db.String(100))
    # --- career / education detail ---
    job_role = db.Column(db.String(100))
    work_experience_years = db.Column(db.String(50))
    career_aspirations = db.Column(db.String(200))
    field_of_study = db.Column(db.String(200))
    remark = db.Column(db.Text)
    # 🚨 NEW FIELDS
    children_timeline = db.Column(db.String(100))
    open_to_adoption = db.Column(db.String(50))
    deal_breakers = db.Column(db.Text)
    other_non_negotiables = db.Column(db.Text)
    health_constraints = db.Column(db.String(200))
    live_with_inlaws = db.Column(db.String(50))
|
| 84 |
+
|
| 85 |
+
class ExpectationResponse(db.Model):
    """Partner-preference answers: one row per user (user_id is the PK).

    Multi-select answers are persisted as comma-separated strings and
    expanded back to lists by the expectation routes.
    ``_mandatory_fields`` stores a JSON blob of which preferences the
    user marked as must-have.
    """
    __tablename__ = "ExpectationResponse"

    user_id = db.Column(db.Integer, primary_key=True)
    pref_age_range = db.Column(db.String(100))
    pref_height_range = db.Column(db.String(100))
    pref_current_city = db.Column(db.String(100))
    pref_countries = db.Column(db.String(100))
    pref_languages = db.Column(db.String(100))
    health_constraints = db.Column(db.String(200))
    pref_diet = db.Column(db.String(100))
    accept_smoking = db.Column(db.String(50))
    accept_alcohol = db.Column(db.String(50))
    pref_fitness = db.Column(db.String(100))
    pref_family_type = db.Column(db.String(100))
    live_with_inlaws = db.Column(db.String(50))  # 🚨 CHANGED: Remove 'pref_' prefix
    children_timeline = db.Column(db.String(100))
    open_to_adoption = db.Column(db.String(50))
    pref_conflict_approach = db.Column(db.String(100))
    pref_financial_style = db.Column(db.String(100))
    religion_alignment = db.Column(db.String(50))
    pref_shared_hobbies = db.Column(db.String(200))
    travel_pref = db.Column(db.String(100))
    pet_pref = db.Column(db.String(50))
    pref_income_range = db.Column(db.String(100))
    deal_breakers = db.Column(db.Text)
    other_non_negotiables = db.Column(db.Text)
    created_at = db.Column(db.DateTime, default=datetime.utcnow)
    pref_education_level = db.Column(db.String(100))
    pref_employment_status = db.Column(db.String(100))
    expectation_summary = db.Column(db.Text)
    # JSON string of mandatory-preference flags (see expectation routes).
    _mandatory_fields = db.Column(db.Text)
    skin_tone = db.Column(db.String(50))
    marital_status = db.Column(db.String(50))
    daily_routine = db.Column(db.String(200))
    family_communication_frequency = db.Column(db.String(100))
    relaxation_mode = db.Column(db.String(100))
    pref_partner_relocation = db.Column(db.String(50))
    financial_support_to_parents = db.Column(db.String(50))
    pref_career_aspirations = db.Column(db.String(200))
    pref_live_with_parents = db.Column(db.String(50))
|
psychology2e.index
CHANGED
|
Binary files a/psychology2e.index and b/psychology2e.index differ
|
|
|
routes/__init__.py
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# routes/__init__.py
|
| 2 |
+
from .auth_routes import auth_bp
|
| 3 |
+
from .profile_routes import profiles_bp
|
| 4 |
+
from .expectation_routes import expectations_bp
|
| 5 |
+
from .matching_routes import matching_bp
|
| 6 |
+
from .llm_routes import llm_bp
|
| 7 |
+
|
| 8 |
+
__all__ = ['auth_bp', 'profiles_bp', 'expectations_bp', 'matching_bp', 'llm_bp']
|
routes/auth_routes.py
ADDED
|
@@ -0,0 +1,173 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# auth_routes.py (with more debug logging)
|
| 2 |
+
from flask import Blueprint, request, jsonify
|
| 3 |
+
import pyodbc
|
| 4 |
+
import traceback
|
| 5 |
+
import os
|
| 6 |
+
|
| 7 |
+
auth_bp = Blueprint('auth', __name__)
|
| 8 |
+
print(f"✅ AUTH ROUTES: Blueprint '{auth_bp.name}' created")
|
| 9 |
+
|
| 10 |
+
def get_db_connection():
    """Open a pyodbc connection to the SQL Server instance.

    Connection parameters come from the PYMATCH_* environment variables,
    with the same defaults the original server.py used.

    Returns:
        pyodbc.Connection: an open connection (caller is responsible for
        closing it).
    """
    # 🚨 FIX: "localhost\sqlexpress" contained the invalid escape "\s"
    # (a SyntaxWarning in modern CPython, a future SyntaxError) — use a
    # raw string so the backslash is literal.
    SQL_DRIVER = os.getenv("PYMATCH_SQL_DRIVER", "ODBC Driver 17 for SQL Server")
    SQL_SERVER = os.getenv("PYMATCH_SQL_SERVER", r"localhost\sqlexpress")
    SQL_DB = os.getenv("PYMATCH_SQL_DB", "Py_Match")
    SQL_TRUSTED = os.getenv("PYMATCH_SQL_TRUSTED", "yes")

    print(f"🔗 AUTH ROUTES: Connecting to {SQL_SERVER}/{SQL_DB} with driver {SQL_DRIVER}")

    return pyodbc.connect(
        f"DRIVER={SQL_DRIVER};"
        f"SERVER={SQL_SERVER};"
        f"DATABASE={SQL_DB};"
        f"Trusted_Connection={SQL_TRUSTED};"
    )
|
| 25 |
+
|
| 26 |
+
@auth_bp.route('/signup', methods=['POST', 'OPTIONS'])
def signup():
    """Register a new user.

    Expects JSON ``{"name", "email", "password"}``; answers CORS
    preflight OPTIONS directly.

    Responses: 201 on success (with the new user_id), 400 on missing
    fields, 409 if the email already exists, 500 on database/server
    errors.

    NOTE(review): the password is stored as plain text, matching the
    original behaviour — confirm whether hashing is intended.
    """
    print(f"🎯 AUTH ROUTES: /signup endpoint called")

    if request.method == 'OPTIONS':
        print(f"🔄 AUTH ROUTES: Handling OPTIONS preflight request")
        response = jsonify({'success': True})
        response.headers.add('Access-Control-Allow-Origin', '*')
        response.headers.add('Access-Control-Allow-Headers', 'Content-Type, Authorization')
        response.headers.add('Access-Control-Allow-Methods', 'POST, OPTIONS, GET')
        return response

    try:
        data = request.get_json(force=True) or {}
        print(f"🟢 AUTH ROUTES: Received signup request with data: {data}")

        name = data.get("name")
        email = data.get("email")
        password = data.get("password")

        if not name or not email or not password:
            print(f"❌ AUTH ROUTES: Missing required fields")
            return jsonify({"success": False, "message": "Name, email, and password are required."}), 400

        conn = None  # 🚨 FIX: defined before try so the finally block always closes it
        try:
            conn = get_db_connection()
            cur = conn.cursor()

            print(f"🟢 AUTH ROUTES: Checking if email '{email}' already exists...")

            # Check if email already exists
            cur.execute("SELECT user_id FROM Users WHERE email = ?", (email,))
            existing = cur.fetchone()
            if existing:
                # 🚨 FIX: this early return previously leaked the connection.
                print(f"❌ AUTH ROUTES: Email '{email}' already exists in database")
                return jsonify({"success": False, "message": "User already exists. Please sign in."}), 409

            # Use plain password (as per your original code)
            plain_password = password

            print(f"🟢 AUTH ROUTES: Inserting new user '{name}' with email '{email}'")

            # Insert into Users table with plain password
            cur.execute("""
                INSERT INTO Users (name, email, password)
                VALUES (?, ?, ?)
            """, (name, email, plain_password))
            conn.commit()

            # Fetch the newly inserted user_id
            cur.execute("SELECT @@IDENTITY AS user_id")
            row = cur.fetchone()
            user_id = row[0] if row else None

            print(f"✅ AUTH ROUTES: Successfully created user. User ID: {user_id}")

            return jsonify({
                "success": True,
                "message": "Signup successful.",
                "user_id": user_id,
                "name": name,
                "email": email
            }), 201

        except pyodbc.Error as e:
            print(f"❌ AUTH ROUTES: Database Error: {e}")
            print(f"❌ AUTH ROUTES: SQL State: {e.sqlstate if hasattr(e, 'sqlstate') else 'N/A'}")
            print(f"❌ AUTH ROUTES: Error Code: {e.args[0] if e.args else 'N/A'}")
            return jsonify({"success": False, "message": f"Database error: {str(e)}"}), 500

        except Exception as e:
            print(f"❌ AUTH ROUTES: Unexpected Error: {e}")
            traceback.print_exc()
            return jsonify({"success": False, "message": f"Unexpected error: {str(e)}"}), 500

        finally:
            # 🚨 FIX: close the connection on every exit path (the original
            # only closed it on the success path).
            if conn is not None:
                conn.close()

    except Exception as e:
        print(f"❌ AUTH ROUTES: Outer exception: {e}")
        traceback.print_exc()
        return jsonify({"success": False, "message": f"Server error: {str(e)}"}), 500
|
| 106 |
+
|
| 107 |
+
@auth_bp.route('/login', methods=['POST'])
def login():
    """Authenticate a user by email and (plain-text) password.

    Expects JSON ``{"email", "password"}``.

    Responses: 200 on success (with user_id/name/email), 400 on missing
    fields, 404 when the email is unknown, 401 on a wrong password,
    500 on database/server errors.
    """
    print(f"🎯 AUTH ROUTES: /login endpoint called")
    conn = None  # 🚨 FIX: `conn` was unbound in the finally block when the connection failed
    try:
        data = request.get_json(force=True) or {}
        print(f"🟢 AUTH ROUTES: Received login request with email: {data.get('email', 'not provided')}")

        email = data.get("email")
        password = data.get("password")

        if not email or not password:
            print(f"❌ AUTH ROUTES: Missing email or password")
            return jsonify({"success": False, "message": "Email and password are required."}), 400

        try:
            conn = get_db_connection()
            cur = conn.cursor()

            print(f"🟢 AUTH ROUTES: Looking for user with email: {email}")
            cur.execute("SELECT user_id, name, email, password FROM Users WHERE email = ?", (email,))
            user = cur.fetchone()

            if not user:
                print(f"❌ AUTH ROUTES: User not found with email: {email}")
                return jsonify({"success": False, "message": "User not found."}), 404

            user_id, name, email, stored_password = user
            print(f"🟢 AUTH ROUTES: Found user ID: {user_id}, Name: {name}")

            # Use simple string comparison for plain text passwords
            if stored_password != password:
                print(f"❌ AUTH ROUTES: Password mismatch for user {user_id}")
                return jsonify({"success": False, "message": "Invalid password."}), 401

            print(f"✅ AUTH ROUTES: Successful login for user {user_id}")
            # 🚨 FIX: the connection is now closed once, in the finally
            # block, instead of here AND in finally (double close).
            return jsonify({
                "success": True,
                "message": "Login successful.",
                "user_id": user_id,
                "name": name,
                "email": email
            }), 200

        except pyodbc.Error as e:
            print(f"❌ AUTH ROUTES: Database Error: {e}")
            return jsonify({"success": False, "message": f"Database error: {e}"}), 500

        except Exception as e:
            print(f"❌ AUTH ROUTES: Unexpected Error: {e}")
            traceback.print_exc()
            return jsonify({"success": False, "message": f"Unexpected error: {e}"}), 500

    except Exception as e:
        print(f"❌ AUTH ROUTES: Outer exception: {e}")
        traceback.print_exc()
        return jsonify({"success": False, "message": f"Server error: {str(e)}"}), 500
    finally:
        # 🚨 FIX: replaces `try: conn.close() except: pass`, which hid the
        # NameError from the unbound `conn` with a bare except.
        if conn is not None:
            conn.close()
|
| 169 |
+
|
| 170 |
+
@auth_bp.route('/test', methods=['GET'])
def test():
    """Health-check endpoint confirming the auth blueprint is registered."""
    print("✅ AUTH ROUTES: /test endpoint hit!")
    payload = {
        "message": "Auth routes are working!",
        "status": "ok",
        "blueprint": auth_bp.name,
    }
    return jsonify(payload), 200
|
routes/expectation_routes.py
ADDED
|
@@ -0,0 +1,415 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# routes/expectation_routes.py
|
| 2 |
+
from flask import Blueprint, request, jsonify
|
| 3 |
+
import pyodbc
|
| 4 |
+
import json
|
| 5 |
+
from database import get_db_connection, row_to_dict
|
| 6 |
+
|
| 7 |
+
expectations_bp = Blueprint('expectations', __name__)
|
| 8 |
+
|
| 9 |
+
@expectations_bp.route('/api/existing-preferences/<int:user_id>', methods=['GET'])
def get_existing_preferences(user_id: int):
    """Get existing preferences data for a user.

    Returns the most recent ExpectationResponse row as JSON, expanding
    multi-select fields (stored as comma-separated strings) back into
    arrays. 404 when the user has no saved preferences.
    """
    conn = None  # 🚨 FIX: defined before try so the finally block is safe if connect fails
    try:
        conn = get_db_connection()
        cur = conn.cursor()

        cur.execute("""
            SELECT TOP 1 * FROM ExpectationResponse
            WHERE user_id = ?
            ORDER BY created_at DESC
        """, (user_id,))

        row = cur.fetchone()
        if row is None:
            return jsonify({"error": "No preferences found"}), 404

        # Convert row to dict
        preferences = row_to_dict(cur, row)

        # Multi-select answers are persisted as comma-separated strings;
        # look up which columns those are from ExpectationQuestions.
        cur.execute("SELECT column_key FROM ExpectationQuestions WHERE input_type = 'multi_select'")
        # 🚨 FIX: the comprehension variable no longer shadows `row` above.
        multi_select_keys = [r[0] for r in cur.fetchall()]

        for key in multi_select_keys:
            if key in preferences and preferences[key]:
                # Convert comma-separated string back to array
                if isinstance(preferences[key], str):
                    preferences[key] = [item.strip() for item in preferences[key].split(",") if item.strip()]

        return jsonify(preferences), 200

    except Exception as e:
        print(f"Error fetching existing preferences: {e}")
        return jsonify({"error": str(e)}), 500
    finally:
        if conn is not None:
            conn.close()
|
| 50 |
+
|
| 51 |
+
@expectations_bp.route('/api/update-preferences/<int:user_id>', methods=['PUT'])
def update_preferences(user_id: int):
    """Update an existing ExpectationResponse row from JSON payload.

    Only whitelisted columns are updated. ``_mandatory_fields`` is
    serialized to JSON; multi-select answers (per ExpectationQuestions
    input_type) are normalized to comma-separated strings. Fields absent
    from the payload are left untouched.

    Responses: 200 on success, 400 when nothing valid is supplied,
    404 when the user has no preferences row, 500 on errors.
    """
    data = request.get_json(force=True) or {}

    if not user_id:
        return jsonify({"error": "User ID is required."}), 400

    conn = None  # 🚨 FIX: defined before try so the finally block is safe if connect fails
    try:
        conn = get_db_connection()
        cur = conn.cursor()

        print("🟢 DEBUG UPDATE: Incoming data keys ->", list(data.keys()))

        # Handle _mandatory_fields FIRST — it is stored as a JSON string.
        mandatory_fields = data.get('_mandatory_fields', {})
        print("🎯 DEBUG UPDATE: Mandatory fields received:", mandatory_fields)

        # Build SET clause for the UPDATE statement.
        set_parts = []
        values = []

        if mandatory_fields:
            set_parts.append('_mandatory_fields = ?')
            if isinstance(mandatory_fields, dict):
                mandatory_json = json.dumps(mandatory_fields, ensure_ascii=False)
            else:
                mandatory_json = str(mandatory_fields)
            values.append(mandatory_json)
            print("✅ DEBUG UPDATE: Adding _mandatory_fields to update:", mandatory_json)

        # Frontend key -> database column name.
        # NOTE(review): neither mapped key appears in valid_fields below,
        # so this mapping is currently never triggered — confirm whether
        # the frontend keys should be added to the whitelist.
        field_mapping = {
            'pref_live_with_inlaws': 'live_with_inlaws',
            'accept_financial_support_to_parents': 'financial_support_to_parents'
        }

        # Whitelist of updatable ExpectationResponse columns.
        valid_fields = [
            'pref_age_range', 'pref_height_range', 'pref_current_city', 'pref_countries',
            'pref_languages', 'health_constraints', 'pref_diet', 'accept_smoking',
            'accept_alcohol', 'pref_fitness', 'pref_family_type', 'live_with_inlaws',
            'children_timeline', 'open_to_adoption', 'pref_conflict_approach',
            'pref_financial_style', 'religion_alignment', 'pref_shared_hobbies',
            'travel_pref', 'pet_pref', 'pref_income_range', 'deal_breakers',
            'other_non_negotiables', 'pref_education_level', 'pref_employment_status',
            'expectation_summary', 'skin_tone', 'marital_status', 'daily_routine',
            'family_communication_frequency', 'relaxation_mode', 'pref_partner_relocation',
            'financial_support_to_parents', 'pref_career_aspirations', 'pref_live_with_parents'
        ]

        # Input types drive how each value is normalized below.
        cur.execute("SELECT column_key, input_type FROM ExpectationQuestions")
        field_types = {row[0]: row[1] for row in cur.fetchall()}

        for key in valid_fields:
            db_field_name = field_mapping.get(key, key)

            if key in data and key != 'user_id' and key != '_mandatory_fields':
                value = data[key]
                field_type = field_types.get(key, 'text')
                print(f"🟡 Processing update field {key} -> {db_field_name} (type: {field_type}): {value}")

                if field_type == 'multi_select' and isinstance(value, list):
                    # Deduplicate and strip stray JSON punctuation from items.
                    clean_values = []
                    for item in value:
                        if isinstance(item, str) and item.strip():
                            clean_item = item.strip()
                            clean_item = clean_item.replace('[', '').replace(']', '').replace('"', '').strip()
                            if clean_item and clean_item not in clean_values:
                                clean_values.append(clean_item)

                    if clean_values:
                        final_value = ", ".join(clean_values)
                        print(f"🟢 Converted multi_select array to string: '{final_value}'")
                        set_parts.append(f"{db_field_name} = ?")
                        values.append(final_value)
                    else:
                        set_parts.append(f"{db_field_name} = ?")
                        values.append("")

                elif field_type == 'multi_select' and isinstance(value, str):
                    # Already a string — just strip stray JSON punctuation.
                    clean_value = value.strip()
                    clean_value = clean_value.replace('[', '').replace(']', '').replace('"', '').strip()
                    if clean_value.startswith(',') or clean_value.endswith(','):
                        clean_value = clean_value.strip(',')

                    print(f"🟢 Cleaning multi_select string: '{clean_value}'")
                    set_parts.append(f"{db_field_name} = ?")
                    values.append(clean_value)

                elif value is not None:
                    final_value = str(value).strip() if isinstance(value, str) else value
                    print(f"🟢 Storing single value: '{final_value}'")
                    set_parts.append(f"{db_field_name} = ?")
                    values.append(final_value)
                else:
                    # Explicit null in the payload clears the column.
                    set_parts.append(f"{db_field_name} = ?")
                    values.append("")

        if not set_parts and not mandatory_fields:
            return jsonify({"error": "No valid fields to update"}), 400

        # Add user_id for the WHERE clause. Column names come only from the
        # whitelist above, so the f-string query stays injection-safe;
        # values go through parameter placeholders.
        values.append(user_id)

        set_clause = ", ".join(set_parts)
        query = f"UPDATE ExpectationResponse SET {set_clause} WHERE user_id = ?"

        print(f"🟢 DEBUG UPDATE: Executing query: {query}")
        print(f"🟢 DEBUG UPDATE: Values: {values}")

        cur.execute(query, values)
        conn.commit()

        # Check if any row was updated
        if cur.rowcount == 0:
            print("⚠️ WARNING: No rows were updated - user might not exist")
            return jsonify({"error": "No preferences found to update"}), 404

        print(f"✅ SUCCESS: Updated {cur.rowcount} row(s) for user {user_id}")
        return jsonify({"message": "Preferences updated successfully."}), 200

    except Exception as e:
        print(f"🔴 Error updating preferences: {e}")
        import traceback
        traceback.print_exc()
        return jsonify({"error": str(e)}), 500
    finally:
        if conn is not None:
            conn.close()
|
| 188 |
+
|
| 189 |
+
@expectations_bp.route('/api/check-mandatory-fields/<int:user_id>', methods=['GET'])
def check_mandatory_fields(user_id: int):
    """Inspect the raw ``_mandatory_fields`` value stored for *user_id*.

    Debug endpoint: returns the stored value, its Python type name, and
    whether it is non-NULL, so callers can verify what was persisted.

    Returns:
        200 with the inspection payload; 404 when the user has no
        ExpectationResponse row; 500 on any database error.
    """
    conn = None  # predefine so the finally block is safe if connect fails
    try:
        conn = get_db_connection()
        cur = conn.cursor()

        cur.execute("""
            SELECT user_id, _mandatory_fields
            FROM ExpectationResponse
            WHERE user_id = ?
        """, (user_id,))

        row = cur.fetchone()
        if row is None:
            return jsonify({"error": "No preferences found for user"}), 404

        result = {
            "user_id": row[0],
            "_mandatory_fields": row[1],
            "_mandatory_fields_type": str(type(row[1])),
            "exists_in_db": row[1] is not None
        }

        print("🔍 CHECK MANDATORY FIELDS:", result)

        return jsonify(result), 200

    except Exception as e:
        print(f"Error checking mandatory fields: {e}")
        return jsonify({"error": str(e)}), 500
    finally:
        # Previously a failure inside get_db_connection() raised NameError
        # here; close only when the connection was actually opened.
        if conn is not None:
            try:
                conn.close()
            except Exception:
                pass
|
| 225 |
+
|
| 226 |
+
@expectations_bp.route('/api/expectation-questions', methods=['GET'])
def get_expectation_questions():
    """List every expectation questionnaire question, ordered by id.

    ``options`` is stored as a comma-separated string in the database and
    is returned as a list (empty when NULL or blank).

    Returns:
        200 with a JSON array of question objects; 500 on database error.
    """
    conn = None  # predefine so the finally block is safe if connect fails
    try:
        conn = get_db_connection()
        cur = conn.cursor()
        cur.execute("""
            SELECT id, question, options, input_type, column_key, category
            FROM ExpectationQuestions
            ORDER BY id
        """)
        out = [
            {
                "id": r[0],
                "question": r[1],
                "options": (r[2].split(",") if r[2] else []),
                "input_type": r[3],
                "column_key": r[4],
                "category": r[5],
            }
            for r in cur.fetchall()
        ]
        return jsonify(out), 200
    except Exception as e:
        return jsonify({"error": str(e)}), 500
    finally:
        # Previously conn could be unbound here if get_db_connection() raised.
        if conn is not None:
            try:
                conn.close()
            except Exception:
                pass
|
| 254 |
+
|
| 255 |
+
def _clean_multiselect_token(raw: str) -> str:
    """Strip stray JSON-array artifacts ('[', ']', '"') and whitespace."""
    return raw.replace('[', '').replace(']', '').replace('"', '').strip()


@expectations_bp.route('/api/expectation-response', methods=['POST'])
def save_expectation_response():
    """Insert a new ExpectationResponse row for the posted user.

    Accepts a JSON body whose keys are questionnaire column keys, plus the
    special ``_mandatory_fields`` dict (stored as a JSON string).
    ``multi_select`` values may arrive as lists or as comma-separated
    strings; both are normalized to a clean ``"a, b"`` string. Unknown keys
    and empty values are silently skipped.

    Returns:
        201 on success; 400 when user_id is missing or no storable field
        is found; 500 on database error.
    """
    data = request.get_json(force=True) or {}
    user_id = data.get("user_id")

    if not user_id:
        return jsonify({"error": "user_id is required"}), 400

    conn = None  # predefine so the finally block is safe if connect fails
    try:
        conn = get_db_connection()
        cur = conn.cursor()

        # Valid keys come from the ExpectationQuestions table...
        cur.execute("SELECT column_key, input_type FROM ExpectationQuestions")
        valid_fields = {row[0]: row[1] for row in cur.fetchall()}

        # ...plus every column of the ExpectationResponse model, so fields
        # with no questionnaire entry can still be stored.
        expectation_model_fields = [
            'pref_age_range', 'pref_height_range', 'pref_current_city', 'pref_countries',
            'pref_languages', 'health_constraints', 'pref_diet', 'accept_smoking',
            'accept_alcohol', 'pref_fitness', 'pref_family_type', 'live_with_inlaws',
            'children_timeline', 'open_to_adoption', 'pref_conflict_approach',
            'pref_financial_style', 'religion_alignment', 'pref_shared_hobbies',
            'travel_pref', 'pet_pref', 'pref_income_range', 'deal_breakers',
            'other_non_negotiables', 'pref_education_level', 'pref_employment_status',
            'expectation_summary', '_mandatory_fields', 'skin_tone', 'marital_status',
            'daily_routine', 'family_communication_frequency', 'relaxation_mode',
            'pref_partner_relocation', 'financial_support_to_parents',
            'pref_career_aspirations', 'pref_live_with_parents'
        ]
        for field in expectation_model_fields:
            valid_fields.setdefault(field, 'text')  # default input type

        print("🟢 DEBUG: Valid fields ->", list(valid_fields.keys()))
        print("🟢 DEBUG: Incoming data keys ->", list(data.keys()))

        cols, vals = [], []

        # _mandatory_fields is handled first: dicts are serialized to JSON,
        # anything else is stored via its string form.
        mandatory_fields = data.get('_mandatory_fields', {})
        if mandatory_fields:
            cols.append('_mandatory_fields')
            if isinstance(mandatory_fields, dict):
                vals.append(json.dumps(mandatory_fields, ensure_ascii=False))
            else:
                vals.append(str(mandatory_fields))
            print("✅ DEBUG: Added _mandatory_fields:", mandatory_fields)

        # Process all other fields.
        for key, field_type in valid_fields.items():
            if key not in data or key in ('user_id', '_mandatory_fields'):
                continue
            value = data[key]
            print(f"🟡 Processing field {key} (type: {field_type}): {value}")

            if value is None or value == '':
                continue  # skip empty values

            if field_type == 'multi_select' and isinstance(value, list):
                # Clean and deduplicate entries, preserving order.
                clean_values = []
                for item in value:
                    if isinstance(item, str) and item.strip():
                        clean_item = _clean_multiselect_token(item.strip())
                        if clean_item and clean_item not in clean_values:
                            clean_values.append(clean_item)
                if not clean_values:
                    continue  # skip empty arrays
                final_value = ", ".join(clean_values)
                print(f"🟢 Converted multi_select array to string: '{final_value}'")
                cols.append(key)
                vals.append(final_value)

            elif field_type == 'multi_select' and isinstance(value, str):
                clean_value = _clean_multiselect_token(value.strip())
                if clean_value.startswith(',') or clean_value.endswith(','):
                    clean_value = clean_value.strip(',')
                if not clean_value:
                    continue
                print(f"🟢 Cleaning multi_select string: '{clean_value}'")
                cols.append(key)
                vals.append(clean_value)

            else:
                # Single-valued field; NOTE: falsy values (0, False) are
                # skipped — preserved from the original behavior.
                final_value = str(value).strip() if isinstance(value, str) else value
                if final_value:
                    print(f"🟢 Storing single value: '{final_value}'")
                    cols.append(key)
                    vals.append(final_value)

        if not cols:
            return jsonify({"error": "No valid fields found in request"}), 400

        # Column names are bracket-quoted and come from the questions table /
        # model whitelist above, not directly from user input; values are
        # bound with placeholders.
        placeholders = ", ".join(["?"] * (len(cols) + 1))  # +1 for user_id
        col_str = ", ".join([f"[{c}]" for c in cols])

        query = f"""
            INSERT INTO ExpectationResponse (user_id, {col_str})
            VALUES ({placeholders})
        """
        print("🟢 DEBUG: Final query ->", query)
        print("🟢 DEBUG: Values count ->", len([user_id] + vals))
        print("🟢 DEBUG: Columns ->", cols)

        cur.execute(query, [user_id] + vals)
        conn.commit()

        return jsonify({"message": "Preferences saved successfully"}), 201

    except Exception as e:
        import traceback
        print("🔴 ERROR in save_expectation_response:")
        traceback.print_exc()
        return jsonify({"error": str(e)}), 500

    finally:
        # Previously conn could be unbound here if get_db_connection() raised.
        if conn is not None:
            try:
                conn.close()
            except Exception:
                pass
|
| 389 |
+
|
| 390 |
+
@expectations_bp.route('/api/check-expectations/<int:user_id>', methods=['GET'])
def check_expectations(user_id: int):
    """Report whether *user_id* has any saved expectation preferences.

    Returns:
        200 with ``{"exists": bool}``; 500 on database failure.
    """
    conn = None  # predefine so the finally block is safe if connect fails
    try:
        conn = get_db_connection()
        cur = conn.cursor()

        cur.execute("""
            SELECT COUNT(*) as count
            FROM ExpectationResponse
            WHERE user_id = ?
        """, (user_id,))

        row = cur.fetchone()
        exists = row[0] > 0 if row else False

        return jsonify({"exists": exists}), 200

    except Exception as e:
        print(f"Error checking expectations: {e}")
        return jsonify({"error": str(e)}), 500
    finally:
        # Previously conn could be unbound here if get_db_connection() raised.
        if conn is not None:
            try:
                conn.close()
            except Exception:
                pass
|
routes/llm_routes.py
ADDED
|
@@ -0,0 +1,181 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# routes/llm_routes.py
|
| 2 |
+
from flask import Blueprint, request, jsonify
|
| 3 |
+
import uuid
|
| 4 |
+
from llm_service import (
|
| 5 |
+
SessionState, SESSIONS, save_sessions, persist_final_progress,
|
| 6 |
+
choose_themes, generate_batch_questions
|
| 7 |
+
)
|
| 8 |
+
# Import fetch_profile_for_role from database instead of llm_service
|
| 9 |
+
from database import fetch_profile_for_role
|
| 10 |
+
|
| 11 |
+
llm_bp = Blueprint('llm', __name__)
|
| 12 |
+
|
| 13 |
+
@llm_bp.route('/llm/start', methods=['POST'])
def llm_start():
    """Start a new color-assessment session for a user.

    Validates the role, refuses users who already completed an assessment,
    creates a SessionState, generates the first batch of questions
    (optionally grounded in FAISS text chunks), and returns the first
    question together with the new session id.

    Returns:
        200 with the first question payload; 400 on validation failure or
        a completed assessment; 500 when question generation fails.
    """
    data = request.get_json(force=True) or {}
    user_id = str(data.get("user_id") or "").strip()
    role_in = (data.get("role") or "general").lower()

    # Reject malformed counts with a 400 instead of crashing with a 500.
    try:
        n_req = int(data.get("n_questions", 20))
        b_req = int(data.get("batch_size", 10))
    except (TypeError, ValueError):
        return jsonify({"error": "n_questions and batch_size must be integers"}), 400

    if not user_id:
        return jsonify({"error": "user_id is required"}), 400
    if role_in not in ["marriage", "interview", "partnership", "general"]:
        return jsonify({"error": "Invalid role. Allowed: marriage, interview, partnership, general"}), 400

    # Block re-takes: any stored non-zero color count means the assessment
    # was already completed. Best-effort — a DB failure only logs a warning.
    try:
        from database import get_db_connection
        conn = get_db_connection()
        try:
            cur = conn.cursor()
            cur.execute("""
                SELECT COUNT(*) as count
                FROM LLMGeneratedQuestions
                WHERE user_id = ? AND (blue > 0 OR green > 0 OR yellow > 0 OR red > 0)
            """, (user_id,))
            row = cur.fetchone()
            has_taken_assessment = row[0] > 0 if row else False
        finally:
            conn.close()  # previously leaked when the query raised

        if has_taken_assessment:
            print(f"🚫 User {user_id} already took assessment, blocking new session")
            return jsonify({
                "error": "Assessment already completed",
                "assessment_already_taken": True
            }), 400
    except Exception as e:
        print(f"Warning: Could not check assessment status: {e}")

    # Fetch profile from the correct table based on role
    profile = fetch_profile_for_role(user_id, role_in)

    # Create session
    sid = str(uuid.uuid4())
    sess = SessionState(
        n_questions=n_req,
        batch_size=b_req,
        domain=role_in,
        role=role_in,
        profile=profile
    )
    SESSIONS[sid] = sess

    # Generate first batch of questions from FAISS chunks
    to_generate = min(sess.batch_size, sess.remaining())
    themes = choose_themes(sess, to_generate)

    context = ""
    try:
        from faiss_service import HAS_FAISS, FAISS_INDEX, TEXT_CHUNKS
        if HAS_FAISS and FAISS_INDEX is not None and TEXT_CHUNKS:
            import random
            context = "\n".join(random.sample(TEXT_CHUNKS, min(3, len(TEXT_CHUNKS))))
    except ImportError:
        pass  # FAISS support is optional

    # Generate questions
    queue = generate_batch_questions(
        themes, sess.to_min_state(), context=context,
        previous_questions=sess.history_of_questions
    )

    if not queue:
        # Drop the dead session so failed starts do not accumulate.
        SESSIONS.pop(sid, None)
        return jsonify({"error": "Question generation failed"}), 500

    sess.queue = queue

    # Serve first question
    first = sess.queue.pop(0)
    sess.asked += 1

    # Track the asked question so later batches avoid repeats.
    sess.history_of_questions.append(first["question"])

    save_sessions()

    return jsonify({
        "session_id": sid,
        "index": 1,
        "total": sess.n_questions,
        "question": first["question"],
        "options": first["options"],
        "source": first.get("source", "unknown"),
        "role": sess.role,
        "profile_used": bool(profile),
        "faiss_themes": themes,
        "faiss_context": context
    })
|
| 105 |
+
|
| 106 |
+
@llm_bp.route('/llm/next', methods=['POST'])
def llm_next():
    """Record the answer to the current question and serve the next one.

    Expects JSON with ``session_id`` and ``selected_color``. Once every
    question has been answered the session is closed and the final color
    mix is persisted; otherwise the question queue is refilled from the
    generator whenever it runs dry.
    """
    payload = request.get_json(force=True) or {}
    session_id = payload.get("session_id")
    chosen = str(payload.get("selected_color") or "").lower()

    if not session_id or session_id not in SESSIONS:
        return jsonify({"error": "Invalid or missing session_id"}), 400
    if chosen not in ["blue", "green", "red", "yellow"]:
        return jsonify({"error": "selected_color must be blue|green|red|yellow"}), 400

    state = SESSIONS[session_id]
    if state.finished:
        return jsonify({"done": True, "message": "Session already finished."})

    # Tally the answer before deciding what happens next.
    state.color_counts[chosen] += 1
    state.history.append({"selected_color": chosen})

    # Reported back unchanged when no queue refill happens this round.
    themes, context = [], ""

    # All questions answered -> close out and persist the final mix.
    if state.asked >= state.n_questions:
        state.finished = True
        mix = state.to_min_state()["mix"]
        user_id = (state.profile or {}).get("user_id")
        db_ok = persist_final_progress(user_id=user_id, role=state.role, mix=mix)
        save_sessions()
        return jsonify({
            "done": True,
            "message": "No more questions.",
            "mix": mix,
            "db_write": "ok" if db_ok else "failed"
        })

    # Refill the queue when empty, optionally grounding in FAISS chunks.
    if not state.queue:
        themes = choose_themes(state, min(state.batch_size, state.remaining()))

        try:
            from faiss_service import HAS_FAISS, FAISS_INDEX, TEXT_CHUNKS
            if HAS_FAISS and FAISS_INDEX is not None and TEXT_CHUNKS:
                import random
                context = "\n".join(random.sample(TEXT_CHUNKS, min(3, len(TEXT_CHUNKS))))
        except ImportError:
            pass

        state.queue = generate_batch_questions(
            themes,
            state.to_min_state(),
            context=context,
            previous_questions=state.history_of_questions,
        )

    if not state.queue:
        return jsonify({"error": "Question generation failed"}), 500

    question = state.queue.pop(0)
    state.asked += 1

    # Remember the served question so later batches avoid repeats.
    state.history_of_questions.append(question["question"])

    save_sessions()

    return jsonify({
        "session_id": session_id,
        "index": state.asked,
        "total": state.n_questions,
        "question": question["question"],
        "options": question["options"],
        "progress": state.to_min_state()["mix"],
        "source": question.get("source", "unknown"),
        "role": state.role,
        "faiss_themes": themes,
        "faiss_context": context
    })
|
routes/matching_routes.py
ADDED
|
@@ -0,0 +1,325 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# routes/matching_routes.py
|
| 2 |
+
from flask import Blueprint, request, jsonify, current_app
|
| 3 |
+
import numpy as np
|
| 4 |
+
from sqlalchemy import func
|
| 5 |
+
from models import LLMGeneratedQuestions, Marriage, Users, ExpectationResponse, db
|
| 6 |
+
from matching_functions import match_expectation_with_profiles, generate_expectation_explanation
|
| 7 |
+
from character_functions import cosine_sim, generate_character_llm_explanation, generate_character_fallback_explanation
|
| 8 |
+
from database import fetch_expectation_data, fetch_marriage_profile_data
|
| 9 |
+
from config import COLOR_KEYS
|
| 10 |
+
|
| 11 |
+
matching_bp = Blueprint('matching', __name__)
|
| 12 |
+
|
| 13 |
+
def _range_key(score_percentage):
    """Map a 0-100 match score to its display bucket name."""
    if score_percentage >= 90:
        return "90-100"
    if score_percentage >= 80:
        return "80-89"
    if score_percentage >= 70:
        return "70-79"
    if score_percentage >= 60:
        return "60-69"
    return "below_60"


def _empty_ranges():
    """Fresh score-range buckets in display order."""
    return {"90-100": [], "80-89": [], "70-79": [], "60-69": [], "below_60": []}


def _match_expectation_only(user_id):
    """Pure expectation matching (mandatory-field filtering applied)."""
    print("🎯 Using PURE EXPECTATION matching")
    expectation_matches = match_expectation_with_profiles(user_id)

    if not expectation_matches:
        return jsonify({"error": f"No matches found for user_id={user_id}"}), 404

    matches_by_range = _empty_ranges()
    for match in expectation_matches:
        # Expectation score only: 0-1 scale converted to a percentage.
        score_percentage = match.get("expectation_score", 0) * 100
        matches_by_range[_range_key(score_percentage)].append({
            "user_id": match["user_id"],
            "name": match["name"],
            "gender": match.get("gender", ""),
            "city": match.get("location", ""),
            "score_expect": match.get("expectation_score", 0),  # raw 0-1
            "score_color": match.get("character_score", 0),     # included, not used for sorting
            "final_score": round(score_percentage, 2),          # percentage for display
            "blue": 0, "green": 0, "yellow": 0, "red": 0,
            "explanations": [],
            "explanation_source": "expectation"
        })

    user = Users.query.filter_by(user_id=user_id).first()
    input_user = {
        "user_id": user_id,
        "role": "marriage",
        "name": user.name if user else "Unknown",
        "blue": 0, "green": 0, "yellow": 0, "red": 0,  # not used in this mode
        "created_at": None,
    }

    print(f"✅ DEBUG: Returning {len(expectation_matches)} pure expectation matches")

    return jsonify({
        "input_user": input_user,
        "matches": matches_by_range,
        "count": len(expectation_matches),
        "mode": "expectation-only"
    })


def _match_character_only(user_id):
    """Pure character (color-vector) matching; no expectation filtering."""
    print("🎯 Using PURE CHARACTER matching - NO EXPECTATION FILTERING")

    current_user = Marriage.query.filter_by(user_id=user_id).first()
    if not current_user:
        return jsonify({"error": f"No marriage profile found for user_id={user_id}"}), 404

    user_gender = (current_user.gender or "").lower()
    print(f"🔍 DEBUG: Current user gender: {user_gender}")

    # Opposite-gender profiles only — mandatory filtering is deliberately skipped.
    if user_gender.startswith('male'):
        opposite_profiles = Marriage.query.filter(func.lower(func.trim(Marriage.gender)) == "female").all()
    elif user_gender.startswith('female'):
        opposite_profiles = Marriage.query.filter(func.lower(func.trim(Marriage.gender)) == "male").all()
    else:
        opposite_profiles = Marriage.query.filter(Marriage.gender != current_user.gender).all()

    print(f"🔍 DEBUG: Found {len(opposite_profiles)} opposite gender profiles (NO MANDATORY FILTERING)")

    base_llm = LLMGeneratedQuestions.query.filter_by(user_id=user_id).first()
    if not base_llm:
        return jsonify({"error": f"No character data found for user_id={user_id}"}), 404

    u_vec = base_llm.color_vec()

    # Fetch LLM data for all candidates in a single query.
    all_ids = [profile.user_id for profile in opposite_profiles]
    llm_data = LLMGeneratedQuestions.query.filter(LLMGeneratedQuestions.user_id.in_(all_ids)).all()
    llm_map = {l.user_id: l for l in llm_data}

    candidates = []
    for profile in opposite_profiles:
        llm_other = llm_map.get(profile.user_id)
        if llm_other is None:
            continue  # no character data for this profile
        character_score = cosine_sim(u_vec, llm_other.color_vec())
        candidates.append({
            "user_id": profile.user_id,
            "name": profile.full_name,
            "gender": profile.gender,
            "location": profile.current_city,
            "score_color": character_score,                  # raw 0-1
            "score_expect": 0,                               # not used in this mode
            "final_score": round(character_score * 100, 2),  # percentage for display
            "blue": llm_other.blue,
            "green": llm_other.green,
            "yellow": llm_other.yellow,
            "red": llm_other.red,
            "explanations": [],
            "explanation_source": "character"
        })

    # Sort by character score (highest first).
    candidates.sort(key=lambda x: x["score_color"], reverse=True)
    print(f"🔍 DEBUG: Pure character matching found {len(candidates)} candidates")

    # Debug: score distribution ("90+" label kept for log compatibility).
    print("🔍 DEBUG: Candidate scores distribution:")
    score_ranges = {"90+": 0, "80-89": 0, "70-79": 0, "60-69": 0, "below_60": 0}
    for candidate in candidates:
        bucket = _range_key(candidate["final_score"])
        score_ranges["90+" if bucket == "90-100" else bucket] += 1
    for range_name, count in score_ranges.items():
        print(f"  {range_name}: {count} users")

    print("🔍 DEBUG: Top 10 candidate scores:")
    for i, candidate in enumerate(candidates[:10]):
        print(f"  {i+1}. {candidate['name']}: raw={candidate['score_color']:.3f}, percentage={candidate['final_score']}%")

    matches_by_range = _empty_ranges()
    for candidate in candidates:
        matches_by_range[_range_key(candidate["final_score"])].append(candidate)

    print("🔍 DEBUG: Range distribution after grouping:")
    for range_name, matches in matches_by_range.items():
        if matches:
            scores = [m["final_score"] for m in matches]
            print(f"  {range_name}: {len(matches)} users, scores: {min(scores):.1f}% - {max(scores):.1f}%")
        else:
            print(f"  {range_name}: 0 users")

    user = Users.query.filter_by(user_id=user_id).first()
    input_user = {
        "user_id": user_id,
        "role": "marriage",
        "name": user.name if user else "Unknown",
        "blue": base_llm.blue,
        "green": base_llm.green,
        "yellow": base_llm.yellow,
        "red": base_llm.red,
        "created_at": base_llm.created_at.isoformat() if base_llm.created_at else None,
    }

    print(f"✅ DEBUG: Returning {len(candidates)} pure character matches (NO EXPECTATION FILTERING)")

    return jsonify({
        "input_user": input_user,
        "matches": matches_by_range,
        "count": len(candidates),
        "mode": "character"
    })


def _match_expectation_and_character(user_id):
    """Expectation matching refined by character similarity (combined score)."""
    print("🎯 Using EXPECTATION + CHARACTER matching")
    expectation_matches = match_expectation_with_profiles(user_id)

    if not expectation_matches:
        return jsonify({"error": f"No matches found for user_id={user_id}"}), 404

    matches_by_range = _empty_ranges()
    for match in expectation_matches:
        # Combined overall_score: 0-1 scale converted to a percentage.
        score_percentage = match.get("overall_score", 0) * 100
        matches_by_range[_range_key(score_percentage)].append({
            "user_id": match["user_id"],
            "name": match["name"],
            "gender": match.get("gender", ""),
            "city": match.get("location", ""),
            "final_score": round(score_percentage, 2),
            "score_expect": match.get("expectation_score", 0),
            "score_color": match.get("character_score", 0),
            "blue": 0, "green": 0, "yellow": 0, "red": 0,
            "explanations": [],
            "explanation_source": "expectation"
        })

    user = Users.query.filter_by(user_id=user_id).first()
    llm_data = LLMGeneratedQuestions.query.filter_by(user_id=user_id).first()

    input_user = {
        "user_id": user_id,
        "role": "marriage",
        "name": user.name if user else "Unknown",
        "blue": llm_data.blue if llm_data else 0,
        "green": llm_data.green if llm_data else 0,
        "yellow": llm_data.yellow if llm_data else 0,
        "red": llm_data.red if llm_data else 0,
        "created_at": llm_data.created_at.isoformat() if llm_data and llm_data.created_at else None,
    }

    print(f"✅ DEBUG: Returning {len(expectation_matches)} expectation + character matches")

    return jsonify({
        "input_user": input_user,
        "matches": matches_by_range,
        "count": len(expectation_matches),
        "mode": "expectation"
    })


@matching_bp.route('/match')
@matching_bp.route('/match/<int:user_id>')
def unified_match(user_id=None):
    """Unified match endpoint that handles all three matching modes.

    Query parameters:
        user_id: target user (also accepted as a path parameter).
        mode: "expectation-only" (default) — pure expectation scoring;
              "character" — pure color-vector similarity with no
              expectation filtering; any other value — expectation
              matching refined by character similarity.
        role / limit / exclude_self: parsed for API compatibility but
        currently unused by this implementation.
    """
    # Flask already provides the app context for route handlers; no manual
    # db.app.app_context() is needed here.

    # Get user_id from either path parameter or query parameter.
    if user_id is None:
        try:
            user_id = int(request.args.get("user_id", ""))
        except ValueError:
            return jsonify({"error": "Missing or invalid user_id"}), 400

    # Parsed-but-unused parameters kept so malformed values still fail the
    # same way they did before (e.g. non-integer limit).
    role = request.args.get("role", None)
    limit = int(request.args.get("limit", "10"))
    exclude_self = request.args.get("exclude_self", "yes").lower() == "yes"
    mode = request.args.get("mode", "expectation-only")  # default mode

    print(f"🔍 DEBUG: Match request - user_id: {user_id}, mode: {mode}")

    # Dispatch to the mode-specific implementation.
    if mode == "expectation-only":
        return _match_expectation_only(user_id)
    elif mode == "character":
        return _match_character_only(user_id)
    else:
        return _match_expectation_and_character(user_id)
|
routes/profile_routes.py
ADDED
|
@@ -0,0 +1,441 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# routes/profile_routes.py
|
| 2 |
+
from flask import Blueprint, request, jsonify
|
| 3 |
+
import pyodbc
|
| 4 |
+
import json
|
| 5 |
+
from database import get_db_connection, row_to_dict
|
| 6 |
+
|
| 7 |
+
profiles_bp = Blueprint('profiles', __name__)
|
| 8 |
+
|
| 9 |
+
@profiles_bp.route('/api/questions/select-role', methods=['POST'])
def select_role():
    """Assign a role to an existing user.

    Expects JSON: {"user_id": int, "role_name": str, "assigned_at": ISO str or None}.
    Returns 201 on success, 400 for missing input / FK violation, 404 if the
    user does not exist, 500 on other database errors.
    """
    data = request.get_json(force=True) or {}
    user_id = data.get("user_id")
    role_name = data.get("role_name")
    assigned_at = data.get("assigned_at")  # ISO timestamp or None

    if not user_id or not role_name:
        return jsonify({"error": "User ID and role name are required."}), 400

    conn = None  # defined up-front so the finally block never hits a NameError
    try:
        # Verify the user exists first: a clearer error than an FK violation.
        conn = get_db_connection()
        cur = conn.cursor()
        cur.execute("SELECT COUNT(*) FROM Users WHERE user_id = ?", (user_id,))
        if cur.fetchone()[0] == 0:
            return jsonify({"error": "User ID does not exist in the Users table."}), 404

        cur.execute("""
            INSERT INTO UserRoles (user_id, role_name, assigned_at)
            VALUES (?, ?, ?)
        """, (user_id, role_name, assigned_at))
        conn.commit()

        return jsonify({"message": "Role assigned successfully."}), 201

    except pyodbc.Error as e:
        # Translate FK violations into a client error instead of a 500.
        if "foreign key" in str(e).lower():
            return jsonify({"error": "Foreign key violation: User ID not found."}), 400
        return jsonify({"error": f"Database error: {str(e)}"}), 500
    except Exception as e:
        return jsonify({"error": f"Unexpected error: {str(e)}"}), 500
    finally:
        if conn is not None:
            try:
                conn.close()
            except Exception:
                pass
|
| 51 |
+
|
| 52 |
+
@profiles_bp.route('/api/questions/marriage', methods=['GET'])
def get_questions():
    """Return the ordered list of marriage-role questions.

    Each item: {"label", "options" (comma-split list), "input_type",
    "column_key", "category"}. Returns 500 on database errors.
    """
    conn = None  # defined up-front so the finally block never hits a NameError
    try:
        conn = get_db_connection()
        cur = conn.cursor()
        cur.execute("""
            SELECT question, options, input_type, column_key, category
            FROM RoleQuestions
            WHERE role_name = 'marriage'
            ORDER BY id
        """)
        out = []
        for label, options, input_type, column_key, category in cur.fetchall():
            out.append({
                "label": label,
                # options are stored as a comma-separated string (or NULL)
                "options": options.split(",") if options else [],
                "input_type": input_type,
                "column_key": column_key,
                "category": category,
            })
        return jsonify(out), 200
    except pyodbc.Error as e:
        return jsonify({"error": str(e)}), 500
    finally:
        if conn is not None:
            try:
                conn.close()
            except Exception:
                pass
|
| 84 |
+
|
| 85 |
+
@profiles_bp.route('/api/questions/submit-answers/marriage', methods=['POST'])
def submit_answers():
    """Insert a new Marriage profile row from submitted questionnaire answers.

    Expects JSON with "user_id" plus every field in the marriage whitelist
    below. Returns 201 on success, 400 if a field is missing, 500 on errors.
    """
    data = request.get_json(force=True) or {}
    user_id = data.get("user_id")
    if not user_id:
        return jsonify({"error": "User ID is required."}), 400

    role_fields = {
        "marriage": [
            "full_name", "date_of_birth", "gender", "current_city", "marital_status",
            "education_level", "employment_status", "number_of_siblings", "family_type",
            "hobbies_interests", "conflict_approach", "financial_style", "income_range",
            "relocation_willingness", "height", "skin_tone", "languages_spoken", "country",
            "blood_group", "religion", "dual_citizenship", "siblings_position",
            "parents_living_status", "live_with_parents", "support_parents_financially",
            "family_communication_frequency", "food_preference", "smoking_habit",
            "alcohol_habit", "daily_routine", "fitness_level", "own_pets",
            "travel_preference", "relaxation_mode", "job_role", "work_experience_years",
            "career_aspirations", "field_of_study", "remark", "children_timeline",
            "open_to_adoption", "deal_breakers", "other_non_negotiables",
            "health_constraints", "live_with_inlaws"
            # Note: "created_at" is excluded (auto-generated)
        ]
    }

    # Validate all required fields are present (same message as before: first missing field).
    for f in role_fields["marriage"]:
        if f not in data:
            return jsonify({"error": f"{f} is required."}), 400

    # Fields whose radio-button answers must be stored as "Yes"/"No" strings.
    yes_no_fields = {"dual_citizenship", "live_with_parents", "support_parents_financially", "own_pets"}

    conn = None  # defined up-front so the finally block never hits a NameError
    try:
        conn = get_db_connection()
        cur = conn.cursor()

        table_name = "Marriage"

        # Build INSERT query without created_at; column names come from the
        # whitelist above, so the f-string is injection-safe.
        columns = ["user_id"] + role_fields["marriage"]
        placeholders = ", ".join(["?"] * len(columns))
        col_str = ", ".join(f"[{c}]" for c in columns)
        query = f"INSERT INTO {table_name} ({col_str}) VALUES ({placeholders})"

        values = [user_id]
        for f in role_fields["marriage"]:
            val = data.get(f)
            if f in yes_no_fields:
                # Map truthy/falsy radio values onto "Yes"/"No"; keep valid strings.
                if val in (1, "1", True):
                    val = "Yes"
                elif val in (0, "0", False):
                    val = "No"
                elif val not in ("Yes", "No"):
                    val = "No"  # default for any unexpected value
            if isinstance(val, list):
                # Multiselect answers are stored as a comma-joined string.
                val = ", ".join(str(v) for v in val)
            values.append(None if val is None else str(val))

        print(f"DEBUG: Executing query: {query}")
        print(f"DEBUG: Values: {values}")

        cur.execute(query, values)
        conn.commit()

        return jsonify({"message": "Marriage record added successfully."}), 201

    except pyodbc.Error as e:
        print(f"Database Error: {e}")
        return jsonify({"error": f"Database error: {str(e)}"}), 500
    except Exception as e:
        print(f"Unexpected Error: {e}")
        return jsonify({"error": f"Unexpected error: {str(e)}"}), 500
    finally:
        if conn is not None:
            try:
                conn.close()
            except Exception:
                pass
|
| 171 |
+
|
| 172 |
+
@profiles_bp.route('/api/questions/existing-profile/<role>/<int:user_id>', methods=['GET'])
def get_existing_profile(role: str, user_id: int):
    """Return the most recent profile row for a user, values normalised to strings.

    Booleans become "Yes"/"No", numbers become strings, and strings are
    trimmed so they match the option values the client widgets expect.
    Returns 400 for an unknown role, 404 when no row exists, 500 on errors.
    """
    # Validate the role before opening a connection; the mapping doubles as an
    # injection-safe whitelist for the table name used in the f-string below.
    table_map = {
        "marriage": "Marriage",
        "interview": "Interview",
        "partnership": "Partnership",
    }
    table_name = table_map.get(role.lower())
    if not table_name:
        return jsonify({"error": "Invalid role"}), 400

    conn = None  # defined up-front so the finally block never hits a NameError
    try:
        conn = get_db_connection()
        cur = conn.cursor()
        cur.execute(f"""
            SELECT TOP 1 * FROM {table_name}
            WHERE user_id = ?
            ORDER BY created_at DESC
        """, (user_id,))

        row = cur.fetchone()
        if row is None:
            return jsonify({"error": "No profile found"}), 404

        profile = row_to_dict(cur, row)

        # Normalise values so radio/select widgets on the client match options.
        for key, value in profile.items():
            if isinstance(value, bool):
                profile[key] = "Yes" if value else "No"
            elif isinstance(value, (int, float)):
                profile[key] = str(value)
            elif isinstance(value, str):
                profile[key] = value.strip()

        print(f"🟢 DEBUG: Returning cleaned profile data for user {user_id}")
        return jsonify(profile), 200

    except Exception as e:
        print(f"Error fetching existing profile: {e}")
        return jsonify({"error": str(e)}), 500
    finally:
        if conn is not None:
            try:
                conn.close()
            except Exception:
                pass
|
| 226 |
+
|
| 227 |
+
@profiles_bp.route('/api/questions/update-answers/<role>', methods=['PUT'])
def update_answers(role: str):
    """Update an existing profile row with whichever whitelisted fields are present.

    Expects JSON with "user_id" plus any subset of the role's fields.
    Returns 200 on success, 400 for bad input, 404 if no row matched,
    500 on database errors.
    """
    data = request.get_json(force=True) or {}
    user_id = data.get("user_id")

    if not user_id:
        return jsonify({"error": "User ID is required."}), 400

    role_fields = {
        "marriage": [
            "full_name", "date_of_birth", "gender", "current_city", "marital_status",
            "education_level", "employment_status", "number_of_siblings", "family_type",
            "hobbies_interests", "conflict_approach", "financial_style", "income_range",
            "relocation_willingness", "height", "skin_tone", "languages_spoken", "country",
            "blood_group", "religion", "dual_citizenship", "siblings_position",
            "parents_living_status", "live_with_parents", "support_parents_financially",
            "family_communication_frequency", "food_preference", "smoking_habit",
            "alcohol_habit", "daily_routine", "fitness_level", "own_pets",
            "travel_preference", "relaxation_mode", "job_role", "work_experience_years",
            "career_aspirations", "field_of_study", "remark", "children_timeline",
            "open_to_adoption", "deal_breakers", "other_non_negotiables",
            "health_constraints", "live_with_inlaws"
            # Note: "created_at" is excluded (auto-generated)
        ]
    }

    if role not in role_fields:
        return jsonify({"error": f"Invalid role: {role}"}), 400

    # Fields whose radio-button answers must be stored as "Yes"/"No" strings.
    yes_no_fields = {"dual_citizenship", "live_with_parents", "support_parents_financially", "own_pets"}

    conn = None  # defined up-front so the finally block never hits a NameError
    try:
        conn = get_db_connection()
        cur = conn.cursor()

        table_name = "Marriage" if role == "marriage" else role.capitalize()

        # Build UPDATE query - only include fields that are present in data.
        # Field names come from the whitelist above, so the f-string is safe.
        set_parts = []
        values = []
        for field in role_fields[role]:
            if field not in data:
                continue
            set_parts.append(f"{field} = ?")
            val = data.get(field)
            if field in yes_no_fields:
                # Map truthy/falsy radio values onto "Yes"/"No"; keep valid strings.
                if val in (1, "1", True):
                    val = "Yes"
                elif val in (0, "0", False):
                    val = "No"
                elif val not in ("Yes", "No"):
                    val = "No"  # default for any unexpected value
            if isinstance(val, list):
                # Multiselect answers are stored as a comma-joined string.
                val = ", ".join(str(v) for v in val)
            values.append(None if val is None else str(val))

        if not set_parts:
            return jsonify({"error": "No valid fields to update"}), 400

        # Add user_id for WHERE clause
        values.append(user_id)

        query = f"UPDATE {table_name} SET {', '.join(set_parts)} WHERE user_id = ?"

        print(f"DEBUG: Executing update query: {query}")
        print(f"DEBUG: Values: {values}")

        cur.execute(query, values)
        conn.commit()

        # rowcount of 0 means no profile row matched the user_id.
        if cur.rowcount == 0:
            return jsonify({"error": "No profile found to update"}), 404

        return jsonify({"message": "Profile updated successfully."}), 200

    except pyodbc.Error as e:
        print(f"Database Error: {e}")
        return jsonify({"error": f"Database error: {str(e)}"}), 500
    except Exception as e:
        print(f"Unexpected Error: {e}")
        return jsonify({"error": f"Unexpected error: {str(e)}"}), 500
    finally:
        if conn is not None:
            try:
                conn.close()
            except Exception:
                pass
|
| 324 |
+
|
| 325 |
+
@profiles_bp.route('/api/marriage-profile/<int:user_id>', methods=['GET'])
def get_marriage_profile(user_id: int):
    """Return the newest Marriage profile row for a user as JSON.

    Returns 404 when no row exists, 500 on errors.
    """
    conn = None  # defined up-front so the finally block never hits a NameError
    try:
        conn = get_db_connection()
        cur = conn.cursor()
        cur.execute("""
            SELECT * FROM Marriage
            WHERE user_id = ?
            ORDER BY created_at DESC
        """, (user_id,))

        row = cur.fetchone()
        if row is None:
            return jsonify({"error": "Marriage profile not found"}), 404

        return jsonify(row_to_dict(cur, row)), 200

    except Exception as e:
        print(f"Error fetching marriage profile: {e}")
        return jsonify({"error": str(e)}), 500
    finally:
        if conn is not None:
            try:
                conn.close()
            except Exception:
                pass
|
| 355 |
+
|
| 356 |
+
@profiles_bp.route('/api/check-marriage-profile/<int:user_id>', methods=['GET'])
def check_marriage_profile(user_id: int):
    """Report whether a Marriage profile row exists for the user.

    Returns {"exists": bool} with 200, or 500 on errors.
    """
    conn = None  # defined up-front so the finally block never hits a NameError
    try:
        conn = get_db_connection()
        cur = conn.cursor()
        cur.execute("""
            SELECT COUNT(*) as count
            FROM Marriage
            WHERE user_id = ?
        """, (user_id,))

        row = cur.fetchone()
        exists = bool(row and row[0] > 0)

        return jsonify({"exists": exists}), 200

    except Exception as e:
        print(f"Error checking marriage profile: {e}")
        return jsonify({"error": str(e)}), 500
    finally:
        if conn is not None:
            try:
                conn.close()
            except Exception:
                pass
|
| 382 |
+
|
| 383 |
+
@profiles_bp.route('/api/check-assessment/<int:user_id>', methods=['GET'])
def check_assessment(user_id: int):
    """Report whether any LLMGeneratedQuestions row exists for the user.

    Returns {"exists": bool} with 200, or 500 on errors.
    """
    conn = None  # defined up-front so the finally block never hits a NameError
    try:
        conn = get_db_connection()
        cur = conn.cursor()
        cur.execute("""
            SELECT COUNT(*) as count
            FROM LLMGeneratedQuestions
            WHERE user_id = ?
        """, (user_id,))

        row = cur.fetchone()
        exists = bool(row and row[0] > 0)

        return jsonify({"exists": exists}), 200

    except Exception as e:
        print(f"Error checking assessment: {e}")
        return jsonify({"error": str(e)}), 500
    finally:
        if conn is not None:
            try:
                conn.close()
            except Exception:
                pass
|
| 409 |
+
|
| 410 |
+
@profiles_bp.route('/api/check-assessment-completion/<int:user_id>', methods=['GET'])
def check_assessment_completion(user_id: int):
    """Report whether the user already completed the color assessment.

    A row counts only if at least one color score is positive, so an empty
    placeholder row does not block the user from taking the assessment.
    Returns {"has_taken_assessment": bool, "message": str} with 200, or 500.
    """
    conn = None  # defined up-front so the finally block never hits a NameError
    try:
        conn = get_db_connection()
        cur = conn.cursor()

        cur.execute("""
            SELECT COUNT(*) as count
            FROM LLMGeneratedQuestions
            WHERE user_id = ? AND (blue > 0 OR green > 0 OR yellow > 0 OR red > 0)
        """, (user_id,))

        row = cur.fetchone()
        has_taken_assessment = bool(row and row[0] > 0)

        print(f"🔍 Assessment check for user {user_id}: {has_taken_assessment} (count: {row[0] if row else 0})")

        return jsonify({
            "has_taken_assessment": has_taken_assessment,
            "message": "User has already taken assessment" if has_taken_assessment else "User can take assessment"
        }), 200

    except Exception as e:
        print(f"Error checking assessment completion: {e}")
        return jsonify({"error": str(e)}), 500
    finally:
        if conn is not None:
            try:
                conn.close()
            except Exception:
                pass
|
server.py
DELETED
|
@@ -1,2039 +0,0 @@
|
|
| 1 |
-
# server.py
|
| 2 |
-
# -----------------------------------------------------------------------------
|
| 3 |
-
# Unified Py-Match Service (Flask)
|
| 4 |
-
# - Local Windows: Trusted_Connection to SQL Server (e.g., localhost\SQLEXPRESS)
|
| 5 |
-
# - Cloud (AWS RDS / Hugging Face): SQL auth via UID/PWD + Encrypt
|
| 6 |
-
# - ODBC driver name defaults to {ODBC Driver 17 for SQL Server}
|
| 7 |
-
# -----------------------------------------------------------------------------
|
| 8 |
-
|
| 9 |
-
# ADD this line at the top with your imports
|
| 10 |
-
|
| 11 |
-
import os, uuid, json, random, threading, hashlib, pickle
|
| 12 |
-
from typing import Dict, List, Optional, Literal, Tuple
|
| 13 |
-
from datetime import datetime
|
| 14 |
-
from sqlalchemy import func
|
| 15 |
-
# --- load .env so OPENAI_API_KEY (and others) are available ---
|
| 16 |
-
from dotenv import load_dotenv
|
| 17 |
-
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
|
| 18 |
-
|
| 19 |
-
# Load environment variables - priority: Hugging Face secrets > .env file
|
| 20 |
-
IS_HUGGING_FACE = os.environ.get('HUGGINGFACE_SPACES') == 'true' or os.environ.get('SPACE_ID') is not None
|
| 21 |
-
if not IS_HUGGING_FACE:
|
| 22 |
-
# Only load from .env file when running locally
|
| 23 |
-
load_dotenv(os.path.join(BASE_DIR, ".env"))
|
| 24 |
-
load_dotenv(os.path.join(BASE_DIR, ".env"))
|
| 25 |
-
else:
|
| 26 |
-
# On Hugging Face, secrets are automatically available as environment variables
|
| 27 |
-
print("Running on Hugging Face Spaces - using secrets from environment variables")
|
| 28 |
-
|
| 29 |
-
from flask import Flask, request, jsonify
|
| 30 |
-
from flask_cors import CORS
|
| 31 |
-
import pyodbc
|
| 32 |
-
from flask_sqlalchemy import SQLAlchemy
|
| 33 |
-
import urllib.parse
|
| 34 |
-
import numpy as np
|
| 35 |
-
# ----------------------------------------------------------------------
|
| 36 |
-
# Expectation-Based Matching (Strict + Soft Comparison)
|
| 37 |
-
# ----------------------------------------------------------------------
|
| 38 |
-
def match_expectation_with_profiles(user_id):
    """Rank opposite-gender Marriage profiles against a user's stated expectations.

    Strict rules (religion, smoking, alcohol) disqualify a candidate outright;
    soft preferences add weighted points; finally a character-color score is
    blended in (70% expectation / 30% character) for the top 10 candidates.
    Returns a list of candidate dicts sorted by "overall_score" descending,
    or [] when the user has no expectations or no Marriage profile.
    """
    import ast
    from difflib import SequenceMatcher

    expectation = ExpectationResponse.query.filter_by(user_id=user_id).first()
    if not expectation:
        return []

    # Get current user to know gender
    current_user = Marriage.query.filter_by(user_id=user_id).first()
    if not current_user:
        return []

    # Opposite gender profiles only
    opposite_profiles = Marriage.query.filter(Marriage.gender != current_user.gender).all()

    def parse_languages(raw):
        """Safely parse the stored language list.

        Previously this used eval(), which executes arbitrary code from the
        database; literal_eval only accepts Python literals, with a CSV
        fallback for plain comma-separated strings.
        """
        try:
            parsed = ast.literal_eval(raw)
        except (ValueError, SyntaxError):
            parsed = [s.strip() for s in str(raw).split(",")]
        return [parsed] if isinstance(parsed, str) else parsed

    def compute_expectation_score(expect, profile):
        """Weighted expectation match in [0, 1]; 0 means a strict rule failed.

        NOTE(review): `total` is only incremented when a soft rule *matches*,
        so the ratio is dominated by the summary-similarity term — confirm
        whether `total` was meant to count every stated preference.
        """
        score, total = 0, 0

        # ----- Strict rules: any failure disqualifies the candidate -----
        # Religion: strict match unless 'different faith acceptable'
        if expect.religion_alignment and "different" not in expect.religion_alignment.lower():
            if expect.religion_alignment.lower() not in str(profile.religion or "").lower():
                return 0  # strict fail

        # Smoking strict rule
        if expect.accept_smoking and expect.accept_smoking.lower() == "never":
            if str(profile.smoking_habit or "").lower() != "never":
                return 0

        # Alcohol strict rule
        if expect.accept_alcohol and expect.accept_alcohol.lower() == "never":
            if str(profile.alcohol_habit or "").lower() != "never":
                return 0

        # ----- Flexible rules -----
        # Location
        if expect.pref_location and str(expect.pref_location).lower() in str(profile.current_city or "").lower():
            score += 1
            total += 1

        # Country
        if expect.pref_countries and str(expect.pref_countries).lower() in str(profile.country or "").lower():
            score += 1
            total += 1

        # Language (safe parse instead of eval)
        if expect.pref_languages and any(
            lang.lower() in str(profile.languages_spoken or "").lower()
            for lang in parse_languages(expect.pref_languages)
        ):
            score += 1
            total += 1

        # Diet
        if expect.pref_diet and str(expect.pref_diet).lower() in str(profile.food_preference or "").lower():
            score += 1
            total += 1

        # Family type
        if expect.pref_family_type and str(expect.pref_family_type).lower() in str(profile.family_type or "").lower():
            score += 1
            total += 1

        # Fitness
        if expect.pref_fitness and str(expect.pref_fitness).lower() in str(profile.fitness_level or "").lower():
            score += 1
            total += 1

        # Expectation summary ↔ remark text match
        if expect.expectation_summary and profile.remark:
            sim = SequenceMatcher(None, expect.expectation_summary.lower(), str(profile.remark).lower()).ratio()
            if sim > 0.3:
                score += sim * 2  # higher weight for similar mindset
                total += 2

        return round(score / total, 2) if total > 0 else 0

    # Evaluate all opposite gender profiles
    candidates = []
    for profile in opposite_profiles:
        s = compute_expectation_score(expectation, profile)
        if s > 0:
            candidates.append({
                "user_id": profile.user_id,
                "name": profile.full_name,
                "gender": profile.gender,
                "location": profile.current_city,
                "religion": profile.religion,
                "remark": profile.remark,
                "expectation_score": s
            })

    # Sort by expectation score descending
    candidates.sort(key=lambda x: x["expectation_score"], reverse=True)

    # Once sorted, get character compatibility for top matches
    top_ids = [c["user_id"] for c in candidates[:10]]
    llm_data = LLMGeneratedQuestions.query.filter(LLMGeneratedQuestions.user_id.in_(top_ids)).all()
    llm_map = {l.user_id: (l.blue, l.green, l.yellow, l.red) for l in llm_data}

    # Combine both results: 70% expectation, 30% character compatibility
    for c in candidates:
        if c["user_id"] in llm_map:
            b, g, y, r = llm_map[c["user_id"]]
            c["character_score"] = round((b + g + y + r) / 400, 2)
            c["overall_score"] = round(0.7 * c["expectation_score"] + 0.3 * c["character_score"], 2)
        else:
            c["character_score"] = 0
            c["overall_score"] = c["expectation_score"]

    candidates.sort(key=lambda x: x["overall_score"], reverse=True)
    return candidates
|
| 147 |
-
|
| 148 |
-
|
| 149 |
-
|
| 150 |
-
# Try importing faiss (may be "faiss" or "faiss-cpu" depending on installation)
|
| 151 |
-
try:
|
| 152 |
-
import faiss
|
| 153 |
-
HAS_FAISS = True
|
| 154 |
-
except Exception as e:
|
| 155 |
-
print("faiss import failed:", e)
|
| 156 |
-
HAS_FAISS = False
|
| 157 |
-
|
| 158 |
-
# Try importing sentence-transformers
|
| 159 |
-
try:
|
| 160 |
-
from sentence_transformers import SentenceTransformer # type: ignore
|
| 161 |
-
HAS_EMBEDDER = True
|
| 162 |
-
except Exception:
|
| 163 |
-
SentenceTransformer = None
|
| 164 |
-
HAS_EMBEDDER = False
|
| 165 |
-
|
| 166 |
-
# ---------- Optional LLM deps (fallback if missing) ----------
|
| 167 |
-
try:
|
| 168 |
-
from pydantic import BaseModel, Field
|
| 169 |
-
from langchain_core.prompts import ChatPromptTemplate
|
| 170 |
-
from langchain_core.output_parsers import PydanticOutputParser, StrOutputParser
|
| 171 |
-
from langchain_openai import ChatOpenAI
|
| 172 |
-
HAS_LLM_STACK = True
|
| 173 |
-
HAS_LLM = True
|
| 174 |
-
except Exception:
|
| 175 |
-
HAS_LLM_STACK = False
|
| 176 |
-
HAS_LLM = False
|
| 177 |
-
|
| 178 |
-
# ==============================
|
| 179 |
-
# Configuration / DB Connection
|
| 180 |
-
# ==============================
|
| 181 |
-
|
| 182 |
-
# ==============================
|
| 183 |
-
# Configuration / DB Connection
|
| 184 |
-
# ==============================
|
| 185 |
-
|
| 186 |
-
#IS_HUGGING_FACE = os.environ.get('HUGGINGFACE_SPACES') == 'true' or os.environ.get('SPACE_ID') is not None
|
| 187 |
-
|
| 188 |
-
if IS_HUGGING_FACE:
|
| 189 |
-
# Hugging Face Spaces configuration
|
| 190 |
-
DEFAULT_SQL_SERVER = "pykara-sqlserver.c5aosm6ie5j3.eu-north-1.rds.amazonaws.com,1433"
|
| 191 |
-
DEFAULT_SQL_DB = "PyMatch"
|
| 192 |
-
DEFAULT_SQL_TRUSTED = "yes" # Use SQL authentication on Hugging Face
|
| 193 |
-
else:
|
| 194 |
-
# Local development configuration
|
| 195 |
-
DEFAULT_SQL_SERVER = "localhost\\SQLEXPRESS"
|
| 196 |
-
DEFAULT_SQL_DB = "Py_Match"
|
| 197 |
-
DEFAULT_SQL_TRUSTED = "yes" # Use Windows authentication locally
|
| 198 |
-
|
| 199 |
-
SQL_DRIVER = os.getenv("PYMATCH_SQL_DRIVER", "ODBC Driver 17 for SQL Server")
|
| 200 |
-
SQL_SERVER = os.getenv("PYMATCH_SQL_SERVER", DEFAULT_SQL_SERVER)
|
| 201 |
-
SQL_DB = os.getenv("PYMATCH_SQL_DB", DEFAULT_SQL_DB)
|
| 202 |
-
SQL_TRUSTED = os.getenv("PYMATCH_SQL_TRUSTED", DEFAULT_SQL_TRUSTED) # yes/no
|
| 203 |
-
SQL_USER = os.getenv("PYMATCH_SQL_USER", "")
|
| 204 |
-
SQL_PASSWORD = os.getenv("PYMATCH_SQL_PASSWORD", "")
|
| 205 |
-
SQL_PORT = os.getenv("PYMATCH_SQL_PORT", "")
|
| 206 |
-
SQL_ENCRYPT = os.getenv("PYMATCH_SQL_ENCRYPT", "no").lower().strip()
|
| 207 |
-
SQL_TRUSTCERT = os.getenv("PYMATCH_SQL_TRUST_CERT", "yes").lower().strip()
|
| 208 |
-
|
| 209 |
-
PROGRESS_TBL = os.getenv("PYMATCH_PROGRESS_TABLE", "LLMGeneratedQuestions")
|
| 210 |
-
DEFAULT_N_QUESTIONS = int(os.getenv("PYMATCH_DEFAULT_N_QUESTIONS", "20"))
|
| 211 |
-
DEFAULT_BATCH_SIZE = int(os.getenv("PYMATCH_DEFAULT_BATCH_SIZE", "10"))
|
| 212 |
-
MAX_QUESTIONS = int(os.getenv("PYMATCH_MAX_QUESTIONS", "50"))
|
| 213 |
-
|
| 214 |
-
# Some constants used across the app
|
| 215 |
-
COLOR_KEYS = ["blue", "green", "red", "yellow"]
|
| 216 |
-
DOMAINS = ["marriage", "interview", "partnership", "general"]
|
| 217 |
-
|
| 218 |
-
# Faiss index / chunks defaults - user should update FAISS_INDEX_PATH or provide companion chunks file
|
| 219 |
-
FAISS_INDEX_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), "faiss_index_file.index")
|
| 220 |
-
# try companion files: same base name + (.chunks.json | _chunks.json | .chunks.pkl)
|
| 221 |
-
|
| 222 |
-
|
| 223 |
-
def get_db_connection():
    """Open a fresh pyodbc connection using the module-level SQL settings."""
    conn_str = (
        f"DRIVER={SQL_DRIVER};"
        f"SERVER={SQL_SERVER};"
        f"DATABASE={SQL_DB};"
        f"Trusted_Connection={SQL_TRUSTED};"
    )
    return pyodbc.connect(conn_str)
|
| 230 |
-
|
| 231 |
-
# ==========
|
| 232 |
-
# Flask App
|
| 233 |
-
# ==========
|
| 234 |
-
app = Flask(__name__)
|
| 235 |
-
CORS(app, resources={r"/*": {"origins": "*"}})
|
| 236 |
-
|
| 237 |
-
# =================================================================
|
| 238 |
-
# SQLAlchemy & Database Models (from matching.py)
|
| 239 |
-
# =================================================================
|
| 240 |
-
_server = SQL_SERVER
|
| 241 |
-
if SQL_PORT:
|
| 242 |
-
_server = f"{SQL_SERVER},{SQL_PORT}"
|
| 243 |
-
|
| 244 |
-
if SQL_TRUSTED == "yes":
|
| 245 |
-
raw = (
|
| 246 |
-
f"DRIVER={{{SQL_DRIVER}}};"
|
| 247 |
-
f"SERVER={_server};"
|
| 248 |
-
f"DATABASE={SQL_DB};"
|
| 249 |
-
f"Trusted_Connection=yes;"
|
| 250 |
-
)
|
| 251 |
-
else:
|
| 252 |
-
raw = (
|
| 253 |
-
f"DRIVER={{{SQL_DRIVER}}};"
|
| 254 |
-
f"SERVER={_server};"
|
| 255 |
-
f"DATABASE={SQL_DB};"
|
| 256 |
-
f"UID={SQL_USER};PWD={SQL_PASSWORD};"
|
| 257 |
-
)
|
| 258 |
-
|
| 259 |
-
if SQL_ENCRYPT == "yes":
|
| 260 |
-
raw += "Encrypt=yes;"
|
| 261 |
-
if SQL_TRUSTCERT == "yes":
|
| 262 |
-
raw += "TrustServerCertificate=yes;"
|
| 263 |
-
|
| 264 |
-
params = urllib.parse.quote_plus(raw)
|
| 265 |
-
SQLALCHEMY_DATABASE_URI = f"mssql+pyodbc:///?odbc_connect={params}"
|
| 266 |
-
|
| 267 |
-
app.config["SQLALCHEMY_DATABASE_URI"] = SQLALCHEMY_DATABASE_URI
|
| 268 |
-
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
|
| 269 |
-
|
| 270 |
-
db = SQLAlchemy(app)
|
| 271 |
-
|
| 272 |
-
class Users(db.Model):
    """Account record: one row per registered user."""
    __tablename__ = "Users"
    user_id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(128), nullable=False)
    email = db.Column(db.String(128))
    # phone = db.Column(db.String(32))
    # gender = db.Column(db.String(16))
    # Stores the SHA-256 hex digest produced by hash_password, not plaintext.
    password = db.Column(db.String(128))
    created_at = db.Column(db.DateTime)
class LLMGeneratedQuestions(db.Model):
    """Per-user personality quiz result: one integer count per personality color."""
    __tablename__ = "LLMGeneratedQuestions"

    llm_id = db.Column(db.Integer, primary_key=True)
    user_id = db.Column(db.Integer, nullable=False, index=True)
    role = db.Column(db.String(64), nullable=True)  # matching context, e.g. "marriage"
    blue = db.Column(db.Integer, nullable=False, default=0)
    green = db.Column(db.Integer, nullable=False, default=0)
    yellow = db.Column(db.Integer, nullable=False, default=0)
    red = db.Column(db.Integer, nullable=False, default=0)
    # NOTE(review): datetime.utcnow is naive/deprecated in newer Pythons —
    # consider a timezone-aware default when migrating.
    created_at = db.Column(db.DateTime, default=datetime.utcnow)

    def color_vec(self) -> np.ndarray:
        """Return [blue, green, yellow, red] as float32, normalised to sum 1.

        Returns the zero vector unchanged when all counts are 0 (avoids
        division by zero).
        """
        v = np.array([self.blue, self.green, self.yellow, self.red], dtype=np.float32)
        s = float(v.sum())
        return v / s if s > 0 else v
class Marriage(db.Model):
    """Marriage-profile questionnaire answers for one user.

    All answers are stored as free-text/choice strings exactly as submitted.
    """
    __tablename__ = "Marriage"
    id = db.Column(db.Integer, primary_key=True)
    user_id = db.Column(db.Integer, nullable=False)
    # --- Basic personal details ---
    full_name = db.Column(db.String(100))
    date_of_birth = db.Column(db.String(50))
    gender = db.Column(db.String(20))
    current_city = db.Column(db.String(100))
    marital_status = db.Column(db.String(50))
    education_level = db.Column(db.String(100))
    employment_status = db.Column(db.String(100))
    number_of_siblings = db.Column(db.String(50))
    family_type = db.Column(db.String(100))
    hobbies_interests = db.Column(db.Text)
    conflict_approach = db.Column(db.String(100))
    financial_style = db.Column(db.String(100))
    income_range = db.Column(db.String(100))
    relocation_willingness = db.Column(db.String(100))
    created_at = db.Column(db.DateTime, default=datetime.utcnow)
    # Newly added columns
    height_weight = db.Column(db.String(100))
    eye_colour = db.Column(db.String(50))
    skin_tone = db.Column(db.String(50))
    languages_spoken = db.Column(db.String(200))
    country = db.Column(db.String(100))
    blood_group = db.Column(db.String(10))
    religion = db.Column(db.String(100))
    dual_citizenship = db.Column(db.String(50))
    siblings_position = db.Column(db.String(50))
    parents_living_status = db.Column(db.String(100))
    live_with_parents = db.Column(db.String(50))
    support_parents_financially = db.Column(db.String(50))
    family_communication_frequency = db.Column(db.String(100))
    food_preference = db.Column(db.String(100))
    smoking_habit = db.Column(db.String(50))
    alcohol_habit = db.Column(db.String(50))
    daily_routine = db.Column(db.String(200))
    fitness_level = db.Column(db.String(100))
    own_pets = db.Column(db.String(50))
    travel_preference = db.Column(db.String(100))
    relaxation_mode = db.Column(db.String(100))
    job_role = db.Column(db.String(100))
    work_experience_years = db.Column(db.String(50))
    career_aspirations = db.Column(db.String(200))
    field_of_study = db.Column(db.String(200))
    work_preference = db.Column(db.String(100))
    remark = db.Column(db.Text)
class ExpectationResponse(db.Model):
    """Partner-expectation answers for a user (one row per user; user_id is the PK)."""
    __tablename__ = "ExpectationResponse"

    user_id = db.Column(db.Integer, primary_key=True)
    religion_alignment = db.Column(db.String(50))
    accept_smoking = db.Column(db.String(50))
    accept_alcohol = db.Column(db.String(50))
    pref_location = db.Column(db.String(100))
    pref_countries = db.Column(db.String(100))
    pref_languages = db.Column(db.String(100))
    pref_diet = db.Column(db.String(100))
    pref_family_type = db.Column(db.String(100))
    pref_fitness = db.Column(db.String(100))
    # Free-text summary of what the user expects from a partner.
    expectation_summary = db.Column(db.Text)
# =================================================================
|
| 363 |
-
# Knowledge source (FAISS) and LLM setup (from matching.py)
|
| 364 |
-
# =================================================================
|
| 365 |
-
|
| 366 |
-
|
| 367 |
-
BASE_DIR = os.path.dirname(os.path.abspath(__file__))

# Reference books backing the knowledge base. Each entry pairs a FAISS index
# with a pickled metadata file (a list of chunk dicts carrying a "text" key,
# as consumed by KnowledgeSource.get_relevant_context).
BOOKS = [
    {
        "index": os.path.join(BASE_DIR, "psychology2e.index"),
        "meta": os.path.join(BASE_DIR, "psychology2e_meta.pkl"),
        "name": "Psychology 2e",
    },
    {
        "index": os.path.join(BASE_DIR, "surrounded_by_idiots.index"),
        "meta": os.path.join(BASE_DIR, "surrounded_by_idiots_meta.pkl"),
        "name": "Surrounded by Idiots",
    },
]
class KnowledgeSource:
    """Loads the book FAISS indices listed in BOOKS and serves similarity lookups.

    Degrades gracefully: if faiss, the sentence-transformer model, or any
    book's files are missing, lookups simply return an empty list.
    """

    def __init__(self):
        # One FAISS index plus its parallel metadata list per loaded book.
        self.indices: List = []
        self.metas: List[List[Dict]] = []
        self.embedder = None
        if not HAS_FAISS:  # Use the global flag instead of checking faiss directly
            return
        try:
            if SentenceTransformer:
                self.embedder = SentenceTransformer("all-MiniLM-L6-v2")
        except Exception:
            # Model load/download failed; get_relevant_context will return [].
            self.embedder = None
        for b in BOOKS:
            try:
                idx_path = b["index"]
                meta_path = b["meta"]
                if os.path.exists(idx_path) and os.path.exists(meta_path):
                    index = faiss.read_index(idx_path)
                    with open(meta_path, "rb") as f:
                        meta = pickle.load(f)
                    self.indices.append(index)
                    self.metas.append(meta)
            except Exception:
                # Skip unreadable/corrupt book files, keep loading the rest.
                continue

    def get_relevant_context(self, query: str, topk: int = 3) -> List[str]:
        """Get relevant context from knowledge base for relationship matching.

        Embeds *query*, searches every loaded book index, and returns up to
        *topk* chunk texts (each truncated to 500 chars) ordered by distance.
        Returns [] when the embedder/indices are unavailable or search fails.
        """
        if not self.indices or not self.embedder:
            return []
        try:
            vec = self.embedder.encode([query]).astype("float32")
            results: List[Tuple[float, str]] = []
            for index, meta in zip(self.indices, self.metas):
                # D = distances, I = chunk indices for the single query row.
                D, I = index.search(vec, topk)
                for d, i in zip(D[0], I[0]):
                    if 0 <= i < len(meta):
                        txt = meta[i].get("text", "")[:500]  # Increased length for better context
                        results.append((float(d), txt))
            # Smaller distance = closer match → sort ascending, keep best topk.
            results.sort(key=lambda x: x[0])
            return [t for _, t in results[:topk]]
        except Exception:
            return []
# Initialize knowledge base only if FAISS is available
knowledge = None
if HAS_FAISS:
    knowledge = KnowledgeSource()
else:
    print("FAISS not available - KnowledgeSource disabled")
    # Create a dummy knowledge object with empty methods so callers can
    # invoke get_relevant_context unconditionally.
    class DummyKnowledge:
        def get_relevant_context(self, *args, **kwargs):
            return []
    knowledge = DummyKnowledge()
# ---------------------------------------------------------------------------
|
| 440 |
-
# LLM Setup for Dynamic Explanations (from matching.py)
|
| 441 |
-
# ---------------------------------------------------------------------------
|
| 442 |
-
|
| 443 |
-
def create_llm_chain():
    """Create LLM chain for dynamic match explanations.

    Returns a LangChain runnable (prompt | chat model | string parser), or
    None when the LLM dependencies or OPENAI_API_KEY are missing, or when
    construction fails for any reason.
    """
    if not HAS_LLM or not os.getenv("OPENAI_API_KEY"):
        return None

    try:
        llm = ChatOpenAI(
            model="gpt-4o-mini",
            temperature=0.7,  # allow some variation in the write-ups
            max_retries=2,
            timeout=30
        )

        prompt_template = ChatPromptTemplate.from_messages([
            ("system", """You are a relationship compatibility expert specializing in personality color analysis (Blue, Green, Yellow, Red).
Based on the personality profiles, relevant psychological context, and user profiles, provide a detailed compatibility analysis.

CRITICAL GUIDELINES:
1. Focus on practical relationship dynamics, not just theoretical compatibility
2. Use insights from the provided psychological context when relevant
3. Be specific about strengths and potential challenges
4. Provide actionable advice for the couple
5. Keep explanations natural and conversational, not robotic
6. Reference specific personality traits and how they interact
7. Consider cultural and personal background when relevant
8. Balance positivity with realistic expectations

Structure your response with:
- Compatibility overview (1-2 sentences)
- Key strengths of this pairing
- Potential challenges to be aware of
- Practical advice for success
- Daily life compatibility"""),
            ("human", """Personality Profiles:
User 1 ({user1_name}, {user1_gender}): Blue {user1_blue}%, Green {user1_green}%, Yellow {user1_yellow}%, Red {user1_red}%
User 2 ({user2_name}, {user2_gender}): Blue {user2_blue}%, Green {user2_green}%, Yellow {user2_yellow}%, Red {user2_red}%

User 1 Background: {user1_background}
User 2 Background: {user2_background}

Relevant Psychological Context:
{context}

Please provide a comprehensive compatibility analysis:""")
        ])

        return prompt_template | llm | StrOutputParser()
    except Exception as e:
        print(f"Failed to create LLM chain: {e}")
        return None
# Build the explanation chain once at import time; stays None when the LLM
# stack is unavailable, in which case the rule-based fallback is used.
llm_chain = create_llm_chain() if HAS_LLM else None

# =================================================================
# Enhanced Helpers with LLM Integration (from matching.py)
# =================================================================
def cosine_sim(a: np.ndarray, b: np.ndarray) -> float:
    """Cosine similarity of two vectors; 0.0 when either has zero norm."""
    norm_a = np.linalg.norm(a)
    norm_b = np.linalg.norm(b)
    if norm_a == 0.0 or norm_b == 0.0:
        return 0.0
    return float(np.dot(a, b) / (norm_a * norm_b))
def get_user_background(user_id: int) -> Dict:
    """Get comprehensive user background for LLM analysis.

    Merges the basic Users row and (when present) the Marriage profile into
    one flat dict; missing records simply contribute no keys.
    """
    background: Dict = {}

    # Get basic user info
    user = Users.query.filter_by(user_id=user_id).first()
    if user:
        background.update({
            "name": user.name or "Unknown",
            # "gender": user.gender or "Not specified",
            "email": user.email or "",
            # "phone": user.phone or ""
        })

    # Get marriage profile if exists
    marriage_profile = Marriage.query.filter_by(user_id=user_id).first()
    if marriage_profile:
        background.update({
            "current_location": marriage_profile.current_city or "",
            "education": marriage_profile.education_level or "",
            "employment": marriage_profile.employment_status or "",
            "hobbies": marriage_profile.hobbies_interests or "",
            "conflict_style": marriage_profile.conflict_approach or "",
            "financial_style": marriage_profile.financial_style or "",
            "family_type": marriage_profile.family_type or "",
        })

    return background
def generate_dynamic_explanation(user1_id: int, user2_id: int, user1_vec: np.ndarray, user2_vec: np.ndarray) -> List[str]:
    """Generate dynamic explanation using LLM and knowledge base.

    Falls back to generate_rule_based_explanation when the LLM chain is
    unavailable, raises, or yields no usable lines.
    """

    # Get user backgrounds
    user1_bg = get_user_background(user1_id)
    user2_bg = get_user_background(user2_id)

    # Create query for knowledge base
    query = f"compatibility between personality types: {user1_bg.get('conflict_style', '')} and {user2_bg.get('conflict_style', '')}"
    context_chunks = knowledge.get_relevant_context(query, topk=2)
    context = "\n".join(context_chunks) if context_chunks else "No specific psychological context available."

    # Prepare data for LLM (vectors are normalised fractions → percentages)
    llm_data = {
        "user1_name": user1_bg.get("name", "User 1"),
        "user1_gender": user1_bg.get("gender", "Not specified"),
        "user1_blue": round(user1_vec[0] * 100, 1),
        "user1_green": round(user1_vec[1] * 100, 1),
        "user1_yellow": round(user1_vec[2] * 100, 1),
        "user1_red": round(user1_vec[3] * 100, 1),
        "user2_name": user2_bg.get("name", "User 2"),
        "user2_gender": user2_bg.get("gender", "Not specified"),
        "user2_blue": round(user2_vec[0] * 100, 1),
        "user2_green": round(user2_vec[1] * 100, 1),
        "user2_yellow": round(user2_vec[2] * 100, 1),
        "user2_red": round(user2_vec[3] * 100, 1),
        "user1_background": json.dumps(user1_bg, indent=2),
        "user2_background": json.dumps(user2_bg, indent=2),
        "context": context
    }

    # Try LLM first
    if llm_chain is not None:
        try:
            response = llm_chain.invoke(llm_data)
            # Parse LLM response into structured points
            points = []
            lines = response.split('\n')
            for line in lines:
                line = line.strip()
                # Skip the section-header bullets the prompt asks the model to emit.
                if line and not line.startswith(('- Compatibility', '- Key', '- Potential', '- Practical', '- Daily')):
                    if line.startswith('•') or line.startswith('-'):
                        points.append(line[1:].strip())
                    elif len(line) > 20:  # Substantive lines
                        points.append(line)

            if points:
                return points[:5]  # Return top 5 most relevant points
        except Exception as e:
            print(f"LLM explanation failed: {e}")

    # Fallback to rule-based explanations
    return generate_rule_based_explanation(user1_vec, user2_vec, user1_bg, user2_bg)
def generate_rule_based_explanation(user1_vec: np.ndarray, user2_vec: np.ndarray, user1_bg: Dict, user2_bg: Dict) -> List[str]:
    """Rule-based fallback explanation"""
    labels = ["Blue", "Green", "Yellow", "Red"]
    dom1 = labels[int(np.argmax(user1_vec))]
    dom2 = labels[int(np.argmax(user2_vec))]

    points: List[str] = []

    # Dominant-trait comparison first.
    if dom1 == dom2:
        points.append(f"Both share {dom1} dominance: Strong alignment in core approach and values.")
    else:
        points.append(f"{dom1}-{dom2} pairing: Complementary strengths create balanced dynamics.")

    # Per-color difference analysis (only differences beyond 0.15 matter).
    for color, diff in zip(labels, user2_vec - user1_vec):
        if abs(diff) <= 0.15:
            continue
        if diff > 0:
            points.append(f"Higher {color} influence brings {get_color_strength(color)} to the relationship.")
        else:
            points.append(f"Lower {color} presence allows for more {get_color_balance(color)} in dynamics.")

    # Background-based observations.
    if user1_bg.get("hobbies") and user2_bg.get("hobbies"):
        points.append("Shared interests and hobbies create strong bonding opportunities.")
    if user1_bg.get("conflict_style") and user2_bg.get("conflict_style"):
        points.append("Complementary conflict styles can lead to effective problem-solving.")

    return points[:4]  # Limit to 4 points
def get_color_strength(color: str) -> str:
    """Describe the strength a dominant personality color contributes."""
    return {
        "Blue": "analytical precision and structured thinking",
        "Green": "emotional stability and patient understanding",
        "Yellow": "creative energy and social connection",
        "Red": "decisive action and goal orientation",
    }.get(color, "unique strengths")
def get_color_balance(color: str) -> str:
    """Describe what a lower presence of *color* makes room for."""
    return {
        "Blue": "flexibility and spontaneity",
        "Green": "directness and assertiveness",
        "Yellow": "focus and routine",
        "Red": "collaboration and patience",
    }.get(color, "balanced approaches")
def detailed_explanation(user1_id: int, user2_id: int, u_vec: np.ndarray, v_vec: np.ndarray) -> List[str]:
    """Entry point for detailed match explanations.

    Delegates to the dynamic LLM / knowledge-base path (which itself falls
    back to rule-based text when the LLM is unavailable).
    """
    explanation = generate_dynamic_explanation(user1_id, user2_id, u_vec, v_vec)
    return explanation
def expectation_similarity(exp1: dict, exp2: dict) -> float:
    """Compare expectation data fields and return score between 0–1.

    A field present in both dicts scores 1 for an exact (case/space
    insensitive) match, 0.5 for a substring match, else 0. The result is
    the average over compared fields; 0.0 when nothing is comparable.
    """
    if not exp1 or not exp2:
        return 0.0

    compared_keys = [
        "pref_conflict_approach", "pref_financial_style", "pref_core_values",
        "pref_family_type", "work_life_pref", "ambition_pref",
        "social_pref", "move_for_career", "deal_breakers",
    ]

    score = 0.0
    counted = 0
    for key in compared_keys:
        if key not in exp1 or key not in exp2:
            continue
        counted += 1
        left = str(exp1[key]).strip().lower()
        right = str(exp2[key]).strip().lower()
        if left == right:
            score += 1
        elif left in right or right in left:
            score += 0.5  # partial match
    return score / counted if counted else 0.0
def generate_expectation_explanation(expect1: dict, expect2: dict) -> list:
    """
    Compare two users' expectations and produce text explanations
    describing why they match or differ, based only on expectation fields.
    """
    notes = []

    def compare_field(key, label):
        # Normalise both sides; skip the field when either value is blank.
        left = str(expect1.get(key, "") or "").strip().lower()
        right = str(expect2.get(key, "") or "").strip().lower()
        if not left or not right:
            return
        if left == right:
            notes.append(f"Both share similar preferences in {label} ({left.capitalize()}).")
        elif left in right or right in left:
            notes.append(f"They have partly aligned {label} preferences ({left} vs {right}).")
        else:
            notes.append(f"Their {label} expectations differ ({left} vs {right}).")

    # Key expectation areas, compared in a fixed, user-facing order.
    fields = [
        ("religion_alignment", "religion or faith"),
        ("accept_smoking", "smoking preference"),
        ("accept_alcohol", "alcohol preference"),
        ("pref_diet", "dietary habits"),
        ("pref_family_type", "family type"),
        ("pref_fitness", "fitness lifestyle"),
        ("pref_location", "preferred location"),
        ("pref_countries", "preferred country"),
        ("pref_languages", "spoken languages"),
        ("pref_conflict_approach", "conflict approach"),
        ("pref_financial_style", "financial management style"),
        ("work_life_pref", "work–life balance"),
        ("pref_core_values", "core values"),
        ("social_pref", "social interaction style"),
        ("ambition_pref", "ambition level"),
        ("pref_income_range", "income expectation"),
        ("move_for_career", "career relocation preference"),
        ("deal_breakers", "deal breakers"),
        ("other_non_negotiables", "non-negotiable expectations"),
    ]
    for key, label in fields:
        compare_field(key, label)

    # Compare free-text summaries when both users provided one.
    if expect1.get("expectation_summary") and expect2.get("expectation_summary"):
        from difflib import SequenceMatcher
        similarity = SequenceMatcher(
            None,
            expect1["expectation_summary"].lower(),
            expect2["expectation_summary"].lower(),
        ).ratio()
        if similarity > 0.6:
            notes.append("Their overall expectation summaries express similar relationship outlooks.")
        else:
            notes.append("They express different overall expectations about relationship goals.")

    # Clean up and limit
    if not notes:
        notes.append("No strong similarities or differences could be identified in expectations.")

    return notes[:8]  # Limit to 8 concise points
# ----------------------------------------------------------------------
# Step 2: Character-Based Filtering for Expectation Matches
# ----------------------------------------------------------------------
def refine_with_character_match(expectation_matches: list, base_user_id: int):
    """Given a list of expectation-matched profiles, rank them further by character similarity.

    Mutates each match dict in place, adding "character_match_score" and
    "final_combined_score", and returns the list sorted by the combined
    score descending. Each match dict must carry "user_id" and
    "expectation_score".
    """

    # Fetch base user's color data
    base_llm = LLMGeneratedQuestions.query.filter_by(user_id=base_user_id).first()
    if not base_llm:
        return expectation_matches  # No character data → return as-is

    u_vec = base_llm.color_vec()  # [blue, green, yellow, red]

    refined = []
    for match in expectation_matches:
        other_id = match["user_id"]
        llm_other = LLMGeneratedQuestions.query.filter_by(user_id=other_id).first()
        if llm_other:
            v_vec = llm_other.color_vec()
            # Compute cosine similarity for personality color match
            score_color = cosine_sim(u_vec, v_vec)
            match["character_match_score"] = round(score_color, 3)
            # Blend both expectation + character score (e.g., 70–30 weighting)
            match["final_combined_score"] = round(0.7 * match["expectation_score"] + 0.3 * score_color, 3)
        else:
            # Candidate has no quiz data: fall back to the expectation score alone.
            match["character_match_score"] = 0
            match["final_combined_score"] = match["expectation_score"]

        refined.append(match)

    # Sort by final combined score descending
    refined.sort(key=lambda x: x["final_combined_score"], reverse=True)
    return refined
def _compute_matches(user_id: int, role: Optional[str], limit: int, exclude_self: bool):
    """Compute matches – initial (expectation only), characterwise sorting triggered separately.

    Reads extra knobs from the current Flask request's query string
    (weight_color, weight_expect, characterwise), so it must run inside a
    request context.

    NOTE(review): the *limit* parameter is currently unused — confirm
    whether result truncation was intended.

    Returns:
        (base_llm_row, grouped) where *grouped* buckets candidates by match
        percentage, or (None, []) when the base user has no quiz data.
    """
    # Base user's quiz row joined with their account and marriage profile.
    base_user_query = db.session.query(LLMGeneratedQuestions, Users, Marriage)\
        .join(Users, Users.user_id == LLMGeneratedQuestions.user_id)\
        .join(Marriage, Marriage.user_id == Users.user_id)\
        .filter(LLMGeneratedQuestions.user_id == user_id)

    if role:
        base_user_query = base_user_query.filter(LLMGeneratedQuestions.role == role)

    base_user = base_user_query.first()
    if not base_user:
        return None, []

    llm_src, user_src, marriage_src = base_user
    u_vec = llm_src.color_vec()
    user_gender = (marriage_src.gender or "").lower()

    # Candidate pool: everyone with both quiz data and a marriage profile.
    cq = db.session.query(LLMGeneratedQuestions, Users, Marriage)\
        .join(Users, Users.user_id == LLMGeneratedQuestions.user_id)\
        .join(Marriage, Marriage.user_id == Users.user_id)

    if role:
        cq = cq.filter(LLMGeneratedQuestions.role == role)
    if exclude_self:
        cq = cq.filter(LLMGeneratedQuestions.user_id != user_id)

    # For marriage matching, restrict candidates to the opposite gender.
    if role == "marriage":
        if user_gender.startswith("male"):
            cq = cq.filter(func.lower(func.trim(Marriage.gender)) == "female")
        elif user_gender.startswith("female"):
            cq = cq.filter(func.lower(func.trim(Marriage.gender)) == "male")

    candidates = cq.all()
    results = []

    exp_user = fetch_expectation_data(user_id)
    # Scoring weights come from the query string; the defaults weight
    # expectations fully and ignore color similarity.
    weight_color = float(request.args.get("weight_color", 0))
    weight_expect = float(request.args.get("weight_expect", 1))

    show_characterwise = request.args.get("characterwise", "false").lower() == "true"

    for llm, u, m in candidates:
        v_vec = llm.color_vec()
        score_color = cosine_sim(u_vec, v_vec)
        exp_candidate = fetch_expectation_data(llm.user_id)
        score_expect = expectation_similarity(exp_user, exp_candidate)
        final_score = (weight_color * score_color) + (weight_expect * score_expect)
        percentage = round(final_score * 100, 2)

        if not show_characterwise:
            # First load: Expectation-based explanation only
            explanations = generate_expectation_explanation(exp_user, exp_candidate)
            source_type = "expectation"
        else:
            # After clicking Characterwise button: LLM + psychology book explanation
            explanations = detailed_explanation(user_id, llm.user_id, u_vec, v_vec)
            source_type = "character"

        results.append({
            "user_id": llm.user_id,
            "name": u.name,
            "gender": m.gender,
            "blue": llm.blue,
            "green": llm.green,
            "yellow": llm.yellow,
            "red": llm.red,
            "score_color": round(score_color, 4),
            "score_expect": round(score_expect, 4),
            "final_score": round(final_score, 4),
            "percentage": percentage,
            "created_at": llm.created_at.isoformat() if llm.created_at else None,
            "explanations": explanations,
            "explanation_source": source_type
        })

    results.sort(key=lambda x: x["final_score"], reverse=True)

    # Bucket results into percentage bands for the UI.
    grouped = {
        "90-100": [r for r in results if r["percentage"] >= 90],
        "80-89": [r for r in results if 80 <= r["percentage"] < 90],
        "70-79": [r for r in results if 70 <= r["percentage"] < 80],
        "60-69": [r for r in results if 60 <= r["percentage"] < 70],
        "below_60": [r for r in results if r["percentage"] < 60],
    }

    return llm_src, grouped
# ==========
# Utilities
# ==========
def hash_password(password: str) -> str:
    """Return the hex SHA-256 digest of *password* (UTF-8 encoded)."""
    digest = hashlib.sha256()
    digest.update(password.encode("utf-8"))
    return digest.hexdigest()
def row_to_dict(cursor, row) -> Dict:
    """Map a DB-API row to a dict keyed by the cursor's column names.

    Returns an empty dict when *row* is None.
    """
    if row is None:
        return {}
    column_names = [description[0] for description in cursor.description]
    return {name: row[i] for i, name in enumerate(column_names)}
# --------------------
# FAISS helpers
# --------------------
# Module-level state populated by load_faiss_index(): the loaded FAISS index
# and the parallel list of text chunks (chunk i corresponds to vector i).
FAISS_INDEX = None
TEXT_CHUNKS: List[str] = []
def try_load_chunks_from_disk(index_path: str) -> List[str]:
    """Try several companion filenames for the chunk/text mapping.

    Looks next to *index_path* for ``<base>.chunks.json``,
    ``<base>_chunks.json``, ``<base>.chunks.pkl`` or ``<base>_chunks.pkl``
    (in that order) and returns the list of text chunks it contains. Both
    formats may store either a plain list of strings or a mapping with a
    "chunks" key — previously the wrapped form was only accepted for JSON.

    Returns:
        List[str]: the chunks, or [] when no usable companion file exists.
    """
    base = os.path.splitext(index_path)[0]
    candidates = [base + ".chunks.json", base + "_chunks.json", base + ".chunks.pkl", base + "_chunks.pkl"]

    for candidate in candidates:
        if not os.path.exists(candidate):
            continue
        try:
            if candidate.endswith(".json"):
                with open(candidate, "r", encoding="utf-8") as f:
                    data = json.load(f)
            else:
                # NOTE: pickle is only safe for trusted, locally produced files.
                with open(candidate, "rb") as f:
                    data = pickle.load(f)
            if isinstance(data, list):
                return data
            # Some exporters wrap the list as {"chunks": [...]}.
            if isinstance(data, dict) and isinstance(data.get("chunks"), list):
                return data["chunks"]
        except Exception as e:
            print(f"Failed to load chunks from {candidate}:", e)
    return []
def load_faiss_index(index_path: str):
    """Load the standalone FAISS index and its companion chunks into module globals.

    Sets FAISS_INDEX and TEXT_CHUNKS on success; logs and leaves
    FAISS_INDEX as None on any failure so callers can degrade gracefully.
    """
    global FAISS_INDEX, TEXT_CHUNKS
    if not HAS_FAISS:
        print("FAISS not installed. Skipping index load.")
        return
    if not os.path.exists(index_path):
        print("Faiss index path does not exist:", index_path)
        return
    try:
        FAISS_INDEX = faiss.read_index(index_path)
        # try to load chunks from companion files
        TEXT_CHUNKS = try_load_chunks_from_disk(index_path)
        if not TEXT_CHUNKS:
            print("Warning: Faiss index loaded but no companion text chunks found.")
            print("Provide a companion .chunks.json or .chunks.pkl file with a list of text chunks.")
    except Exception as e:
        print("Failed to load faiss index:", e)
        FAISS_INDEX = None
def get_nearest_context(query_emb: List[float] = None, k: int = 5, query_vector: Optional[List[float]] = None):
    """Return concatenated top-k chunks for a query.

    Args:
        query_emb: unused; kept only for backward compatibility with callers.
        k: number of nearest chunks to retrieve.
        query_vector: pre-computed embedding of the query. This function does
            not embed text itself, so without a vector it returns "".

    Returns:
        str: matching chunks joined by blank lines, or "" when the index is
        unavailable, no vector was supplied, or the search fails.
    """
    if FAISS_INDEX is None or not HAS_FAISS:
        return ""
    if query_vector is None:
        # No embedding generation in this module: the caller must supply a
        # vector produced by an external embedding model.
        return ""
    try:
        # Use the module-level numpy import instead of re-importing locally.
        vec = np.array([query_vector], dtype='float32')
        # FAISS returns distances and chunk indices for the single query row.
        _, indices = FAISS_INDEX.search(vec, k)
        texts = [TEXT_CHUNKS[idx] for idx in indices[0].tolist() if 0 <= idx < len(TEXT_CHUNKS)]
        return "\n\n".join(texts)
    except Exception as e:
        print("Faiss search failed:", e)
        return ""
# attempt to load faiss index at startup if path given
# (only when the faiss package is available and no index is loaded yet;
# the old condition repeated "FAISS_INDEX is None" and compared the
# not-yet-loaded index against False, which was always true)
if HAS_FAISS and FAISS_INDEX is None:
    load_faiss_index(FAISS_INDEX_PATH)
# =======================
# 1) AUTH / SIGNUP (auth)
# =======================
@app.post("/api/signup")
def signup():
    """Create a new user account.

    Expects a JSON body with "name", "email" and "password"; stores the
    SHA-256 hash of the password and returns the new user's id with 201.
    Responds 400 on missing fields and 500 on database errors.
    """
    data = request.get_json(force=True) or {}
    name = data.get("name")
    email = data.get("email")
    password = data.get("password")

    if not name or not email or not password:
        return jsonify({"error": "Name, email, and password are required."}), 400

    password_hash = hash_password(password)

    conn = None  # bound before try so the finally block can always reference it
    try:
        conn = get_db_connection()
        cur = conn.cursor()
        cur.execute("""
            INSERT INTO Users (name, email, password)
            VALUES (?, ?, ?)
        """, (name, email, password_hash))
        conn.commit()

        # Get the last inserted user_id.
        # NOTE(review): @@IDENTITY also reflects identity values inserted by
        # triggers; SCOPE_IDENTITY() in the same batch would be safer.
        cur.execute("SELECT @@IDENTITY AS user_id")
        user_id_row = cur.fetchone()
        user_id = user_id_row[0] if user_id_row else None

        if user_id:
            return jsonify({"message": "User created successfully.", "user_id": user_id}), 201
        return jsonify({"error": "Failed to retrieve user ID."}), 500
    except pyodbc.Error as e:
        print(f"Database Error: {e}")  # log the specific driver error
        return jsonify({"error": f"DB error: {e}"}), 500
    except Exception as e:
        print(f"Unexpected Error: {e}")  # log unexpected errors
        return jsonify({"error": f"Unexpected error: {e}"}), 500
    finally:
        # Previously conn could be unbound here (connect failure) and the
        # bare except hid the resulting NameError.
        if conn is not None:
            try:
                conn.close()
            except Exception:
                pass
# ==================================================
|
| 991 |
-
# 2) ROLE SELECTION + STATIC QUESTION FETCH + SAVE
|
| 992 |
-
# ==================================================
|
| 993 |
-
@app.post("/api/questions/select-role")
def select_role():
    """Assign a role to an existing user.

    Expects JSON with ``user_id`` and ``role_name``; ``assigned_at`` is an
    optional ISO timestamp (or None).  Validates that the user exists before
    inserting into ``UserRoles``.
    """
    data = request.get_json(force=True) or {}
    user_id = data.get("user_id")
    role_name = data.get("role_name")
    assigned_at = data.get("assigned_at")  # ISO string or None

    if not user_id or not role_name:
        return jsonify({"error": "User ID and role name are required."}), 400

    conn = None  # defined up-front so the finally block is safe even if connect fails
    try:
        conn = get_db_connection()
        cur = conn.cursor()
        # Verify the user exists before attempting the insert.
        cur.execute("SELECT COUNT(*) FROM Users WHERE user_id = ?", (user_id,))
        user_exists = cur.fetchone()[0]

        if user_exists == 0:
            return jsonify({"error": "User ID does not exist in the Users table."}), 404

        cur.execute("""
            INSERT INTO UserRoles (user_id, role_name, assigned_at)
            VALUES (?, ?, ?)
        """, (user_id, role_name, assigned_at))
        conn.commit()

        return jsonify({"message": "Role assigned successfully."}), 201

    except pyodbc.Error as e:
        # Surface foreign-key constraint violations with a clearer message.
        if "foreign key" in str(e).lower():
            return jsonify({"error": "Foreign key violation: User ID not found."}), 400
        return jsonify({"error": f"Database error: {str(e)}"}), 500
    except Exception as e:
        return jsonify({"error": f"Unexpected error: {str(e)}"}), 500
    finally:
        # Guarded close: `conn` may be None if get_db_connection() raised.
        if conn is not None:
            try:
                conn.close()
            except Exception:
                pass
|
| 1035 |
-
|
| 1036 |
-
@app.get("/api/questions/marriage")
def get_questions():
    """Return the static 'marriage' questionnaire as a JSON list.

    Each item carries label, options (split from a comma-separated column),
    input_type, column_key and category, ordered by question id.
    """
    conn = None  # defined up-front so the finally block is safe even if connect fails
    try:
        conn = get_db_connection()
        cur = conn.cursor()
        cur.execute("""
            SELECT question, options, input_type, column_key, category
            FROM RoleQuestions
            WHERE role_name = 'marriage'
            ORDER BY id
        """)
        rows = cur.fetchall()
        out = []
        for r in rows:
            label = r[0]
            # Options are stored as a single comma-separated string.
            options = (r[1].split(",") if r[1] else [])
            input_type = r[2]
            column_key = r[3]
            category = r[4]
            out.append({
                "label": label,
                "options": options,
                "input_type": input_type,
                "column_key": column_key,
                "category": category
            })
        return jsonify(out), 200
    except pyodbc.Error as e:
        return jsonify({"error": str(e)}), 500
    finally:
        # Guarded close: `conn` may be None if get_db_connection() raised.
        if conn is not None:
            try:
                conn.close()
            except Exception:
                pass
|
| 1068 |
-
|
| 1069 |
-
@app.post("/api/questions/submit-answers/marriage")
def submit_answers():
    """Persist a user's answers to the marriage questionnaire.

    Every field listed in ``role_fields['marriage']`` must be present in the
    JSON body; multiselect (list) answers are flattened to a comma-separated
    string and all values are stored as text (NULL when missing).
    """
    data = request.get_json(force=True) or {}
    user_id = data.get("user_id")
    if not user_id:
        return jsonify({"error": "User ID is required."}), 400

    role_fields = {
        "marriage": [
            "full_name", "date_of_birth", "gender", "current_city", "marital_status",
            "education_level", "employment_status", "number_of_siblings", "family_type", "hobbies_interests",
            "conflict_approach", "financial_style", "income_range", "relocation_willingness", "height_weight",
            "eye_colour", "skin_tone", "languages_spoken", "country", "blood_group", "religion", "dual_citizenship",
            "siblings_position", "parents_living_status", "live_with_parents", "support_parents_financially",
            "family_communication_frequency", "food_preference", "smoking_habit", "alcohol_habit", "daily_routine",
            "fitness_level", "own_pets", "travel_preference", "relaxation_mode", "job_role", "work_experience_years",
            "career_aspirations", "field_of_study", "work_preference", "remark", "created_at"
        ]
    }

    # Validate all required fields are present.
    for f in role_fields["marriage"]:
        if f not in data:
            return jsonify({"error": f"{f} is required."}), 400

    conn = None  # defined up-front so the finally block is safe even if connect fails
    try:
        conn = get_db_connection()
        cur = conn.cursor()

        table_name = "Marriage"
        placeholders = ", ".join(["?"] * (len(role_fields["marriage"]) + 1))
        query = f"INSERT INTO {table_name} (user_id, {', '.join(role_fields['marriage'])}) VALUES ({placeholders})"

        values = [user_id]
        for f in role_fields["marriage"]:
            val = data.get(f)

            # Flatten multiselect answers to a comma-separated string.
            if isinstance(val, list):
                val = ", ".join([str(v) for v in val])

            # Store as text, keeping NULL for missing values (the original
            # had a redundant `if val is None: val = None` branch here).
            values.append(str(val) if val is not None else None)

        print(f"DEBUG: Executing query: {query}")  # Debug log
        print(f"DEBUG: Values: {values}")  # Debug log

        cur.execute(query, values)
        conn.commit()

        return jsonify({"message": "Marriage record added successfully."}), 201

    except pyodbc.Error as e:
        print(f"Database Error: {e}")  # Detailed error logging
        return jsonify({"error": f"Database error: {str(e)}"}), 500
    except Exception as e:
        print(f"Unexpected Error: {e}")  # Detailed error logging
        return jsonify({"error": f"Unexpected error: {str(e)}"}), 500
    finally:
        # Guarded close: `conn` may be None if get_db_connection() raised.
        if conn is not None:
            try:
                conn.close()
            except Exception:
                pass
|
| 1137 |
-
|
| 1138 |
-
@app.get("/api/expectation-questions")
def get_expectation_questions():
    """Return all expectation questions as a JSON list, ordered by id."""
    conn = None  # defined up-front so the finally block is safe even if connect fails
    try:
        conn = get_db_connection()
        cur = conn.cursor()
        cur.execute("""
            SELECT id, question, options, input_type, column_key, category
            FROM ExpectationQuestions
            ORDER BY id
        """)
        rows = cur.fetchall()

        out = []
        for r in rows:
            out.append({
                "id": r[0],
                "question": r[1],
                # Options are stored as a single comma-separated string.
                "options": (r[2].split(",") if r[2] else []),
                "input_type": r[3],
                "column_key": r[4],
                "category": r[5]
            })
        return jsonify(out), 200
    except Exception as e:
        return jsonify({"error": str(e)}), 500
    finally:
        # Guarded close: `conn` may be None if get_db_connection() raised.
        if conn is not None:
            try:
                conn.close()
            except Exception:
                pass
|
| 1166 |
-
|
| 1167 |
-
|
| 1168 |
-
@app.post("/api/expectation-response")
def save_expectation_response():
    """Save a user's partner-expectation answers.

    Only JSON keys matching a ``column_key`` in ExpectationQuestions are
    persisted; list/dict values are JSON-encoded into their text columns.
    """
    data = request.get_json(force=True) or {}
    user_id = data.get("user_id")

    if not user_id:
        return jsonify({"error": "user_id is required"}), 400

    conn = None  # defined up-front so the finally block is safe even if connect fails
    try:
        conn = get_db_connection()
        cur = conn.cursor()

        # Whitelist columns against ExpectationQuestions to prevent
        # arbitrary column injection from the request body.
        cur.execute("SELECT column_key FROM ExpectationQuestions")
        valid_keys = [row[0] for row in cur.fetchall()]
        print("DEBUG: Valid keys from DB ->", valid_keys)
        print("DEBUG: Incoming data keys ->", list(data.keys()))

        cols, vals = [], []
        for k, v in data.items():
            if k in valid_keys:
                cols.append(k)
                # Structured answers are JSON-encoded for text columns.
                if isinstance(v, (list, dict)):
                    vals.append(json.dumps(v))
                else:
                    vals.append(v)

        if not cols:
            return jsonify({"error": "No valid fields found in request"}), 400

        # Build placeholders safely.
        placeholders = ", ".join(["?"] * (len(cols) + 1))  # +1 for user_id
        col_str = ", ".join([f"[{c}]" for c in cols])  # safe column quoting

        query = f"""
            INSERT INTO ExpectationResponse (user_id, {col_str})
            VALUES ({placeholders})
        """
        print("DEBUG: Final query ->", query)
        print("DEBUG: Values ->", [user_id] + vals)

        cur.execute(query, [user_id] + vals)
        conn.commit()

        return jsonify({"message": "Preferences saved successfully"}), 201

    except Exception as e:
        import traceback
        traceback.print_exc()  # full error in server logs
        return jsonify({"error": str(e)}), 500

    finally:
        # Guarded close: `conn` may be None if get_db_connection() raised.
        if conn is not None:
            try:
                conn.close()
            except Exception:
                pass
|
| 1224 |
-
|
| 1225 |
-
|
| 1226 |
-
# ==========================================
|
| 1227 |
-
# 3) LLM BATCH Q-GEN + COLOR % PERSIST (LLM)
|
| 1228 |
-
# ==========================================
|
| 1229 |
-
# Marriage-specific topics
|
| 1230 |
-
# TOPIC_BANK_BY_DOMAIN = {
|
| 1231 |
-
# "marriage": [
|
| 1232 |
-
# "household budget plan", "holiday travel decision", "child's school choice", "conflict about chores",
|
| 1233 |
-
# "time with in-laws", "health and fitness routine", "weekend family schedule", "saving vs spending debate",
|
| 1234 |
-
# "home renovation plan", "vacation destination", "career prioritization", "parenting disagreements",
|
| 1235 |
-
# "personal space and boundaries", "cultural value clashes", "major financial decision", "health emergency reaction",
|
| 1236 |
-
# "supporting partner’s dream", "anniversary planning", "family crisis handling", "friend group conflict"
|
| 1237 |
-
# ],
|
| 1238 |
-
# "general": [
|
| 1239 |
-
# "time management", "work-life balance", "goal setting", "communication challenge",
|
| 1240 |
-
# "team collaboration", "conflict resolution", "planning an event", "change management"
|
| 1241 |
-
# ]
|
| 1242 |
-
# }
|
| 1243 |
-
|
| 1244 |
-
# COLOR_PHRASES_BY_DOMAIN = {
|
| 1245 |
-
# "marriage": {
|
| 1246 |
-
# "blue": "evidence-based family decision",
|
| 1247 |
-
# "green": "routine and planning at home",
|
| 1248 |
-
# "red": "direct discussion and action",
|
| 1249 |
-
# "yellow": "creative family options"
|
| 1250 |
-
# },
|
| 1251 |
-
# "general": {
|
| 1252 |
-
# "blue": "data-driven choice",
|
| 1253 |
-
# "green": "process and planning",
|
| 1254 |
-
# "red": "people and action",
|
| 1255 |
-
# "yellow": "creative possibility"
|
| 1256 |
-
# }
|
| 1257 |
-
# }
|
| 1258 |
-
|
| 1259 |
-
# Expand parser/chain initialization to include context slot in prompt
|
| 1260 |
-
# Parser and chain are built only when the LangChain stack and an OpenAI key
# are available; both stay None otherwise so callers can fall back to the
# offline generator.
PARSER_BATCH = None
CHAIN_BATCH = None
if HAS_LLM_STACK and os.getenv("OPENAI_API_KEY"):
    class Option(BaseModel):
        # One answer option tied to exactly one personality color.
        text: str
        color: Literal["blue", "green", "red", "yellow"]

    class QAItem(BaseModel):
        # A single question with exactly four options (one per color).
        question: str
        options: List[Option] = Field(min_items=4, max_items=4)

    class BatchQA(BaseModel):
        # Top-level LLM response shape: a non-empty batch of QA items.
        items: List[QAItem] = Field(..., min_items=1)

    # System prompt: defines the four-color model and hard generation rules.
    SYSTEM_PROMPT = (
        "You write marriage compatibility assessment questions that reveal four personality colors through forced choices:\n"
        "- blue=analytical, fact-based (positive: thorough, precise | negative: overly critical, data-obsessed)\n"
        "- green=organized, process-oriented (positive: systematic, reliable | negative: rigid, bureaucratic)\n"
        "- red=decisive, action-oriented (positive: direct, results-driven | negative: impulsive, controlling)\n"
        "- yellow=creative, big-picture (positive: innovative, visionary | negative: unrealistic, scattered)\n"
        "\n"
        "CRITICAL RULES:\n"
        "1) Each option MUST clearly represent one color's typical behavior (include both positive and negative aspects)\n"
        "2) Questions should force a choice that reveals CORE PERSONALITY TRAITS, not future plans\n"
        "3) Use simple, everyday language but maintain situational specificity\n"
        "4) Options should be practical actions someone would actually take (show both strengths and weaknesses)\n"
        "5) Focus on CURRENT CHARACTER ANALYSIS, not future hypotheticals\n"
        "6) Ensure each color option is distinctly different from others\n"
        "7) Each option MUST be maximum 15 words - keep them concise and clear\n"
        "8) For 20 total questions, distribute as:\n"
        "   - 25% (5 questions): Profile-based (use user's background, education, job, family background)\n"
        "   - 25% (5 questions): Expectation-based (use relationship expectations, values, lifestyle preferences)\n"
        "   - 50% (10 questions): Character-based scenarios (current behavior in various situations)\n"
        "9) AVOID future-focused questions about children, family planning, or long-term hypotheticals\n"
        "10) Include some options that reveal potential negative/shadow aspects of each color\n"
        "\n"
        "EXPECTATION-BASED QUESTION SOURCES:\n"
        "- Conflict Approach: How user prefers to handle disagreements\n"
        "- Financial Style: Money management and financial values\n"
        "- Work-Life Balance: Career vs personal life priorities\n"
        "- Social Preferences: Introversion/extroversion levels\n"
        "- Core Values: Fundamental beliefs and principles\n"
        "- Family Values: Views on family structure and relationships\n"
        "- Ambition Level: Career and personal growth aspirations\n"
        "- Deal Breakers: Non-negotiable relationship requirements\n"
        "\n"
        "PROFILE-BASED QUESTION SOURCES:\n"
        "- Education Level: Academic background\n"
        "- Employment Status: Current profession/work situation\n"
        "- Hobbies/Interests: Leisure activities and passions\n"
        "- Family Background: Siblings, family type, upbringing\n"
        "- Current Location: Living situation and environment\n"
        "\n"
        "CHARACTER-BASED QUESTIONS:\n"
        "- CURRENT behavior scenarios that reveal core personality colors\n"
        "- Decision-making styles in everyday situations\n"
        "- Problem-solving approaches\n"
        "- Communication patterns\n"
        "- Stress response behaviors\n"
    )

    # User prompt template: filled per-request with profile/expectation data,
    # retrieved corpus context, themes, and the parser's format instructions.
    USER_PROMPT_BATCH = (
        "Context (from Surrounded by Idiots or other corpus):\n{context}\n\n"
        "User Profile (Current Background):\n"
        "- Education: {education}\n"
        "- Employment: {employment}\n"
        "- Hobbies: {hobbies}\n"
        "- Family Background: {family_type}\n"
        "- Current Lifestyle: {current_lifestyle}\n"
        "\n"
        "User Relationship Expectations:\n"
        "- Conflict Style: {conflict_style}\n"
        "- Financial Style: {financial_style}\n"
        "- Income Expectations: {income_range}\n"
        "- Career Mobility: {relocation_willingness}\n"
        "- Family Values: {family_values}\n"
        "- Core Values: {core_values}\n"
        "- Work-Life Balance: {lifestyle_pref}\n"
        "- Social Preference: {social_pref}\n"
        "- Ambition Level: {ambition_pref}\n"
        "- Deal Breakers: {deal_breakers}\n"
        "\n"
        "Themes (array of short strings): {themes_json}\n"
        "Previously asked questions: {previous_questions}\n\n"
        "{format_instructions}\n\n"
        "Generate CHARACTER ANALYSIS questions with this distribution:\n"
        "- 25% PROFILE-BASED (5 questions): Use the user's CURRENT background above\n"
        "- 25% EXPECTATION-BASED (5 questions): Use relationship expectations and values above\n"
        "- 50% CHARACTER-BASED (10 questions): Current behavior in various life situations\n\n"
        "FOCUS ON CURRENT TRAITS, NOT FUTURE PLANS.\n"
        "AVOID questions about marriage plans, children, or long-term future.\n"
        "EACH OPTION MUST REVEAL CORE PERSONALITY COLORS (MAX 15 WORDS).\n"
        "INCLUDE SHADOW ASPECTS THAT SHOW POTENTIAL CHALLENGES.\n"
        "ALL QUESTIONS SHOULD HELP ANALYZE CURRENT CHARACTER FOR COMPATIBILITY MATCHING.\n"
    )

    # Parses the model's JSON output into a validated BatchQA instance.
    PARSER_BATCH = PydanticOutputParser(pydantic_object=BatchQA)

    def build_batch_chain():
        # LCEL pipeline: prompt -> chat model (forced JSON output) -> parser.
        llm = ChatOpenAI(
            model="gpt-4o-mini",
            temperature=0.7,
            max_retries=2,
            timeout=30,
            model_kwargs={"response_format": {"type": "json_object"}},
        )
        prompt = ChatPromptTemplate.from_messages([
            ("system", SYSTEM_PROMPT),
            ("user", USER_PROMPT_BATCH),
        ])
        return prompt | llm | PARSER_BATCH

    try:
        CHAIN_BATCH = build_batch_chain()
    except Exception as e:
        # Chain construction can fail (bad key, missing deps); keep None so
        # generate_batch_questions falls back to the offline generator.
        print("Failed to build CHAIN_BATCH:", e)
        CHAIN_BATCH = None
|
| 1379 |
-
|
| 1380 |
-
|
| 1381 |
-
|
| 1382 |
-
def ensure_valid_colors(options: List[Dict]) -> List[Dict]:
    """Normalize an option list so each of the four colors appears exactly once.

    Keeps the first non-empty option per valid color (text clipped to 80
    characters) and backfills any missing color with a default phrasing,
    returning at most four options.
    """
    defaults = {
        "blue": "Verify facts and numbers",
        "green": "Outline a clear process",
        "red": "Coordinate people and act",
        "yellow": "Propose a fresh idea",
    }
    used = set()
    cleaned: List[Dict] = []
    for option in options:
        color = str(option.get("color", "")).lower()
        text = str(option.get("text", "")).strip()
        if text and color in COLOR_KEYS and color not in used:
            used.add(color)
            cleaned.append({"text": text[:80], "color": color})
    # Backfill colors the model failed to produce, in canonical order.
    cleaned.extend(
        {"text": defaults[color], "color": color}
        for color in COLOR_KEYS
        if color not in used
    )
    return cleaned[:4]
|
| 1399 |
-
|
| 1400 |
-
# Expanded: include more profile fields so LLM can use them safely
|
| 1401 |
-
def summarize_profile(profile: Dict) -> Dict:
    """Extract the subset of profile fields that is safe to expose downstream.

    The full name is included (stripped) only when it is a non-blank string;
    each other allowed key is copied as-is unless its value is None, "" or [].
    """
    allowed_keys = (
        "gender", "current_city", "marital_status", "family_type", "number_of_siblings",
        "education_level", "employment_status", "hobbies_interests",
        "conflict_approach", "financial_style", "income_range", "relocation_willingness",
        "user_id",
    )
    summary: Dict = {}

    # Name is optional; expose it stripped when present and non-blank.
    full_name = profile.get("full_name")
    if isinstance(full_name, str) and full_name.strip():
        summary["full_name"] = full_name.strip()

    # Copy only safe, non-empty signals.
    for key in allowed_keys:
        value = profile.get(key)
        if value not in (None, "", []):
            summary[key] = value
    return summary
|
| 1420 |
-
|
| 1421 |
-
# Fallback uses name + scenario bank and lightly weaves profile signals
|
| 1422 |
-
# Fallback uses name + scenario bank and lightly weaves profile signals
|
| 1423 |
-
def offline_generate_batch(themes: List[str], state: Dict, context: str = "") -> List[Dict]:
    """Fallback question generator used when the LLM chain is unavailable.

    Builds one simple forced-choice question per theme, personalised with the
    user's name and a light "flavor" suffix derived from profile signals
    (conflict style, financial style, a random hobby), then shuffles the four
    color options to reduce position bias.

    BUG FIX: the original computed the flavor suffix (``tail``) but never
    interpolated it into the question text; it is now appended before the
    question mark, matching the block's stated intent.
    """
    prof = state.get("profile", {}) or {}
    name = prof.get("full_name") or "Partner"
    conflict = (prof.get("conflict_approach") or "").lower()
    money = (prof.get("financial_style") or "").lower()
    hobby = None
    if isinstance(prof.get("hobbies_interests"), list) and prof["hobbies_interests"]:
        hobby = random.choice(prof["hobbies_interests"])

    def flavor() -> str:
        # Short comma-joined summary of whatever profile signals exist.
        bits = []
        if conflict:
            bits.append(f"{conflict} style")
        if money:
            bits.append(f"{money} finances")
        if hobby:
            bits.append(f"likes {hobby}")
        return ", ".join(bits)

    # Flavor is loop-invariant; compute the suffix once.
    tail = (", " + flavor()) if flavor() else ""

    items = []
    for theme in themes:
        # "conflict around chores" -> "chores"; plain themes pass through.
        short = theme.split(" around ")[-1].strip()

        q = f"{name}, what would you do about {short}{tail}?".strip()

        # Weave in a small snippet of retrieved context when available
        # (first 120 characters, newlines flattened).
        if context:
            ctx_snip = context.replace('\n', ' ')[:120]
            q = f"{q} (Note: {ctx_snip})"

        # Keep the question concise.
        if len(q.split()) > 20:
            q = " ".join(q.split()[:20])

        opts = [
            {"text": "Check data and facts", "color": "blue"},
            {"text": "Draft a step-by-step plan", "color": "green"},
            {"text": "Align people and act", "color": "red"},
            {"text": "Brainstorm bold ideas", "color": "yellow"},
        ]
        random.shuffle(opts)
        items.append({"question": q, "options": opts, "source": "fallback"})
    return items
|
| 1468 |
-
|
| 1469 |
-
|
| 1470 |
-
def generate_batch_questions(themes: List[str], state: Dict, context: str = "", previous_questions: Optional[List[str]] = None) -> List[Dict]:
    """Generate a batch of color-personality questions for one user.

    Pulls profile fields from ``state`` (Marriage-table data), fetches
    expectation answers from the ExpectationResponse table, and invokes the
    LLM chain when available; on any failure or when the chain is not
    configured, falls back to ``offline_generate_batch``.

    Returns a list of dicts with "question", "options" (four color-tagged
    choices) and "source" ("llm" or "fallback").
    """
    # Extract PROFILE-BASED data from Marriage table
    profile = state.get("profile", {})
    user_id = profile.get("user_id")

    # Fetch EXPECTATION data from ExpectationResponse table
    expectation_data = fetch_expectation_data(user_id) if user_id else {}

    # PROFILE-BASED COLUMNS (from Marriage table)
    education = profile.get("education_level", "Not specified")
    employment = profile.get("employment_status", "Not specified")
    hobbies = profile.get("hobbies_interests", "Not specified")
    family_type = profile.get("family_type", "Not specified")
    current_city = profile.get("current_city", "Not specified")
    marital_status = profile.get("marital_status", "Not specified")
    number_of_siblings = profile.get("number_of_siblings", "Not specified")

    # EXPECTATION-BASED COLUMNS (from ExpectationResponse table)
    conflict_style = expectation_data.get("pref_conflict_approach", "Not specified")
    financial_style = expectation_data.get("pref_financial_style", "Not specified")
    income_range = expectation_data.get("pref_income_range", "Not specified")
    relocation_willingness = expectation_data.get("move_for_career", "Not specified")
    family_values = expectation_data.get("pref_family_type", "Not specified")
    core_values = expectation_data.get("pref_core_values", "Not specified")
    lifestyle_pref = expectation_data.get("work_life_pref", "Not specified")
    social_pref = expectation_data.get("social_pref", "Not specified")
    ambition_pref = expectation_data.get("ambition_pref", "Not specified")
    deal_breakers = expectation_data.get("deal_breakers", "Not specified")

    # Calculate current lifestyle from PROFILE data
    current_lifestyle = f"Education: {education}, Employment: {employment}, Location: {current_city}, Hobbies: {hobbies}"

    # Prepare expectation summary (human-readable block; passed through to
    # the prompt payload as "expectation_summary")
    expectation_summary = f"""
    Relationship Expectations:
    - Conflict Style: {conflict_style}
    - Financial Style: {financial_style}
    - Income Range: {income_range}
    - Career Mobility: {relocation_willingness}
    - Family Values: {family_values}
    - Core Values: {core_values}
    - Work-Life Balance: {lifestyle_pref}
    - Social Preference: {social_pref}
    - Ambition Level: {ambition_pref}
    - Deal Breakers: {deal_breakers}
    """

    # Only try the LLM path when both the chain and its parser were built.
    if CHAIN_BATCH is not None and PARSER_BATCH is not None:
        try:
            # Keys here must match the placeholders in USER_PROMPT_BATCH.
            payload = {
                "state": json.dumps(state, ensure_ascii=False),
                "themes_json": json.dumps(themes, ensure_ascii=False),
                "previous_questions": json.dumps(previous_questions or [], ensure_ascii=False),
                "format_instructions": PARSER_BATCH.get_format_instructions(),
                "context": context[:4000],  # cap retrieved context size

                # PROFILE-BASED parameters (25% questions)
                "education": education,
                "employment": employment,
                "hobbies": str(hobbies),
                "family_type": family_type,
                "current_lifestyle": current_lifestyle,
                "marital_status": marital_status,
                "number_of_siblings": number_of_siblings,

                # EXPECTATION-BASED parameters (25% questions)
                "conflict_style": conflict_style,
                "financial_style": financial_style,
                "income_range": income_range,
                "relocation_willingness": relocation_willingness,
                "family_values": family_values,
                "core_values": core_values,
                "lifestyle_pref": lifestyle_pref,
                "social_pref": social_pref,
                "ambition_pref": ambition_pref,
                "deal_breakers": deal_breakers,
                "expectation_summary": expectation_summary
            }

            result = CHAIN_BATCH.invoke(payload)

            # The parser may return a pydantic model or a plain dict.
            if hasattr(result, "items"):
                items_raw = result.items
            elif isinstance(result, dict) and "items" in result:
                items_raw = result["items"]
            else:
                items_raw = []

            items: List[Dict] = []
            for qa in items_raw:
                out = qa.dict() if hasattr(qa, "dict") else dict(qa)
                # Guarantee one option per color, then shuffle.
                out["options"] = ensure_valid_colors(out.get("options", []))
                out["source"] = "llm"
                random.shuffle(out["options"])  # reduce position bias
                items.append(out)

            if items:
                return items
        except Exception as e:
            # Fall through to the offline generator on any LLM failure.
            print("LLM batch generation failed:", e)

    # Fallback generator (always returns items if themes not empty)
    return offline_generate_batch(themes, state, context=context)
|
| 1573 |
-
|
| 1574 |
-
|
| 1575 |
-
class SessionState:
    """In-memory state for one question/answer session.

    Tracks how many questions have been asked, per-color answer counts,
    the pending question queue and the history of asked question texts.
    """

    def __init__(self, n_questions: int, batch_size: int, domain: str = "general", role: Optional[str] = None, profile: Optional[Dict] = None):
        domain = (domain or role or "general").lower()
        # Unknown domains fall back to "general".
        self.domain = domain if domain in DOMAINS else "general"
        self.role = (role or self.domain)
        self.profile = profile or {}
        # Clamp to [1, MAX_QUESTIONS].
        self.n_questions = max(1, min(n_questions, MAX_QUESTIONS))
        self.batch_size = max(1, batch_size)
        self.asked = 0
        self.color_counts = {c: 0 for c in COLOR_KEYS}
        self.history: List[Dict] = []
        self.queue: List[Dict] = []
        self.finished = False
        self.used_topics: List[str] = []
        self.history_of_questions: List[str] = []  # question texts already asked

    def to_min_state(self) -> Dict:
        """Return a compact snapshot (percent mix, dominant color, profile)."""
        answered = sum(self.color_counts.values())
        denom = answered or 1  # avoid division by zero before any answers
        mix_percentages = {k: round((v / denom) * 100, 2) for k, v in self.color_counts.items()}
        # BUG FIX: the original guarded with `if total`, but total had been
        # clamped to >= 1, so dominant was never None even for an untouched
        # session; test the raw answered count instead.
        dominant = max(self.color_counts, key=self.color_counts.get) if answered else None
        return {
            "asked": self.asked,
            "dominant": dominant,
            "mix": mix_percentages,
            "domain": self.domain,
            "role": self.role,
            "profile": summarize_profile(self.profile),
        }

    def remaining(self) -> int:
        """Number of questions still to ask in this session."""
        return self.n_questions - self.asked
|
| 1606 |
-
|
| 1607 |
-
# Session persistence: file path is overridable via env var; a lock guards
# the shared SESSIONS dict across request threads.
SESSIONS_FILE = os.getenv("PYMATCH_SESSIONS_FILE", "sessions.json")
_sessions_lock = threading.Lock()
SESSIONS: Dict[str, SessionState] = {}

def save_sessions():
    """Persist all in-memory sessions to SESSIONS_FILE as JSON.

    Serializes each SessionState via its __dict__ (non-JSON values are
    stringified with default=str) and writes through a temp file followed by
    os.replace, so a crash mid-write never leaves a corrupt file.
    Failures are logged and swallowed — persistence is best-effort.
    """
    try:
        with _sessions_lock:
            serializable = {sid: s.__dict__ for sid, s in SESSIONS.items()}
            tmp = SESSIONS_FILE + ".tmp"
            with open(tmp, "w", encoding="utf-8") as f:
                json.dump(serializable, f, ensure_ascii=False, indent=2, default=str)
            # Atomic swap on POSIX and Windows.
            os.replace(tmp, SESSIONS_FILE)
    except Exception as e:
        print("Failed to save sessions:", e)
|
| 1621 |
-
|
| 1622 |
-
|
| 1623 |
-
def persist_final_progress(user_id: Optional[str], role: str, mix: Dict[str, float]) -> bool:
|
| 1624 |
-
llm_id = str(uuid.uuid4())
|
| 1625 |
-
blue = float(mix.get("blue", 0.0))
|
| 1626 |
-
green = float(mix.get("green", 0.0))
|
| 1627 |
-
yellow = float(mix.get("yellow", 0.0))
|
| 1628 |
-
red = float(mix.get("red", 0.0))
|
| 1629 |
-
try:
|
| 1630 |
-
conn = get_db_connection()
|
| 1631 |
-
cur = conn.cursor()
|
| 1632 |
-
# Try with llm_id; if identity error, retry without it
|
| 1633 |
-
try:
|
| 1634 |
-
cur.execute(f"""
|
| 1635 |
-
INSERT INTO [dbo].[{PROGRESS_TBL}]
|
| 1636 |
-
([llm_id],[user_id],[role],[blue],[green],[yellow],[red],[created_at])
|
| 1637 |
-
VALUES (?,?,?,?,?,?,?,SYSUTCDATETIME())
|
| 1638 |
-
""", (llm_id, str(user_id) if user_id is not None else None, role, blue, green, yellow, red))
|
| 1639 |
-
conn.commit()
|
| 1640 |
-
return True
|
| 1641 |
-
except pyodbc.Error as e:
|
| 1642 |
-
if "IDENTITY_INSERT" in str(e) or "(544)" in str(e):
|
| 1643 |
-
cur.execute(f"""
|
| 1644 |
-
INSERT INTO [dbo].[{PROGRESS_TBL}]
|
| 1645 |
-
([user_id],[role],[blue],[green],[yellow],[red],[created_at])
|
| 1646 |
-
VALUES (?,?,?,?,?,?,SYSUTCDATETIME())
|
| 1647 |
-
""", (str(user_id) if user_id is not None else None, role, blue, green, yellow, red))
|
| 1648 |
-
conn.commit()
|
| 1649 |
-
return True
|
| 1650 |
-
else:
|
| 1651 |
-
print("Persist failed:", e)
|
| 1652 |
-
return False
|
| 1653 |
-
except Exception as ex:
|
| 1654 |
-
print("Persist final progress failed:", ex)
|
| 1655 |
-
return False
|
| 1656 |
-
finally:
|
| 1657 |
-
try: conn.close()
|
| 1658 |
-
except: pass
|
| 1659 |
-
|
| 1660 |
-
# -------------------------
|
| 1661 |
-
# Profile fetch by role/id
|
| 1662 |
-
# -------------------------
|
| 1663 |
-
def fetch_profile_for_role(user_id: str, role: str) -> Dict:
|
| 1664 |
-
table = {
|
| 1665 |
-
"marriage": "Marriage",
|
| 1666 |
-
"interview": "Interview",
|
| 1667 |
-
"partnership": "Partnership"
|
| 1668 |
-
}.get(role.lower())
|
| 1669 |
-
|
| 1670 |
-
if not table:
|
| 1671 |
-
return {}
|
| 1672 |
-
|
| 1673 |
-
try:
|
| 1674 |
-
conn = get_db_connection()
|
| 1675 |
-
cur = conn.cursor()
|
| 1676 |
-
cur.execute(f"""
|
| 1677 |
-
SELECT TOP 1 *
|
| 1678 |
-
FROM {table}
|
| 1679 |
-
WHERE user_id = ?
|
| 1680 |
-
ORDER BY created_at DESC
|
| 1681 |
-
""", (user_id,))
|
| 1682 |
-
row = cur.fetchone()
|
| 1683 |
-
if row is None:
|
| 1684 |
-
return {}
|
| 1685 |
-
prof = row_to_dict(cur, row)
|
| 1686 |
-
# Normalize hobbies_interests if it exists
|
| 1687 |
-
if "hobbies_interests" in prof and isinstance(prof["hobbies_interests"], str):
|
| 1688 |
-
if prof["hobbies_interests"].strip().startswith("["):
|
| 1689 |
-
try:
|
| 1690 |
-
prof["hobbies_interests"] = json.loads(prof["hobbies_interests"])
|
| 1691 |
-
except Exception:
|
| 1692 |
-
prof["hobbies_interests"] = [s.strip() for s in prof["hobbies_interests"].split(",") if s.strip()]
|
| 1693 |
-
else:
|
| 1694 |
-
prof["hobbies_interests"] = [s.strip() for s in prof["hobbies_interests"].split(",") if s.strip()]
|
| 1695 |
-
prof["user_id"] = str(user_id)
|
| 1696 |
-
return prof
|
| 1697 |
-
except pyodbc.Error as e:
|
| 1698 |
-
print("Profile fetch error:", e)
|
| 1699 |
-
return {}
|
| 1700 |
-
finally:
|
| 1701 |
-
try: conn.close()
|
| 1702 |
-
except: pass
|
| 1703 |
-
|
| 1704 |
-
def fetch_expectation_data(user_id: str) -> Dict:
|
| 1705 |
-
"""Fetch expectation data from ExpectationResponse table"""
|
| 1706 |
-
try:
|
| 1707 |
-
conn = get_db_connection()
|
| 1708 |
-
cur = conn.cursor()
|
| 1709 |
-
cur.execute("""
|
| 1710 |
-
SELECT * FROM ExpectationResponse
|
| 1711 |
-
WHERE user_id = ?
|
| 1712 |
-
ORDER BY created_at DESC
|
| 1713 |
-
""", (user_id,))
|
| 1714 |
-
row = cur.fetchone()
|
| 1715 |
-
if row is None:
|
| 1716 |
-
return {}
|
| 1717 |
-
return row_to_dict(cur, row)
|
| 1718 |
-
except Exception as e:
|
| 1719 |
-
print(f"Error fetching expectation data: {e}")
|
| 1720 |
-
return {}
|
| 1721 |
-
finally:
|
| 1722 |
-
try: conn.close()
|
| 1723 |
-
except: pass
|
| 1724 |
-
# -------------------
|
| 1725 |
-
# Theme chooser
|
| 1726 |
-
# -------------------
|
| 1727 |
-
def choose_themes(sess: SessionState, k: int) -> List[str]:
|
| 1728 |
-
"""
|
| 1729 |
-
Instead of generic topic banks, use FAISS to retrieve text chunks from the document.
|
| 1730 |
-
"""
|
| 1731 |
-
if HAS_FAISS and FAISS_INDEX is not None and TEXT_CHUNKS:
|
| 1732 |
-
# Just grab k random chunks from the indexed document
|
| 1733 |
-
selected = random.sample(TEXT_CHUNKS, min(k, len(TEXT_CHUNKS)))
|
| 1734 |
-
# Wrap them as "themes" but really they're just context
|
| 1735 |
-
return selected
|
| 1736 |
-
else:
|
| 1737 |
-
# fallback: use generic themes
|
| 1738 |
-
fallback_themes = [
|
| 1739 |
-
"communication style", "conflict resolution", "decision making",
|
| 1740 |
-
"problem solving", "team collaboration", "personal values",
|
| 1741 |
-
"work habits", "social interaction", "stress management",
|
| 1742 |
-
"goal setting", "time management", "relationship dynamics"
|
| 1743 |
-
]
|
| 1744 |
-
return random.sample(fallback_themes, min(k, len(fallback_themes)))
|
| 1745 |
-
|
| 1746 |
-
|
| 1747 |
-
|
| 1748 |
-
# ---------------
|
| 1749 |
-
# Health / Home
|
| 1750 |
-
# ---------------
|
| 1751 |
-
@app.get("/health")
|
| 1752 |
-
def health():
|
| 1753 |
-
return {
|
| 1754 |
-
"status": "ok",
|
| 1755 |
-
"llm": ("openai" if CHAIN_BATCH is not None else "offline-fallback"),
|
| 1756 |
-
"has_openai_key": bool(os.getenv("OPENAI_API_KEY")),
|
| 1757 |
-
"db": {"server": SQL_SERVER, "database": SQL_DB, "table": PROGRESS_TBL},
|
| 1758 |
-
"faiss_loaded": bool(FAISS_INDEX is not None and HAS_FAISS), # Added HAS_FAISS check
|
| 1759 |
-
"faiss_chunks": len(TEXT_CHUNKS),
|
| 1760 |
-
"matching_llm_available": llm_chain is not None,
|
| 1761 |
-
"knowledge_base_loaded": knowledge is not None and hasattr(knowledge, 'indices') and len(knowledge.indices) > 0,
|
| 1762 |
-
"faiss_available": HAS_FAISS # Use the global flag
|
| 1763 |
-
}
|
| 1764 |
-
|
| 1765 |
-
|
| 1766 |
-
|
| 1767 |
-
@app.get("/")
|
| 1768 |
-
def home():
|
| 1769 |
-
return {
|
| 1770 |
-
"message": "Unified Py-Match Service (FAISS-enabled)",
|
| 1771 |
-
"try": [
|
| 1772 |
-
"POST /api/signup",
|
| 1773 |
-
"POST /api/questions/select-role",
|
| 1774 |
-
"GET /api/questions/<role>",
|
| 1775 |
-
"POST /api/questions/submit-answers/<role>",
|
| 1776 |
-
"POST /llm/start (body: { user_id, role, n_questions, batch_size })",
|
| 1777 |
-
"POST /llm/next (body: { session_id, selected_color })",
|
| 1778 |
-
"GET /api/match/<user_id> (query: ?role=<role>&limit=<num>)",
|
| 1779 |
-
]
|
| 1780 |
-
}
|
| 1781 |
-
|
| 1782 |
-
# -------------------------
|
| 1783 |
-
# LLM Session: start / next
|
| 1784 |
-
# ----------------------
|
| 1785 |
-
|
| 1786 |
-
|
| 1787 |
-
@app.post("/llm/start")
|
| 1788 |
-
def llm_start():
|
| 1789 |
-
data = request.get_json(force=True) or {}
|
| 1790 |
-
user_id = str(data.get("user_id") or "").strip()
|
| 1791 |
-
role_in = (data.get("role") or "general").lower()
|
| 1792 |
-
n_req = int(data.get("n_questions", DEFAULT_N_QUESTIONS))
|
| 1793 |
-
b_req = int(data.get("batch_size", DEFAULT_BATCH_SIZE))
|
| 1794 |
-
|
| 1795 |
-
if not user_id:
|
| 1796 |
-
return jsonify({"error": "user_id is required"}), 400
|
| 1797 |
-
if role_in not in DOMAINS:
|
| 1798 |
-
return jsonify({"error": f"Invalid role. Allowed: {', '.join(DOMAINS)}"}), 400
|
| 1799 |
-
|
| 1800 |
-
# Fetch profile from the correct table based on role
|
| 1801 |
-
profile = fetch_profile_for_role(user_id, role_in)
|
| 1802 |
-
|
| 1803 |
-
# Create session
|
| 1804 |
-
sid = str(uuid.uuid4())
|
| 1805 |
-
sess = SessionState(
|
| 1806 |
-
n_questions=n_req,
|
| 1807 |
-
batch_size=b_req,
|
| 1808 |
-
domain=role_in,
|
| 1809 |
-
role=role_in,
|
| 1810 |
-
profile=profile
|
| 1811 |
-
)
|
| 1812 |
-
SESSIONS[sid] = sess
|
| 1813 |
-
|
| 1814 |
-
# Generate first batch of questions from FAISS chunks
|
| 1815 |
-
to_generate = min(sess.batch_size, sess.remaining())
|
| 1816 |
-
themes = choose_themes(sess, to_generate)
|
| 1817 |
-
|
| 1818 |
-
context = ""
|
| 1819 |
-
if HAS_FAISS and FAISS_INDEX is not None and TEXT_CHUNKS:
|
| 1820 |
-
context = "\n".join(random.sample(TEXT_CHUNKS, min(3, len(TEXT_CHUNKS))))
|
| 1821 |
-
|
| 1822 |
-
# INSERT THIS LINE (replace the existing queue generation line):
|
| 1823 |
-
queue = generate_batch_questions(themes, sess.to_min_state(), context=context, previous_questions=sess.history_of_questions)
|
| 1824 |
-
|
| 1825 |
-
if not queue:
|
| 1826 |
-
return jsonify({"error": "Question generation failed"}), 500
|
| 1827 |
-
|
| 1828 |
-
sess.queue = queue
|
| 1829 |
-
|
| 1830 |
-
# Serve first question
|
| 1831 |
-
first = sess.queue.pop(0)
|
| 1832 |
-
sess.asked += 1
|
| 1833 |
-
|
| 1834 |
-
# INSERT THIS LINE (to track the asked question):
|
| 1835 |
-
sess.history_of_questions.append(first["question"])
|
| 1836 |
-
|
| 1837 |
-
save_sessions()
|
| 1838 |
-
|
| 1839 |
-
return jsonify({
|
| 1840 |
-
"session_id": sid,
|
| 1841 |
-
"index": 1,
|
| 1842 |
-
"total": sess.n_questions,
|
| 1843 |
-
"question": first["question"],
|
| 1844 |
-
"options": first["options"],
|
| 1845 |
-
"source": first.get("source", "unknown"),
|
| 1846 |
-
"role": sess.role,
|
| 1847 |
-
"profile_used": bool(profile),
|
| 1848 |
-
"faiss_themes": themes,
|
| 1849 |
-
"faiss_context": context
|
| 1850 |
-
})
|
| 1851 |
-
|
| 1852 |
-
|
| 1853 |
-
|
| 1854 |
-
@app.post("/llm/next")
|
| 1855 |
-
def llm_next():
|
| 1856 |
-
data = request.get_json(force=True) or {}
|
| 1857 |
-
sid = data.get("session_id")
|
| 1858 |
-
color = str(data.get("selected_color") or "").lower()
|
| 1859 |
-
|
| 1860 |
-
if not sid or sid not in SESSIONS:
|
| 1861 |
-
return jsonify({"error": "Invalid or missing session_id"}), 400
|
| 1862 |
-
if color not in COLOR_KEYS:
|
| 1863 |
-
return jsonify({"error": "selected_color must be blue|green|red|yellow"}), 400
|
| 1864 |
-
|
| 1865 |
-
sess = SESSIONS[sid]
|
| 1866 |
-
if sess.finished:
|
| 1867 |
-
return jsonify({"done": True, "message": "Session already finished."})
|
| 1868 |
-
|
| 1869 |
-
# record answer
|
| 1870 |
-
sess.color_counts[color] += 1
|
| 1871 |
-
sess.history.append({"selected_color": color})
|
| 1872 |
-
|
| 1873 |
-
# Initialize themes and context with default values
|
| 1874 |
-
themes = []
|
| 1875 |
-
context = ""
|
| 1876 |
-
|
| 1877 |
-
# finished?
|
| 1878 |
-
if sess.asked >= sess.n_questions:
|
| 1879 |
-
sess.finished = True
|
| 1880 |
-
mix = sess.to_min_state()["mix"]
|
| 1881 |
-
user_id = (sess.profile or {}).get("user_id")
|
| 1882 |
-
db_ok = persist_final_progress(user_id=user_id, role=sess.role, mix=mix)
|
| 1883 |
-
save_sessions()
|
| 1884 |
-
return jsonify({
|
| 1885 |
-
"done": True,
|
| 1886 |
-
"message": "No more questions.",
|
| 1887 |
-
"mix": mix,
|
| 1888 |
-
"db_write": "ok" if db_ok else "failed"
|
| 1889 |
-
})
|
| 1890 |
-
|
| 1891 |
-
# ensure queue; refill if needed
|
| 1892 |
-
if not sess.queue:
|
| 1893 |
-
to_generate = min(sess.batch_size, sess.remaining())
|
| 1894 |
-
themes = choose_themes(sess, to_generate)
|
| 1895 |
-
|
| 1896 |
-
if HAS_FAISS and FAISS_INDEX is not None and TEXT_CHUNKS:
|
| 1897 |
-
context = "\n".join(random.sample(TEXT_CHUNKS, min(3, len(TEXT_CHUNKS))))
|
| 1898 |
-
|
| 1899 |
-
# INSERT THIS LINE (replace the existing queue generation line):
|
| 1900 |
-
sess.queue = generate_batch_questions(themes, sess.to_min_state(), context=context, previous_questions=sess.history_of_questions)
|
| 1901 |
-
|
| 1902 |
-
if not sess.queue:
|
| 1903 |
-
return jsonify({"error": "Question generation failed"}), 500
|
| 1904 |
-
|
| 1905 |
-
nxt = sess.queue.pop(0)
|
| 1906 |
-
sess.asked += 1
|
| 1907 |
-
|
| 1908 |
-
# INSERT THIS LINE (to track the asked question):
|
| 1909 |
-
sess.history_of_questions.append(nxt["question"])
|
| 1910 |
-
|
| 1911 |
-
save_sessions()
|
| 1912 |
-
|
| 1913 |
-
return jsonify({
|
| 1914 |
-
"session_id": sid,
|
| 1915 |
-
"index": sess.asked,
|
| 1916 |
-
"total": sess.n_questions,
|
| 1917 |
-
"question": nxt["question"],
|
| 1918 |
-
"options": nxt["options"],
|
| 1919 |
-
"progress": sess.to_min_state()["mix"],
|
| 1920 |
-
"source": nxt.get("source", "unknown"),
|
| 1921 |
-
"role": sess.role,
|
| 1922 |
-
"faiss_themes": themes,
|
| 1923 |
-
"faiss_context": context
|
| 1924 |
-
})
|
| 1925 |
-
|
| 1926 |
-
|
| 1927 |
-
# =================================================================
|
| 1928 |
-
# Routes (from matching.py)
|
| 1929 |
-
# =================================================================
|
| 1930 |
-
@app.get("/api/marriage-profile/<int:user_id>")
|
| 1931 |
-
def get_marriage_profile(user_id: int):
|
| 1932 |
-
"""Get marriage profile by user_id"""
|
| 1933 |
-
try:
|
| 1934 |
-
conn = get_db_connection()
|
| 1935 |
-
cur = conn.cursor()
|
| 1936 |
-
|
| 1937 |
-
cur.execute("""
|
| 1938 |
-
SELECT * FROM Marriage
|
| 1939 |
-
WHERE user_id = ?
|
| 1940 |
-
ORDER BY created_at DESC
|
| 1941 |
-
""", (user_id,))
|
| 1942 |
-
|
| 1943 |
-
row = cur.fetchone()
|
| 1944 |
-
if row is None:
|
| 1945 |
-
return jsonify({"error": "Marriage profile not found"}), 404
|
| 1946 |
-
|
| 1947 |
-
# Convert row to dict
|
| 1948 |
-
profile = row_to_dict(cur, row)
|
| 1949 |
-
|
| 1950 |
-
return jsonify(profile), 200
|
| 1951 |
-
|
| 1952 |
-
except Exception as e:
|
| 1953 |
-
print(f"Error fetching marriage profile: {e}")
|
| 1954 |
-
return jsonify({"error": str(e)}), 500
|
| 1955 |
-
finally:
|
| 1956 |
-
try:
|
| 1957 |
-
conn.close()
|
| 1958 |
-
except:
|
| 1959 |
-
pass
|
| 1960 |
-
|
| 1961 |
-
|
| 1962 |
-
@app.get("/api/match/<int:user_id>")
|
| 1963 |
-
def api_match(user_id: int):
|
| 1964 |
-
with app.app_context():
|
| 1965 |
-
role = request.args.get("role", None)
|
| 1966 |
-
limit = int(request.args.get("limit", "10"))
|
| 1967 |
-
exclude_self = request.args.get("exclude_self", "yes").lower() == "yes"
|
| 1968 |
-
|
| 1969 |
-
src, top = _compute_matches(user_id, role, limit, exclude_self)
|
| 1970 |
-
if src is None:
|
| 1971 |
-
return jsonify({"error": f"No data found for user_id={user_id}"}), 404
|
| 1972 |
-
|
| 1973 |
-
# Ensure the name field is included in the input_user object
|
| 1974 |
-
user = Users.query.filter_by(user_id=user_id).first()
|
| 1975 |
-
input_user = {
|
| 1976 |
-
"user_id": src.user_id,
|
| 1977 |
-
"role": src.role,
|
| 1978 |
-
"blue": src.blue,
|
| 1979 |
-
"green": src.green,
|
| 1980 |
-
"yellow": src.yellow,
|
| 1981 |
-
"red": src.red,
|
| 1982 |
-
"created_at": src.created_at.isoformat() if src.created_at else None,
|
| 1983 |
-
"name": user.name if user else "Unknown" # Add name field
|
| 1984 |
-
}
|
| 1985 |
-
|
| 1986 |
-
return jsonify({
|
| 1987 |
-
"input_user": input_user,
|
| 1988 |
-
"matches": top,
|
| 1989 |
-
"count": len(top),
|
| 1990 |
-
})
|
| 1991 |
-
|
| 1992 |
-
@app.get("/api/match")
|
| 1993 |
-
def compat_match():
|
| 1994 |
-
with app.app_context():
|
| 1995 |
-
try:
|
| 1996 |
-
user_id = int(request.args.get("user_id", ""))
|
| 1997 |
-
except ValueError:
|
| 1998 |
-
return jsonify({"error": "Missing or invalid user_id"}), 400
|
| 1999 |
-
|
| 2000 |
-
role = request.args.get("role", None)
|
| 2001 |
-
limit = int(request.args.get("limit", "10"))
|
| 2002 |
-
exclude_self = request.args.get("exclude_self", "yes").lower() == "yes"
|
| 2003 |
-
|
| 2004 |
-
src, top = _compute_matches(user_id, role, limit, exclude_self)
|
| 2005 |
-
if src is None:
|
| 2006 |
-
return jsonify({"error": f"No data found for user_id={user_id}"}), 404
|
| 2007 |
-
|
| 2008 |
-
return jsonify({
|
| 2009 |
-
"input_user": {
|
| 2010 |
-
"user_id": src.user_id,
|
| 2011 |
-
"role": src.role,
|
| 2012 |
-
"blue": src.blue,
|
| 2013 |
-
"green": src.green,
|
| 2014 |
-
"yellow": src.yellow,
|
| 2015 |
-
"red": src.red,
|
| 2016 |
-
"created_at": src.created_at.isoformat() if src.created_at else None,
|
| 2017 |
-
},
|
| 2018 |
-
"matches": top,
|
| 2019 |
-
"count": len(top),
|
| 2020 |
-
})
|
| 2021 |
-
|
| 2022 |
-
|
| 2023 |
-
# =========
|
| 2024 |
-
# Run app
|
| 2025 |
-
# =========
|
| 2026 |
-
if __name__ == "__main__":
|
| 2027 |
-
# If you want to force-load faiss on startup, set FAISS_INDEX_PATH env var
|
| 2028 |
-
if HAS_FAISS and FAISS_INDEX is None and os.path.exists(FAISS_INDEX_PATH):
|
| 2029 |
-
load_faiss_index(FAISS_INDEX_PATH)
|
| 2030 |
-
|
| 2031 |
-
# Initialize the knowledge base for matching.py functionality
|
| 2032 |
-
knowledge = KnowledgeSource()
|
| 2033 |
-
|
| 2034 |
-
with app.app_context():
|
| 2035 |
-
print(f"LLM Chain Available: {llm_chain is not None}")
|
| 2036 |
-
print(f"Knowledge Base Loaded: {len(knowledge.indices)} indices")
|
| 2037 |
-
print(f"FAISS Available: {faiss is not None}")
|
| 2038 |
-
|
| 2039 |
-
app.run(host="0.0.0.0", port=5000, debug=True)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
surrounded_by_idiots.index
CHANGED
|
Binary files a/surrounded_by_idiots.index and b/surrounded_by_idiots.index differ
|
|
|