Update app.py
app.py CHANGED
@@ -6,6 +6,7 @@ import base64
 import os
 import logging
 import traceback
+import requests
 from datetime import datetime
 from simple_salesforce import Salesforce
 
@@ -21,6 +22,10 @@ SF_PASSWORD = "Internal@1"
 SF_SECURITY_TOKEN = "NbUKcTx45azba5HEdntE9YAh"
 SF_DOMAIN = "login"
 
+# Hugging Face API details (mocked for now)
+HUGGING_FACE_API_URL = "https://api-inference.huggingface.co/models/your-model-name"
+HUGGING_FACE_API_TOKEN = "your-hugging-face-api-token"
+
 # Initialize Salesforce connection
 try:
     sf = Salesforce(
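Note that the commit keeps SF_PASSWORD, SF_SECURITY_TOKEN, and now a Hugging Face token hardcoded in app.py. A minimal alternative sketch, using the os module the file already imports, is to read them from the environment instead (the variable names here are illustrative, not part of this commit):

    SF_PASSWORD = os.environ["SF_PASSWORD"]              # fail fast if unset
    SF_SECURITY_TOKEN = os.environ["SF_SECURITY_TOKEN"]
    HUGGING_FACE_API_TOKEN = os.environ.get("HUGGING_FACE_API_TOKEN", "")  # optional while the call is mocked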
@@ -50,37 +55,71 @@ class VendorLog(BaseModel):
 # Store vendor logs for display
 vendor_logs = []
 
-def calculate_scores(log: VendorLog):
+def fetch_vendor_logs_from_salesforce():
+    """Fetch vendor logs from Salesforce to send to Hugging Face."""
     try:
-        ...
-        return {
-            'qualityScore': round(quality_score, 2),
-            'timelinessScore': round(timeliness_score, 2),
-            'safetyScore': round(safety_score, 2),
-            'communicationScore': round(communication_score, 2),
-            'finalScore': round(final_score, 2)
-        }
+        query = """
+            SELECT Id, Vendor_Log_Id__c, Vendor_Id__c, Work_Details__c, Quality_Report__c,
+                   Incident_Log__c, Work_Completion_Date__c, Actual_Completion_Date__c,
+                   Vendor_Log_Name__c, Delay_Days__c, Project__c
+            FROM Vendor_Log__c
+            WHERE CreatedDate = THIS_MONTH
+        """
+        result = sf.query(query)
+        logs = []
+        for record in result['records']:
+            log = VendorLog(
+                vendorLogId=record['Vendor_Log_Id__c'],
+                vendorId=record['Vendor_Id__c'],
+                workDetails=record['Work_Details__c'],
+                qualityReport=record['Quality_Report__c'],
+                incidentLog=record['Incident_Log__c'],
+                workCompletionDate=record['Work_Completion_Date__c'],
+                actualCompletionDate=record['Actual_Completion_Date__c'],
+                vendorLogName=record['Vendor_Log_Name__c'],
+                delayDays=record['Delay_Days__c'],
+                project=record['Project__c']
+            )
+            logs.append(log)
+        return logs
+    except Exception as e:
+        logger.error(f"Error fetching vendor logs from Salesforce: {str(e)}")
+        raise
+
+def calculate_scores_with_hugging_face(log: VendorLog):
+    """Send data to Hugging Face and get scores (mocked for now)."""
+    try:
+        # Prepare data for Hugging Face
+        payload = {
+            "workDetails": log.workDetails,
+            "qualityReport": log.qualityReport,
+            "incidentLog": log.incidentLog,
+            "delayDays": log.delayDays
+        }
+
+        # Mocked Hugging Face API call (replace with actual API call)
+        # headers = {"Authorization": f"Bearer {HUGGING_FACE_API_TOKEN}"}
+        # response = requests.post(HUGGING_FACE_API_URL, json=payload, headers=headers)
+        # scores = response.json()
+
+        # Mocked response for now
+        scores = {
+            'qualityScore': float(log.qualityReport.replace('% quality', '')),
+            'timelinessScore': 100.0 if log.delayDays <= 0 else 80.0 if log.delayDays <= 3 else 60.0 if log.delayDays <= 7 else 40.0,
+            'safetyScore': {'None': 100.0, 'Low': 80.0, 'Minor': 80.0, 'Medium': 50.0, 'High': 20.0}.get(log.incidentLog, 100.0),
+            'communicationScore': 0.0,  # Will be calculated below
+            'finalScore': 0.0  # Will be calculated below
+        }
+        scores['communicationScore'] = (scores['qualityScore'] * 0.33 + scores['timelinessScore'] * 0.33 + scores['safetyScore'] * 0.33)
+        scores['finalScore'] = (scores['qualityScore'] + scores['timelinessScore'] + scores['safetyScore'] + scores['communicationScore']) / 4
+
+        # Round scores
+        for key in scores:
+            scores[key] = round(scores[key], 2)
+
+        return scores
     except Exception as e:
-        logger.error(f"Error calculating scores: {str(e)}")
+        logger.error(f"Error calculating scores with Hugging Face: {str(e)}")
         raise
 
 def get_feedback(score: float, metric: str) -> str:
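The commented-out block in calculate_scores_with_hugging_face marks where the real inference call would go. A minimal sketch of that call, assuming the endpoint accepts the payload built above and returns the four score keys as JSON (both are assumptions about a not-yet-chosen model, not committed behavior):

    headers = {"Authorization": f"Bearer {HUGGING_FACE_API_TOKEN}"}
    response = requests.post(HUGGING_FACE_API_URL, json=payload, headers=headers, timeout=30)
    response.raise_for_status()  # surface 4xx/5xx instead of parsing an error body as scores
    scores = response.json()

For the mocked path, a log with qualityReport "90% quality", delayDays 2, and incidentLog "Low" works out to qualityScore 90.0, timelinessScore 80.0, safetyScore 80.0, communicationScore 0.33 * (90 + 80 + 80) = 82.5, and finalScore (90 + 80 + 80 + 82.5) / 4 = 83.125 before rounding. Note the 0.33 weights sum to 0.99, so communicationScore lands slightly below the plain average of the other three.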
@@ -167,8 +206,8 @@ def score_vendor():
     if not sf.session_id:
         return jsonify({'error': 'Salesforce session invalid'}), 401
 
-    # Calculate scores
-    scores = calculate_scores(log)
+    # Calculate scores using Hugging Face
+    scores = calculate_scores_with_hugging_face(log)
 
     # Generate PDF
     pdf_content = generate_pdf(log.vendorId, log.vendorLogName, scores)
@@ -177,7 +216,7 @@ def score_vendor():
     # Determine alert flag
     alert_flag = determine_alert_flag(scores['finalScore'], vendor_logs)
 
-    # Store the log and scores
+    # Store the log and scores in memory
    vendor_logs.append({
         'vendorLogId': log.vendorLogId,
         'vendorId': log.vendorId,
@@ -193,19 +232,18 @@ def score_vendor():
         'extracted': True
     })
 
-    # ...
+    # Save scores and PDF to Salesforce (Subcontractor_Performance_Score__c object)
     try:
-        sf. ...
-            ...
-            'Vendor_Log_Name__c': log.vendorLogName,
+        sf.Subcontractor_Performance_Score__c.create({
+            'Vendor_ID__c': log.vendorId,
+            'Month__c': datetime.now().strftime('%Y-%m-%d'),
             'Quality_Score__c': scores['qualityScore'],
             'Timeliness_Score__c': scores['timelinessScore'],
             'Safety_Score__c': scores['safetyScore'],
             'Communication_Score__c': scores['communicationScore'],
-            'Final_Score__c': scores['finalScore'],
-            ...
+            'Final_Score__c': scores['finalScore'],  # Note: This may be a formula field in Salesforce
+            'Certification_URL__c': pdf_base64,  # Store base64 PDF (or upload to Salesforce Files and store URL)
+            'Alert_Flag__c': alert_flag
         })
         logger.info(f"Successfully saved scores to Salesforce for Vendor Log: {log.vendorLogId}")
     except Exception as e:
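Certification_URL__c is being given the raw base64 PDF here, and the inline comment itself suggests uploading to Salesforce Files and storing a URL instead. A hedged sketch of that route, using the standard ContentVersion object (simple_salesforce exposes it the same way as the custom objects above; the download-URL shape is an assumption about the org's file serving):

    # Upload the PDF as a Salesforce File; VersionData must be base64-encoded, which pdf_base64 already is
    cv = sf.ContentVersion.create({
        'Title': f"Vendor_{log.vendorId}_scorecard",
        'PathOnClient': f"vendor_{log.vendorId}_scorecard.pdf",
        'VersionData': pdf_base64
    })
    # Look up the document id the upload created, then store a download link instead of the blob
    doc_id = sf.query(
        f"SELECT ContentDocumentId FROM ContentVersion WHERE Id = '{cv['id']}'"
    )['records'][0]['ContentDocumentId']
    certification_url = f"/sfc/servlet.shepherd/document/download/{doc_id}"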
@@ -232,6 +270,30 @@ def score_vendor():
 @app.route('/', methods=['GET'])
 def get_dashboard():
     try:
+        # Fetch data from Salesforce (Subcontractor_Performance_Score__c)
+        query = """
+            SELECT Vendor_ID__c, Month__c, Quality_Score__c, Timeliness_Score__c,
+                   Safety_Score__c, Communication_Score__c, Final_Score__c, Certification_URL__c,
+                   Alert_Flag__c
+            FROM Subcontractor_Performance_Score__c
+            WHERE Month__c = '2025-05-01'
+        """
+        result = sf.query(query)
+        vendor_logs.clear()  # Clear existing logs and repopulate from Salesforce
+        for record in result['records']:
+            vendor_logs.append({
+                'vendorId': record['Vendor_ID__c'],
+                'vendorLogName': f"Vendor {record['Vendor_ID__c']}",  # Placeholder; fetch actual name if needed
+                'scores': {
+                    'qualityScore': record['Quality_Score__c'],
+                    'timelinessScore': record['Timeliness_Score__c'],
+                    'safetyScore': record['Safety_Score__c'],
+                    'communicationScore': record['Communication_Score__c'],
+                    'finalScore': record['Final_Score__c']
+                },
+                'extracted': True
+            })
+
         # Calculate summary metrics
         total_vendors = len(vendor_logs)
         performance_alerts = sum(1 for log in vendor_logs if determine_alert_flag(log['scores']['finalScore'], vendor_logs))
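The dashboard filters on a hardcoded Month__c = '2025-05-01', while the save path above writes Month__c as today's date via datetime.now().strftime('%Y-%m-%d'), so the two only agree on the first day of a month. One hedged fix, assuming Month__c is meant to be a month bucket, is to pin both sides to the first of the current month:

    month_key = datetime.now().strftime('%Y-%m-01')  # e.g. '2025-05-01' for any day in May 2025
    # save path:  'Month__c': month_key
    # dashboard:  f"... WHERE Month__c = '{month_key}'"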
@@ -283,5 +345,14 @@ def get_dashboard():
         logger.error(f"Error in / endpoint: {str(e)}\nStack trace:\n{error_trace}")
         return jsonify({'error': f"Error generating dashboard: {str(e)}"}), 500
 
+@app.route('/document', methods=['GET'])
+def get_document():
+    try:
+        return render_template('document.html')
+    except Exception as e:
+        error_trace = traceback.format_exc()
+        logger.error(f"Error in /document endpoint: {str(e)}\nStack trace:\n{error_trace}")
+        return jsonify({'error': f"Error generating document: {str(e)}"}), 500
+
 if __name__ == "__main__":
     app.run(host="0.0.0.0", port=7860, debug=True)