Update app.py
Browse files
app.py
CHANGED
|
@@ -3,8 +3,9 @@ import re
|
|
| 3 |
from reportlab.lib.pagesizes import letter
|
| 4 |
from reportlab.pdfgen import canvas
|
| 5 |
import bleach
|
| 6 |
-
import
|
| 7 |
import logging
|
|
|
|
| 8 |
|
| 9 |
# Print statement to confirm script initialization
|
| 10 |
print("Starting Project Closure Readiness Evaluator app...")
|
|
@@ -87,41 +88,46 @@ def sanitize_input(text):
|
|
| 87 |
return ""
|
| 88 |
return bleach.clean(text.strip())
|
| 89 |
|
| 90 |
-
# Rule-based completeness engine
|
| 91 |
-
def
|
| 92 |
try:
|
| 93 |
-
#
|
| 94 |
-
score = 0
|
| 95 |
missing_items = []
|
| 96 |
checklist_details = []
|
| 97 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 98 |
# Sanitize inputs
|
| 99 |
logs = sanitize_input(logs)
|
| 100 |
qa_report = sanitize_input(qa_report)
|
| 101 |
punch_list_text = sanitize_input(punch_list_text)
|
| 102 |
|
| 103 |
-
# Process Project Logs
|
| 104 |
log_keywords = r"complete|handover done|finished|closed|successful"
|
| 105 |
if logs and re.search(log_keywords, logs.lower()):
|
| 106 |
-
score +=
|
| 107 |
checklist_details.append("Logs: Completed")
|
| 108 |
else:
|
| 109 |
missing_items.append("Project Logs Incomplete")
|
| 110 |
checklist_details.append("Logs: Pending")
|
| 111 |
|
| 112 |
-
# Process QA Report
|
| 113 |
qa_keywords = r"approved|passed|cleared"
|
| 114 |
if qa_report and re.search(qa_keywords, qa_report.lower()):
|
| 115 |
-
score +=
|
| 116 |
checklist_details.append("QA Report: Approved")
|
| 117 |
else:
|
| 118 |
missing_items.append("QA Approval Missing")
|
| 119 |
checklist_details.append("QA Report: Pending")
|
| 120 |
|
| 121 |
-
# Process Punch List
|
| 122 |
punch_keywords = r"none|resolved|closed|no issues"
|
| 123 |
if punch_list_text and re.search(punch_keywords, punch_list_text.lower()):
|
| 124 |
-
score +=
|
| 125 |
checklist_details.append("Punch List: Resolved")
|
| 126 |
else:
|
| 127 |
missing_items.append("Open Punch Points Detected")
|
|
@@ -144,14 +150,15 @@ def evaluate_readiness_and_save(logs, qa_report, punch_list_text):
|
|
| 144 |
|
| 145 |
return score, checklist_summary, missing_summary, status, progress_bar
|
| 146 |
except Exception as e:
|
| 147 |
-
logging.error(f"Error in
|
| 148 |
raise
|
| 149 |
|
| 150 |
-
# Generate PDF report with signature slots
|
| 151 |
def generate_pdf(score, checklist_summary, missing_summary, status):
|
| 152 |
try:
|
| 153 |
-
|
| 154 |
-
|
|
|
|
| 155 |
c.setFont("Helvetica-Bold", 16)
|
| 156 |
c.drawString(50, 750, "Project Closure Readiness Report")
|
| 157 |
c.setFont("Helvetica", 12)
|
|
@@ -167,7 +174,8 @@ def generate_pdf(score, checklist_summary, missing_summary, status):
|
|
| 167 |
c.drawString(50, y-80, "Stakeholder Signature: ____________________")
|
| 168 |
c.drawString(50, y-100, "Date: ____________________")
|
| 169 |
c.save()
|
| 170 |
-
|
|
|
|
| 171 |
except Exception as e:
|
| 172 |
logging.error(f"Error in generate_pdf: {str(e)}")
|
| 173 |
raise
|
|
@@ -201,9 +209,9 @@ with gr.Blocks(css="""progress { background-color: #f0f0f0; } .red { background-
|
|
| 201 |
pdf_btn = gr.Button("Download PDF Report")
|
| 202 |
pdf_output = gr.File(label="PDF Report")
|
| 203 |
|
| 204 |
-
# Connect inputs to
|
| 205 |
submit_btn.click(
|
| 206 |
-
fn=
|
| 207 |
inputs=[logs_input, qa_input, punch_input],
|
| 208 |
outputs=[score_output, checklist_output, missing_output, status_output, progress_output]
|
| 209 |
)
|
|
@@ -212,7 +220,7 @@ with gr.Blocks(css="""progress { background-color: #f0f0f0; } .red { background-
|
|
| 212 |
pdf_btn.click(
|
| 213 |
fn=generate_pdf,
|
| 214 |
inputs=[score_output, checklist_output, missing_output, status_output],
|
| 215 |
-
outputs=pdf_output
|
| 216 |
)
|
| 217 |
|
| 218 |
# Launch the app (commented out for Hugging Face deployment)
|
|
|
|
| 3 |
from reportlab.lib.pagesizes import letter
|
| 4 |
from reportlab.pdfgen import canvas
|
| 5 |
import bleach
|
| 6 |
+
import io
|
| 7 |
import logging
|
| 8 |
+
import os
|
| 9 |
|
| 10 |
# Print statement to confirm script initialization
|
| 11 |
print("Starting Project Closure Readiness Evaluator app...")
|
|
|
|
| 88 |
return ""
|
| 89 |
return bleach.clean(text.strip())
|
| 90 |
|
| 91 |
+
# Rule-based completeness engine with weighted scoring
|
| 92 |
+
def evaluate_readiness(logs, qa_report, punch_list_text):
|
| 93 |
try:
|
| 94 |
+
# Initialize score and lists for tracking
|
| 95 |
+
score = 0
|
| 96 |
missing_items = []
|
| 97 |
checklist_details = []
|
| 98 |
|
| 99 |
+
# Define weights for scoring
|
| 100 |
+
LOGS_WEIGHT = 30 # 30% weight for logs
|
| 101 |
+
QA_WEIGHT = 40 # 40% weight for QA report
|
| 102 |
+
PUNCH_WEIGHT = 30 # 30% weight for punch list
|
| 103 |
+
|
| 104 |
# Sanitize inputs
|
| 105 |
logs = sanitize_input(logs)
|
| 106 |
qa_report = sanitize_input(qa_report)
|
| 107 |
punch_list_text = sanitize_input(punch_list_text)
|
| 108 |
|
| 109 |
+
# Process Project Logs (30% weight)
|
| 110 |
log_keywords = r"complete|handover done|finished|closed|successful"
|
| 111 |
if logs and re.search(log_keywords, logs.lower()):
|
| 112 |
+
score += LOGS_WEIGHT
|
| 113 |
checklist_details.append("Logs: Completed")
|
| 114 |
else:
|
| 115 |
missing_items.append("Project Logs Incomplete")
|
| 116 |
checklist_details.append("Logs: Pending")
|
| 117 |
|
| 118 |
+
# Process QA Report (40% weight)
|
| 119 |
qa_keywords = r"approved|passed|cleared"
|
| 120 |
if qa_report and re.search(qa_keywords, qa_report.lower()):
|
| 121 |
+
score += QA_WEIGHT
|
| 122 |
checklist_details.append("QA Report: Approved")
|
| 123 |
else:
|
| 124 |
missing_items.append("QA Approval Missing")
|
| 125 |
checklist_details.append("QA Report: Pending")
|
| 126 |
|
| 127 |
+
# Process Punch List (30% weight)
|
| 128 |
punch_keywords = r"none|resolved|closed|no issues"
|
| 129 |
if punch_list_text and re.search(punch_keywords, punch_list_text.lower()):
|
| 130 |
+
score += PUNCH_WEIGHT
|
| 131 |
checklist_details.append("Punch List: Resolved")
|
| 132 |
else:
|
| 133 |
missing_items.append("Open Punch Points Detected")
|
|
|
|
| 150 |
|
| 151 |
return score, checklist_summary, missing_summary, status, progress_bar
|
| 152 |
except Exception as e:
|
| 153 |
+
logging.error(f"Error in evaluate_readiness: {str(e)}")
|
| 154 |
raise
|
| 155 |
|
| 156 |
+
# Generate PDF report with signature slots using in-memory buffer
|
| 157 |
def generate_pdf(score, checklist_summary, missing_summary, status):
|
| 158 |
try:
|
| 159 |
+
# Use an in-memory buffer instead of writing to disk
|
| 160 |
+
buffer = io.BytesIO()
|
| 161 |
+
c = canvas.Canvas(buffer, pagesize=letter)
|
| 162 |
c.setFont("Helvetica-Bold", 16)
|
| 163 |
c.drawString(50, 750, "Project Closure Readiness Report")
|
| 164 |
c.setFont("Helvetica", 12)
|
|
|
|
| 174 |
c.drawString(50, y-80, "Stakeholder Signature: ____________________")
|
| 175 |
c.drawString(50, y-100, "Date: ____________________")
|
| 176 |
c.save()
|
| 177 |
+
buffer.seek(0)
|
| 178 |
+
return buffer, "readiness_report.pdf"
|
| 179 |
except Exception as e:
|
| 180 |
logging.error(f"Error in generate_pdf: {str(e)}")
|
| 181 |
raise
|
|
|
|
| 209 |
pdf_btn = gr.Button("Download PDF Report")
|
| 210 |
pdf_output = gr.File(label="PDF Report")
|
| 211 |
|
| 212 |
+
# Wire the evaluate button: run the rule-based readiness check on the three
# sanitized text inputs and fan the five results out to the display widgets.
submit_btn.click(
    fn=evaluate_readiness,
    inputs=[logs_input, qa_input, punch_input],
    outputs=[score_output, checklist_output, missing_output, status_output, progress_output]
)
|
|
|
|
| 220 |
# Wire the PDF button: rebuild the closure report from the current evaluation
# outputs and expose it for download.
# NOTE(review): generate_pdf appears to return a (BytesIO, filename) tuple,
# while gr.File conventionally expects a filepath (or a single file object) —
# confirm this return shape is accepted by the deployed Gradio version.
pdf_btn.click(
    fn=generate_pdf,
    inputs=[score_output, checklist_output, missing_output, status_output],
    outputs=[pdf_output]
)
|
| 225 |
|
| 226 |
# Launch the app (commented out for Hugging Face deployment)
|