Update app.py
app.py CHANGED
@@ -134,11 +134,13 @@ def create_salesforce_reports(df):
         "reportMetadata": {
             "name": f"SmartLog_Usage_Report_{datetime.now().strftime('%Y%m%d_%H%M%S')}",
             "developerName": f"SmartLog_Usage_Report_{datetime.now().strftime('%Y%m%d_%H%M%S')}",
-            "reportType": "CustomEntity
+            "reportType": {"type": "CustomEntity", "value": "SmartLog__c"},
             "reportFormat": "TABULAR",
-            "
-
-
+            "reportBooleanFilter": None,
+            "reportFilters": [],
+            "detailColumns": [
+                "SmartLog__c.Device_Id__c",
+                "SmartLog__c.Usage_Hours__c"
             ],
             "folderId": LABOPS_REPORTS_FOLDER_ID
         }
@@ -153,11 +155,13 @@ def create_salesforce_reports(df):
         "reportMetadata": {
             "name": f"SmartLog_AMC_Reminders_{datetime.now().strftime('%Y%m%d_%H%M%S')}",
             "developerName": f"SmartLog_AMC_Reminders_{datetime.now().strftime('%Y%m%d_%H%M%S')}",
-            "reportType": "CustomEntity
+            "reportType": {"type": "CustomEntity", "value": "SmartLog__c"},
             "reportFormat": "TABULAR",
-            "
-
-
+            "reportBooleanFilter": None,
+            "reportFilters": [],
+            "detailColumns": [
+                "SmartLog__c.Device_Id__c",
+                "SmartLog__c.AMC_Date__c"
             ],
             "folderId": LABOPS_REPORTS_FOLDER_ID
         }
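Both payloads above follow the Salesforce Analytics REST API's report-metadata shape. A minimal sketch of how such a payload could be created through simple_salesforce is shown below; the credentials, folder ID, and response handling are assumptions for illustration, not code from this commit (`sf.restful` forwards `json=` to the underlying HTTP request):

```python
# Hypothetical sketch: POSTing a reportMetadata payload like the ones in this
# diff to the Analytics REST API via simple_salesforce. Credentials and the
# folder ID below are placeholders, not values from app.py.
from datetime import datetime
from simple_salesforce import Salesforce

sf = Salesforce(username="user@example.com", password="password",
                security_token="token")

stamp = datetime.now().strftime('%Y%m%d_%H%M%S')
report_payload = {
    "reportMetadata": {
        "name": f"SmartLog_Usage_Report_{stamp}",
        "developerName": f"SmartLog_Usage_Report_{stamp}",
        "reportType": {"type": "CustomEntity", "value": "SmartLog__c"},
        "reportFormat": "TABULAR",
        "reportBooleanFilter": None,
        "reportFilters": [],
        "detailColumns": ["SmartLog__c.Device_Id__c", "SmartLog__c.Usage_Hours__c"],
        "folderId": "00lXXXXXXXXXXXXXXX",  # hypothetical folder ID
    }
}

# POST /services/data/vXX.X/analytics/reports creates the report definition.
result = sf.restful("analytics/reports", method="POST", json=report_payload)
print(result)
```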
@@ -170,15 +174,20 @@ def create_salesforce_reports(df):
         logging.error(f"Failed to create Salesforce reports: {str(e)}")
 
 # Save results to Salesforce SmartLog__c (runs in backend, result not displayed)
-def save_to_salesforce(df, summary, anomalies, amc_reminders, insights):
+def save_to_salesforce(df, reminders_df, summary, anomalies, amc_reminders, insights):
     if sf is None:
         logging.info("Salesforce connection not available for saving records.")
         return
-
-
-
-
-
+
+    # First, process and save AMC reminder records
+    reminder_records = []
+    current_date = datetime.now()
+    next_30_days = current_date + timedelta(days=30)
+
+    # Process reminders_df (records that qualify for AMC reminders)
+    if not reminders_df.empty:
+        logging.info(f"Processing {len(reminders_df)} AMC reminder records for saving to Salesforce")
+        for _, row in reminders_df.iterrows():
             # Validate and map picklist values
             status = str(row['status'])
             log_type = str(row['log_type'])
@@ -187,32 +196,28 @@ def save_to_salesforce(df, summary, anomalies, amc_reminders, insights):
             if status not in status_values:
                 status = picklist_mapping['Status__c'].get(status.lower(), status_values[0] if status_values else None)
                 if status is None:
-                    logging.warning(f"Skipping record with invalid Status__c: {row['status']}")
+                    logging.warning(f"Skipping reminder record with invalid Status__c: {row['status']}")
                     continue
 
             # Map Log_Type__c
             if log_type not in log_type_values:
                 log_type = picklist_mapping['Log_Type__c'].get(log_type.lower(), log_type_values[0] if log_type_values else None)
                 if log_type is None:
-                    logging.warning(f"Skipping record with invalid Log_Type__c: {row['log_type']}")
+                    logging.warning(f"Skipping reminder record with invalid Log_Type__c: {row['log_type']}")
                     continue
 
             # Handle AMC_Date__c with proper formatting
             amc_date_str = None
             if pd.notna(row['amc_date']):
                 try:
-
-                    amc_date = pd.to_datetime(row['amc_date'], format='%d-%m-%Y', errors='coerce')
-                    if pd.isna(amc_date):
-                        # If that fails, try YYYY-MM-DD
-                        amc_date = pd.to_datetime(row['amc_date'], format='%Y-%m-%d', errors='coerce')
+                    amc_date = pd.to_datetime(row['amc_date'], errors='coerce')
                     if pd.isna(amc_date):
                         logging.error(f"Failed to parse AMC Date for Device ID {row['device_id']}: {row['amc_date']}")
                     else:
                         amc_date_str = amc_date.strftime('%Y-%m-%d')
                         amc_date_dt = datetime.strptime(amc_date_str, '%Y-%m-%d')
                         if status == "Active" and current_date.date() <= amc_date_dt.date() <= next_30_days.date():
-                            logging.info(f"
+                            logging.info(f"AMC Reminder record saved: Device ID {row['device_id']}, AMC Date {amc_date_str}")
                 except Exception as e:
                     logging.error(f"Failed to parse AMC Date for Device ID {row['device_id']}: {str(e)}")
                     amc_date_str = None
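The hunk above replaces the old two-step format fallback (`%d-%m-%Y`, then `%Y-%m-%d`) with a single inferring parse. A self-contained sketch of the behavior this relies on, and its one caveat: `errors='coerce'` yields `NaT` instead of raising, but ambiguous day-first strings such as `01-02-2024` are now read month-first rather than via the removed `%d-%m-%Y` attempt.

```python
# pd.to_datetime with errors='coerce' infers the format per value and returns
# NaT for unparseable input; ambiguous strings default to month-first.
import pandas as pd

for raw in ["25-12-2024", "2024-12-25", "01-02-2024", "not a date"]:
    print(raw, "->", pd.to_datetime(raw, errors="coerce"))
# "01-02-2024" parses as 2024-01-02 (Jan 2), and "not a date" prints NaT.
```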
@@ -226,12 +231,68 @@ def save_to_salesforce(df, summary, anomalies, amc_reminders, insights):
                 'Downtime__c': float(row['downtime']) if pd.notna(row['downtime']) else 0.0,
                 'AMC_Date__c': amc_date_str
             }
-
+            reminder_records.append(record)
+
+    # Then, process remaining records from df (excluding those already in reminders_df)
+    other_records = []
+    reminder_device_ids = set(reminders_df['device_id']) if not reminders_df.empty else set()
+    for _, row in df.iterrows():
+        if row['device_id'] in reminder_device_ids:
+            continue  # Skip records already processed as reminders
+
+        # Validate and map picklist values
+        status = str(row['status'])
+        log_type = str(row['log_type'])
+
+        # Map Status__c
+        if status not in status_values:
+            status = picklist_mapping['Status__c'].get(status.lower(), status_values[0] if status_values else None)
+            if status is None:
+                logging.warning(f"Skipping record with invalid Status__c: {row['status']}")
+                continue
 
-#
-if
-
-
+        # Map Log_Type__c
+        if log_type not in log_type_values:
+            log_type = picklist_mapping['Log_Type__c'].get(log_type.lower(), log_type_values[0] if log_type_values else None)
+            if log_type is None:
+                logging.warning(f"Skipping record with invalid Log_Type__c: {row['log_type']}")
+                continue
+
+        # Handle AMC_Date__c with proper formatting
+        amc_date_str = None
+        if pd.notna(row['amc_date']):
+            try:
+                amc_date = pd.to_datetime(row['amc_date'], errors='coerce')
+                if pd.isna(amc_date):
+                    logging.error(f"Failed to parse AMC Date for Device ID {row['device_id']}: {row['amc_date']}")
+                else:
+                    amc_date_str = amc_date.strftime('%Y-%m-%d')
+                    amc_date_dt = datetime.strptime(amc_date_str, '%Y-%m-%d')
+                    if status == "Active" and current_date.date() <= amc_date_dt.date() <= next_30_days.date():
+                        logging.info(f"Record qualifies for AMC Reminders: Device ID {row['device_id']}, AMC Date {amc_date_str}")
+            except Exception as e:
+                logging.error(f"Failed to parse AMC Date for Device ID {row['device_id']}: {str(e)}")
+                amc_date_str = None
+
+        record = {
+            'Device_Id__c': str(row['device_id'])[:50],
+            'Log_Type__c': log_type,
+            'Status__c': status,
+            'Timestamp__c': row['timestamp'].isoformat() if pd.notna(row['timestamp']) else None,
+            'Usage_Hours__c': float(row['usage_hours']) if pd.notna(row['usage_hours']) else 0.0,
+            'Downtime__c': float(row['downtime']) if pd.notna(row['downtime']) else 0.0,
+            'AMC_Date__c': amc_date_str
+        }
+        other_records.append(record)
+
+    # Combine reminder records and other records
+    all_records = reminder_records + other_records
+
+    # Bulk insert to reduce API calls
+    try:
+        if all_records:
+            sf.bulk.SmartLog__c.insert(all_records)
+            logging.info(f"Saved {len(all_records)} total records to Salesforce (including {len(reminder_records)} AMC reminders)")
    except Exception as e:
        logging.error(f"Failed to save to Salesforce: {str(e)}")
 
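The bulk insert above logs only a total count. simple_salesforce's bulk `insert` returns one result dict per record, so partial failures could be surfaced too; a small sketch under that assumption (the wrapper function and warning format are illustrative, not code from app.py):

```python
import logging

def bulk_insert_with_logging(sf, records):
    # Each result dict carries 'success', 'created', 'id', and 'errors'.
    results = sf.bulk.SmartLog__c.insert(records)
    failed = [r for r in results if not r.get("success")]
    if failed:
        logging.warning(
            f"{len(failed)} of {len(results)} inserts failed; "
            f"first error: {failed[0].get('errors')}"
        )
    return results
```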
@@ -252,7 +313,7 @@ def summarize_logs(df):
 def detect_anomalies(df):
     try:
         if "usage_hours" not in df.columns or "downtime" not in df.columns:
-            return "Anomaly detection requires 'usage_hours' and 'downtime' columns."
+            return "Anomaly detection requires 'usage_hours' and 'downtime' columns.", pd.DataFrame()
         if len(df) > 1000:
             df = df.sample(n=1000, random_state=42)
         features = df[["usage_hours", "downtime"]].fillna(0)
@@ -260,17 +321,17 @@ def detect_anomalies(df):
         df["anomaly"] = iso_forest.fit_predict(features)
         anomalies = df[df["anomaly"] == -1][["device_id", "usage_hours", "downtime", "timestamp"]]
         if anomalies.empty:
-            return "No anomalies detected."
+            return "No anomalies detected.", anomalies
         anomaly_lines = ["Detected Anomalies:"]
         for _, row in anomalies.head(5).iterrows():
             anomaly_lines.append(
                 f"- Device ID: {row['device_id']}, Usage Hours: {row['usage_hours']}, "
                 f"Downtime: {row['downtime']}, Timestamp: {row['timestamp']}"
             )
-        return "\n".join(anomaly_lines)
+        return "\n".join(anomaly_lines), anomalies
     except Exception as e:
         logging.error(f"Anomaly detection failed: {str(e)}")
-        return f"Anomaly detection failed: {str(e)}"
+        return f"Anomaly detection failed: {str(e)}", pd.DataFrame()
 
 # AMC reminders (for display purposes only, email handled by Salesforce trigger)
 def check_amc_reminders(df, current_date):
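With this hunk, `detect_anomalies` now returns a `(message, DataFrame)` pair so the anomaly rows can feed the new alerts chart. A self-contained illustration of that contract using the same IsolationForest approach (the contamination value here is an assumption; the real `iso_forest` configuration sits outside this hunk):

```python
import pandas as pd
from sklearn.ensemble import IsolationForest

df = pd.DataFrame({
    "device_id": ["D1", "D2", "D3", "D4"],
    "usage_hours": [8.0, 7.5, 8.2, 40.0],  # D4 is an obvious outlier
    "downtime": [0.5, 0.4, 0.6, 12.0],
    "timestamp": pd.to_datetime(["2024-01-01"] * 4),
})
iso_forest = IsolationForest(contamination=0.25, random_state=42)
df["anomaly"] = iso_forest.fit_predict(df[["usage_hours", "downtime"]])
anomalies = df[df["anomaly"] == -1][["device_id", "usage_hours", "downtime", "timestamp"]]
message = "No anomalies detected." if anomalies.empty else f"{len(anomalies)} anomalies"
print(message, anomalies, sep="\n")  # both pieces of the new return tuple
```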
@@ -280,7 +341,7 @@ def check_amc_reminders(df, current_date):
         df["amc_date"] = pd.to_datetime(df["amc_date"], errors='coerce')
         current_date = pd.to_datetime(current_date)
         df["days_to_amc"] = (df["amc_date"] - current_date).dt.days
-        reminders = df[(df["days_to_amc"] >= 0) & (df["days_to_amc"] <= 30)][["device_id", "amc_date"]]
+        reminders = df[(df["days_to_amc"] >= 0) & (df["days_to_amc"] <= 30)][["device_id", "log_type", "status", "timestamp", "usage_hours", "downtime", "amc_date"]]
         if reminders.empty:
             logging.info("No AMC reminders found within the next 30 days.")
             return "No AMC reminders due within the next 30 days.", reminders
@@ -305,7 +366,7 @@ def generate_dashboard_insights(df):
         logging.error(f"Dashboard insights generation failed: {str(e)}")
         return f"Dashboard insights generation failed: {str(e)}"
 
-# Create usage chart
+# Create usage chart (existing chart for usage hours per device)
 def create_usage_chart(df):
     try:
         usage_data = df.groupby("device_id")["usage_hours"].sum().reset_index()
@@ -333,8 +394,133 @@ def create_usage_chart(df):
         logging.error(f"Failed to create usage chart: {str(e)}")
         return None
 
-#
-def
+# Create daily log trends chart
+def create_daily_log_trends_chart(df):
+    try:
+        df['date'] = df['timestamp'].dt.date
+        daily_logs = df.groupby('date').size().reset_index(name='log_count')
+        fig = px.line(
+            daily_logs,
+            x='date',
+            y='log_count',
+            title="Daily Log Trends",
+            labels={"date": "Date", "log_count": "Number of Logs"},
+            color_discrete_sequence=['#FF6B6B']
+        )
+        fig.update_layout(
+            title_font_size=16,
+            margin=dict(l=20, r=20, t=40, b=20),
+            plot_bgcolor="white",
+            paper_bgcolor="white",
+            font=dict(size=12)
+        )
+        return fig
+    except Exception as e:
+        logging.error(f"Failed to create daily log trends chart: {str(e)}")
+        return None
+
+# Create weekly uptime percentage chart
+def create_weekly_uptime_chart(df):
+    try:
+        df['week'] = df['timestamp'].dt.isocalendar().week
+        df['year'] = df['timestamp'].dt.year
+        weekly_data = df.groupby(['year', 'week']).agg({
+            'usage_hours': 'sum',
+            'downtime': 'sum'
+        }).reset_index()
+        weekly_data['uptime_percent'] = (weekly_data['usage_hours'] / (weekly_data['usage_hours'] + weekly_data['downtime'])) * 100
+        weekly_data['year_week'] = weekly_data['year'].astype(str) + '-W' + weekly_data['week'].astype(str)
+        fig = px.bar(
+            weekly_data,
+            x='year_week',
+            y='uptime_percent',
+            title="Weekly Uptime Percentage",
+            labels={"year_week": "Year-Week", "uptime_percent": "Uptime %"},
+            color_discrete_sequence=['#4ECDC4']
+        )
+        fig.update_layout(
+            title_font_size=16,
+            margin=dict(l=20, r=20, t=40, b=20),
+            plot_bgcolor="white",
+            paper_bgcolor="white",
+            font=dict(size=12)
+        )
+        return fig
+    except Exception as e:
+        logging.error(f"Failed to create weekly uptime chart: {str(e)}")
+        return None
+
+# Create anomaly alerts chart
+def create_anomaly_alerts_chart(anomalies_df):
+    try:
+        if anomalies_df.empty:
+            return None
+        anomalies_df['date'] = anomalies_df['timestamp'].dt.date
+        anomaly_counts = anomalies_df.groupby('date').size().reset_index(name='anomaly_count')
+        fig = px.scatter(
+            anomaly_counts,
+            x='date',
+            y='anomaly_count',
+            title="Anomaly Alerts Over Time",
+            labels={"date": "Date", "anomaly_count": "Number of Anomalies"},
+            color_discrete_sequence=['#45B7D1']
+        )
+        fig.update_layout(
+            title_font_size=16,
+            margin=dict(l=20, r=20, t=40, b=20),
+            plot_bgcolor="white",
+            paper_bgcolor="white",
+            font=dict(size=12)
+        )
+        return fig
+    except Exception as e:
+        logging.error(f"Failed to create anomaly alerts chart: {str(e)}")
+        return None
+
+# Generate Device Cards HTML
+def generate_device_cards(df):
+    try:
+        # Calculate device stats
+        device_stats = df.groupby('device_id').agg({
+            'status': 'last',  # Most recent status
+            'timestamp': 'max',  # Last log timestamp
+            'device_id': 'count'  # Usage count (number of logs)
+        }).reset_index()
+        device_stats.rename(columns={'device_id': 'usage_count'}, inplace=True)
+
+        # Map status to health
+        device_stats['health'] = device_stats['status'].map({
+            'Active': 'Healthy',
+            'Inactive': 'Unhealthy',
+            'Pending': 'Warning'
+        }).fillna('Unknown')
+
+        # Generate HTML for device cards
+        cards_html = '<div style="display: flex; flex-wrap: wrap; gap: 20px;">'
+        for _, row in device_stats.iterrows():
+            health_color = {
+                'Healthy': 'green',
+                'Unhealthy': 'red',
+                'Warning': 'orange',
+                'Unknown': 'gray'
+            }.get(row['health'], 'gray')
+            card = f"""
+            <div style="border: 1px solid #e0e0e0; padding: 10px; border-radius: 5px; width: 200px;">
+                <h4>Device: {row['device_id']}</h4>
+                <p><b>Health:</b> <span style="color: {health_color}">{row['health']}</span></p>
+                <p><b>Usage Count:</b> {row['usage_count']}</p>
+                <p><b>Last Log:</b> {row['timestamp']}</p>
+            </div>
+            """
+            cards_html += card
+        cards_html += '</div>'
+        return cards_html
+    except Exception as e:
+        logging.error(f"Failed to generate device cards: {str(e)}")
+        return '<p>Error generating device cards.</p>'
+
+# Generate PDF content (updated to include new dashboard data)
+def generate_pdf_content(summary, preview, anomalies, amc_reminders, insights, device_cards_html, daily_log_chart, weekly_uptime_chart, anomaly_alerts_chart):
     if not reportlab_available:
         return None
     try:
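Two caveats about the new helpers above. In `create_weekly_uptime_chart`, a week where both sums are zero produces `0 / 0`, i.e. NaN bars; and in `generate_device_cards`, aggregating `'device_id'` while also grouping by it can collide on `reset_index()` in recent pandas. A minimal sketch of a divide-by-zero guard for the uptime calculation (suggested hardening, not part of the commit):

```python
# Weeks with usage_hours + downtime == 0 would otherwise yield NaN.
import numpy as np
import pandas as pd

weekly_data = pd.DataFrame({"usage_hours": [40.0, 0.0], "downtime": [2.0, 0.0]})
total = weekly_data["usage_hours"] + weekly_data["downtime"]
weekly_data["uptime_percent"] = (
    weekly_data["usage_hours"] / total.replace(0, np.nan) * 100
).fillna(0.0)
print(weekly_data)  # second week reports 0.0 instead of NaN
```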
@@ -358,6 +544,12 @@ def generate_pdf_content(summary, preview, anomalies, amc_reminders, insights):
         story.append(safe_paragraph(preview or "No preview available.", styles['Normal']))
         story.append(Spacer(1, 12))
 
+        story.append(Paragraph("Device Cards", styles['Heading2']))
+        # Simplify HTML for PDF (reportlab doesn't render HTML well, so we extract text)
+        device_cards_text = device_cards_html.replace('<div>', '').replace('</div>', '\n').replace('<h4>', '').replace('</h4>', '\n').replace('<p>', '').replace('</p>', '\n').replace('<b>', '').replace('</b>', '').replace('<span style="color: green">', '').replace('<span style="color: red">', '').replace('<span style="color: orange">', '').replace('<span style="color: gray">', '').replace('</span>', '')
+        story.append(safe_paragraph(device_cards_text or "No device cards available.", styles['Normal']))
+        story.append(Spacer(1, 12))
+
         story.append(Paragraph("Anomaly Detection", styles['Heading2']))
         story.append(safe_paragraph(anomalies or "No anomalies detected.", styles['Normal']))
         story.append(Spacer(1, 12))
@@ -368,6 +560,19 @@ def generate_pdf_content(summary, preview, anomalies, amc_reminders, insights):
 
         story.append(Paragraph("Dashboard Insights", styles['Heading2']))
         story.append(safe_paragraph(insights or "No insights generated.", styles['Normal']))
+        story.append(Spacer(1, 12))
+
+        # Charts can't be directly embedded in PDF, so we add placeholders
+        story.append(Paragraph("Daily Log Trends Chart", styles['Heading2']))
+        story.append(Paragraph("[Chart placeholder - see dashboard for Daily Log Trends]", styles['Normal']))
+        story.append(Spacer(1, 12))
+
+        story.append(Paragraph("Weekly Uptime Percentage Chart", styles['Heading2']))
+        story.append(Paragraph("[Chart placeholder - see dashboard for Weekly Uptime Percentage]", styles['Normal']))
+        story.append(Spacer(1, 12))
+
+        story.append(Paragraph("Anomaly Alerts Chart", styles['Heading2']))
+        story.append(Paragraph("[Chart placeholder - see dashboard for Anomaly Alerts]", styles['Normal']))
 
         doc.build(story)
         logging.info(f"PDF generated at {pdf_path}")
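The placeholders above could be replaced with rendered images if the optional kaleido package is available: Plotly's `fig.to_image()` returns PNG bytes that reportlab's `Image` flowable accepts. A sketch under that dependency assumption (the helper name and sizes are illustrative, not code from this commit):

```python
import io
from reportlab.platypus import Image

def figure_flowable(fig, width=400, height=250):
    # Requires plotly plus the kaleido rendering backend.
    png_bytes = fig.to_image(format="png")
    return Image(io.BytesIO(png_bytes), width=width, height=height)

# e.g. story.append(figure_flowable(daily_log_chart))
```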
@@ -377,17 +582,17 @@ def generate_pdf_content(summary, preview, anomalies, amc_reminders, insights):
     return None
 
 # Main Gradio function with optimized performance
-async def process_logs(file_obj):
+async def process_logs(file_obj, lab_site_filter, equipment_type_filter, date_range):
     try:
         start_time = datetime.now()
         if not file_obj:
-            return "No file uploaded.", "No data to preview.", None, "No anomalies detected.", "No AMC reminders.", "No insights generated.", None
+            return "No file uploaded.", "No data to preview.", None, '<p>No device cards available.</p>', None, None, None, "No anomalies detected.", "No AMC reminders.", "No insights generated.", None
 
         file_name = file_obj.name
         logging.info(f"Processing file: {file_name}")
 
         if not file_name.endswith(".csv"):
-            return "Please upload a CSV file.", "", None, "", "", "", None
+            return "Please upload a CSV file.", "", None, '<p>No device cards available.</p>', None, None, None, "", "", "", None
 
         required_columns = ["device_id", "log_type", "status", "timestamp", "usage_hours", "downtime", "amc_date"]
         dtypes = {
@@ -401,30 +606,63 @@ async def process_logs(file_obj):
         df = pd.read_csv(file_obj, dtype=dtypes)
         missing_columns = [col for col in required_columns if col not in df.columns]
         if missing_columns:
-            return f"Missing columns: {missing_columns}", None, None, None, None, None, None
+            return f"Missing columns: {missing_columns}", None, None, '<p>No device cards available.</p>', None, None, None, None, None, None, None
+
+        # Convert timestamp and amc_date to datetime
         df["timestamp"] = pd.to_datetime(df["timestamp"], errors='coerce')
         df["amc_date"] = pd.to_datetime(df["amc_date"], errors='coerce')
         if df.empty:
-            return "No data available.", None, None, None, None, None, None
+            return "No data available.", None, None, '<p>No device cards available.</p>', None, None, None, None, None, None, None
+
+        # Apply filters
+        filtered_df = df.copy()
+
+        # Lab site filter
+        if lab_site_filter and 'lab_site' in filtered_df.columns:
+            filtered_df = filtered_df[filtered_df['lab_site'] == lab_site_filter]
+
+        # Equipment type filter
+        if equipment_type_filter and 'equipment_type' in filtered_df.columns:
+            filtered_df = filtered_df[filtered_df['equipment_type'] == equipment_type_filter]
+
+        # Date range filter
+        if date_range and len(date_range) == 2:
+            start_date, end_date = date_range
+            start_date = pd.to_datetime(start_date)
+            end_date = pd.to_datetime(end_date)
+            filtered_df = filtered_df[(filtered_df['timestamp'] >= start_date) & (filtered_df['timestamp'] <= end_date)]
+
+        if filtered_df.empty:
+            return "No data after applying filters.", None, None, '<p>No device cards available.</p>', None, None, None, None, None, None, None
 
         # Run tasks concurrently
         with ThreadPoolExecutor(max_workers=4) as executor:
-            future_summary = executor.submit(summarize_logs,
-            future_anomalies = executor.submit(detect_anomalies,
-            future_amc = executor.submit(check_amc_reminders,
-            future_insights = executor.submit(generate_dashboard_insights,
-
-
+            future_summary = executor.submit(summarize_logs, filtered_df)
+            future_anomalies = executor.submit(detect_anomalies, filtered_df)
+            future_amc = executor.submit(check_amc_reminders, filtered_df, datetime.now())
+            future_insights = executor.submit(generate_dashboard_insights, filtered_df)
+            future_usage_chart = executor.submit(create_usage_chart, filtered_df)
+            future_daily_log_chart = executor.submit(create_daily_log_trends_chart, filtered_df)
+            future_weekly_uptime_chart = executor.submit(create_weekly_uptime_chart, filtered_df)
+            future_device_cards = executor.submit(generate_device_cards, filtered_df)
+            future_reports = executor.submit(create_salesforce_reports, filtered_df)
 
             summary = f"Step 1: Summary Report\n{future_summary.result()}"
-            anomalies =
+            anomalies, anomalies_df = future_anomalies.result()
+            anomalies = f"Anomaly Detection\n{anomalies}"
             amc_reminders, reminders_df = future_amc.result()
             amc_reminders = f"AMC Reminders\n{amc_reminders}"
             insights = f"Dashboard Insights (AI)\n{future_insights.result()}"
-
+            usage_chart = future_usage_chart.result()
+            daily_log_chart = future_daily_log_chart.result()
+            weekly_uptime_chart = future_weekly_uptime_chart.result()
+            device_cards = future_device_cards.result()
+
+            # Create anomaly alerts chart after getting anomalies_df
+            anomaly_alerts_chart = create_anomaly_alerts_chart(anomalies_df)
 
         preview_lines = ["Step 2: Log Preview (First 5 Rows)"]
-        for idx, row in
+        for idx, row in filtered_df.head(5).iterrows():
             preview_lines.append(
                 f"Row {idx + 1}: Device ID: {row['device_id']}, "
                 f"Log Type: {row['log_type']}, Status: {row['status']}, "
@@ -434,15 +672,17 @@ async def process_logs(file_obj):
         preview = "\n".join(preview_lines)
 
         # Save to Salesforce in the backend
-        save_to_salesforce(
-
+        save_to_salesforce(filtered_df, reminders_df, summary, anomalies, amc_reminders, insights)
+
+        # Generate PDF with updated content
+        pdf_file = generate_pdf_content(summary, preview, anomalies, amc_reminders, insights, device_cards, daily_log_chart, weekly_uptime_chart, anomaly_alerts_chart)
 
         elapsed_time = (datetime.now() - start_time).total_seconds()
-        logging.info(f"Processing completed in {elapsed_time:.2f}
-        return summary, preview,
+        logging.info(f"Processing completed in {elapsed_time:.2f} seconds")
+        return summary, preview, usage_chart, device_cards, daily_log_chart, weekly_uptime_chart, anomaly_alerts_chart, anomalies, amc_reminders, insights, pdf_file
     except Exception as e:
         logging.error(f"Failed to process file: {str(e)}")
-        return f"Error: {str(e)}", None, None, None, None, None, None
+        return f"Error: {str(e)}", None, None, '<p>Error processing data.</p>', None, None, None, None, None, None, None
 
 # Gradio Interface
 try:
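Two details in the new filter block deserve care: the dropdowns default to the string 'All', which is truthy and would be matched literally against the `lab_site` column, and `pd.to_datetime(-30)` parses an epoch offset rather than "30 days ago". A sketch of how the filters could treat the sentinel and day offsets instead (an assumed adjustment, not code from this commit):

```python
from datetime import datetime, timedelta

def apply_filters(df, lab_site_filter, date_range):
    # Treat the 'All' sentinel as "no filter" instead of matching it literally.
    if lab_site_filter and lab_site_filter != 'All' and 'lab_site' in df.columns:
        df = df[df['lab_site'] == lab_site_filter]
    # Interpret slider values as day offsets from today, not epoch timestamps.
    if date_range and len(date_range) == 2:
        start = datetime.now() + timedelta(days=date_range[0])  # e.g. -30
        end = datetime.now() + timedelta(days=date_range[1])    # e.g. 0
        df = df[(df['timestamp'] >= start) & (df['timestamp'] <= end)]
    return df
```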
@@ -461,6 +701,34 @@ try:
         with gr.Row():
             with gr.Column(scale=1):
                 file_input = gr.File(label="Upload Logs (CSV)", file_types=[".csv"])
+
+                # Filters
+                with gr.Group():
+                    gr.Markdown("### Filters")
+                    # Lab site filter (assuming lab_site column exists)
+                    lab_site_options = ['All']  # Default option
+                    # Equipment type filter (assuming equipment_type column exists)
+                    equipment_type_options = ['All']
+                    # We'll populate these dynamically if possible, but for now, assume static options
+                    lab_site_filter = gr.Dropdown(
+                        label="Lab Site",
+                        choices=lab_site_options,
+                        value='All'
+                    )
+                    equipment_type_filter = gr.Dropdown(
+                        label="Equipment Type",
+                        choices=equipment_type_options,
+                        value='All'
+                    )
+                    date_range_filter = gr.Slider(
+                        label="Date Range (Days from Today)",
+                        minimum=-365,
+                        maximum=0,
+                        step=1,
+                        value=[-30, 0],  # Default: last 30 days
+                        info="Select the range of days relative to today (e.g., -30 to 0 for the last 30 days)."
+                    )
+
                 submit_button = gr.Button("Analyze", variant="primary")
 
             with gr.Column(scale=2):
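One caveat on the slider above: Gradio's `gr.Slider` holds a single number, so `value=[-30, 0]` will not render as a two-handle range control. A pair of sliders is one way to express the same window (an assumed alternative, not commit code; the click handler's inputs list would then pass both values):

```python
import gradio as gr

start_days_filter = gr.Slider(label="Start (days ago)", minimum=-365,
                              maximum=0, step=1, value=-30)
end_days_filter = gr.Slider(label="End (days ago)", minimum=-365,
                            maximum=0, step=1, value=0)
```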
@@ -476,8 +744,19 @@ try:
                 preview_output = gr.Markdown()
 
                 with gr.Group(elem_classes="dashboard-section"):
-                    gr.Markdown("###
-
+                    gr.Markdown("### Device Cards")
+                    device_cards_output = gr.HTML()
+
+                with gr.Group(elem_classes="dashboard-section"):
+                    gr.Markdown("### Charts")
+                    with gr.Tab("Usage Hours per Device"):
+                        usage_chart_output = gr.Plot()
+                    with gr.Tab("Daily Log Trends"):
+                        daily_log_trends_output = gr.Plot()
+                    with gr.Tab("Weekly Uptime Percentage"):
+                        weekly_uptime_output = gr.Plot()
+                    with gr.Tab("Anomaly Alerts"):
+                        anomaly_alerts_output = gr.Plot()
 
                 with gr.Group(elem_classes="dashboard-section"):
                     gr.Markdown("### Step 4: Anomaly Detection")
@@ -492,16 +771,44 @@ try:
                 insights_output = gr.Markdown()
 
                 with gr.Group(elem_classes="dashboard-section"):
-                    gr.Markdown("###
+                    gr.Markdown("### Export Report")
                     pdf_output = gr.File(label="Download Analysis Report as PDF")
 
+        # Dynamically update filter options (if columns exist in the uploaded CSV)
+        def update_filters(file_obj):
+            if not file_obj:
+                return ['All'], ['All']
+            try:
+                df = pd.read_csv(file_obj)
+                lab_site_options = ['All'] + (df['lab_site'].dropna().unique().tolist() if 'lab_site' in df.columns else [])
+                equipment_type_options = ['All'] + (df['equipment_type'].dropna().unique().tolist() if 'equipment_type' in df.columns else [])
+                return lab_site_options, equipment_type_options
+            except Exception as e:
+                logging.error(f"Failed to update filters: {str(e)}")
+                return ['All'], ['All']
+
+        file_input.change(
+            fn=update_filters,
+            inputs=[file_input],
+            outputs=[lab_site_filter, equipment_type_filter]
+        )
+
         submit_button.click(
             fn=process_logs,
-            inputs=[
+            inputs=[
+                file_input,
+                lab_site_filter,
+                equipment_type_filter,
+                date_range_filter
+            ],
             outputs=[
                 summary_output,
                 preview_output,
-
+                usage_chart_output,
+                device_cards_output,
+                daily_log_trends_output,
+                weekly_uptime_output,
+                anomaly_alerts_output,
                 anomaly_output,
                 amc_output,
                 insights_output,
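Note on `update_filters`: returning bare lists to Dropdown outputs sets the components' values rather than their option lists. The usual pattern for swapping choices on change is `gr.update(choices=...)` (or returning a new `gr.Dropdown(...)` in Gradio 4.x). A sketch under that assumption, not code from this commit:

```python
import gradio as gr
import pandas as pd

def update_filters(file_obj):
    if not file_obj:
        return (gr.update(choices=['All'], value='All'),
                gr.update(choices=['All'], value='All'))
    df = pd.read_csv(file_obj)
    lab_sites = ['All'] + (sorted(df['lab_site'].dropna().unique())
                           if 'lab_site' in df.columns else [])
    equipment = ['All'] + (sorted(df['equipment_type'].dropna().unique())
                           if 'equipment_type' in df.columns else [])
    return (gr.update(choices=lab_sites, value='All'),
            gr.update(choices=equipment, value='All'))
```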