# Hugging Face Space — Batch Evaluation app (Space status at scrape time: Sleeping)
# Batch Evaluation Code

# Import libraries
import pandas as pd
import json
from joblib import load
import gradio as gr
from sklearn.metrics import accuracy_score, classification_report
# --- Model and mapping artifacts, loaded once at startup ---

# Trained classifier; the .pkl file must be uploaded to the HF Space.
model = load("rf_activity_model.pkl")

# label_map.json: activity code (letter) -> integer class id used by the model.
with open("label_map.json", "r") as f:
    label_to_int = json.load(f)

# Invert the mapping so model outputs (ints) can be turned back into codes.
int_to_label = {}
for code, class_id in label_to_int.items():
    int_to_label[class_id] = code

# activity_names.json: activity code -> human-readable activity name.
with open("activity_names.json", "r") as f:
    activity_names = json.load(f)

# Risk tier per activity code, consumed by the alerting logic.
# NOTE(review): code "N" is absent although the comments describe codes A–S —
# confirm whether the label set genuinely skips "N" or a tier is missing here.
risk_map = {
    "A": "Medium", "B": "High", "C": "High", "D": "Low", "E": "Medium",
    "F": "Low", "G": "Low", "H": "Medium", "I": "Medium", "J": "Medium",
    "K": "Medium", "L": "Medium", "M": "High", "O": "High", "P": "High",
    "Q": "Low", "R": "Low", "S": "Low",
}

# Number of consecutive samples at one risk tier before an alert fires.
HIGH_THRESHOLD = 30
MEDIUM_THRESHOLD = 180
LOW_THRESHOLD = 90
def _collect_alerts(risk_sequence):
    """Scan risk tiers in timeline order and collect prolonged-activity alerts.

    An alert fires when one tier persists for its configured number of
    consecutive samples; a sample of a different known tier resets the
    other tiers' streaks, and a fired tier's own streak restarts at zero.
    Unknown tiers are ignored (they neither count nor reset anything).
    """
    thresholds = {
        "High": HIGH_THRESHOLD,
        "Medium": MEDIUM_THRESHOLD,
        "Low": LOW_THRESHOLD,
    }
    counts = {tier: 0 for tier in thresholds}
    alerts = []
    for i, risk in enumerate(risk_sequence):
        if risk not in thresholds:
            continue
        # A sample of one tier breaks the other tiers' consecutive runs.
        for other in counts:
            if other != risk:
                counts[other] = 0
        counts[risk] += 1
        if counts[risk] >= thresholds[risk]:
            alerts.append(f"⚠️ ALERT: Prolonged {risk.upper()}-RISK activity at index {i}")
            counts[risk] = 0
    return alerts


def analyze_csv(file):
    """Run batch inference on an uploaded CSV of sensor features.

    Parameters
    ----------
    file : str
        Filepath of the uploaded CSV (Gradio passes a path).

    Returns
    -------
    tuple[pandas.DataFrame, str, str]
        Prediction table, metrics summary text, and alert text.
    """
    df = pd.read_csv(file)

    # Features are everything except the OPTIONAL ground-truth column.
    # errors="ignore" fixes a crash: the original dropped "ACTIVITY"
    # unconditionally, contradicting the no-ground-truth branch below.
    X = df.drop(columns=["ACTIVITY"], errors="ignore")

    preds_int = model.predict(X)
    # Integers -> codes (A–S) -> readable names and risk tiers.
    preds_code = [int_to_label[i] for i in preds_int]
    # .get fallbacks keep codes missing from the maps (e.g. "N", absent
    # from risk_map) from raising KeyError mid-batch.
    preds_activity = [activity_names.get(c, c) for c in preds_code]
    preds_risk = [risk_map.get(c, "Unknown") for c in preds_code]

    # Result table for the UI: predicted activity, risk, and (if available)
    # the actual activity for side-by-side comparison.
    results_df = pd.DataFrame({
        "Predicted_Activity": preds_activity,
        "Risk Level": preds_risk,
    })

    # Default message when the CSV carries no ground-truth labels.
    metrics_text = "⚠️ Ground truth labels not found in CSV"
    if "ACTIVITY" in df.columns:
        # Ground-truth ints -> codes -> readable names.
        actual_codes = df["ACTIVITY"].map(int_to_label)
        results_df["Actual_Activity"] = actual_codes.map(activity_names)
        # Overall accuracy plus a per-class report.
        acc = accuracy_score(actual_codes, preds_code)
        report = classification_report(actual_codes, preds_code, zero_division=0)
        metrics_text = f"✅ Accuracy: {acc:.3f}\n\nClassification Report:\n{report}"

    alerts = _collect_alerts(preds_risk)
    return results_df, metrics_text, "\n".join(alerts) if alerts else "✅ No alerts triggered."
# ---------------- Gradio user interface ----------------
with gr.Blocks() as demo:
    gr.Markdown("## Healthcare Activity Recognition")
    gr.Markdown("Upload a CSV of sensor features. The model predicts activities, assigns risk levels, "
                "and compares them to actuals if provided. Metrics (accuracy + classification report) are also displayed.")

    # Row holding the CSV upload widget.
    with gr.Row():
        file_input = gr.File(type="filepath", file_types=[".csv"], label="Upload CSV")

    with gr.Row():
        with gr.Column(scale=2):
            # Fixed three-column table so the layout stays identical whether
            # or not the CSV supplied ground-truth labels.
            results_df = gr.Dataframe(
                headers=["Predicted_Activity", "Risk Level", "Actual_Activity"],
                label="Predictions vs Actuals",
                wrap=True,
                datatype=["str", "str", "str"],
                col_count=(3, "fixed"),
                interactive=False,
            )
        with gr.Column(scale=1):
            # Accuracy + classification report.
            metrics_box = gr.Textbox(label="Metrics", lines=8)
            # Alert list, or the "no alerts" message.
            alerts_box = gr.Textbox(label="Alerts", lines=6)

    # Re-run the analysis whenever a new file is supplied, feeding all
    # three outputs (table, metrics, alerts) from one call.
    file_input.change(
        fn=analyze_csv,
        inputs=file_input,
        outputs=[results_df, metrics_box, alerts_box],
    )

# Launch the app only when executed as the main script.
if __name__ == "__main__":
    demo.launch()