# ============================================================
# Pre-deployment mental-health risk analysis
#   1. Load & inspect data     2. Exploratory plots
#   3. Preprocessing            4. Baseline models
#   5. Hyperparameter tuning (cross-validated grid search)
# ============================================================
import warnings

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
# NOTE: all sklearn imports consolidated here — the original imported
# LogisticRegression / RandomForestClassifier / GradientBoostingClassifier
# a second time just before the tuning section.
from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import (
    ConfusionMatrixDisplay,
    classification_report,
    confusion_matrix,
)
from sklearn.model_selection import GridSearchCV, train_test_split
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.svm import SVC

# NOTE(review): blanket suppression also hides deprecation warnings;
# kept to preserve the original script's quiet output.
warnings.filterwarnings("ignore")

# ---------- 1. Load & inspect ----------
df = pd.read_csv("pre_deployment_mental_health_dataset_balanced.csv")

# FIX: bare `df.head()` / `df.describe()` expressions only render inside a
# notebook; print them so the script shows the same inspection output.
print(df.head())
print(f"Shape of dataset: {df.shape}")
print("\nData types:")
print(df.dtypes)
print("\nMissing values:")
print(df.isnull().sum())
print(df.describe(include='all'))

# ---------- 2. Exploratory plots ----------
num_cols = df.select_dtypes(include=['int64', 'float64']).columns
# Soldier_ID is an identifier, not a category worth plotting.
cat_cols = df.select_dtypes(include='object').columns.drop('Soldier_ID')

for col in num_cols:
    plt.figure(figsize=(6, 4))
    sns.histplot(df[col], kde=True)
    plt.title(f'Distribution of {col}')
    plt.show()

for col in cat_cols:
    plt.figure(figsize=(6, 4))
    sns.countplot(data=df, x=col)
    plt.title(f'Counts of {col}')
    plt.xticks(rotation=45)
    plt.show()

# ---------- 3. Preprocessing ----------
df_cleaned = df.drop(columns=['Soldier_ID'])

# Outlier treatment: cap each clinical score at its 99th percentile
# (one-sided winsorizing) so extreme values don't dominate the fit.
for col in ['Anxiety_Score', 'Depression_Score', 'Stress_Level']:
    upper = df_cleaned[col].quantile(0.99)
    df_cleaned[col] = np.where(df_cleaned[col] > upper, upper, df_cleaned[col])

# Label-encode every categorical column; keep the fitted encoders so the
# integer codes can be mapped back to the original labels later.
encoded_df = df_cleaned.copy()
label_encoders = {}
for col in encoded_df.select_dtypes(include='object').columns:
    le = LabelEncoder()
    encoded_df[col] = le.fit_transform(encoded_df[col])
    label_encoders[col] = le

plt.figure(figsize=(12, 8))
sns.heatmap(encoded_df.corr(), annot=True, cmap='coolwarm', fmt='.2f')
plt.title('Correlation Matrix')
plt.show()

# ---------- 4. Baseline models ----------
X = encoded_df.drop(columns=['Risk_Level'])
y = encoded_df['Risk_Level']
X_train, X_test, y_train, y_test = train_test_split(
    X, y, stratify=y, test_size=0.2, random_state=42
)

models = {
    'Random Forest': RandomForestClassifier(random_state=42),
    'Logistic Regression': LogisticRegression(max_iter=1000),
    'Gradient Boosting': GradientBoostingClassifier(random_state=42),
}
for name, model in models.items():
    model.fit(X_train, y_train)
    y_pred = model.predict(X_test)
    print(f"\n{name} Classification Report:\n")
    print(classification_report(y_test, y_pred))

# ---------- 5. Hyperparameter tuning ----------
# Each candidate runs inside a Pipeline so scaling is fit only on the
# training folds during cross-validation (no leakage into validation folds).
model_configs = {
    "Logistic Regression": {
        "model": LogisticRegression(max_iter=1000),
        "params": {"model__C": [0.01, 0.1, 1, 10],
                   "model__solver": ["liblinear", "lbfgs"]},
    },
    "Random Forest": {
        "model": RandomForestClassifier(random_state=42),
        "params": {"model__n_estimators": [100, 200],
                   "model__max_depth": [None, 10, 20]},
    },
    "Gradient Boosting": {
        "model": GradientBoostingClassifier(random_state=42),
        "params": {"model__n_estimators": [100, 200],
                   "model__learning_rate": [0.05, 0.1],
                   "model__max_depth": [3, 5]},
    },
    "SVM": {
        "model": SVC(probability=True),
        "params": {"model__C": [0.1, 1, 10],
                   "model__kernel": ["linear", "rbf"]},
    },
    "K-Nearest Neighbors": {
        "model": KNeighborsClassifier(),
        "params": {"model__n_neighbors": [3, 5, 7],
                   "model__weights": ["uniform", "distance"]},
    },
    # No hyperparameters to tune; included so it competes on equal footing.
    "Naive Bayes": {"model": GaussianNB(), "params": {}},
}

best_models = {}
for name, config in model_configs.items():
    pipe = Pipeline([("scaler", StandardScaler()), ("model", config["model"])])
    grid = GridSearchCV(pipe, config["params"], cv=5,
                        scoring="f1_macro", n_jobs=-1)
    grid.fit(X_train, y_train)
    best_models[name] = {
        "best_estimator": grid.best_estimator_,
        "best_score": grid.best_score_,
    }

# Winner = highest mean cross-validated macro-F1.
best_model_name = max(best_models,
                      key=lambda name: best_models[name]['best_score'])
# ---------- 6. Final model evaluation ----------
print(f"\nBest Model: {best_model_name} — F1 Score: {best_models[best_model_name]['best_score']:.4f}")

# BUG FIX: the original computed `best_model_name` from the grid search and
# then discarded it, hard-coding a fresh untuned LogisticRegression here.
# Evaluate the tuned pipeline that actually won the comparison instead.
best_model = best_models[best_model_name]["best_estimator"]
y_pred = best_model.predict(X_test)
ConfusionMatrixDisplay.from_estimator(best_model, X_test, y_test, cmap='Blues')
plt.title(f'Confusion Matrix - {best_model_name}')
plt.show()

# ============================================================
# Gradio demo app
# This section is self-contained (it reloads the data and trains
# its own model) so it can also be run as a standalone script.
# ============================================================
import pandas as pd
import numpy as np
import gradio as gr
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder

# Load the dataset
df = pd.read_csv("pre_deployment_mental_health_dataset_balanced.csv")
df_clean = df.drop(columns=["Soldier_ID"])

# Encode categorical variables; keep the encoders so the app can translate
# between UI strings and the model's integer codes in both directions.
label_encoders = {}
for col in df_clean.select_dtypes(include='object').columns:
    le = LabelEncoder()
    df_clean[col] = le.fit_transform(df_clean[col])
    label_encoders[col] = le

# Define features and target
X = df_clean.drop("Risk_Level", axis=1)
y = df_clean["Risk_Level"]

# Split and train the model served by the app (default 25% test split).
X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y, random_state=42)
model = LogisticRegression(max_iter=1000)
model.fit(X_train, y_train)

# Test profiles for the auto-fill dropdown (first 5 samples, raw values).
sample_data = df.head(5).set_index("Soldier_ID").drop(columns=["Risk_Level"])


def predict_risk(*args):
    """Predict a soldier's risk level from feature values in X.columns order.

    Accepts raw UI values (strings for categoricals, numbers for the rest),
    encodes them with the fitted LabelEncoders, and returns a markdown
    string naming the predicted risk level.
    """
    input_dict = dict(zip(X.columns, args))
    # Encode string values before passing to the model.
    for col in input_dict:
        if col in label_encoders:
            val = input_dict[col]
            if val not in label_encoders[col].classes_:
                # Unknown label: fall back to the first known class
                # (classes_ is sorted, so this is the alphabetically first
                # label — NOT the most frequent one).
                fallback = label_encoders[col].classes_[0]
                input_dict[col] = label_encoders[col].transform([fallback])[0]
            else:
                input_dict[col] = label_encoders[col].transform([val])[0]
    input_df = pd.DataFrame([input_dict])
    pred = model.predict(input_df)[0]
    risk_label = label_encoders["Risk_Level"].inverse_transform([pred])[0]
    return f"🔎 Predicted Mental Health Risk: **{risk_label}**"


def autofill(soldier_id):
    """Return the stored raw feature values for *soldier_id*, in UI-field order."""
    row = sample_data.loc[soldier_id].copy()
    return row.tolist()


# Create Gradio interface
with gr.Blocks(title="Military Mental Health Risk Predictor") as demo:
    gr.Markdown("# 🧠 Mental Health Risk Predictor (Military)")
    gr.Markdown("Predict a soldier's mental health risk using full or quick input modes.")

    with gr.Tabs():
        # ----------------- Tab 1: Full Input Mode -----------------
        with gr.Tab("Full Input Mode"):
            gr.Markdown("### 📋 Full Feature Input")
            dropdown = gr.Dropdown(choices=list(sample_data.index), label="Auto-Fill From Soldier ID")
            autofill_btn = gr.Button("Auto-Fill")

            # Original full input fields (order must match X.columns).
            age = gr.Slider(20, 60, step=1, label="Age")
            gender = gr.Radio(["Male", "Female"], label="Gender")
            rank = gr.Dropdown(["E1", "E2", "E3", "E4", "E5", "O1", "O2", "O3"], label="Rank")
            service_years = gr.Slider(1, 25, step=1, label="Service Years")
            deployment_count = gr.Slider(0, 10, step=1, label="Deployment Count")
            mental_issues = gr.Radio(["Yes", "No"], label="Previous Mental Health Issues")
            sleep = gr.Slider(0.0, 10.0, step=0.1, label="Sleep Hours")
            stress = gr.Slider(0, 10, step=1, label="Stress Level")
            anxiety = gr.Slider(0, 21, step=1, label="Anxiety Score")
            depression = gr.Slider(0, 27, step=1, label="Depression Score")
            fitness = gr.Slider(1.0, 10.0, step=0.1, label="Physical Fitness Score")
            support = gr.Slider(1.0, 10.0, step=0.1, label="Social Support Score")
            substance_use = gr.Radio(["Yes", "No"], label="Substance Use")
            combat_intensity = gr.Radio(["Low", "Moderate", "High"], label="Combat Training Intensity")
            family_status = gr.Radio(["Single", "Married", "Divorced", "Engaged"], label="Family Status")

            output_full = gr.Textbox(label="Prediction", interactive=False)

            all_inputs = [
                age, gender, rank, service_years, deployment_count, mental_issues,
                sleep, stress, anxiety, depression, fitness, support,
                substance_use, combat_intensity, family_status
            ]

            gr.Button("Submit").click(
                fn=predict_risk,
                inputs=all_inputs,
                outputs=output_full
            )
            autofill_btn.click(
                fn=autofill,
                inputs=dropdown,
                outputs=all_inputs
            )

            gr.Markdown("⚠️ Tip: Use the dropdown to quickly fill test data.")
            gr.Markdown("""
### 🟢🟠🔴 Risk Level Legend
- 🟢 **Low Risk**: Soldier shows strong mental and physical indicators. Fit for deployment without concern.
- 🟠 **Moderate Risk**: Some factors suggest reduced resilience or emerging stress. Monitor or support as needed.
- 🔴 **High Risk**: Significant signs of mental strain. Recommend further evaluation or support intervention.
""")

        # ----------------- Tab 2: Quick Input Mode -----------------
        with gr.Tab("Quick Input Mode"):
            gr.Markdown("### ⚡ Quick Mode (Important Features Only)")

            stress_q = gr.Slider(0, 10, step=1, label="Stress Level")
            anxiety_q = gr.Slider(0, 21, step=1, label="Anxiety Score")
            depression_q = gr.Slider(0, 27, step=1, label="Depression Score")
            fitness_q = gr.Slider(1.0, 10.0, step=0.1, label="Physical Fitness Score")
            support_q = gr.Slider(1.0, 10.0, step=0.1, label="Social Support Score")
            sleep_q = gr.Slider(0.0, 10.0, step=0.1, label="Sleep Hours")
            service_q = gr.Slider(1, 25, step=1, label="Service Years")

            quick_inputs = [stress_q, anxiety_q, depression_q, fitness_q, support_q, sleep_q, service_q]
            output_quick = gr.Textbox(label="Prediction", interactive=False)

            def predict_quick(stress, anxiety, depression, fitness, support, sleep, service):
                """Predict risk from the key features, filling the rest with typical values."""
                input_dict = {
                    "Stress_Level": stress,
                    "Anxiety_Score": anxiety,
                    "Depression_Score": depression,
                    "Physical_Fitness_Score": fitness,
                    "Social_Support_Score": support,
                    "Sleep_Hours": sleep,
                    "Service_Years": service
                }
                # Fill the features the quick form doesn't collect:
                # categoricals get the modal label (decoded back to a string,
                # since predict_risk expects raw values), numerics the median.
                for col in X.columns:
                    if col not in input_dict:
                        if col in label_encoders:
                            mode_encoded = X[col].mode()[0]
                            input_dict[col] = label_encoders[col].inverse_transform([mode_encoded])[0]
                        else:
                            input_dict[col] = X[col].median()
                # Ensure the argument order matches X.columns.
                ordered_values = [input_dict[col] for col in X.columns]
                return predict_risk(*ordered_values)

            gr.Button("Submit").click(
                fn=predict_quick,
                inputs=quick_inputs,
                outputs=output_quick
            )

            gr.Markdown("""
### 🟢🟠🔴 Risk Level Legend
- 🟢 **Low Risk**: Soldier shows strong mental and physical indicators. Fit for deployment without concern.
- 🟠 **Moderate Risk**: Some factors suggest reduced resilience or emerging stress. Monitor or support as needed.
- 🔴 **High Risk**: Significant signs of mental strain. Recommend further evaluation or support intervention.
""")

demo.launch()