HBIQ committed on
Commit
b8db789
·
verified ·
1 Parent(s): b200803

Upload app.py

Browse files
Files changed (1) hide show
  1. app.py +206 -0
app.py ADDED
@@ -0,0 +1,206 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# --- Core libraries ---
import warnings

import numpy as np
import pandas as pd

# --- Plotting ---
import matplotlib.pyplot as plt
import seaborn as sns

# --- Modeling ---
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report, confusion_matrix, ConfusionMatrixDisplay

# Silence library warning chatter (sklearn/seaborn emit many FutureWarnings).
warnings.filterwarnings("ignore")
# Load the pre-deployment mental-health survey data.
# NOTE(review): hard-coded absolute path — this only works on the author's
# machine; consider reading the path from an env var or CLI argument.
df = pd.read_csv("/Users/hassan/Desktop/pre_deployment_mental_health_dataset_balanced.csv")
# FIX: bare `df.head()` is a notebook artifact — its result is silently
# discarded when run as a script; print it so the preview is visible.
print(df.head())

# Basic dataset diagnostics: dimensions, dtypes, and missing-value counts.
print(f"Shape of dataset: {df.shape}")
print("\nData types:")
print(df.dtypes)
print("\nMissing values:")
print(df.isnull().sum())
# FIX: same notebook artifact — `df.describe(...)` printed nothing as a script.
print(df.describe(include='all'))
# Column partition for EDA: numeric vs. categorical
# (Soldier_ID is an identifier, not a feature, so it is excluded).
num_cols = df.select_dtypes(include=['int64', 'float64']).columns
cat_cols = df.select_dtypes(include='object').columns.drop('Soldier_ID')

# Univariate view of every numeric column: histogram with a KDE overlay.
for column in num_cols:
    plt.figure(figsize=(6, 4))
    sns.histplot(df[column], kde=True)
    plt.title(f'Distribution of {column}')
    plt.show()

# Category frequencies for every categorical column.
for column in cat_cols:
    plt.figure(figsize=(6, 4))
    sns.countplot(data=df, x=column)
    plt.title(f'Counts of {column}')
    plt.xticks(rotation=45)
    plt.show()
# Drop the identifier column before any modeling.
df_cleaned = df.drop(columns=['Soldier_ID'])

# Outlier treatment: cap the score columns at their 99th percentile to limit
# the influence of extreme responses.
# FIX(idiom): `Series.clip(upper=...)` replaces the manual
# `np.where(col > upper, upper, col)` construction — identical values,
# clearer intent, and the result stays a pandas Series.
for col in ['Anxiety_Score', 'Depression_Score', 'Stress_Level']:
    upper = df_cleaned[col].quantile(0.99)
    df_cleaned[col] = df_cleaned[col].clip(upper=upper)
# Label-encode every categorical column, keeping each fitted encoder so the
# integer codes can later be mapped back to their original string labels.
encoded_df = df_cleaned.copy()
label_encoders = {}
object_columns = encoded_df.select_dtypes(include='object').columns
for column in object_columns:
    encoder = LabelEncoder()
    encoded_df[column] = encoder.fit_transform(encoded_df[column])
    label_encoders[column] = encoder

# Pairwise correlations across all (now fully numeric) columns.
plt.figure(figsize=(12, 8))
sns.heatmap(encoded_df.corr(), annot=True, cmap='coolwarm', fmt='.2f')
plt.title('Correlation Matrix')
plt.show()
# Feature matrix / target vector; stratify the split so class proportions
# are preserved in both partitions.
y = encoded_df['Risk_Level']
X = encoded_df.drop(columns=['Risk_Level'])
X_train, X_test, y_train, y_test = train_test_split(
    X, y, stratify=y, test_size=0.2, random_state=42
)

# Quick baseline comparison of three off-the-shelf classifiers
# (no tuning yet — default hyperparameters, fixed seeds for reproducibility).
models = {
    'Random Forest': RandomForestClassifier(random_state=42),
    'Logistic Regression': LogisticRegression(max_iter=1000),
    'Gradient Boosting': GradientBoostingClassifier(random_state=42),
}

for model_name, clf in models.items():
    clf.fit(X_train, y_train)
    predictions = clf.predict(X_test)
    print(f"\n{model_name} Classification Report:\n")
    print(classification_report(y_test, predictions))
# Hyperparameter tuning with multiple models.
# FIX: removed re-imports of LogisticRegression, RandomForestClassifier and
# GradientBoostingClassifier — they are already imported at the top of the
# file; only names not yet in scope are imported here.
from sklearn.svm import SVC
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler

# One entry per model family: the estimator plus the grid searched over it.
# Parameter keys carry the "model__" prefix because each estimator sits in a
# Pipeline step named "model".
model_configs = {
    "Logistic Regression": {
        "model": LogisticRegression(max_iter=1000),
        "params": {"model__C": [0.01, 0.1, 1, 10], "model__solver": ["liblinear", "lbfgs"]}
    },
    "Random Forest": {
        "model": RandomForestClassifier(random_state=42),
        "params": {"model__n_estimators": [100, 200], "model__max_depth": [None, 10, 20]}
    },
    "Gradient Boosting": {
        "model": GradientBoostingClassifier(random_state=42),
        "params": {"model__n_estimators": [100, 200], "model__learning_rate": [0.05, 0.1], "model__max_depth": [3, 5]}
    },
    "SVM": {
        "model": SVC(probability=True),
        "params": {"model__C": [0.1, 1, 10], "model__kernel": ["linear", "rbf"]}
    },
    "K-Nearest Neighbors": {
        "model": KNeighborsClassifier(),
        "params": {"model__n_neighbors": [3, 5, 7], "model__weights": ["uniform", "distance"]}
    },
    "Naive Bayes": {
        "model": GaussianNB(),
        "params": {}  # no grid: GridSearchCV fits the single default candidate
    }
}

# Scaling happens inside the pipeline so the scaler is re-fit on each CV
# training fold — this avoids leaking test-fold statistics into the scaling.
best_models = {}
for name, config in model_configs.items():
    pipe = Pipeline([("scaler", StandardScaler()), ("model", config["model"])])
    grid = GridSearchCV(pipe, config["params"], cv=5, scoring="f1_macro", n_jobs=-1)
    grid.fit(X_train, y_train)
    best_models[name] = {"best_estimator": grid.best_estimator_, "best_score": grid.best_score_}

# Winner = highest cross-validated macro-F1.
best_model_name = max(best_models, key=lambda name: best_models[name]['best_score'])
print(f"\nBest Model: {best_model_name} — F1 Score: {best_models[best_model_name]['best_score']:.4f}")
# Evaluate the grid-search winner on the held-out test split.
# FIX: the original discarded the tuning results and hard-coded a fresh
# LogisticRegression here; use the tuned best estimator instead.
best_model = best_models[best_model_name]["best_estimator"]
# GridSearchCV (refit=True by default) already fitted this on X_train, but
# re-fitting keeps the original explicit-fit flow and is harmless.
best_model.fit(X_train, y_train)
y_pred = best_model.predict(X_test)
ConfusionMatrixDisplay.from_estimator(best_model, X_test, y_test, cmap='Blues')
plt.title(f'Confusion Matrix - {best_model_name}')
plt.show()
# --- Deployment section: train the model actually served by the Gradio app ---
# FIX: removed duplicate re-imports of pandas, numpy, LogisticRegression,
# train_test_split and LabelEncoder — all already imported at the top of the
# file. Only gradio is new here.
import gradio as gr

# Rebuild the modeling frame from the raw data (identifier column removed).
df_clean = df.drop(columns=["Soldier_ID"])

# Re-fit label encoders on the (uncapped) deployment data.
# NOTE(review): this rebinds `label_encoders`, replacing the EDA-section
# encoders — the prediction function below relies on THIS dict. Confirm the
# shadowing is intentional.
label_encoders = {}
for col in df_clean.select_dtypes(include='object').columns:
    le = LabelEncoder()
    df_clean[col] = le.fit_transform(df_clean[col])
    label_encoders[col] = le

# Define features and target.
X = df_clean.drop("Risk_Level", axis=1)
y = df_clean["Risk_Level"]

# Split and train the served model.
# NOTE(review): no test_size given, so sklearn's default 0.25 applies —
# inconsistent with the 0.2 used earlier in the file; confirm which was meant.
X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y, random_state=42)
model = LogisticRegression(max_iter=1000)
model.fit(X_train, y_train)
156
+ def predict_risk(age, gender, rank, service_years, deployment_count, mental_issues, sleep, stress, anxiety, depression, fitness, support, substance_use, combat_intensity, family_status):
157
+ input_df = pd.DataFrame([{
158
+ "Age": age,
159
+ "Gender": label_encoders["Gender"].transform([gender])[0],
160
+ "Rank": label_encoders["Rank"].transform([rank])[0],
161
+ "Service_Years": service_years,
162
+ "Deployment_Count": deployment_count,
163
+ "Previous_Mental_Health_Issues": label_encoders["Previous_Mental_Health_Issues"].transform([mental_issues])[0],
164
+ "Sleep_Hours": sleep,
165
+ "Stress_Level": stress,
166
+ "Anxiety_Score": anxiety,
167
+ "Depression_Score": depression,
168
+ "Physical_Fitness_Score": fitness,
169
+ "Social_Support_Score": support,
170
+ "Substance_Use": label_encoders["Substance_Use"].transform([substance_use])[0],
171
+ "Combat_Training_Intensity": label_encoders["Combat_Training_Intensity"].transform([combat_intensity])[0],
172
+ "Family_Status": label_encoders["Family_Status"].transform([family_status])[0]
173
+ }])
174
+
175
+ pred = model.predict(input_df)[0]
176
+ risk_label = label_encoders["Risk_Level"].inverse_transform([pred])[0]
177
+ return f"🔎 Predicted Mental Health Risk: **{risk_label}**"
# Gradio Interface: one widget per model feature, matching the columns the
# model was trained on (and the choice sets the label encoders expect).
form_inputs = [
    gr.Slider(20, 60, step=1, label="Age"),
    gr.Radio(["Male", "Female"], label="Gender"),
    gr.Dropdown(["E1", "E2", "E3", "E4", "E5", "O1", "O2", "O3"], label="Rank"),
    gr.Slider(1, 25, step=1, label="Service Years"),
    gr.Slider(0, 10, step=1, label="Deployment Count"),
    gr.Radio(["Yes", "No"], label="Previous Mental Health Issues"),
    gr.Slider(0.0, 10.0, step=0.1, label="Sleep Hours"),
    gr.Slider(0, 10, step=1, label="Stress Level"),
    gr.Slider(0, 21, step=1, label="Anxiety Score"),
    gr.Slider(0, 27, step=1, label="Depression Score"),
    gr.Slider(1.0, 10.0, step=0.1, label="Physical Fitness Score"),
    gr.Slider(1.0, 10.0, step=0.1, label="Social Support Score"),
    gr.Radio(["Yes", "No"], label="Substance Use"),
    gr.Radio(["Low", "Moderate", "High"], label="Combat Training Intensity"),
    gr.Radio(["Single", "Married", "Divorced", "Engaged"], label="Family Status"),
]

demo = gr.Interface(
    fn=predict_risk,
    inputs=form_inputs,
    outputs="text",
    title="🧠 Mental Health Risk Predictor (Military)",
    description="Enter a soldier's pre-deployment details to estimate mental health risk level using logistic regression.",
)

# Run the app
demo.launch()