Rahul-Samedavar committed on
Commit
aabd335
·
1 Parent(s): c4dd78c

Track model file with Git LFS

Browse files
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ *.keras filter=lfs diff=lfs merge=lfs -text
Weights/dropout_predictor.h5 → Models/model.keras RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:d930d79a4c867d3ef823fa6c212c7e9faee5472b2410ac8f6c26e3e3f9eaeb7c
3
- size 213000
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2215cea53fda3f6f615f50c4748b2ab7bcc758807cd980f2378d532877213d2b
3
+ size 220300
app.py CHANGED
@@ -3,111 +3,69 @@ import numpy as np
3
  import tensorflow as tf
4
  import pickle
5
 
6
- from tensorflow.keras import layers, models
7
-
8
  app = Flask(__name__)
9
 
10
- model = models.Sequential([
11
- layers.Input(shape=(9,)),
12
- layers.Dense(128, activation='relu'),
13
- layers.BatchNormalization(),
14
- layers.Dropout(0.3),
15
- layers.Dense(64, activation='relu'),
16
- layers.BatchNormalization(),
17
- layers.Dropout(0.2),
18
- layers.Dense(32, activation='relu'),
19
- layers.Dense(1, activation='sigmoid') # Output: probability of dropout
20
- ])
21
-
22
- model.load_weights(r"Weights/dropout_predictor.h5")
23
-
24
  with open("scaler.pkl", "rb") as f:
25
  scaler = pickle.load(f)
26
 
27
- gender_map = {"Male": 0, "Female": 1}
28
- extracurricular_map = {"No": 0, "Yes": 1}
 
29
 
30
- expected_features = [
31
- "Current_Sem", "Gender", "Extracurricular",
32
- "SGPA_Sem_1", "SGPA_Sem_2", "SGPA_Sem_3", "SGPA_Sem_4", "SGPA_Sem_5", "SGPA_Sem_6",
33
- "Attendance_Sem_1", "Attendance_Sem_2", "Attendance_Sem_3",
34
- "Attendance_Sem_4", "Attendance_Sem_5", "Attendance_Sem_6"
35
- ]
36
 
37
- from flask import request, jsonify
38
- import numpy as np
39
 
40
  @app.route("/predict", methods=["POST"])
41
  def predict():
42
  try:
43
  data = request.get_json()
44
-
45
  if not isinstance(data, list):
46
- return jsonify({"error": "Expected a list of student records"}), 400
47
-
48
- response_data = []
49
-
50
- for student in data:
51
- try:
52
- name = student["name"]
53
- usn = student["usn"]
54
- gender_str = student["gender"]
55
- extra_raw = student.get("extracurricular", 0)
56
- current_sem = student["current_sem"]
57
- semesters = student["semesters"]
58
-
59
- # Normalize gender
60
- gender = gender_map.get(gender_str.lower(), 0)
61
-
62
- # Normalize extracurricular (out of 10)
63
- extra = float(extra_raw) / 10 if extra_raw else 0.0
64
-
65
- # Aggregate SGPA and attendance per semester
66
- sgpas = []
67
- atts = []
68
-
69
- for sem in range(1, 7): # Semesters 1 to 6
70
- sem_data = next((s for s in semesters if s["semester_number"] == sem), None)
71
-
72
- if sem_data:
73
- subjects = sem_data.get("subjects", [])
74
- marks = [sub["marks"] / 100 for sub in subjects if "marks" in sub]
75
- attendance = [sub["attendance"] / 100 for sub in subjects if "attendance" in sub]
76
-
77
- sgpa = np.mean(marks) if marks else 0.0
78
- att = np.mean(attendance) if attendance else 0.0
79
- else:
80
- sgpa = 0.0
81
- att = 0.0
82
-
83
- sgpas.append(sgpa)
84
- atts.append(att)
85
-
86
- # Build feature vector
87
- features = [current_sem, gender, extra] + sgpas + atts
88
- X = scaler.transform([features])
89
- prob = float(model.predict(X).flatten()[0])
90
-
91
- # Enrich student JSON
92
- enriched_student = {
93
- **student,
94
- "sgpa_per_semester": sgpas,
95
- "attendance_per_semester": atts,
96
- "risk": prob
97
- }
98
-
99
- response_data.append(enriched_student)
100
-
101
- except Exception as student_error:
102
- response_data.append({
103
- "error": f"Error processing student {student.get('usn', 'unknown')}: {str(student_error)}"
104
- })
105
-
106
- return jsonify(response_data)
107
 
108
  except Exception as e:
109
  return jsonify({"error": str(e)}), 500
110
 
111
-
112
  if __name__ == "__main__":
113
  app.run(debug=True)
 
3
  import tensorflow as tf
4
  import pickle
5
 
 
 
6
app = Flask(__name__)

# Load model and scaler
# NOTE(review): load_model assumes the full model (architecture + weights)
# was serialized to Models/model.keras — the old code rebuilt the network
# and loaded only weights; confirm the .keras file matches training output.
model = tf.keras.models.load_model(r"Models/model.keras")

# Fitted feature scaler produced at training time; must match input_order.
with open("scaler.pkl", "rb") as f:
    scaler = pickle.load(f)

# LabelEncoder mapping (as used during training)
# Unknown / missing categories fall back to 0 in predict().
gender_map = {"Male": 1, "Female": 0}
extra_map = {"Yes": 1, "No": 0}

# Expected input fields: 3 base features, per-semester SGPA/attendance for
# semesters 1-6, plus one presence-mask column per SGPA/attendance field.
semesters = range(1, 7)
base_features = ["Current_Sem", "Gender", "Extracurricular"]
sgpa_fields = [f"SGPA_Sem_{i}" for i in semesters]
att_fields = [f"Attendance_Sem_{i}" for i in semesters]
mask_fields = [f"{col}_mask" for col in sgpa_fields + att_fields]

# Canonical column order the scaler/model expect (3 + 6 + 6 + 12 = 27 columns).
input_order = base_features + sgpa_fields + att_fields + mask_fields
 
25
 
26
def _collect_with_mask(record, fields):
    """Collect numeric values for *fields* from *record* plus a presence mask.

    A missing or None entry contributes 0.0 with mask 0; a present entry is
    coerced to float with mask 1 (a non-numeric value raises ValueError,
    handled by the caller's error boundary).

    Returns:
        (values, masks): two parallel lists, one element per field.
    """
    values, masks = [], []
    for field in fields:
        val = record.get(field)
        if val is None:
            values.append(0.0)
            masks.append(0)
        else:
            values.append(float(val))
            masks.append(1)
    return values, masks


@app.route("/predict", methods=["POST"])
def predict():
    """Predict dropout probability for a batch of student records.

    Expects a JSON list of objects, each containing the required fields
    Current_Sem, Gender and Extracurricular, plus optional SGPA_Sem_1..6
    and Attendance_Sem_1..6 values (absent values are zero-filled with a
    0 presence mask, mirroring training-time preprocessing).

    Returns:
        200 with a JSON list of {"dropout_probability": float}, in input
        order; 400 for a malformed request body; 500 on unexpected errors.
    """
    try:
        data = request.get_json()
        if not isinstance(data, list):
            return jsonify({"error": "Expected a list of input records"}), 400

        processed = []
        for record in data:
            # Validate required base fields before touching the record.
            for field in base_features:
                if field not in record:
                    return jsonify({"error": f"Missing required field: {field}"}), 400

            # Encode categorical fields; unknown categories default to 0,
            # matching the training-time LabelEncoder fallback.
            gender = gender_map.get(record["Gender"], 0)
            extra = extra_map.get(record["Extracurricular"], 0)

            # Per-semester SGPA / attendance with presence masks
            # (shared helper removes the previously duplicated loops).
            sgpas, s_masks = _collect_with_mask(record, sgpa_fields)
            atts, a_masks = _collect_with_mask(record, att_fields)

            # Coerce Current_Sem to float so the feature matrix stays numeric
            # even when JSON clients send it as a string.
            row = (
                [float(record["Current_Sem"]), gender, extra]
                + sgpas + atts + s_masks + a_masks
            )
            processed.append(row)

        # Scale and predict the whole batch in one call.
        X = scaler.transform(np.array(processed))
        probs = model.predict(X).flatten()
        return jsonify([{"dropout_probability": float(p)} for p in probs])

    except Exception as e:
        # Top-level boundary: surface any unexpected failure as a 500
        # rather than leaking a traceback to the client.
        return jsonify({"error": str(e)}), 500
69
 
 
70
  if __name__ == "__main__":
71
  app.run(debug=True)
scaler.pkl CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:a72d56371da73a9575f07e125500f4bec157184e5c92fd880222b513ebd3705b
3
  size 1247
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2db951be4c8d024a6f824113ce0e7951919eea9a79c6bfdba31c31c3c7a651bf
3
  size 1247