Create evaluation/evaluation.py
evaluation/evaluation.py    ADDED    +18 -0
@@ -0,0 +1,18 @@
+from sklearn.metrics import accuracy_score, classification_report
+
+def evaluate_model(model, X_test, y_test):
+    """
+    Evaluate the trained model on the test set.
+
+    Args:
+        model: Trained model.
+        X_test (pd.DataFrame): Testing features.
+        y_test (pd.Series): Testing labels.
+
+    Returns:
+        dict: Evaluation metrics.
+    """
+    y_pred = model.predict(X_test)
+    accuracy = accuracy_score(y_test, y_pred)
+    report = classification_report(y_test, y_pred)
+    return {"accuracy": accuracy, "classification_report": report}
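
A minimal usage sketch of the new helper (not part of this commit). The dataset, estimator choice, and train/test split are assumptions made purely for illustration; it also assumes the repository root is on the Python path so the new evaluation.evaluation module is importable.

# Hypothetical usage sketch; dataset and model are assumptions, not project code.
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split

from evaluation.evaluation import evaluate_model

# Load a toy dataset as pandas objects to match the documented argument types.
X, y = load_iris(return_X_y=True, as_frame=True)
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42
)

# Any scikit-learn estimator with a predict() method should work here.
model = LogisticRegression(max_iter=1000).fit(X_train, y_train)

metrics = evaluate_model(model, X_test, y_test)
print(metrics["accuracy"])
print(metrics["classification_report"])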