File size: 3,682 Bytes
af7b60b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
#!/usr/bin/env python3
"""
Evaluation script for IndoHoaxDetector model
This script computes various performance metrics for the hoax detection model.
"""

import pickle
import numpy as np
from sklearn.metrics import (
    accuracy_score,
    precision_score,
    recall_score,
    f1_score,
    confusion_matrix,
    classification_report
)

def load_model(model_path='logreg_model.pkl'):
    """Deserialize and return the trained classifier stored at *model_path*.

    Args:
        model_path: Path to a pickled sklearn model (default: 'logreg_model.pkl').

    Returns:
        The unpickled model object.
    """
    # NOTE: pickle.load can execute arbitrary code — only load trusted model files.
    with open(model_path, 'rb') as model_file:
        return pickle.load(model_file)

def evaluate_model(model, X_test, y_test):
    """
    Evaluate the model on test data.

    Args:
        model: Trained sklearn model
        X_test: Test features (texts)
        y_test: True labels (0: legitimate, 1: hoax)

    Returns:
        dict: Dictionary containing evaluation metrics
    """
    # Hard predictions and per-class probabilities from the fitted model.
    predictions = model.predict(X_test)
    probabilities = model.predict_proba(X_test)

    # Assemble every metric in a single result mapping.
    return {
        'accuracy': accuracy_score(y_test, predictions),
        'precision': precision_score(y_test, predictions, average='binary'),
        'recall': recall_score(y_test, predictions, average='binary'),
        'f1_score': f1_score(y_test, predictions, average='binary'),
        'confusion_matrix': confusion_matrix(y_test, predictions),
        'classification_report': classification_report(
            y_test, predictions, target_names=['Legitimate', 'Hoax']
        ),
        'predictions': predictions,
        'probabilities': probabilities,
    }

def print_evaluation_results(results):
    """Print evaluation results in a formatted way.

    Args:
        results: Metrics dict as produced by evaluate_model(); must contain
            the keys 'accuracy', 'precision', 'recall', 'f1_score',
            'confusion_matrix', and 'classification_report'.
    """
    print("=" * 60)
    print("IndoHoaxDetector Model Evaluation Results")
    print("=" * 60)

    # Bug fix: the original lines were `print(".4f")` — they printed the
    # literal text ".4f" four times instead of formatting the metric values.
    print(f"Accuracy:  {results['accuracy']:.4f}")
    print(f"Precision: {results['precision']:.4f}")
    print(f"Recall:    {results['recall']:.4f}")
    print(f"F1-Score:  {results['f1_score']:.4f}")

    print("\nConfusion Matrix:")
    print("[[True Negative, False Positive]")
    print(" [False Negative, True Positive]]")
    print(results['confusion_matrix'])

    print("\nDetailed Classification Report:")
    print(results['classification_report'])

def evaluate_on_sample_data(model):
    """Evaluate on sample data for demonstration."""
    # Demonstration set as (text, label) pairs; 0 = legitimate, 1 = hoax.
    # In a real scenario this would be your actual held-out test set.
    labeled_samples = [
        ("Pemerintah mengumumkan bantuan sosial untuk masyarakat terdampak pandemi.", 0),
        ("Ditemukan cara instan kaya raya dalam semalam.", 1),
        ("Harga beras stabil di pasaran tradisional.", 0),
        ("Vaksin COVID-19 mengandung chip pelacak.", 1),
        ("Bank Indonesia menaikkan suku bunga acuan.", 0),
        ("Minum air rebusan daun jambu bisa menyembuhkan kanker.", 1),
        ("Jokowi bertemu dengan delegasi dari Amerika Serikat.", 0),
        ("UFO mendarat di Istana Negara dan bertemu presiden.", 1),
    ]

    # Split the pairs back into parallel text/label lists for the model API.
    sample_texts = [text for text, _ in labeled_samples]
    sample_labels = [label for _, label in labeled_samples]

    print("Evaluating on sample data...")
    results = evaluate_model(model, sample_texts, sample_labels)
    print_evaluation_results(results)

    return results

def main():
    """Main evaluation function: load the model, run the demo evaluation, and
    print usage guidance for evaluating custom data."""
    print("Loading IndoHoaxDetector model...")
    model = load_model()

    evaluate_on_sample_data(model)

    # Closing banner with instructions for running on a real test set.
    separator = "=" * 60
    print("\n" + separator)
    print("To evaluate on your own test data:")
    print("1. Prepare your test texts and labels")
    print("2. Call evaluate_model(model, X_test, y_test)")
    print("3. Use print_evaluation_results(results) to display metrics")
    print(separator)

# Run the full evaluation workflow only when executed as a script,
# not when this module is imported for its helper functions.
if __name__ == "__main__":
    main()