File size: 2,898 Bytes
3d48e06
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
# Template for model evaluation script for {{phase_name}}

from transformers import AutoModelForSequenceClassification, AutoTokenizer
from datasets import load_dataset # Example datasets library
from sklearn.metrics import accuracy_score, classification_report # Example metrics
import torch # Example PyTorch
# Add other necessary imports

def evaluate_model(model_path, dataset_path, model_name="bert-base-uncased",
                   eval_split="validation", eval_batch_size=64):
    """
    Evaluate a fine-tuned sequence-classification model on a CSV dataset.

    Loads the dataset with ``datasets.load_dataset('csv', ...)``, tokenizes the
    ``text_column`` field, and runs a HuggingFace ``Trainer`` evaluation pass.

    Args:
        model_path: Path/identifier of the fine-tuned model to evaluate.
        dataset_path: Path to the evaluation dataset file (CSV expected here).
        model_name: Base model whose tokenizer is used (or fine-tuned tokenizer
            if saved separately).
        eval_split: Name of the dataset split to evaluate on. Defaults to
            ``"validation"`` — confirm your dataset actually exposes this split.
        eval_batch_size: Per-device batch size for evaluation.

    Returns:
        The evaluation metrics dict produced by ``Trainer.evaluate()`` (keys are
        prefixed with ``eval_``), or ``None`` if loading/evaluation failed.
    """
    try:
        # Load dataset for evaluation (replace with your actual dataset loading)
        dataset = load_dataset('csv', data_files=dataset_path) # Example: CSV dataset loading, replace with your dataset format

        print("Evaluation dataset loaded. Loading model and tokenizer...")

        tokenizer = AutoTokenizer.from_pretrained(model_name) # Use base model tokenizer (or fine-tuned tokenizer if saved separately)
        model = AutoModelForSequenceClassification.from_pretrained(model_path)

        def tokenize_function(examples):
            # Example: tokenize 'text_column'; adjust to your dataset's schema.
            return tokenizer(examples["text_column"], padding="max_length", truncation=True)

        tokenized_datasets = dataset.map(tokenize_function, batched=True)

        def compute_metrics(eval_pred):
            # eval_pred is (logits, labels); argmax over the class axis gives
            # the predicted label ids.
            predictions, labels = eval_pred
            predictions = predictions.argmax(axis=-1)
            accuracy = accuracy_score(labels, predictions)
            report = classification_report(labels, predictions, output_dict=True) # Detailed per-class report
            return {"accuracy": accuracy, "classification_report": report}

        # Trainer requires TrainingArguments even for evaluation-only runs.
        training_args = TrainingArguments(
            output_dir="./evaluation_results",
            per_device_eval_batch_size=eval_batch_size,
            logging_dir='./eval_logs',
        )

        trainer = Trainer(
            model=model,
            args=training_args,
            eval_dataset=tokenized_datasets[eval_split],
            compute_metrics=compute_metrics,
            tokenizer=tokenizer
        )

        evaluation_results = trainer.evaluate()

        print("Model evaluation completed.")
        print("Evaluation Results:")
        print(f"Accuracy: {evaluation_results['eval_accuracy']}")
        print("Classification Report:\n", evaluation_results['eval_classification_report'])

        return evaluation_results

    except FileNotFoundError:
        # Keep best-effort behavior: report and return None rather than crash.
        print("Error: Dataset file or model files not found.")
        return None
    except Exception as e:
        print(f"Error during model evaluation: {e}")
        return None


if __name__ == "__main__":
    # Script configuration — replace these with your own paths/model name.
    config = {
        "model_path": "models/fine_tuned_model",
        "dataset_path": "data/evaluation_dataset.csv",
        "model_name": "bert-base-uncased",
    }

    evaluate_model(
        config["model_path"],
        config["dataset_path"],
        model_name=config["model_name"],
    )