File size: 3,882 Bytes
e89405a
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
65778eb
e89405a
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
# train.py

import torch
from datasets import load_dataset
from transformers import (
    AutoModelForSequenceClassification,
    AutoTokenizer,
    TrainingArguments,
    Trainer,
    DataCollatorWithPadding
)
from sklearn.metrics import accuracy_score, precision_recall_fscore_support

# Step 1: Define our evaluation metrics
def compute_metrics(pred):
    """
    Compute evaluation metrics from a Trainer prediction output.

    Args:
        pred: an EvalPrediction-like object exposing `label_ids`
            (ground-truth labels) and `predictions` (raw logits).
    Returns:
        dict: accuracy, f1, precision, and recall scores.
    """
    y_true = pred.label_ids
    # Highest-scoring class along the last axis is the predicted label.
    y_pred = pred.predictions.argmax(-1)

    precision, recall, f1, _ = precision_recall_fscore_support(
        y_true, y_pred, average='binary'
    )

    return {
        'accuracy': accuracy_score(y_true, y_pred),
        'f1': f1,
        'precision': precision,
        'recall': recall,
    }

def train_model():
    """
    Fine-tune DistilBERT for binary sentiment classification on IMDB and
    push the trained model to the Hugging Face Hub.

    Side effects: downloads the dataset and pretrained weights, writes
    checkpoints to ./results, and uploads the model to the Hub.
    """
    # Step 2: Load the IMDB dataset
    # This dataset contains movie reviews labeled as positive or negative
    print("Loading dataset...")
    dataset = load_dataset("imdb")
    
    # Step 3: Initialize our model and tokenizer
    # We use DistilBERT as it's smaller and faster than BERT
    print("Loading tokenizer and model...")
    model_name = "distilbert-base-uncased"
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForSequenceClassification.from_pretrained(
        model_name,
        num_labels=2  # Binary classification: positive or negative
    )
    
    # Step 4: Create tokenization function
    def tokenize_function(examples):
        """
        Tokenize a batch of raw examples.

        Note: no padding here — DataCollatorWithPadding (below) pads each
        batch dynamically to its longest member, which avoids wastefully
        padding every example to the 512-token maximum.
        """
        return tokenizer(
            examples["text"],
            truncation=True,
            max_length=512  # Model's maximum input length
        )
    
    # Step 5: Tokenize the dataset
    # FIX: the original passed remove_columns=dataset["train"].column_names,
    # which dropped BOTH "text" and "label". Without the label column the
    # Trainer has no targets and cannot compute a loss. Remove only the raw
    # "text" column; the collator renames "label" -> "labels" automatically.
    print("Tokenizing dataset...")
    tokenized_datasets = dataset.map(
        tokenize_function,
        batched=True,
        remove_columns=["text"]
    )
    
    # Step 6: Define training arguments
    print("Setting up training arguments...")
    training_args = TrainingArguments(
        output_dir="./results",  # Directory to save model checkpoints
        learning_rate=2e-5,  # Learning rate
        per_device_train_batch_size=16,  # Batch size for training
        per_device_eval_batch_size=16,  # Batch size for evaluation
        num_train_epochs=3,  # Number of training epochs
        weight_decay=0.01,  # Weight decay for regularization
        evaluation_strategy="epoch",  # Evaluate after each epoch
        save_strategy="epoch",  # Save model after each epoch (must match eval strategy for load_best_model_at_end)
        load_best_model_at_end=True,  # Load best model (by eval loss) at end of training
        push_to_hub=True,  # Push model to Hugging Face Hub
        hub_model_id="shaheerawan3/Vibescribe"  # Replace with your username
    )
    
    # Step 7: Initialize the trainer
    print("Initializing trainer...")
    trainer = Trainer(
        model=model,  # The model to train
        args=training_args,  # Training arguments
        train_dataset=tokenized_datasets["train"],  # Training dataset
        eval_dataset=tokenized_datasets["test"],  # Evaluation dataset
        tokenizer=tokenizer,  # Tokenizer (saved alongside the model)
        data_collator=DataCollatorWithPadding(tokenizer=tokenizer),  # Dynamic per-batch padding
        compute_metrics=compute_metrics  # Evaluation metrics
    )
    
    # Step 8: Train the model
    print("Starting training...")
    trainer.train()
    
    # Step 9: Push model to Hugging Face Hub
    print("Pushing model to Hugging Face Hub...")
    trainer.push_to_hub()

# Entry point: run fine-tuning only when this file is executed as a script,
# not when it is imported as a module.
if __name__ == "__main__":
    train_model()