import os

import torch
from datasets import load_dataset
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    DataCollatorForLanguageModeling,
    Trainer,
    TrainingArguments,
)



def main():
    # Distributed environment variables are set by the launcher (e.g. torchrun).
    local_rank = int(os.environ["LOCAL_RANK"])
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])

    torch.distributed.init_process_group("nccl")
    print(f"Rank {rank} (local rank {local_rank}) of {world_size} processes initialized")
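    # Example launch command (a sketch; the script filename and GPU count are assumptions):
    #   torchrun --nproc_per_node=4 train_clm.py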



    # Load your JSONL file
    dataset = load_dataset('json', data_files='../../data/m2_250514_1150.jsonl', split='train')
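    # Assumed record format: each JSONL line carries a "text" field used by the
    # tokenize function below, e.g. {"text": "one training document ..."}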
    
    # Load the model and tokenizer
    model_name = "FacebookAI/roberta-base"

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    # RoBERTa is an encoder model; is_decoder=True switches it to causal attention
    # masking so it can be fine-tuned as a causal language model.
    model = AutoModelForCausalLM.from_pretrained(model_name, is_decoder=True)
    
    # Set pad token if not set
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token
    
    # Tokenize the dataset
    def tokenize_function(examples):
        return tokenizer(examples["text"], truncation=True, max_length=512)
    
    tokenized_dataset = dataset.map(tokenize_function, batched=True)
    
    # Split the dataset into training and validation sets
    split_dataset = tokenized_dataset.train_test_split(test_size=0.1)
    
    # Data collator, pad the inputs to the maximum length in the batch
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm=False  # mlm=False: causal language modeling
    )
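    # With mlm=False the collator copies input_ids into labels and sets padding
    # positions to -100 so they are ignored by the loss; the causal (next-token)
    # shift happens inside the model's forward pass.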
    
    # Training
    training_args = TrainingArguments(
        output_dir="./results",
        overwrite_output_dir=True,
        num_train_epochs=3,
        per_device_train_batch_size=4,
        per_device_eval_batch_size=4,
        dataloader_num_workers=8,  
        eval_strategy="steps",  # run evaluation every eval_steps (named evaluation_strategy in transformers < 4.41)
        eval_steps=500,
        save_steps=1000,
        warmup_steps=500,
        prediction_loss_only=True,
        logging_dir="./logs",
        logging_steps=100,
        learning_rate=5e-5,
        fp16=True,  # true for GPU
    )
    
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=split_dataset["train"],
        eval_dataset=split_dataset["test"],
        data_collator=data_collator,
    )
    
    # Start training
    trainer.train()

    # Save the model and tokenizer only from the main process to avoid
    # concurrent writes to the same directory.
    if rank == 0:
        model.save_pretrained("./fine_tuned_model")
        tokenizer.save_pretrained("./fine_tuned_model")

    torch.distributed.destroy_process_group()

if __name__ == "__main__":
    main()
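
# Example of loading the fine-tuned model later (a sketch, run outside this script):
#   from transformers import AutoModelForCausalLM, AutoTokenizer
#   tokenizer = AutoTokenizer.from_pretrained("./fine_tuned_model")
#   model = AutoModelForCausalLM.from_pretrained("./fine_tuned_model")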