flpelerin committed
Commit 0f2d26d · 1 Parent(s): d638e2c

adding validation

Files changed (1)
  1. train.py +70 -43
train.py CHANGED
@@ -1,5 +1,3 @@
-#@title Utility functions for sampling
-
 import torch
 import math
 from transformers import GPT2Tokenizer
@@ -9,11 +7,9 @@ import numpy as np
 from model import minGRULM
 from util import generate_text
 
-
-
-
-
-
+# ============================
+# Configuration Parameters
+# ============================
 
 dataset_path = 'flpelerin/tinystories-100k'
 
@@ -28,19 +24,19 @@ num_predict = 250
 
 reset_state_every = 16
 
-device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-print(f"total context size is {batch_size * seq_length} tokens");
-
-
+validate_every = 100  # Perform validation every 100 training steps
 
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+print(f"Total context size is {batch_size * seq_length} tokens")
 
 tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
 tokenizer.pad_token = tokenizer.eos_token
 vocab_size = tokenizer.vocab_size
-print(f"tokenizer has {vocab_size} unique tokens")
-
-
+print(f"Tokenizer has {vocab_size} unique tokens")
 
+# ============================
+# Load and Preprocess Dataset
+# ============================
 
 dataset = load_dataset(dataset_path)
 
@@ -48,17 +44,23 @@ def process_function(examples):
     return tokenizer(examples['text'], padding='longest', truncation=True)
 
 tokenized_datasets = dataset.map(process_function, batched=True)
-print(f"dataset has {tokenized_datasets['train'].num_rows} rows of {batch_size} times {seq_length} tokens")
+print(f"Dataset has {tokenized_datasets['train'].num_rows} rows of {batch_size} times {seq_length} tokens")
 
+# ============================
+# Split Dataset into Train and Validation
+# ============================
 
+# Split the training set into 90% train and 10% validation
+split_dataset = tokenized_datasets['train'].train_test_split(test_size=0.1)
+train_dataset = split_dataset['train']
+valid_dataset = split_dataset['test']
 
+print(f"Training set size: {len(train_dataset)}")
+print(f"Validation set size: {len(valid_dataset)}")
 
-#model = minGRULM(
-#   vocab_size = vocab_size,
-#   d_model = 768,
-#   d_inner = 1536,
-#   n_layers = 12
-#)
+# ============================
+# Initialize the Model
+# ============================
 
 model = minGRULM(
     vocab_size = vocab_size,
@@ -67,29 +69,30 @@ model = minGRULM(
     n_layers = 6
 )
 
-
 model.to(device)
-print(f"model has {sum(p.numel() for p in model.parameters()):,} parameters")
-
-
-
+print(f"Model has {sum(p.numel() for p in model.parameters()):,} parameters")
 
 optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate)
 
 h_states = None
 
+# ============================
+# Training Loop with Validation
+# ============================
+
 step = 0
 for epoch in range(num_epochs):
-    for i in range(0, len(tokenized_datasets['train']), batch_size):
-        batch = tokenized_datasets['train'][i:i + batch_size]
+    print(f"Starting Epoch {epoch + 1}/{num_epochs}")
+    for i in range(0, len(train_dataset), batch_size):
+        batch = train_dataset[i:i + batch_size]
         input_ids = torch.tensor(batch['input_ids']).to(device)
 
-        #if (i / batch_size) % reset_state_every == 0:
-        #    print(f"resetting state, {(i / batch_size)} % {reset_state_every} == 0")
-        #    h_states = None
-
-        h_states = h_states if (i / batch_size) % reset_state_every != 0 else None
-        str_states = ''.join(['{:.3f}, '.format(h_states[0][0][0][i].item()) for i in range(10)]) if h_states is not None else 'None'
+        # Reset hidden states if needed
+        h_states = h_states if (step % reset_state_every != 0) else None
+        str_states = (
+            ''.join(['{:.3f}, '.format(h_states[0][0][0][j].item()) for j in range(10)])
+            if h_states is not None else 'None'
+        )
 
        optimizer.zero_grad()
        _, h_states, loss = model.forward(input_ids, h_states)
@@ -97,17 +100,41 @@ for epoch in range(num_epochs):
         optimizer.step()
 
         step += 1
-        print(f"Epoch: {epoch} / {num_epochs}, Step: {step}, Loss: {loss.item():.4f}, Hidden State: {str_states}")
+        print(f"Epoch: {epoch + 1}/{num_epochs}, Step: {step}, Loss: {loss.item():.4f}, Hidden State: {str_states}")
 
-        if step % infer_step == 0:
+        # Perform validation at specified intervals
+        if step % validate_every == 0:
             model.eval()
+            validation_loss = 0.0
+            valid_steps = 0
+
+            with torch.no_grad():
+                for vi in range(0, len(valid_dataset), batch_size):
+                    val_batch = valid_dataset[vi:vi + batch_size]
+                    val_input_ids = torch.tensor(val_batch['input_ids']).to(device)
 
-            ids = input_ids[0][:input_len]
-            text = tokenizer.decode(ids)
-            print(f"input: {text}")
+                    # Forward pass
+                    _, _, val_loss = model.forward(val_input_ids, None)
+                    validation_loss += val_loss.item()
+                    valid_steps += 1
 
-            prompt = ids[None, ...]
-            text = generate_text(model, tokenizer, prompt, num_predict)
-            print(f"output: {text}")
+                    # Optionally, limit the number of batches for faster validation
+                    # Uncomment the following lines to validate on only the first 100 batches
+                    # if valid_steps >= 100:
+                    #     break
 
-            model.train()
+            avg_validation_loss = validation_loss / valid_steps if valid_steps > 0 else float('inf')
+            print(f"----- Validation after Step {step}: Average Loss = {avg_validation_loss:.4f} -----")
+            model.train()  # Switch back to training mode
+
+        # Perform inference at specified steps
+        if step % infer_step == 0:
+            with torch.no_grad():
+                # Select a single input from the current batch for inference
+                sample_ids = input_ids[0][:input_len]
+                input_text = tokenizer.decode(sample_ids, skip_special_tokens=True)
+                print(f"Input for Inference: {input_text}")
+
+                prompt = sample_ids.unsqueeze(0)  # Shape: [1, input_len]
+                generated_text = generate_text(model, tokenizer, prompt, num_predict)
+                print(f"Generated Text:\n{generated_text}\n")