tinycrops committed on
Commit
797f8cc
·
verified ·
1 Parent(s): 5c3a0c9

Upload 4 files

Browse files
Files changed (4) hide show
  1. data_utils.py +70 -0
  2. experiment.py +267 -0
  3. main.py +30 -0
  4. models.py +32 -0
data_utils.py ADDED
@@ -0,0 +1,70 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import pandas as pd
3
+ import numpy as np
4
+ from sklearn.model_selection import train_test_split
5
+ from datasets import load_dataset
6
+ from transformers import BertTokenizer
7
+
8
def load_chatbot_data(file_path, sample_frac=0.05):
    """Load the chatbot-arena CSV, subsample it, and split into train/validation.

    Args:
        file_path: path to the competition ``train.csv``.
        sample_frac: fraction of rows to keep (seeded for reproducibility).

    Returns:
        ``(train_df, val_df, train_labels, val_labels)`` where each label is
        the argmax over the winner columns: 0 = model A, 1 = model B, 2 = tie.
    """
    frame = pd.read_csv(file_path)
    frame = frame.sample(frac=sample_frac, random_state=42)
    winner_columns = ['winner_model_a', 'winner_model_b', 'winner_tie']
    label_array = np.argmax(frame[winner_columns].values, axis=1)
    return train_test_split(frame, label_array, test_size=0.2, random_state=42)
12
+
13
def load_personas():
    """Download every PersonaHub subset and flatten it into one list.

    Returns:
        A list of ``(input_persona, synthesized_text)`` tuples drawn from all
        six PersonaHub configurations.
    """
    subset_names = ["instruction", "npc", "math", "tool", "reasoning", "knowledge"]
    pairs = []
    for subset in subset_names:
        rows = load_dataset("proj-persona/PersonaHub", subset, split="train")
        for row in rows:
            pairs.append((row['input persona'], row['synthesized text']))
    return pairs
20
+
21
class ChatbotDataset(torch.utils.data.Dataset):
    """Dataset of (prompt, response_a, response_b) triples with a preference label.

    Each item tokenizes the three texts independently with identical settings
    and returns a dict of encoded tensor dicts plus the label
    (0 = model A wins, 1 = model B wins, 2 = tie).
    """

    def __init__(self, prompts, responses_a, responses_b, labels, tokenizer, max_length=128):
        self.tokenizer = tokenizer
        self.max_length = max_length
        self.prompts = prompts
        self.responses_a = responses_a
        self.responses_b = responses_b
        self.labels = labels

    def __len__(self):
        return len(self.labels)

    def _encode(self, text):
        # Single place for the tokenization settings; the original repeated
        # this identical encode_plus call three times in __getitem__.
        return self.tokenizer.encode_plus(
            text,
            add_special_tokens=True,
            max_length=self.max_length,
            padding='max_length',
            truncation=True,
            return_tensors='pt'
        )

    def __getitem__(self, idx):
        """Return the three encoded texts and the label tensor for row idx."""
        return {
            'prompt': self._encode(self.prompts[idx]),
            'response_a': self._encode(self.responses_a[idx]),
            'response_b': self._encode(self.responses_b[idx]),
            'label': torch.tensor(self.labels[idx], dtype=torch.long)
        }
experiment.py ADDED
@@ -0,0 +1,267 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ from torch.utils.data import DataLoader
3
+ from transformers import AdamW, get_linear_schedule_with_warmup
4
+ from tqdm import tqdm
5
+ import time
6
+ import json
7
+ import os
8
+ from models import PersonaAssigner, PreferencePredictor, BERTEncoder
9
+ from data_utils import load_chatbot_data, load_personas, ChatbotDataset
10
+ from torch.cuda.amp import autocast, GradScaler
11
+ import sys
12
+
13
class SimpleDashboard:
    """Console dashboard: running experiment stats plus ASCII history charts."""

    def __init__(self):
        self.stats = {
            'iteration': 0,
            'total_iterations': 500,
            'batch': 0,
            'total_batches': 575,
            'baseline_loss': 0,
            'baseline_accuracy': 0,
            'enhanced_loss': 0,
            'enhanced_accuracy': 0,
            'last_save': '',
        }
        self.history = {
            'baseline_loss': [],
            'baseline_accuracy': [],
            'enhanced_loss': [],
            'enhanced_accuracy': [],
        }

    def update_stats(self, **kwargs):
        """Merge kwargs into stats; append tracked metrics to their history."""
        self.stats.update(kwargs)
        for key in ['baseline_loss', 'baseline_accuracy', 'enhanced_loss', 'enhanced_accuracy']:
            if key in kwargs:
                self.history[key].append(kwargs[key])
                if len(self.history[key]) > 50:  # Keep only last 50 points
                    self.history[key] = self.history[key][-50:]

    def draw_ascii_chart(self, data, title, width=50, height=10):
        """Render the last *width* values of *data* as a bar chart string.

        Returns "" for empty data. Values are scaled between the min and max
        of the series; each column is one data point.
        """
        if not data:
            return ""

        min_val, max_val = min(data), max(data)
        range_val = max_val - min_val if max_val > min_val else 1
        # Guard against height == 1, which would divide by zero below.
        denom = max(height - 1, 1)

        lines = [f"{title} (min: {min_val:.4f}, max: {max_val:.4f})"]
        for i in range(height - 1, -1, -1):
            line = ""
            for val in data[-width:]:
                # '>=' (the original used '>') so the maximum value reaches
                # the top row and the minimum still renders one block
                # instead of disappearing entirely.
                if (val - min_val) / range_val >= i / denom:
                    line += "█"
                else:
                    line += " "
            lines.append(line)

        return "\n".join(lines)

    def draw(self):
        """Clear the terminal and print current stats plus all four charts."""
        os.system('cls' if os.name == 'nt' else 'clear')
        print("Experiment Progress:")
        print(f"Iteration: {self.stats['iteration']}/{self.stats['total_iterations']}")
        print(f"Batch: {self.stats['batch']}/{self.stats['total_batches']}")
        print(f"Baseline Loss: {self.stats['baseline_loss']:.4f}")
        print(f"Baseline Accuracy: {self.stats['baseline_accuracy']:.2%}")
        print(f"Enhanced Loss: {self.stats['enhanced_loss']:.4f}")
        print(f"Enhanced Accuracy: {self.stats['enhanced_accuracy']:.2%}")
        print(f"Last Save: {self.stats['last_save']}")
        print("\n" + self.draw_ascii_chart(self.history['baseline_loss'], "Baseline Loss"))
        print("\n" + self.draw_ascii_chart(self.history['enhanced_loss'], "Enhanced Loss"))
        print("\n" + self.draw_ascii_chart(self.history['baseline_accuracy'], "Baseline Accuracy"))
        print("\n" + self.draw_ascii_chart(self.history['enhanced_accuracy'], "Enhanced Accuracy"))
74
+
75
class ComparativeExperiment:
    """Trains two preference predictors side by side on identical batches:

    * a *baseline* predictor fed raw BERT embeddings of prompt/response_a/
      response_b, and
    * an *enhanced* predictor whose inputs are augmented with persona
      vectors from PersonaAssigner.

    Uses mixed precision (autocast + GradScaler), gradient accumulation,
    and checkpoint/resume via save_state/load_state.
    """

    def __init__(self, batch_size=4, accumulation_steps=8, max_length=64):
        print("Initializing ComparativeExperiment...")
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        print(f"Using device: {self.device}")
        self.batch_size = batch_size
        self.accumulation_steps = accumulation_steps
        self.max_length = max_length
        print("Initializing models...")
        self.bert_encoder = BERTEncoder().to(self.device)
        self.persona_assigner = PersonaAssigner(768, 256, 768).to(self.device)
        self.enhanced_predictor = PreferencePredictor(768 * 3).to(self.device)
        self.baseline_predictor = PreferencePredictor(768 * 3).to(self.device)
        print("Initializing optimizer...")
        # NOTE(review): bert_encoder's parameters are excluded from the
        # optimizer, so BERT acts as a frozen feature extractor — confirm
        # this is intentional.
        self.optimizer = AdamW([
            {'params': self.persona_assigner.parameters()},
            {'params': self.enhanced_predictor.parameters()},
            {'params': self.baseline_predictor.parameters()}
        ], lr=1e-4)
        self.criterion = torch.nn.CrossEntropyLoss()
        self.scaler = GradScaler()
        self.current_iteration = 0
        self.current_step = 0
        self.training_log = []
        self.dashboard = SimpleDashboard()
        print("ComparativeExperiment initialized.")

    def _move_to_device(self, obj):
        """Recursively move tensors inside (possibly nested) dicts to self.device.

        The collated batch maps 'prompt'/'response_a'/'response_b' to *dicts*
        of tensors; the original code called ``.to(device)`` on those dicts
        directly, which raises AttributeError. Non-tensor, non-dict values
        are returned unchanged.
        """
        if torch.is_tensor(obj):
            return obj.to(self.device)
        if isinstance(obj, dict):
            return {key: self._move_to_device(value) for key, value in obj.items()}
        return obj

    def train_iteration(self, batch):
        """One forward/backward pass on a batch (gradients accumulate; the
        optimizer step happens in run_experiment).

        Returns:
            (baseline_loss, baseline_accuracy, enhanced_loss, enhanced_accuracy)
            as Python floats.
        """
        self.persona_assigner.train()
        self.enhanced_predictor.train()
        self.baseline_predictor.train()

        with autocast():
            # squeeze(1) drops the length-1 dim added when the dataset's
            # per-item (1, L) encodings are stacked by the DataLoader.
            prompt_embeds = self.bert_encoder(
                input_ids=batch['prompt']['input_ids'].squeeze(1),
                attention_mask=batch['prompt']['attention_mask'].squeeze(1)
            )
            response_a_embeds = self.bert_encoder(
                input_ids=batch['response_a']['input_ids'].squeeze(1),
                attention_mask=batch['response_a']['attention_mask'].squeeze(1)
            )
            response_b_embeds = self.bert_encoder(
                input_ids=batch['response_b']['input_ids'].squeeze(1),
                attention_mask=batch['response_b']['attention_mask'].squeeze(1)
            )
            labels = batch['label'].to(self.device)

            # Baseline: raw embeddings only.
            baseline_inputs = torch.cat([prompt_embeds, response_a_embeds, response_b_embeds], dim=1)
            baseline_outputs = self.baseline_predictor(baseline_inputs)
            baseline_loss = self.criterion(baseline_outputs, labels)
            baseline_accuracy = (baseline_outputs.argmax(dim=1) == labels).float().mean().item()

            # Enhanced: embeddings augmented with persona vectors. detach()
            # keeps persona gradients from flowing back into the encoder.
            prompt_personas = self.persona_assigner(prompt_embeds.detach())
            response_a_personas = self.persona_assigner(response_a_embeds.detach())
            response_b_personas = self.persona_assigner(response_b_embeds.detach())

            combined_prompt = prompt_embeds + prompt_personas
            combined_response_a = response_a_embeds + response_a_personas
            combined_response_b = response_b_embeds + response_b_personas

            enhanced_inputs = torch.cat([combined_prompt, combined_response_a, combined_response_b], dim=1)
            enhanced_outputs = self.enhanced_predictor(enhanced_inputs)
            enhanced_loss = self.criterion(enhanced_outputs, labels)
            enhanced_accuracy = (enhanced_outputs.argmax(dim=1) == labels).float().mean().item()

            total_loss = baseline_loss + enhanced_loss

        # NOTE(review): the loss is not divided by accumulation_steps, so the
        # effective learning rate scales with the accumulation window —
        # confirm this is intended.
        scaled_loss = self.scaler.scale(total_loss)
        scaled_loss.backward()

        return baseline_loss.item(), baseline_accuracy, enhanced_loss.item(), enhanced_accuracy

    def save_state(self, filename='experiment_state.pth'):
        """Checkpoint all model/optimizer/scaler state plus the training log."""
        print(f"Saving state to {filename}...")
        torch.save({
            'current_iteration': self.current_iteration,
            'current_step': self.current_step,
            'bert_encoder': self.bert_encoder.state_dict(),
            'persona_assigner': self.persona_assigner.state_dict(),
            'enhanced_predictor': self.enhanced_predictor.state_dict(),
            'baseline_predictor': self.baseline_predictor.state_dict(),
            'optimizer': self.optimizer.state_dict(),
            'scaler': self.scaler.state_dict(),
        }, filename)
        with open('training_log.json', 'w') as f:
            json.dump(self.training_log, f)
        self.dashboard.update_stats(last_save=time.strftime("%Y-%m-%d %H:%M:%S"))
        print("State saved successfully.")

    def load_state(self, filename='experiment_state.pth'):
        """Restore a checkpoint saved by save_state.

        Returns:
            True if the checkpoint existed and was loaded, else False.
        """
        if os.path.exists(filename):
            print(f"Loading state from {filename}...")
            state = torch.load(filename)
            self.current_iteration = state['current_iteration']
            self.current_step = state['current_step']
            print(f"Loaded iteration: {self.current_iteration}, step: {self.current_step}")
            self.bert_encoder.load_state_dict(state['bert_encoder'])
            self.persona_assigner.load_state_dict(state['persona_assigner'])
            self.enhanced_predictor.load_state_dict(state['enhanced_predictor'])
            self.baseline_predictor.load_state_dict(state['baseline_predictor'])
            self.optimizer.load_state_dict(state['optimizer'])
            self.scaler.load_state_dict(state['scaler'])
            if os.path.exists('training_log.json'):
                with open('training_log.json', 'r') as f:
                    self.training_log = json.load(f)
                print(f"Loaded training log with {len(self.training_log)} entries.")
            print("State loaded successfully.")
            return True
        else:
            print(f"No saved state found at {filename}.")
            return False

    def run_experiment(self, train_loader, num_iterations=500):
        """Main training loop; checkpoints after every epoch and on exit.

        KeyboardInterrupt and unexpected exceptions both save a resumable
        checkpoint before returning/re-raising.
        """
        print(f"Starting experiment. Total iterations: {num_iterations}")
        self.dashboard.update_stats(
            total_iterations=num_iterations,
            total_batches=len(train_loader)
        )

        try:
            for iteration in range(self.current_iteration, num_iterations):
                print(f"Starting iteration {iteration + 1}/{num_iterations}")
                self.current_iteration = iteration
                for i, batch in enumerate(train_loader):
                    # Recursive move: top-level values include nested dicts
                    # of tensors (see _move_to_device).
                    batch = self._move_to_device(batch)
                    baseline_loss, baseline_accuracy, enhanced_loss, enhanced_accuracy = self.train_iteration(batch)

                    self.current_step += 1
                    if self.current_step % self.accumulation_steps == 0:
                        self.scaler.step(self.optimizer)
                        self.scaler.update()
                        self.optimizer.zero_grad()

                    self.training_log.append({
                        'iteration': iteration + 1,
                        'batch': i + 1,
                        'baseline_loss': baseline_loss,
                        'baseline_accuracy': baseline_accuracy,
                        'enhanced_loss': enhanced_loss,
                        'enhanced_accuracy': enhanced_accuracy,
                        'timestamp': time.strftime("%Y-%m-%d %H:%M:%S")
                    })

                    self.dashboard.update_stats(
                        iteration=iteration + 1,
                        batch=i + 1,
                        baseline_loss=baseline_loss,
                        baseline_accuracy=baseline_accuracy,
                        enhanced_loss=enhanced_loss,
                        enhanced_accuracy=enhanced_accuracy
                    )

                    if (i + 1) % 10 == 0:  # Update every 10 batches
                        self.dashboard.draw()

                print(f"Completed iteration {iteration + 1}/{num_iterations}")
                # Save state after each iteration
                self.save_state()
                self.dashboard.draw()  # Draw dashboard after each iteration

            self.save_state('final_experiment_state.pth')
            print("Experiment completed.")
        except KeyboardInterrupt:
            print("Experiment interrupted. Saving state...")
            self.save_state('interrupted_experiment_state.pth')
            print("State saved. You can resume later by loading 'interrupted_experiment_state.pth'")
        except Exception as e:
            print(f"An error occurred: {str(e)}")
            self.save_state('error_experiment_state.pth')
            print("State saved due to error. You can resume later by loading 'error_experiment_state.pth'")
            raise
246
def setup_experiment(train_data, train_labels, batch_size=4, max_length=64):
    """Build the tokenizer, dataset, DataLoader, and ComparativeExperiment.

    Args:
        train_data: DataFrame with 'prompt', 'response_a', 'response_b' columns.
        train_labels: per-row preference labels aligned with train_data.
        batch_size: DataLoader batch size.
        max_length: tokenizer truncation/padding length.

    Returns:
        ``(experiment, train_loader)`` ready for ``run_experiment``.
    """
    from transformers import BertTokenizer

    print("Setting up experiment...")
    print("Initializing tokenizer...")
    bert_tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')

    print("Creating dataset...")
    dataset = ChatbotDataset(
        train_data['prompt'].tolist(),
        train_data['response_a'].tolist(),
        train_data['response_b'].tolist(),
        train_labels,
        bert_tokenizer,
        max_length=max_length,
    )

    print(f"Creating DataLoader with batch size {batch_size}...")
    loader = DataLoader(dataset, batch_size=batch_size, shuffle=True)

    print("Initializing ComparativeExperiment...")
    comparative_experiment = ComparativeExperiment(batch_size=batch_size, max_length=max_length)
    print("Experiment setup completed.")
    return comparative_experiment, loader
main.py ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from experiment import setup_experiment
2
+ from data_utils import load_chatbot_data, load_personas
3
+
4
def main():
    """Entry point: load data, build the experiment, resume if possible, train."""
    print("Starting main function...")

    # Load and prepare data
    print("Loading chatbot data...")
    csv_path = "/home/ath/AI/lmsys_chatbot_arena_human/kaggle/input/lmsys-chatbot-arena/train.csv"
    train_data, val_data, train_labels, val_labels = load_chatbot_data(csv_path, sample_frac=0.05)
    print(f"Loaded {len(train_data)} training samples and {len(val_data)} validation samples.")

    print("Loading personas...")
    personas = load_personas()
    print(f"Loaded {len(personas)} personas.")

    # Setup experiment
    print("Setting up experiment...")
    experiment, train_loader = setup_experiment(train_data, train_labels, batch_size=4, max_length=64)

    # Try to load previous state
    resumed = experiment.load_state()
    if resumed:
        print("Resuming experiment from saved state.")
    else:
        print("No saved state found. Starting new experiment.")

    # Run experiment
    print("Running experiment...")
    experiment.run_experiment(train_loader, num_iterations=500)
    print("Main function completed.")


if __name__ == "__main__":
    main()
models.py ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import torch.nn as nn
3
+ from transformers import BertModel
4
+
5
class PersonaAssigner(nn.Module):
    """Two-layer MLP mapping an embedding to a persona vector of output_dim.

    Attribute names fc1/fc2 are part of the checkpoint format (state_dict
    keys) and must not change.
    """

    def __init__(self, input_dim, hidden_dim, output_dim):
        super(PersonaAssigner, self).__init__()
        self.fc1 = nn.Linear(input_dim, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, output_dim)

    def forward(self, x):
        hidden = self.fc1(x)
        activated = torch.relu(hidden)
        return self.fc2(activated)
14
+
15
class PreferencePredictor(nn.Module):
    """MLP head producing 3 preference logits (A wins / B wins / tie).

    Attribute names fc1/fc2 are part of the checkpoint format (state_dict
    keys) and must not change.
    """

    def __init__(self, input_dim):
        super(PreferencePredictor, self).__init__()
        self.fc1 = nn.Linear(input_dim, 256)
        self.fc2 = nn.Linear(256, 3)

    def forward(self, x):
        return self.fc2(torch.relu(self.fc1(x)))
24
+
25
class BERTEncoder(nn.Module):
    """Wraps a pretrained BERT and mean-pools the final hidden states."""

    def __init__(self, model_name='bert-base-uncased'):
        super(BERTEncoder, self).__init__()
        self.bert = BertModel.from_pretrained(model_name)

    def forward(self, input_ids, attention_mask, token_type_ids=None):
        result = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
        )
        hidden_states = result.last_hidden_state
        # Mean over the sequence dimension -> one vector per example.
        # NOTE(review): padding positions are included in the mean; the
        # attention mask is not applied to the pooling — confirm intended.
        return hidden_states.mean(dim=1)