# doctorPersona / experiment.py
# Provenance: tinycrops upload ("Upload 4 files", commit 797f8cc, verified).
import torch
from torch.utils.data import DataLoader
from transformers import AdamW, get_linear_schedule_with_warmup
from tqdm import tqdm
import time
import json
import os
from models import PersonaAssigner, PreferencePredictor, BERTEncoder
from data_utils import load_chatbot_data, load_personas, ChatbotDataset
from torch.cuda.amp import autocast, GradScaler
import sys
class SimpleDashboard:
    """Minimal in-terminal dashboard for the experiment loop.

    Keeps a dict of current stats plus bounded per-metric histories, and can
    redraw itself (clear screen + text summary + four ASCII charts).
    """

    # Metrics whose values are also appended to a rolling history.
    _TRACKED = ('baseline_loss', 'baseline_accuracy',
                'enhanced_loss', 'enhanced_accuracy')

    def __init__(self):
        self.stats = {
            'iteration': 0,
            'total_iterations': 500,
            'batch': 0,
            'total_batches': 575,
            'baseline_loss': 0,
            'baseline_accuracy': 0,
            'enhanced_loss': 0,
            'enhanced_accuracy': 0,
            'last_save': '',
        }
        self.history = {key: [] for key in self._TRACKED}

    def update_stats(self, **kwargs):
        """Merge keyword stats into ``self.stats``; tracked metrics are also
        appended to their history, which is capped at the last 50 points."""
        self.stats.update(kwargs)
        for key in self._TRACKED:
            if key in kwargs:
                series = self.history[key]
                series.append(kwargs[key])
                if len(series) > 50:
                    del series[:-50]

    def draw_ascii_chart(self, data, title, width=50, height=10):
        """Render the last ``width`` points of ``data`` as a block chart.

        Returns '' for empty data; otherwise a title line with min/max
        followed by ``height`` rows of '█'/' ' columns (top row first).
        """
        if not data:
            return ""
        lo, hi = min(data), max(data)
        span = hi - lo if hi > lo else 1  # avoid dividing by zero on flat data
        window = data[-width:]
        rows = [f"{title} (min: {lo:.4f}, max: {hi:.4f})"]
        for level in reversed(range(height)):
            threshold = level / (height - 1)
            rows.append("".join(
                "█" if (value - lo) / span > threshold else " "
                for value in window
            ))
        return "\n".join(rows)

    def draw(self):
        """Clear the terminal, then print the stat summary and all charts."""
        os.system('cls' if os.name == 'nt' else 'clear')
        s = self.stats
        print("Experiment Progress:")
        print(f"Iteration: {s['iteration']}/{s['total_iterations']}")
        print(f"Batch: {s['batch']}/{s['total_batches']}")
        print(f"Baseline Loss: {s['baseline_loss']:.4f}")
        print(f"Baseline Accuracy: {s['baseline_accuracy']:.2%}")
        print(f"Enhanced Loss: {s['enhanced_loss']:.4f}")
        print(f"Enhanced Accuracy: {s['enhanced_accuracy']:.2%}")
        print(f"Last Save: {s['last_save']}")
        for key, label in (('baseline_loss', 'Baseline Loss'),
                           ('enhanced_loss', 'Enhanced Loss'),
                           ('baseline_accuracy', 'Baseline Accuracy'),
                           ('enhanced_accuracy', 'Enhanced Accuracy')):
            print("\n" + self.draw_ascii_chart(self.history[key], label))
class ComparativeExperiment:
    """Trains two preference predictors side by side on identical batches:

    * baseline — fed only the BERT embeddings of (prompt, response_a, response_b)
    * enhanced — fed the same embeddings augmented with persona vectors from
      ``PersonaAssigner``

    Training uses mixed precision (autocast + GradScaler) with gradient
    accumulation. ``bert_encoder`` is deliberately NOT in the optimizer, so
    BERT acts as a frozen feature extractor.
    """

    def __init__(self, batch_size=4, accumulation_steps=8, max_length=64):
        print("Initializing ComparativeExperiment...")
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        print(f"Using device: {self.device}")
        self.batch_size = batch_size
        self.accumulation_steps = accumulation_steps
        self.max_length = max_length
        print("Initializing models...")
        # 768 = BERT hidden size; predictors see three concatenated embeddings.
        self.bert_encoder = BERTEncoder().to(self.device)
        self.persona_assigner = PersonaAssigner(768, 256, 768).to(self.device)
        self.enhanced_predictor = PreferencePredictor(768 * 3).to(self.device)
        self.baseline_predictor = PreferencePredictor(768 * 3).to(self.device)
        print("Initializing optimizer...")
        # bert_encoder is intentionally excluded: BERT stays frozen.
        self.optimizer = AdamW([
            {'params': self.persona_assigner.parameters()},
            {'params': self.enhanced_predictor.parameters()},
            {'params': self.baseline_predictor.parameters()}
        ], lr=1e-4)
        self.criterion = torch.nn.CrossEntropyLoss()
        self.scaler = GradScaler()
        self.current_iteration = 0
        self.current_step = 0
        self.training_log = []
        self.dashboard = SimpleDashboard()
        print("ComparativeExperiment initialized.")

    def _move_to_device(self, value):
        """Recursively move a tensor — or a plain dict of tensors, as the
        default DataLoader collate produces from tokenizer output — to
        ``self.device``. Objects exposing ``.to`` (Tensor, BatchEncoding)
        are moved directly."""
        if hasattr(value, 'to'):
            return value.to(self.device)
        return {k: self._move_to_device(v) for k, v in value.items()}

    def train_iteration(self, batch):
        """Run one forward/backward pass for both predictors on ``batch``.

        Only accumulates gradients; the optimizer step happens in
        ``run_experiment`` every ``accumulation_steps`` batches.

        Args:
            batch: dict with 'prompt'/'response_a'/'response_b' tokenizer
                outputs (already on device) and a 'label' tensor.
        Returns:
            (baseline_loss, baseline_accuracy, enhanced_loss, enhanced_accuracy)
            as Python floats.
        """
        self.persona_assigner.train()
        self.enhanced_predictor.train()
        self.baseline_predictor.train()
        with autocast():
            # BERT is frozen (not in the optimizer), so skip building its
            # autograd graph entirely — saves memory and compute.
            with torch.no_grad():
                prompt_embeds = self.bert_encoder(
                    input_ids=batch['prompt']['input_ids'].squeeze(1),
                    attention_mask=batch['prompt']['attention_mask'].squeeze(1)
                )
                response_a_embeds = self.bert_encoder(
                    input_ids=batch['response_a']['input_ids'].squeeze(1),
                    attention_mask=batch['response_a']['attention_mask'].squeeze(1)
                )
                response_b_embeds = self.bert_encoder(
                    input_ids=batch['response_b']['input_ids'].squeeze(1),
                    attention_mask=batch['response_b']['attention_mask'].squeeze(1)
                )
            labels = batch['label'].to(self.device)
            # Baseline: raw embeddings only.
            baseline_inputs = torch.cat([prompt_embeds, response_a_embeds, response_b_embeds], dim=1)
            baseline_outputs = self.baseline_predictor(baseline_inputs)
            baseline_loss = self.criterion(baseline_outputs, labels)
            baseline_accuracy = (baseline_outputs.argmax(dim=1) == labels).float().mean().item()
            # Enhanced: embeddings plus persona vectors (detached so persona
            # gradients do not flow back into the embeddings).
            prompt_personas = self.persona_assigner(prompt_embeds.detach())
            response_a_personas = self.persona_assigner(response_a_embeds.detach())
            response_b_personas = self.persona_assigner(response_b_embeds.detach())
            combined_prompt = prompt_embeds + prompt_personas
            combined_response_a = response_a_embeds + response_a_personas
            combined_response_b = response_b_embeds + response_b_personas
            enhanced_inputs = torch.cat([combined_prompt, combined_response_a, combined_response_b], dim=1)
            enhanced_outputs = self.enhanced_predictor(enhanced_inputs)
            enhanced_loss = self.criterion(enhanced_outputs, labels)
            enhanced_accuracy = (enhanced_outputs.argmax(dim=1) == labels).float().mean().item()
            total_loss = baseline_loss + enhanced_loss
        # Normalize by accumulation_steps so the accumulated gradient matches a
        # full-batch gradient and the effective LR does not scale with the
        # accumulation count. Reported losses stay un-normalized.
        self.scaler.scale(total_loss / self.accumulation_steps).backward()
        return baseline_loss.item(), baseline_accuracy, enhanced_loss.item(), enhanced_accuracy

    def save_state(self, filename='experiment_state.pth'):
        """Checkpoint all model/optimizer/scaler state plus progress counters
        to ``filename``, and dump the training log to training_log.json."""
        print(f"Saving state to {filename}...")
        torch.save({
            'current_iteration': self.current_iteration,
            'current_step': self.current_step,
            'bert_encoder': self.bert_encoder.state_dict(),
            'persona_assigner': self.persona_assigner.state_dict(),
            'enhanced_predictor': self.enhanced_predictor.state_dict(),
            'baseline_predictor': self.baseline_predictor.state_dict(),
            'optimizer': self.optimizer.state_dict(),
            'scaler': self.scaler.state_dict(),
        }, filename)
        with open('training_log.json', 'w') as f:
            json.dump(self.training_log, f)
        self.dashboard.update_stats(last_save=time.strftime("%Y-%m-%d %H:%M:%S"))
        print("State saved successfully.")

    def load_state(self, filename='experiment_state.pth'):
        """Restore a checkpoint written by ``save_state``.

        Returns:
            True if ``filename`` existed and was loaded, False otherwise.
        """
        if os.path.exists(filename):
            print(f"Loading state from {filename}...")
            state = torch.load(filename)
            self.current_iteration = state['current_iteration']
            self.current_step = state['current_step']
            print(f"Loaded iteration: {self.current_iteration}, step: {self.current_step}")
            self.bert_encoder.load_state_dict(state['bert_encoder'])
            self.persona_assigner.load_state_dict(state['persona_assigner'])
            self.enhanced_predictor.load_state_dict(state['enhanced_predictor'])
            self.baseline_predictor.load_state_dict(state['baseline_predictor'])
            self.optimizer.load_state_dict(state['optimizer'])
            self.scaler.load_state_dict(state['scaler'])
            if os.path.exists('training_log.json'):
                with open('training_log.json', 'r') as f:
                    self.training_log = json.load(f)
                print(f"Loaded training log with {len(self.training_log)} entries.")
            print("State loaded successfully.")
            return True
        else:
            print(f"No saved state found at {filename}.")
            return False

    def run_experiment(self, train_loader, num_iterations=500):
        """Main training loop: iterate ``num_iterations`` epochs over
        ``train_loader``, stepping the optimizer every ``accumulation_steps``
        batches, logging per batch, and checkpointing per epoch. Saves a
        recovery checkpoint on Ctrl-C or any other exception."""
        print(f"Starting experiment. Total iterations: {num_iterations}")
        self.dashboard.update_stats(
            total_iterations=num_iterations,
            total_batches=len(train_loader)
        )
        try:
            for iteration in range(self.current_iteration, num_iterations):
                print(f"Starting iteration {iteration + 1}/{num_iterations}")
                self.current_iteration = iteration
                for i, batch in enumerate(train_loader):
                    # Values may be nested dicts of tensors; move recursively.
                    batch = {k: self._move_to_device(v) for k, v in batch.items()}
                    baseline_loss, baseline_accuracy, enhanced_loss, enhanced_accuracy = self.train_iteration(batch)
                    self.current_step += 1
                    if self.current_step % self.accumulation_steps == 0:
                        self.scaler.step(self.optimizer)
                        self.scaler.update()
                        self.optimizer.zero_grad()
                    self.training_log.append({
                        'iteration': iteration + 1,
                        'batch': i + 1,
                        'baseline_loss': baseline_loss,
                        'baseline_accuracy': baseline_accuracy,
                        'enhanced_loss': enhanced_loss,
                        'enhanced_accuracy': enhanced_accuracy,
                        'timestamp': time.strftime("%Y-%m-%d %H:%M:%S")
                    })
                    self.dashboard.update_stats(
                        iteration=iteration + 1,
                        batch=i + 1,
                        baseline_loss=baseline_loss,
                        baseline_accuracy=baseline_accuracy,
                        enhanced_loss=enhanced_loss,
                        enhanced_accuracy=enhanced_accuracy
                    )
                    if (i + 1) % 10 == 0:  # Update every 10 batches
                        self.dashboard.draw()
                print(f"Completed iteration {iteration + 1}/{num_iterations}")
                # Save state after each iteration
                self.save_state()
                self.dashboard.draw()  # Draw dashboard after each iteration
            self.save_state('final_experiment_state.pth')
            print("Experiment completed.")
        except KeyboardInterrupt:
            print("Experiment interrupted. Saving state...")
            self.save_state('interrupted_experiment_state.pth')
            print("State saved. You can resume later by loading 'interrupted_experiment_state.pth'")
        except Exception as e:
            print(f"An error occurred: {str(e)}")
            self.save_state('error_experiment_state.pth')
            print("State saved due to error. You can resume later by loading 'error_experiment_state.pth'")
            raise
def setup_experiment(train_data, train_labels, batch_size=4, max_length=64):
    """Assemble tokenizer, dataset, loader, and experiment object.

    Args:
        train_data: table-like object whose 'prompt', 'response_a' and
            'response_b' columns expose ``.tolist()`` (e.g. a DataFrame).
        train_labels: preference labels aligned with the rows of train_data.
        batch_size: DataLoader batch size.
        max_length: tokenizer truncation length.
    Returns:
        (ComparativeExperiment, DataLoader) ready for ``run_experiment``.
    """
    from transformers import BertTokenizer

    print("Setting up experiment...")
    print("Initializing tokenizer...")
    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')

    print("Creating dataset...")
    columns = [train_data[name].tolist()
               for name in ('prompt', 'response_a', 'response_b')]
    dataset = ChatbotDataset(*columns, train_labels, tokenizer,
                             max_length=max_length)

    print(f"Creating DataLoader with batch size {batch_size}...")
    loader = DataLoader(dataset, batch_size=batch_size, shuffle=True)

    print("Initializing ComparativeExperiment...")
    experiment = ComparativeExperiment(batch_size=batch_size,
                                       max_length=max_length)
    print("Experiment setup completed.")
    return experiment, loader