# ImageCaptioningProject / text_files / validation_script.py
# Author: Varsha Dewangan
# Initial clean commit for project deployment (commit ee1d4aa)
import re
import pandas as pd
import os
# Training log files to scan for validation metrics.
LOG_FILES = [
    'training (2).txt',
    'training_log_1_18.txt',
    'training_log_17_27.txt',
    'training_log_21_30.txt'
]

# Regex to capture the epoch number from training progress lines,
# e.g. "Epoch [12/30], ...". Group 1 is the current epoch.
EPOCH_PATTERN = re.compile(r"Epoch\s\[(\d+)/\d+],")

# Regex to capture "Validation Avg Loss: <float>, Perplexity: <float>".
VALIDATION_PATTERN = re.compile(
    r"Validation Avg Loss:\s([\d.]+),\sPerplexity:\s([\d.]+)"
)


def parse_validation_logs(log_files):
    """Parse epoch-tagged validation metrics out of training log files.

    Args:
        log_files: iterable of log file paths. Missing files are skipped
            with a warning rather than raising.

    Returns:
        list of dicts, one per matched validation line, each with keys
        'Epoch' (int), 'Validation_Loss' (float), and
        'Validation_Perplexity' (float). Validation lines seen before any
        epoch line are skipped with a warning.
    """
    validation_data = []
    # The current epoch is carried across files on purpose: a log may be
    # split mid-epoch, with the validation line landing in the next file.
    current_epoch = None
    for file_name in log_files:
        if not os.path.exists(file_name):
            print(f"Warning: File not found - {file_name}. Skipping.")
            continue
        print(f"Processing {file_name} for validation metrics...")
        with open(file_name, 'r', encoding='utf-8') as f:  # Use UTF-8 encoding
            for line in f:
                # Check for an epoch line first so a validation line on a
                # later line is attributed to the most recent epoch.
                epoch_match = EPOCH_PATTERN.search(line)
                if epoch_match:
                    current_epoch = int(epoch_match.group(1))
                # Check for a validation metrics line.
                validation_match = VALIDATION_PATTERN.search(line)
                if validation_match:
                    # Only record metrics that have an associated epoch.
                    if current_epoch is not None:
                        validation_data.append({
                            'Epoch': current_epoch,
                            'Validation_Loss': float(validation_match.group(1)),
                            'Validation_Perplexity': float(validation_match.group(2)),
                        })
                    else:
                        print(f"Warning: Found validation metrics without a preceding epoch in {file_name}. Skipping this entry.")
    return validation_data


def main():
    """Parse all configured logs and write validation_metrics.csv."""
    print("Starting validation metrics parsing...")
    validation_data = parse_validation_logs(LOG_FILES)

    # Guard the empty case: pd.DataFrame([]) has no columns, so
    # drop_duplicates(subset=['Epoch']) would raise KeyError: 'Epoch'.
    if not validation_data:
        print("No validation metrics were found in the configured log files; nothing to save.")
        return

    df_validation = pd.DataFrame(validation_data)
    # In case multiple validation metrics are logged per epoch (e.g., if
    # re-running part of a log), keep the last entry for that epoch.
    df_validation_unique = df_validation.drop_duplicates(subset=['Epoch'], keep='last')
    # Sort the data by Epoch for a clean, monotonic CSV.
    df_validation_sorted = df_validation_unique.sort_values(by=['Epoch']).reset_index(drop=True)

    output_csv_file = 'validation_metrics.csv'
    df_validation_sorted.to_csv(output_csv_file, index=False)
    print(f"\nSuccessfully parsed validation metrics and saved data to {output_csv_file}")
    print("You can now import this CSV file into Power BI to create your visualizations.")
    print("\nFirst few rows of the generated CSV:")
    print(df_validation_sorted.head())


if __name__ == "__main__":
    main()