#!/bin/bash
# Iterative ESM alignment pipeline: sample -> build alignment dataset -> ERA
# fine-tune, repeated over four rounds for a single protein target.
# NOTE(review): the shebang was previously preceded by blank lines, so it was
# ignored by the kernel; it must be the very first line of the file.
set -euo pipefail   # fail fast on errors, unset variables, and pipeline failures
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Activate the project virtual environment. Abort if activation fails so we do
# not silently continue with the system Python.
source /home/groups/nsgray01/MoLE/.venv/bin/activate || {
    echo "ERROR: could not activate virtualenv" >&2
    exit 1
}

# Protein target to optimize.
target="GB1"

# Pretrained base checkpoint used to initialize round-0 fine-tuning.
base_model_path="/scratch/groups/rotskoff/sebastian/era/protein_era/111225BenchmarkWithMegascalePretrain/megascale_ckpts/step_step_100000.ckpt"

# Alignment round counter; incremented between the round sections below.
round=0
|
|
|
|
|
|
|
|
# Round 0: draw the initial candidate sequences from the base ESM model,
# one batch of 96 samples per replicate.
for replicate in 0 1; do
    echo "Running iteration $replicate for target $target"
    python sample_esm_first_round.py \
        --target "$target" \
        --num_samples 96 \
        --replicate "$replicate"
done
|
|
|
|
|
|
|
|
# Build the round-0 alignment (preference) dataset from the sampled sequences.
echo "Creating alignment dataset for target $target"
python create_alignment_dataset_first_round.py --target "$target"
|
|
|
|
|
|
|
|
# Enter the per-target working directory; abort rather than train in the wrong
# location if it does not exist.
cd "$target" || exit 1
|
|
|
|
|
# Round 0 training: ERA fine-tune the pretrained base checkpoint on each
# replicate's alignment dataset. All Hydra-style overrides are collected in an
# array so the invocation is readable and safely word-split.
for rep in 0 1; do
    echo "Training model for target $target, round $round, replicate $rep"
    overrides=(
        "train.lightning_model_args.eval_type=era"
        "train.lightning_model_args.beta=10.0"
        "train.lightning_model_args.gamma=0"
        "train.trainer_args.devices=4"
        "train.trainer_args.max_epochs=2"
        "train.trainer_args.log_every_n_steps=1"
        "train.trainer_args.enable_progress_bar=True"
        "train.logger.logger_args.version=${rep}"
        "train.lightning_model_args.interval=epoch"
        "train.lightning_model_args.monitor=train/ERALoss"
        "train.best_checkpoint_args.monitor=train/ERALoss"
        "train.lightning_model_args.lr_scheduler=ReduceLROnPlateau"
        "++train.lightning_model_args.lr_scheduler_args.patience=5"
        "train.lightning_model_args.optimizer=AdamW"
        "train.lightning_model_args.optimizer_args.lr=1.0e-6"
        "++train.lightning_model_args.optimizer_args.betas=[0.9,0.99]"
        "++train.lightning_model_args.optimizer_args.weight_decay=0.01"
        "train.lightning_model_args.on_step=false"
        "global_args.dataset_filename=alignment_dataset_${round}_96_from_ESM3_${rep}.hdf5"
        "nn.batch_size=4"
        "nn.load_model=${base_model_path}"
        "nn.dataset_split_args.train=1.0"
        "nn.dataset_split_args.val=0.0"
        "nn.dataset_split_args.test=0.0"
        "++nn.model_args.unified_transformer_args.ida_layer_indices=[]"
    )
    pera_train "${overrides[@]}"
done
|
|
|
|
|
|
|
|
# Archive this round's Lightning logs under a round-specific name, then return
# to the pipeline root directory.
mv lightning_logs "lightning_logs_round_${round}"
cd ..
|
|
|
|
|
|
|
|
# Collect the version directories produced by round-0 training.
# NOTE(review): previously this parsed `ls` output (fragile, hides errors);
# a glob already expands in sorted order and handles odd names safely.
BASE_DIR="./${target}/lightning_logs_round_${round}"
subdirs=("$BASE_DIR"/*/)
|
|
|
|
|
# Sample from each round-0 fine-tuned model, then recompute log-probabilities
# of the samples under the updated policy. Iterating over the real array
# indices (instead of a hard-coded range) keeps this loop in sync with however
# many replicates were actually trained above.
for i in "${!subdirs[@]}"; do
    version_number=$(basename "${subdirs[$i]}")
    echo "$version_number"

    python sample_esm.py \
        --target "$target" \
        --num_samples 96 \
        --alignment_round "$round" \
        --version_number "$version_number" \
        --replicate "$i"

    python compute_updated_logps.py \
        --target "$target" \
        --num_samples 96 \
        --alignment_round "$round" \
        --version_number "$version_number" \
        --replicate "$i"
done
|
|
|
|
|
|
|
|
# Round 1: build the next alignment dataset from the round-0 samples.
round=1
echo "Creating alignment dataset for target $target, round $round"
python make_alignment_dataset_second_round.py --target "$target"
|
|
|
|
|
|
|
|
# Re-enter the per-target working directory; abort on failure.
cd "$target" || exit 1
|
|
# Round 1 training: continue ERA fine-tuning, each replicate starting from the
# best checkpoint of the corresponding round-0 version directory.
for i in 0 1; do
    version_number=$(basename "${subdirs[$i]}")
    echo "Training model for target $target, round $round, replicate $i, version_number $version_number"
    overrides=(
        "train.lightning_model_args.eval_type=era"
        "train.lightning_model_args.beta=10.0"
        "train.lightning_model_args.gamma=0"
        "train.trainer_args.devices=4"
        "train.trainer_args.max_epochs=25"
        "train.trainer_args.log_every_n_steps=1"
        "train.trainer_args.enable_progress_bar=True"
        "train.logger.logger_args.version=${i}"
        "train.lightning_model_args.interval=epoch"
        "train.lightning_model_args.monitor=train/ERALoss"
        "train.best_checkpoint_args.monitor=train/ERALoss"
        "train.lightning_model_args.lr_scheduler=ReduceLROnPlateau"
        "++train.lightning_model_args.lr_scheduler_args.patience=5"
        "train.lightning_model_args.optimizer=AdamW"
        "train.lightning_model_args.optimizer_args.lr=1.0e-6"
        "++train.lightning_model_args.optimizer_args.betas=[0.9,0.99]"
        "++train.lightning_model_args.optimizer_args.weight_decay=0.01"
        "train.lightning_model_args.on_step=false"
        "global_args.dataset_filename=alignment_dataset_${round}_96_from_ESM3_${i}.hdf5"
        "nn.batch_size=4"
        "nn.load_model=lightning_logs_round_0/${version_number}/checkpoints/best_model.ckpt"
        "nn.dataset_split_args.train=1.0"
        "nn.dataset_split_args.val=0.0"
        "nn.dataset_split_args.test=0.0"
        "++nn.model_args.unified_transformer_args.ida_layer_indices=[]"
    )
    pera_train "${overrides[@]}"
done
|
|
|
|
|
|
|
|
# Archive round-1 logs under a round-specific name and return to the root.
mv lightning_logs "lightning_logs_round_${round}"
cd ..
|
|
|
|
|
|
|
|
# Collect the version directories produced by round-1 training.
# NOTE(review): glob replaces the previous `ls | sort` parse (SC2012); glob
# expansion is already sorted.
BASE_DIR="./${target}/lightning_logs_round_${round}"
subdirs=("$BASE_DIR"/*/)
|
|
|
|
|
# Sample from each round-1 model and recompute log-probabilities.
# NOTE(review): this loop previously ranged over {0..9} while only the
# replicates trained above exist; iterating the actual array indices avoids
# reading past the end of ${subdirs[@]} (which produced empty version numbers).
for i in "${!subdirs[@]}"; do
    version_number=$(basename "${subdirs[$i]}")

    python sample_esm.py \
        --target "$target" \
        --num_samples 96 \
        --alignment_round "$round" \
        --version_number "$version_number" \
        --replicate "$i"

    python compute_updated_logps.py \
        --target "$target" \
        --num_samples 96 \
        --alignment_round "$round" \
        --version_number "$version_number" \
        --replicate "$i"
done
|
|
|
|
|
|
|
|
# Round 2: build the next alignment dataset from the round-1 samples.
round=2
echo "Creating alignment dataset for target $target, round $round"
python make_alignment_dataset_third_round.py --target "$target"
|
|
|
|
|
|
|
|
# Re-enter the per-target working directory; abort on failure.
cd "$target" || exit 1
|
|
# Round 2 training: continue ERA fine-tuning from each round-1 checkpoint.
# FIXES(review):
#  * nn.load_model previously pointed at "${version_number}/checkpoints/..."
#    relative to the target directory, but round-1 checkpoints live under
#    lightning_logs_round_1/ (see the round-1 loop, which uses the
#    lightning_logs_round_0/ prefix) — the old path could not resolve.
#  * The loop ranged over {0..9} while subdirs only holds the versions that
#    were actually trained; iterate the real indices instead.
for i in "${!subdirs[@]}"; do
    version_number=$(basename "${subdirs[$i]}")
    echo "Training model for target $target, round $round, replicate $i"
    pera_train \
        "train.lightning_model_args.eval_type=era" \
        "train.lightning_model_args.beta=10.0" \
        "train.lightning_model_args.gamma=0" \
        "train.trainer_args.devices=4" \
        "train.trainer_args.max_epochs=25" \
        "train.trainer_args.log_every_n_steps=1" \
        "train.trainer_args.enable_progress_bar=True" \
        "train.logger.logger_args.version=${i}" \
        "train.lightning_model_args.interval=epoch" \
        "train.lightning_model_args.monitor=train/ERALoss" \
        "train.best_checkpoint_args.monitor=train/ERALoss" \
        "train.lightning_model_args.lr_scheduler=ReduceLROnPlateau" \
        "++train.lightning_model_args.lr_scheduler_args.patience=5" \
        "train.lightning_model_args.optimizer=AdamW" \
        "train.lightning_model_args.optimizer_args.lr=1.0e-6" \
        "++train.lightning_model_args.optimizer_args.betas=[0.9,0.99]" \
        "++train.lightning_model_args.optimizer_args.weight_decay=0.01" \
        "train.lightning_model_args.on_step=false" \
        "global_args.dataset_filename=alignment_dataset_${round}_96_from_ESM3_${i}.hdf5" \
        "nn.batch_size=4" \
        "nn.load_model=lightning_logs_round_1/${version_number}/checkpoints/best_model.ckpt" \
        "nn.dataset_split_args.train=1.0" \
        "nn.dataset_split_args.val=0.0" \
        "nn.dataset_split_args.test=0.0" \
        "++nn.model_args.unified_transformer_args.ida_layer_indices=[]"
done
|
|
|
|
|
|
|
|
# Archive round-2 logs under a round-specific name and return to the root.
mv lightning_logs "lightning_logs_round_${round}"
cd ..
|
|
|
|
|
# Collect the version directories produced by round-2 training.
# NOTE(review): glob replaces the previous `ls | sort` parse (SC2012); glob
# expansion is already sorted.
BASE_DIR="./${target}/lightning_logs_round_${round}"
subdirs=("$BASE_DIR"/*/)
|
|
# Sample from each round-2 model and recompute log-probabilities.
# NOTE(review): iterate the real array indices instead of a hard-coded {0..9}
# so the loop matches the number of trained replicates.
for i in "${!subdirs[@]}"; do
    version_number=$(basename "${subdirs[$i]}")

    python sample_esm.py \
        --target "$target" \
        --num_samples 96 \
        --alignment_round "$round" \
        --version_number "$version_number" \
        --replicate "$i"

    python compute_updated_logps.py \
        --target "$target" \
        --num_samples 96 \
        --alignment_round "$round" \
        --version_number "$version_number" \
        --replicate "$i"
done
|
|
|
|
|
|
|
|
# Round 3: build the final alignment dataset from the round-2 samples.
round=3
echo "Creating alignment dataset for target $target, round $round"
python make_alignment_dataset_fourth_round.py --target "$target" --alignment_round "$round"
|
|
|
|
|
# Re-enter the per-target working directory; abort on failure.
cd "$target" || exit 1
|
|
# Round 3 training: final ERA fine-tuning round from each round-2 checkpoint.
# FIXES(review):
#  * nn.load_model previously omitted the lightning_logs_round_2/ prefix, so
#    the checkpoint path could not resolve from the target directory (compare
#    the round-1 loop, which correctly prefixes lightning_logs_round_0/).
#  * Loop over the actual subdirs indices instead of a hard-coded {0..9}.
for i in "${!subdirs[@]}"; do
    version_number=$(basename "${subdirs[$i]}")
    echo "Training model for target $target, round $round, replicate $i"
    pera_train \
        "train.lightning_model_args.eval_type=era" \
        "train.lightning_model_args.beta=10.0" \
        "train.lightning_model_args.gamma=0" \
        "train.trainer_args.devices=4" \
        "train.trainer_args.max_epochs=25" \
        "train.trainer_args.log_every_n_steps=1" \
        "train.trainer_args.enable_progress_bar=True" \
        "train.logger.logger_args.version=${i}" \
        "train.lightning_model_args.interval=epoch" \
        "train.lightning_model_args.monitor=train/ERALoss" \
        "train.best_checkpoint_args.monitor=train/ERALoss" \
        "train.lightning_model_args.lr_scheduler=ReduceLROnPlateau" \
        "++train.lightning_model_args.lr_scheduler_args.patience=5" \
        "train.lightning_model_args.optimizer=AdamW" \
        "train.lightning_model_args.optimizer_args.lr=1.0e-6" \
        "++train.lightning_model_args.optimizer_args.betas=[0.9,0.99]" \
        "++train.lightning_model_args.optimizer_args.weight_decay=0.01" \
        "train.lightning_model_args.on_step=false" \
        "global_args.dataset_filename=alignment_dataset_${round}_96_from_ESM3_${i}.hdf5" \
        "nn.batch_size=4" \
        "nn.load_model=lightning_logs_round_2/${version_number}/checkpoints/best_model.ckpt" \
        "nn.dataset_split_args.train=1.0" \
        "nn.dataset_split_args.val=0.0" \
        "nn.dataset_split_args.test=0.0" \
        "++nn.model_args.unified_transformer_args.ida_layer_indices=[]"
done
|
|
|
|
|
|
|
|
# Archive round-3 logs under a round-specific name and return to the root.
mv lightning_logs "lightning_logs_round_${round}"
cd ..
|
|
|
|
|
# Collect the version directories produced by round-3 training.
# NOTE(review): glob replaces the previous `ls | sort` parse (SC2012); glob
# expansion is already sorted.
BASE_DIR="./${target}/lightning_logs_round_${round}"
subdirs=("$BASE_DIR"/*/)
|
|
# Final sampling pass from each round-3 model (no log-prob update needed, as
# this is the last round).
# NOTE(review): iterate the real array indices instead of a hard-coded {0..9}
# so the loop matches the number of trained replicates.
for i in "${!subdirs[@]}"; do
    version_number=$(basename "${subdirs[$i]}")
    python sample_esm.py \
        --target "$target" \
        --num_samples 96 \
        --alignment_round "$round" \
        --version_number "$version_number" \
        --replicate "$i"
done

echo "All rounds completed for target $target"
|
|
|