#!/bin/bash
#SBATCH -N 1
#SBATCH -G 4
#SBATCH -C GPU_MEM:80GB
#SBATCH -p rotskoff
#SBATCH -t 07-00:00:00
#SBATCH --mem=320GB
#SBATCH -J alignment
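# Resource request: 1 node, 4 GPUs (80 GB each), 320 GB RAM, 7-day wall time
# on the rotskoff partition, logged under the job name "alignment".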
#source /home/groups/nsgray01/anaconda3/etc/profile.d/conda.sh
#conda activate proera
source /home/groups/nsgray01/MoLE/.venv/bin/activate
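# Activate the MoLE virtual environment (the commented conda lines above are an
# alternative environment setup and are not used here).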
target="GB1"
base_model_path="/scratch/groups/rotskoff/sebastian/era/protein_era/111225BenchmarkWithMegascalePretrain/megascale_ckpts/step_step_100000.ckpt"
round=0
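# Pipeline overview: starting from a Megascale-pretrained checkpoint, run four
# alignment rounds (round = 0..3). Each round (i) samples sequences for the
# target from the current models, (ii) builds an alignment dataset from those
# samples, and (iii) fine-tunes one model per replicate with pera_train using
# the ERA objective. Intermediate rounds also recompute the log-probabilities
# of the samples under the newly trained models before the next dataset is built.
#
# Typical usage (the script filename below is illustrative):
#   sbatch run_alignment_GB1.sh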
# Sample from the base model (here, the Megascale-pretrained checkpoint) 10 times, once per replicate
for i in {0..9}; do
echo "Running iteration $i for target $target"
# Run the Python script with the specified arguments
python sample_esm_first_round.py \
--target $target \
--num_samples 96 \
--replicate "$i"
done
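# Each call above is expected to write its 96 samples (and their
# log-probabilities under the base model) to a per-replicate file that the
# dataset-creation step below aggregates into an HDF5 alignment dataset.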
# Make the first round alignment datasets for each model
echo "Creating alignment dataset for target $target"
python create_alignment_dataset_first_round.py --target $target
# Train the first round models
cd $target
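# Round-0 training: each replicate fine-tunes the Megascale-pretrained base
# checkpoint with the ERA objective (beta=10, gamma=0) on 4 GPUs, using AdamW
# (lr=1e-6, weight decay 0.01) and a ReduceLROnPlateau scheduler monitoring
# train/ERALoss. The entire alignment dataset is used for training (no
# val/test split), and this round runs for only 2 epochs (later rounds use 25).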
for i in {0..9}; do
echo "Training model for target $target, round $round, replicate $i"
pera_train \
"train.lightning_model_args.eval_type=era" \
"train.lightning_model_args.beta=10.0" \
"train.lightning_model_args.gamma=0" \
"train.trainer_args.devices=4" \
"train.trainer_args.max_epochs=2" \
"train.trainer_args.log_every_n_steps=1" \
"train.trainer_args.enable_progress_bar=True" \
"train.logger.logger_args.version=${i}" \
"train.lightning_model_args.interval=epoch" \
"train.lightning_model_args.monitor=train/ERALoss" \
"train.best_checkpoint_args.monitor=train/ERALoss" \
"train.lightning_model_args.lr_scheduler=ReduceLROnPlateau" \
"++train.lightning_model_args.lr_scheduler_args.patience=5" \
"train.lightning_model_args.optimizer=AdamW" \
"train.lightning_model_args.optimizer_args.lr=1.0e-6" \
"++train.lightning_model_args.optimizer_args.betas=[0.9,0.99]" \
"++train.lightning_model_args.optimizer_args.weight_decay=0.01" \
"train.lightning_model_args.on_step=false" \
"global_args.dataset_filename=alignment_dataset_${round}_96_from_ESM3_${i}.hdf5" \
"nn.batch_size=4" \
"nn.load_model=${base_model_path}" \
"nn.dataset_split_args.train=1.0" \
"nn.dataset_split_args.val=0.0" \
"nn.dataset_split_args.test=0.0" \
"++nn.model_args.unified_transformer_args.ida_layer_indices=[]"
done
# move lightning_logs to be specific to this round
mv lightning_logs lightning_logs_round_${round}
cd ..
# sample from the first round trained models and update the logps
BASE_DIR="./${target}/lightning_logs_round_${round}"
subdirs=($(ls -d "$BASE_DIR"/*/ | sort))
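# subdirs holds the per-replicate Lightning log directories (version_0 ...),
# in sorted order; replicate i is paired with the i-th directory below.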
for i in {0..9}; do
version_number=$(basename "${subdirs[$i]}")
echo "$version_number"
python sample_esm.py \
--target $target \
--num_samples 96 \
--alignment_round $round \
--version_number "$version_number" \
--replicate "$i"
python compute_updated_logps.py \
--target $target \
--num_samples 96 \
--alignment_round $round \
--version_number "$version_number" \
--replicate "$i"
done
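# sample_esm.py draws new sequences from each round-0 model;
# compute_updated_logps.py then recomputes the log-probabilities of those
# samples under that model (presumably so the next round's dataset carries
# up-to-date model log-probs for the ERA loss).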
# Make the second round alignment datasets for each model
round=1
echo "Creating alignment dataset for target $target, round $round"
python make_alignment_dataset_second_round.py --target $target
# Train the second round models
cd $target
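# Round-1 training mirrors round 0 except that each replicate is trained for
# 25 epochs and is warm-started from its own round-0 best checkpoint rather
# than from the shared base model.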
for i in {0..9}; do
version_number=$(basename "${subdirs[$i]}")
echo "Training model for target $target, round $round, replicate $i, version_number $version_number"
pera_train \
"train.lightning_model_args.eval_type=era" \
"train.lightning_model_args.beta=10.0" \
"train.lightning_model_args.gamma=0" \
"train.trainer_args.devices=4" \
"train.trainer_args.max_epochs=25" \
"train.trainer_args.log_every_n_steps=1" \
"train.trainer_args.enable_progress_bar=True" \
"train.logger.logger_args.version=${i}" \
"train.lightning_model_args.interval=epoch" \
"train.lightning_model_args.monitor=train/ERALoss" \
"train.best_checkpoint_args.monitor=train/ERALoss" \
"train.lightning_model_args.lr_scheduler=ReduceLROnPlateau" \
"++train.lightning_model_args.lr_scheduler_args.patience=5" \
"train.lightning_model_args.optimizer=AdamW" \
"train.lightning_model_args.optimizer_args.lr=1.0e-6" \
"++train.lightning_model_args.optimizer_args.betas=[0.9,0.99]" \
"++train.lightning_model_args.optimizer_args.weight_decay=0.01" \
"train.lightning_model_args.on_step=false" \
"global_args.dataset_filename=alignment_dataset_${round}_96_from_ESM3_${i}.hdf5" \
"nn.batch_size=4" \
"nn.load_model=lightning_logs_round_0/${version_number}/checkpoints/best_model.ckpt" \
"nn.dataset_split_args.train=1.0" \
"nn.dataset_split_args.val=0.0" \
"nn.dataset_split_args.test=0.0" \
"++nn.model_args.unified_transformer_args.ida_layer_indices=[]"
done
# move lightning_logs to be specific to this round
mv lightning_logs lightning_logs_round_${round}
cd ..
# sample from the second round trained models and update the logps
BASE_DIR="./${target}/lightning_logs_round_${round}"
subdirs=($(ls -d "$BASE_DIR"/*/ | sort))
for i in {0..9}; do
version_number=$(basename "${subdirs[$i]}")
python sample_esm.py \
--target $target \
--num_samples 96 \
--alignment_round $round \
--version_number "$version_number" \
--replicate "$i"
python compute_updated_logps.py \
--target $target \
--num_samples 96 \
--alignment_round $round \
--version_number "$version_number" \
--replicate "$i"
done
# Make the third round alignment datasets for each model
round=2
echo "Creating alignment dataset for target $target, round $round"
python make_alignment_dataset_third_round.py --target $target
# Train the third round models
cd $target
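# Rounds 2 and 3 repeat the same recipe: build a dataset from the previous
# round's samples, then fine-tune each replicate starting from its previous
# round's best checkpoint.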
for i in {0..9}; do
version_number=$(basename "${subdirs[$i]}")
echo "Training model for target $target, round $round, replicate $i"
pera_train \
"train.lightning_model_args.eval_type=era" \
"train.lightning_model_args.beta=10.0" \
"train.lightning_model_args.gamma=0" \
"train.trainer_args.devices=4" \
"train.trainer_args.max_epochs=25" \
"train.trainer_args.log_every_n_steps=1" \
"train.trainer_args.enable_progress_bar=True" \
"train.logger.logger_args.version=${i}" \
"train.lightning_model_args.interval=epoch" \
"train.lightning_model_args.monitor=train/ERALoss" \
"train.best_checkpoint_args.monitor=train/ERALoss" \
"train.lightning_model_args.lr_scheduler=ReduceLROnPlateau" \
"++train.lightning_model_args.lr_scheduler_args.patience=5" \
"train.lightning_model_args.optimizer=AdamW" \
"train.lightning_model_args.optimizer_args.lr=1.0e-6" \
"++train.lightning_model_args.optimizer_args.betas=[0.9,0.99]" \
"++train.lightning_model_args.optimizer_args.weight_decay=0.01" \
"train.lightning_model_args.on_step=false" \
"global_args.dataset_filename=alignment_dataset_${round}_96_from_ESM3_${i}.hdf5" \
"nn.batch_size=4" \
"nn.load_model=${version_number}/checkpoints/best_model.ckpt" \
"nn.dataset_split_args.train=1.0" \
"nn.dataset_split_args.val=0.0" \
"nn.dataset_split_args.test=0.0" \
"++nn.model_args.unified_transformer_args.ida_layer_indices=[]"
done
# move lightning_logs to be specific to this round
mv lightning_logs lightning_logs_round_${round}
cd ..
# sample from the third round trained models and update the logps
BASE_DIR="./${target}/lightning_logs_round_${round}"
subdirs=($(ls -d "$BASE_DIR"/*/ | sort))
for i in {0..9}; do
version_number=$(basename "${subdirs[$i]}")
python sample_esm.py \
--target $target \
--num_samples 96 \
--alignment_round $round \
--version_number "$version_number" \
--replicate "$i"
python compute_updated_logps.py \
--target $target \
--num_samples 96 \
--alignment_round $round \
--version_number "$version_number" \
--replicate "$i"
done
# make the fourth round alignment datasets for each model
round=3
echo "Creating alignment dataset for target $target, round $round"
python make_alignment_dataset_fourth_round.py --target $target --alignment_round $round
# Train the fourth round models
cd $target
for i in {0..9}; do
version_number=$(basename "${subdirs[$i]}")
echo "Training model for target $target, round $round, replicate $i"
pera_train \
"train.lightning_model_args.eval_type=era" \
"train.lightning_model_args.beta=10.0" \
"train.lightning_model_args.gamma=0" \
"train.trainer_args.devices=4" \
"train.trainer_args.max_epochs=25" \
"train.trainer_args.log_every_n_steps=1" \
"train.trainer_args.enable_progress_bar=True" \
"train.logger.logger_args.version=${i}" \
"train.lightning_model_args.interval=epoch" \
"train.lightning_model_args.monitor=train/ERALoss" \
"train.best_checkpoint_args.monitor=train/ERALoss" \
"train.lightning_model_args.lr_scheduler=ReduceLROnPlateau" \
"++train.lightning_model_args.lr_scheduler_args.patience=5" \
"train.lightning_model_args.optimizer=AdamW" \
"train.lightning_model_args.optimizer_args.lr=1.0e-6" \
"++train.lightning_model_args.optimizer_args.betas=[0.9,0.99]" \
"++train.lightning_model_args.optimizer_args.weight_decay=0.01" \
"train.lightning_model_args.on_step=false" \
"global_args.dataset_filename=alignment_dataset_${round}_96_from_ESM3_${i}.hdf5" \
"nn.batch_size=4" \
"nn.load_model=${version_number}/checkpoints/best_model.ckpt" \
"nn.dataset_split_args.train=1.0" \
"nn.dataset_split_args.val=0.0" \
"nn.dataset_split_args.test=0.0" \
"++nn.model_args.unified_transformer_args.ida_layer_indices=[]"
done
# move lightning_logs to be specific to this round
mv lightning_logs lightning_logs_round_${round}
cd ..
# sample from the fourth round trained models
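# (no compute_updated_logps.py here: this is the final round, so no further
# training dataset is built from these samples)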
BASE_DIR="./${target}/lightning_logs_round_${round}"
subdirs=($(ls -d "$BASE_DIR"/*/ | sort))
for i in {0..9}; do
version_number=$(basename "${subdirs[$i]}")
python sample_esm.py \
--target $target \
--num_samples 96 \
--alignment_round $round \
--version_number "$version_number" \
--replicate "$i"
done
echo "All rounds completed for target $target"