# langsplat-backup / process.sh
# (web-page residue from the HuggingFace listing, kept as comments:
#  tusharsangam — "Upload folder using huggingface_hub", commit 9205b56 verified)
#!/bin/bash
#SBATCH --job-name=langsplat_process
#SBATCH --output=./logs/langsplat_process-%J.log
#SBATCH --error=./logs/langsplat_process-%J.err
#SBATCH --time=12:00:00
#SBATCH --nodes 1
#SBATCH --gres=gpu:1
#SBATCH --ntasks-per-node 1
#SBATCH --cpus-per-task=12
#SBATCH --mem=100G
#SBATCH --account=siro
#SBATCH --qos=siro_high
# Fail fast: abort on any command error, unset variable, or pipeline failure.
set -euo pipefail

# Scene/case name used throughout the pipeline; the dataset directory is
# derived from it so the two values can never drift out of sync.
export casename="office_scene_50"
export dataset_path="/fsx-siro/sangamtushar/LangSplat/data/examples/${casename}"
# get the language feature of the scene
# python preprocess.py --dataset_path $dataset_path
# train the autoencoder
# cd autoencoder
# python train.py --dataset_path $dataset_path \
# --encoder_dims 256 128 64 32 3 \
# --decoder_dims 16 32 64 128 256 256 512 \
# --lr 0.0007 --dataset_name $casename
# # e.g. python train.py --dataset_path ../data/sofa --encoder_dims 256 128 64 32 3 --decoder_dims 16 32 64 128 256 256 512 --lr 0.0007 --dataset_name sofa
# # get the 3-dims language feature of the scene
# python test.py --dataset_name $casename --dataset_path $dataset_path
# # e.g. python test.py --dataset_path ../data/sofa --dataset_name sofa
# # ATTENTION: Before you train the LangSplat, please follow https://github.com/graphdeco-inria/gaussian-splatting
# # to train the RGB 3D Gaussian Splatting model.
# # put the path of your RGB model after '--start_checkpoint'
# cd ..
# for level in 1 2 3
# do
# python train.py -s $dataset_path -m output/${casename} --start_checkpoint $dataset_path/$casename/chkpnt30000.pth --feature_level ${level}
# # e.g. python train.py -s data/sofa -m output/sofa --start_checkpoint data/sofa/sofa/chkpnt30000.pth --feature_level 3
# done
# Render outputs for each trained feature level (only level 3 here; the
# earlier training loop over levels 1-3 is commented out above).
# All expansions are quoted so paths survive any special characters (SC2086).
for level in 3; do
  # render rgb (already done; uncomment to re-render)
  # python render.py -m "output/${casename}_${level}"
  # render language features
  python render.py -m "output/${casename}_${level}" --include_feature
  # e.g. python render.py -m output/sofa_3 --include_feature
done