add colab, remove unused requirements
Browse files- main.py +3 -2
- readme.md +2 -0
- requirements.txt +0 -1
main.py
CHANGED
|
@@ -17,6 +17,7 @@ if __name__ == '__main__':
|
|
| 17 |
parser.add_argument('-O2', action='store_true', help="equals --fp16 --dir_text")
|
| 18 |
parser.add_argument('--test', action='store_true', help="test mode")
|
| 19 |
parser.add_argument('--save_mesh', action='store_true', help="export an obj mesh with texture")
|
|
|
|
| 20 |
parser.add_argument('--workspace', type=str, default='workspace')
|
| 21 |
parser.add_argument('--guidance', type=str, default='stable-diffusion', help='choose from [stable-diffusion, clip]')
|
| 22 |
parser.add_argument('--seed', type=int, default=0)
|
|
@@ -38,7 +39,7 @@ if __name__ == '__main__':
|
|
| 38 |
# network backbone
|
| 39 |
parser.add_argument('--fp16', action='store_true', help="use amp mixed precision training")
|
| 40 |
parser.add_argument('--backbone', type=str, default='grid', help="nerf backbone, choose from [grid, tcnn, vanilla]")
|
| 41 |
-
# rendering resolution in training
|
| 42 |
parser.add_argument('--w', type=int, default=128, help="render width for NeRF in training")
|
| 43 |
parser.add_argument('--h', type=int, default=128, help="render height for NeRF in training")
|
| 44 |
|
|
@@ -129,7 +130,7 @@ if __name__ == '__main__':
|
|
| 129 |
# decay to 0.01 * init_lr at last iter step
|
| 130 |
scheduler = lambda optimizer: optim.lr_scheduler.LambdaLR(optimizer, lambda iter: 0.01 ** min(iter / opt.iters, 1))
|
| 131 |
|
| 132 |
-
trainer = Trainer('ngp', opt, model, guidance, device=device, workspace=opt.workspace, optimizer=optimizer, ema_decay=0.95, fp16=opt.fp16, lr_scheduler=scheduler, use_checkpoint=opt.ckpt, eval_interval=10)
|
| 133 |
|
| 134 |
if opt.gui:
|
| 135 |
trainer.train_loader = train_loader # attach dataloader to trainer
|
|
|
|
| 17 |
parser.add_argument('-O2', action='store_true', help="equals --fp16 --dir_text")
|
| 18 |
parser.add_argument('--test', action='store_true', help="test mode")
|
| 19 |
parser.add_argument('--save_mesh', action='store_true', help="export an obj mesh with texture")
|
| 20 |
+
parser.add_argument('--eval_interval', type=int, default=10, help="evaluate on the valid set every interval epochs")
|
| 21 |
parser.add_argument('--workspace', type=str, default='workspace')
|
| 22 |
parser.add_argument('--guidance', type=str, default='stable-diffusion', help='choose from [stable-diffusion, clip]')
|
| 23 |
parser.add_argument('--seed', type=int, default=0)
|
|
|
|
| 39 |
# network backbone
|
| 40 |
parser.add_argument('--fp16', action='store_true', help="use amp mixed precision training")
|
| 41 |
parser.add_argument('--backbone', type=str, default='grid', help="nerf backbone, choose from [grid, tcnn, vanilla]")
|
| 42 |
+
# rendering resolution in training, decrease this if CUDA OOM.
|
| 43 |
parser.add_argument('--w', type=int, default=128, help="render width for NeRF in training")
|
| 44 |
parser.add_argument('--h', type=int, default=128, help="render height for NeRF in training")
|
| 45 |
|
|
|
|
| 130 |
# decay to 0.01 * init_lr at last iter step
|
| 131 |
scheduler = lambda optimizer: optim.lr_scheduler.LambdaLR(optimizer, lambda iter: 0.01 ** min(iter / opt.iters, 1))
|
| 132 |
|
| 133 |
+
trainer = Trainer('ngp', opt, model, guidance, device=device, workspace=opt.workspace, optimizer=optimizer, ema_decay=0.95, fp16=opt.fp16, lr_scheduler=scheduler, use_checkpoint=opt.ckpt, eval_interval=opt.eval_interval)
|
| 134 |
|
| 135 |
if opt.gui:
|
| 136 |
trainer.train_loader = train_loader # attach dataloader to trainer
|
readme.md
CHANGED
|
@@ -4,6 +4,8 @@ A pytorch implementation of the text-to-3D model **Dreamfusion**, powered by the
|
|
| 4 |
|
| 5 |
The original paper's project page: [_DreamFusion: Text-to-3D using 2D Diffusion_](https://dreamfusion3d.github.io/).
|
| 6 |
|
|
|
|
|
|
|
| 7 |
Examples generated from text prompt `a high quality photo of a pineapple` viewed with the GUI in real time:
|
| 8 |
|
| 9 |
https://user-images.githubusercontent.com/25863658/194241493-f3e68f78-aefe-479e-a4a8-001424a61b37.mp4
|
|
|
|
| 4 |
|
| 5 |
The original paper's project page: [_DreamFusion: Text-to-3D using 2D Diffusion_](https://dreamfusion3d.github.io/).
|
| 6 |
|
| 7 |
+
Colab notebook for usage: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1MXT3yfOFvO0ooKEfiUUvTKwUkrrlCHpF?usp=sharing)
|
| 8 |
+
|
| 9 |
Examples generated from text prompt `a high quality photo of a pineapple` viewed with the GUI in real time:
|
| 10 |
|
| 11 |
https://user-images.githubusercontent.com/25863658/194241493-f3e68f78-aefe-479e-a4a8-001424a61b37.mp4
|
requirements.txt
CHANGED
|
@@ -10,7 +10,6 @@ tqdm
|
|
| 10 |
matplotlib
|
| 11 |
PyMCubes
|
| 12 |
rich
|
| 13 |
-
pysdf
|
| 14 |
dearpygui
|
| 15 |
scipy
|
| 16 |
diffusers
|
|
|
|
| 10 |
matplotlib
|
| 11 |
PyMCubes
|
| 12 |
rich
|
|
|
|
| 13 |
dearpygui
|
| 14 |
scipy
|
| 15 |
diffusers
|