JiaMao committed (verified)
Commit 40dd7f8 · Parent(s): 13c3c9f

Upload 2 files

configs/08-03T09-35-lightning.yaml ADDED
@@ -0,0 +1,19 @@
+ lightning:
+   callbacks:
+     image_logger:
+       target: main.ImageLogger
+       params:
+         batch_frequency: 5000
+         max_images: 8
+         increase_log_steps: false
+         log_images_kwargs:
+           quantize_denoised: false
+           inpaint: false
+     model_checkpoint:
+       target: pytorch_lightning.callbacks.ModelCheckpoint
+       params:
+         save_weights_only: true
+   trainer:
+     benchmark: true
+     accelerator: ddp
+     gpus: 0,1,2
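Both files use the `target`/`params` convention of the CompVis latent-diffusion codebase: `target` is a dotted import path and `params` are the keyword arguments for that class's constructor. As rough orientation, here is a minimal sketch of the resolver, modeled on `ldm.util.instantiate_from_config` from that repo (treat the exact names and signatures as an assumption, not a quote of this project's code):

```python
import importlib

def get_obj_from_str(path: str):
    # Resolve a dotted path such as
    # "pytorch_lightning.callbacks.ModelCheckpoint" to the class object.
    module, cls = path.rsplit(".", 1)
    return getattr(importlib.import_module(module), cls)

def instantiate_from_config(config: dict):
    # "target" names the class; "params" (optional) becomes its kwargs.
    return get_obj_from_str(config["target"])(**config.get("params", dict()))

# The model_checkpoint block above would thus resolve to:
#   ModelCheckpoint(save_weights_only=True)
```

The `gpus: 0,1,2` string together with `accelerator: ddp` requests three-GPU DistributedDataParallel training in the older PyTorch Lightning spelling (the `accelerator=ddp` form predates the separate `strategy` argument).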
configs/08-03T09-35-project.yaml ADDED
@@ -0,0 +1,102 @@
+ model:
+   base_learning_rate: 2.5e-05
+   target: ldm.models.diffusion.ddpm.LatentDiffusion
+   params:
+     linear_start: 0.0015
+     linear_end: 0.0195
+     num_timesteps_cond: 1
+     log_every_t: 200
+     timesteps: 1000
+     first_stage_key: image
+     cond_stage_key: caption
+     image_size: 64
+     channels: 3
+     cond_stage_trainable: false
+     conditioning_key: crossattn
+     monitor: val/loss
+     use_ema: false
+     scheduler_config:
+       target: ldm.lr_scheduler.LambdaLinearScheduler
+       params:
+         warm_up_steps:
+         - 1000
+         cycle_lengths:
+         - 10000000000000
+         f_start:
+         - 1.0e-06
+         f_max:
+         - 1.0
+         f_min:
+         - 1.0
+     unet_config:
+       target: ldm.modules.diffusionmodules.openaimodel.UNetModel
+       params:
+         image_size: 64
+         in_channels: 3
+         out_channels: 3
+         model_channels: 192
+         attention_resolutions:
+         - 8
+         - 4
+         - 2
+         num_res_blocks: 2
+         channel_mult:
+         - 1
+         - 2
+         - 3
+         - 5
+         num_heads: 1
+         use_spatial_transformer: true
+         transformer_depth: 1
+         context_dim: 512
+         ckpt_path: models/ldm/cin256-v2/unet.ckpt
+     first_stage_config:
+       target: ldm.models.autoencoder.VQModelInterface
+       params:
+         ckpt_path: models/first_stage_models/vq-f4-tcga-brca/last.ckpt
+         embed_dim: 3
+         n_embed: 8192
+         ddconfig:
+           double_z: false
+           z_channels: 3
+           resolution: 256
+           in_channels: 3
+           out_ch: 3
+           ch: 128
+           ch_mult:
+           - 1
+           - 2
+           - 4
+           num_res_blocks: 2
+           attn_resolutions: []
+           dropout: 0.0
+         lossconfig:
+           target: torch.nn.Identity
+     cond_stage_config:
+       target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
+       params:
+         version: vinid/plip
+         max_length: 154
+ data:
+   target: main.DataModuleFromConfig
+   params:
+     batch_size: 48
+     num_workers: 12
+     wrap: false
+     train:
+       target: ldm.data.text_cond.tumor_til_in_text.TCGADataset
+       params:
+         config:
+           root: /home/myellapragad/summer23/TCGA_dataset
+           split: train
+           crop_size: 256
+           num_levels: 2
+           p_uncond: 0.1
+     validation:
+       target: ldm.data.text_cond.tumor_til_in_text.TCGADataset
+       params:
+         config:
+           root: /home/myellapragad/summer23/TCGA_dataset
+           split: test
+           crop_size: 256
+           num_levels: 2
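Taken together, the project config defines the latent diffusion model (VQ-f4 first stage, frozen PLIP text encoder as conditioning, cross-attention UNet) and the TCGA data module, while the lightning config above carries the trainer setup. A hedged sketch of the launch and the resulting effective learning rate, assuming the stock latent-diffusion `main.py`, which scales `base_learning_rate` by GPU count and batch size (both the command line and the scaling rule are assumptions taken from that codebase, not stated in this commit):

```python
# Hypothetical launch, assuming the stock latent-diffusion main.py CLI:
#   python main.py -t \
#       --base configs/08-03T09-35-project.yaml configs/08-03T09-35-lightning.yaml \
#       --gpus 0,1,2

# Effective learning rate under that codebase's scaling rule
# (lr = accumulate_grad_batches * n_gpus * batch_size * base_lr):
accumulate_grad_batches = 1        # default when not set in the config
n_gpus = 3                         # gpus: 0,1,2
batch_size = 48                    # data.params.batch_size
base_lr = 2.5e-05                  # model.base_learning_rate

lr = accumulate_grad_batches * n_gpus * batch_size * base_lr
print(f"effective lr = {lr:.2e}")  # 3.60e-03
```

Note that `p_uncond: 0.1` is set only on the train split, consistent with dropping the caption roughly 10% of the time to enable classifier-free guidance during training, while validation captions stay deterministic.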