{
  "dataset_name": [
    "Mtot_Nbody_SIMBA"
  ],
  "dataset_config_name": null,
  "dataset_conditional_name": null,
  "model_config_name_or_path": null,
  "base_channels": 64,
  "cross_attention_dim": null,
  "vae": null,
  "vae_from_pretrained": null,
  "vae_scaling_factor": null,
  "train_data_dir": null,
  "output_dir": "output/ddpm-128-sample-2000-linear-run2",
  "tag": [
    "na1"
  ],
  "overwrite_output_dir": false,
  "cache_dir": "data",
  "resolution": 128,
  "data_size": 13500,
  "super_resolution": null,
  "local_resize": false,
  "conditional": false,
  "center_crop": false,
  "random_flip": false,
  "train_batch_size": 64,
  "eval_batch_size": 16,
  "dataloader_num_workers": 0,
  "num_epochs": 200,
  "save_images_epochs": 10,
  "save_model_epochs": 10,
  "gradient_accumulation_steps": 1,
  "learning_rate": 0.0001,
  "lr_scheduler": "cosine",
  "lr_warmup_steps": 500,
  "adam_beta1": 0.95,
  "adam_beta2": 0.999,
  "adam_weight_decay": 1e-06,
  "adam_epsilon": 1e-08,
  "use_ema": true,
  "ema_inv_gamma": 1.0,
  "ema_power": 0.75,
  "ema_max_decay": 0.9999,
  "push_to_hub": true,
  "hub_token": null,
  "hub_model_id": null,
  "hub_private_repo": false,
  "logger": "wandb",
  "logging_dir": "logs",
  "local_rank": -1,
  "mixed_precision": "no",
  "prediction_type": "sample",
  "loss": "mse",
  "ddpm_num_steps": 2000,
  "ddpm_num_inference_steps": 2000,
  "ddpm_beta_schedule": "linear",
  "checkpointing_steps": 10000,
  "checkpoints_total_limit": null,
  "resume_from_checkpoint": "latest",
  "enable_xformers_memory_efficient_attention": false
}