{
  "AdaLN_Timestep_Embed_Dim": 256,
  "architectures": [
    "LexaLCM"
  ],
  "cfg_scale": 0.0,
  "d_ff": 8192,
  "d_latent": 1024,
  "d_model": 1536,
  "denoiser_iterations_inference": 40,
  "denoiser_iterations_pretrain": 80,
  "dropout_context": 0.1,
  "dropout_denoiser": 0.15,
  "dropout_latent": 0.1,
  "input_dim": 1024,
  "model_type": "lexa_lcm_pre2",
  "n_heads": 16,
  "num_context_layers": 3,
  "num_denoiser_layers": 6,
  "torch_dtype": "float32",
  "transformers_version": "4.51.3"
}