{ "activation": "gelu", "architectures": [ "ProteomeLMForMaskedLM" ], "attention_dropout": 0.1, "batch_size": 32, "beta1": 0.9, "beta2": 0.999, "dataloader_num_workers": 8, "db_path": "/home/malbrank/training/", "dim": 512, "dropout": 0.1, "dtype": "bfloat16", "eval_accumulation_steps": 1, "eval_epochs": 1, "gradient_accumulation_steps": 1, "gradient_checkpointing": false, "hidden_dim": 512, "initializer_range": 0.02, "input_size": 1152, "learning_rate": 0.0003, "logging_steps": 10, "loss_type": "mse", "mask_fraction": 0.5, "max_grad_norm": 1.0, "max_position_embeddings": 512, "max_steps": 1000000, "min_taxid_size": 200, "model_type": "distilbert", "n_heads": 8, "n_inner": null, "n_layers": 6, "namedir": "ProteomeLM-Mini-Kuma", "num_epochs": 2000, "output_dir": "/home/malbrank/ProteomeLM/output/", "pad_token_id": 0, "qa_dropout": 0.1, "save_epochs": 15, "scheduler": "cosine", "seq_classif_dropout": 0.2, "sinusoidal_pos_embds": false, "torch_dtype": "bfloat16", "transformers_version": "4.51.1", "use_cache": true, "use_one_gpu": "0", "vocab_size": 30522, "wandb_project": "proteomelm", "warmup_steps": 500, "weight_decay": 0.01 }