{ "vocab_size": 26, "embed_dim": 128, "hidden_dim": 256, "lstm_layers": 2, "bidirectional": true, "seq_embed_dim": 256, "combined_dim": 1024, "predictor_hidden": [512, 256, 128, 64], "dropout": 0.1, "max_length": 600, "learning_rate": 0.001, "batch_size": 64, "attention_pooling": true }