from transformers import PretrainedConfig
class AlphaPilotConfig(PretrainedConfig):
    """Configuration for the AlphaPilot DQN model.

    Stores the hyperparameters needed to construct the Q-network:
    state/action dimensionality, hidden-layer widths, and the name of
    the activation function.

    Args:
        state_dim (int): Size of the observation/state vector. Defaults to 8.
        action_dim (int): Number of discrete actions (Q-network outputs).
            Defaults to 4.
        hidden_layers (list[int] | None): Widths of the hidden layers, in
            order. Defaults to ``[256, 128]`` when ``None``.
        activation (str): Name of the activation function to use between
            layers (presumably resolved via ``torch.nn`` — e.g. ``"ReLU"``).
        **kwargs: Forwarded to ``PretrainedConfig.__init__``.
    """

    model_type = "alpha_pilot_dqn"

    def __init__(
        self,
        state_dim=8,
        action_dim=4,
        hidden_layers=None,
        activation="ReLU",
        **kwargs,
    ):
        self.state_dim = state_dim
        self.action_dim = action_dim
        # None sentinel instead of a mutable default: a literal [256, 128]
        # default would be one shared list object across every instance.
        self.hidden_layers = hidden_layers if hidden_layers is not None else [256, 128]
        self.activation = activation
        super().__init__(**kwargs)