JakeOh committed on
Commit
172a389
·
verified ·
1 Parent(s): bc0e7e5

Upload config

Browse files
Files changed (2) hide show
  1. config.json +2 -2
  2. configuration_llada.py +1 -1
config.json CHANGED
@@ -31,8 +31,8 @@
31
  "input_emb_norm": false,
32
  "layer_norm_type": "rms",
33
  "layer_norm_with_affine": true,
34
- "mask_token_id": 8,
35
- "max_sequence_length": 4096,
36
  "mlp_hidden_size": 1024,
37
  "mlp_ratio": 4,
38
  "model_type": "llada",
 
31
  "input_emb_norm": false,
32
  "layer_norm_type": "rms",
33
  "layer_norm_with_affine": true,
34
+ "mask_token_id": 50258,
35
+ "max_sequence_length": 1024,
36
  "mlp_hidden_size": 1024,
37
  "mlp_ratio": 4,
38
  "model_type": "llada",
configuration_llada.py CHANGED
@@ -460,4 +460,4 @@ class LLaDAConfig(PretrainedConfig):
460
 
461
 
462
  # Register the config class so that it is available for transformer pipelines, auto-loading etc.
463
- AutoConfig.register("llada", LLaDAConfig)
 
460
 
461
 
462
  # Register the config class so that it is available for transformer pipelines, auto-loading etc.
463
+ AutoConfig.register("llada", LLaDAConfig)