{
  "tokenizer_class": "GPT2Tokenizer",
  "vocab_size": 50257,
  "padding_side": "right",
  "special_tokens_map_file": null,
  "model_max_length": 1024
}