{
  "model_type": "kimi_k2",
  "architectures": ["TikTokenTokenizer"],
  "_name_or_path": "kimi-k2-test",
  "vocab_size": 151936,
  "hidden_size": 256,
  "num_hidden_layers": 2,
  "num_attention_heads": 4,
  "intermediate_size": 512,
  "max_position_embeddings": 2048,
  "torch_dtype": "float16"
}