---
license: apache-2.0
---

```python
"""Build and save a tiny, randomly initialised Mistral-Small-3.1 checkpoint.

Shrinks both the text and vision towers of the full model's config to a
handful of layers/dims, instantiates a model with random weights from that
config, and saves model + tokenizer + processor to ``./tiny-random-mistral3``
for use as a fast test fixture.
"""
import os

import torch
from transformers import (
    AutoConfig,
    AutoModelForImageTextToText,
    AutoProcessor,
    AutoTokenizer,
)

model_id = "mistralai/Mistral-Small-3.1-24B-Instruct-2503"
# Download only the config; the full 24B weights are never fetched.
config = AutoConfig.from_pretrained(model_id)

# Shrink the text (language-model) tower.
config.text_config.num_hidden_layers = 2
config.text_config.hidden_size = 8
config.text_config.intermediate_size = 64
config.text_config.num_attention_heads = 8
config.text_config.num_key_value_heads = 4
config.text_config.head_dim = 32

# Shrink the vision tower.
config.vision_config.num_hidden_layers = 2
config.vision_config.hidden_size = 128
config.vision_config.intermediate_size = 64
config.vision_config.num_attention_heads = 4
config.vision_config.head_dim = 32

# Seed the RNG so the randomly initialised weights are reproducible —
# without this the "tiny-random" fixture changes on every run.
torch.manual_seed(0)

# from_config initialises fresh (random) weights instead of loading any.
model = AutoModelForImageTextToText.from_config(config)
# Tokenizer/processor carry the real vocab and preprocessing from the hub.
tokenizer = AutoTokenizer.from_pretrained(model_id)
processor = AutoProcessor.from_pretrained(model_id)

output_dir = "./tiny-random-mistral3"
os.makedirs(output_dir, exist_ok=True)
model.save_pretrained(output_dir)
tokenizer.save_pretrained(output_dir)
processor.save_pretrained(output_dir)
```