---
library_name: transformers
base_model:
  - stepfun-ai/Step3-VL-10B
---

This tiny model is intended for debugging. It is randomly initialized using a configuration adapted from [stepfun-ai/Step3-VL-10B](https://huggingface.co/stepfun-ai/Step3-VL-10B).

| File path         | Size  |
|-------------------|-------|
| model.safetensors | 6.0MB |

Example usage:

- vLLM

```bash
vllm serve tiny-random/step3-vl \
    --trust-remote-code \
    --reasoning-parser deepseek_r1 \
    --enable-auto-tool-choice \
    --tool-call-parser hermes
```
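Once the server is running, it exposes vLLM's OpenAI-compatible API. Below is a minimal sketch of a client request; it assumes the default endpoint `http://localhost:8000/v1` and that the `openai` Python package is installed:

```python
# Minimal sketch: query the vLLM server through its OpenAI-compatible API.
# Assumes vLLM's default port 8000; the api_key value is a placeholder.
from openai import OpenAI

client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")
response = client.chat.completions.create(
    model="tiny-random/step3-vl",
    messages=[{"role": "user", "content": "Hello!"}],
    max_tokens=16,
)
print(response.choices[0].message.content)  # random weights => gibberish output
```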
- Transformers

```python
import torch
from transformers import AutoModelForCausalLM, AutoProcessor

model_id = "tiny-random/step3-vl"
messages = [
    {
        "role": "user",
        "content": [
            {
                "type": "image",
                "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG"
            },
            {
                "type": "text",
                "text": "describe this image"
            }
        ],
    }
]
processor = AutoProcessor.from_pretrained(
    model_id,
    trust_remote_code=True,
)

model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,
    device_map="cuda",
    trust_remote_code=True,
    # Remap checkpoint keys to the module layout Transformers expects
    # (the weights are stored under the original repo's naming scheme).
    key_mapping={
        "^vision_model": "model.vision_model",
        r"^model(?!\.(language_model|vision_model))": "model.language_model",
        "vit_large_projector": "model.vit_large_projector",
    }
)
inputs = processor.apply_chat_template(
    messages,
    tokenize=True,
    add_generation_prompt=True,
    return_dict=True,
    return_tensors="pt"
).to(model.device)
inputs.pop("token_type_ids", None)
generated_ids = model.generate(**inputs, max_new_tokens=16)
output_text = processor.decode(
    generated_ids[0][inputs["input_ids"].shape[1]:], skip_special_tokens=False)
print(output_text)
```
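Since the weights are randomly initialized, the generated text is gibberish by design; what matters for debugging is that the forward pass runs and the shapes line up. A small sanity-check sketch, reusing the `inputs` from above:

```python
# Sanity check (sketch): verify the logits shape instead of inspecting the text.
with torch.no_grad():
    logits = model(**inputs).logits
print(logits.shape)  # (batch, sequence_length, vocab_size); vocab_size is 151936 here
```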

Code used to create this repo:

```python
import json
from pathlib import Path

import accelerate
import torch
from huggingface_hub import file_exists, hf_hub_download, list_repo_files
from safetensors.torch import save_file
from transformers import (
    AutoConfig,
    AutoModel,
    AutoModelForCausalLM,
    AutoProcessor,
    AutoTokenizer,
    GenerationConfig,
    set_seed,
)

source_model_id = "stepfun-ai/Step3-VL-10B"
save_folder = "/tmp/tiny-random/step3-vl"

Path(save_folder).mkdir(parents=True, exist_ok=True)
for f in list_repo_files(source_model_id, repo_type="model"):
    if (f.endswith('.json') or f.endswith('.py') or f.endswith('.model') or f.endswith('.jinja')) and (
        not f.endswith('.index.json')
    ):
        hf_hub_download(repo_id=source_model_id, filename=f,
                        repo_type="model", local_dir=save_folder)

def replace_file(filepath, old_string, new_string):
    with open(filepath, 'r', encoding='utf-8') as f:
        code = f.read()
    code = code.replace(old_string, new_string)
    with open(filepath, 'w', encoding='utf-8') as f:
        f.write(code)

with open(f'{save_folder}/config.json') as f:
    config_json = json.load(f)

config_json['text_config'].update({
    'num_hidden_layers': 2,
    'hidden_size': 8,
    'head_dim': 32,
    'intermediate_size': 64,
    'num_attention_heads': 8,
    "num_key_value_heads": 4,
    'tie_word_embeddings': False,
})
config_json['vision_config'].update({
    'width': 64,
    'layers': 2,
    'heads': 2,
})
with open(f"{save_folder}/config.json", "w", encoding='utf-8') as f:
    json.dump(config_json, f, indent=2)
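
# Note: shrinking layer counts and hidden sizes keeps the checkpoint at a few MB
# (6.0MB here) while preserving the Step3-VL architecture, so loading and serving
# paths can be debugged quickly. head_dim=32 with 8 attention heads yields the
# 256-dim q projections visible in the printed module tree below.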

config = AutoConfig.from_pretrained(
    save_folder,
    trust_remote_code=True,
)
print(config)
torch.set_default_dtype(torch.bfloat16)
model = AutoModelForCausalLM.from_config(config, trust_remote_code=True)
torch.set_default_dtype(torch.float32)
# if file_exists(filename="generation_config.json", repo_id=source_model_id, repo_type='model'):
#     model.generation_config = GenerationConfig.from_pretrained(
#         source_model_id, trust_remote_code=True,
#     )
set_seed(42)
model = model.cpu()
with torch.no_grad():
    for name, p in sorted(model.named_parameters()):
        torch.nn.init.normal_(p, 0, 0.1)
        print(name, p.shape)
# Re-root the submodules under a bare Identity wrapper so the saved
# state_dict keys follow the original repo's layout (which is why the
# usage example above passes key_mapping when loading).
model_new = torch.nn.Identity()
model_new.model = model.model.language_model
model_new.vision_model = model.model.vision_model
model_new.lm_head = model.lm_head
model_new.vit_large_projector = model.model.vit_large_projector
state_dict = model_new.state_dict()
save_file(state_dict, f"{save_folder}/model.safetensors")
```
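After saving, it is worth confirming that the checkpoint reloads cleanly. A minimal sketch, using the same `key_mapping` as the usage example above:

```python
# Sanity check (sketch): reload the tiny checkpoint and count its parameters.
reloaded = AutoModelForCausalLM.from_pretrained(
    save_folder,
    trust_remote_code=True,
    key_mapping={
        "^vision_model": "model.vision_model",
        r"^model(?!\.(language_model|vision_model))": "model.language_model",
        "vit_large_projector": "model.vit_large_projector",
    },
)
print(sum(p.numel() for p in reloaded.parameters()))
```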

Printing the model:

```
Step3VL10BForCausalLM(
  (model): StepRoboticsModel(
    (vision_model): StepRoboticsVisionEncoder(
      (conv1): Conv2d(3, 64, kernel_size=(14, 14), stride=(14, 14), bias=False)
      (ln_pre): LayerNorm((64,), eps=1e-05, elementwise_affine=True)
      (ln_post): Identity()
      (transformer): EncoderVisionTransformer(
        (resblocks): ModuleList(
          (0-1): 2 x EncoderVisionBlock(
            (attn): EncoderVisionAttention(
              (out_proj): Linear(in_features=64, out_features=64, bias=True)
              (rope): EncoderRope2D()
            )
            (ln_1): LayerNorm((64,), eps=1e-05, elementwise_affine=True)
            (ln_2): LayerNorm((64,), eps=1e-05, elementwise_affine=True)
            (mlp): EncoderMLP(
              (c_fc): Linear(in_features=64, out_features=373, bias=True)
              (act_fn): QuickGELUActivation()
              (c_proj): Linear(in_features=373, out_features=64, bias=True)
            )
            (ls_1): EncoderLayerScale()
            (ls_2): EncoderLayerScale()
          )
        )
      )
      (vit_downsampler1): Conv2d(64, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))
      (vit_downsampler2): Conv2d(128, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))
    )
    (language_model): Qwen3Model(
      (embed_tokens): Embedding(151936, 8)
      (layers): ModuleList(
        (0-1): 2 x Qwen3DecoderLayer(
          (self_attn): Qwen3Attention(
            (q_proj): Linear(in_features=8, out_features=256, bias=False)
            (k_proj): Linear(in_features=8, out_features=128, bias=False)
            (v_proj): Linear(in_features=8, out_features=128, bias=False)
            (o_proj): Linear(in_features=256, out_features=8, bias=False)
            (q_norm): Qwen3RMSNorm((32,), eps=1e-06)
            (k_norm): Qwen3RMSNorm((32,), eps=1e-06)
          )
          (mlp): Qwen3MLP(
            (gate_proj): Linear(in_features=8, out_features=64, bias=False)
            (up_proj): Linear(in_features=8, out_features=64, bias=False)
            (down_proj): Linear(in_features=64, out_features=8, bias=False)
            (act_fn): SiLUActivation()
          )
          (input_layernorm): Qwen3RMSNorm((8,), eps=1e-06)
          (post_attention_layernorm): Qwen3RMSNorm((8,), eps=1e-06)
        )
      )
      (norm): Qwen3RMSNorm((8,), eps=1e-06)
      (rotary_emb): Qwen3RotaryEmbedding()
    )
    (vit_large_projector): Linear(in_features=256, out_features=8, bias=False)
  )
  (lm_head): Linear(in_features=8, out_features=151936, bias=False)
)
```