import torch
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer
import os
# Create a tiny config for testing
from transformers.models.hunyuan_v1_dense.configuration_hunyuan_v1_dense import HunYuanDenseV1Config
# Minimal HunYuan dense configuration: deliberately tiny (2 layers, 64-dim
# hidden) so tests run fast, while still exercising grouped-query attention
# (4 query heads over 2 KV heads) and QK normalization.
_tiny_cfg = dict(
    vocab_size=300,
    hidden_size=64,
    intermediate_size=128,
    num_hidden_layers=2,
    num_attention_heads=4,
    head_dim=16,
    num_key_value_heads=2,
    hidden_act="silu",
    max_position_embeddings=128,
    rms_norm_eps=1e-05,
    use_cache=True,
    tie_word_embeddings=False,
    rope_theta=10000.0,
    attention_bias=False,
    attention_dropout=0.0,
    use_qk_norm=True,
    bos_token_id=1,
    eos_token_id=2,
    pad_token_id=0,
)
tiny_config = HunYuanDenseV1Config(**_tiny_cfg)
print("Config created:", tiny_config.model_type)
# Build a randomly-initialized model from the tiny config and persist it.
model = AutoModelForCausalLM.from_config(tiny_config)
model.eval()  # disable dropout etc. so test outputs are deterministic
print("Model created, params:", sum(p.numel() for p in model.parameters()))

# Save model + config. The output directory was hard-coded to one developer's
# home directory; allow overriding via TINY_MODEL_DIR while keeping the
# original path as the backward-compatible default.
save_dir = os.environ.get(
    "TINY_MODEL_DIR",
    "/home/panas/git/optimum-intel/tiny-random-hunyuan-v1-dense",
)
model.save_pretrained(save_dir)
tiny_config.save_pretrained(save_dir)
# Create a simple tokenizer config for testing.
# BUG FIX: PreTrainedTokenizerFast requires a backend tokenizer (via
# tokenizer_object, tokenizer_file, or a slow tokenizer); passing
# tokenizer_object=None raises ValueError, which previously crashed the
# script before the final prints. Per the notes below, the tokenizer is
# optional for these tests, so fall back to skipping it instead of aborting.
from transformers import PreTrainedTokenizerFast
try:
    tokenizer = PreTrainedTokenizerFast(
        tokenizer_object=None,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
    )
except ValueError:
    # No backend tokenizer available -- skip it; tests can rely on
    # AutoTokenizer or provide their own tokenizer files instead.
    tokenizer = None
# Just save a minimal tokenizer
# Actually, for tests, we can use the AutoTokenizer approach or skip tokenizer
print(f"Saved tiny model to {save_dir}")
print("Files:", os.listdir(save_dir))
# NOTE(review): the lines below are scraped Hugging Face model-card residue,
# not Python; commented out so the file parses. Original text preserved:
# - Downloads last month
# - 21,713
# Inference Providers NEW
# This model isn't deployed by any Inference Provider. 🙋 Ask for provider support