tiny random models

Serve the model with SGLang (Docker)
docker run --gpus all \
--shm-size 32g \
-p 30000:30000 \
-v ~/.cache/huggingface:/root/.cache/huggingface \
--env "HF_TOKEN=<secret>" \
--ipc=host \
lmsysorg/sglang:latest \
python3 -m sglang.launch_server \
--model-path "tiny-random/ring" \
--host 0.0.0.0 \
--port 30000

# Call the server using curl (OpenAI-compatible API):
curl -X POST "http://localhost:30000/v1/chat/completions" \
-H "Content-Type: application/json" \
--data '{
"model": "tiny-random/ring",
"messages": [
{
"role": "user",
"content": "What is the capital of France?"
}
]
}'

This tiny model is intended for debugging. It is randomly initialized using the configuration adapted from inclusionAI/Ring-1T-preview.

Example usage with transformers:
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
model_id = "tiny-random/ring"
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
model_id,
torch_dtype=torch.bfloat16,
trust_remote_code=True,
)
pipe = pipeline('text-generation', model=model, tokenizer=tokenizer, trust_remote_code=True)
print(pipe('Write an article about Artificial Intelligence.'))
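
For quick debugging it can also help to bypass pipeline() and call generate directly. A minimal sketch reusing the tokenizer and model loaded above:

# Tokenize a prompt and generate a few tokens without the pipeline wrapper.
inputs = tokenizer("Write an article about Artificial Intelligence.", return_tensors="pt")
with torch.no_grad():
    output_ids = model.generate(**inputs, max_new_tokens=16)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))

The following script creates this tiny checkpoint from the source repo's configuration: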
import json
from pathlib import Path
import torch
from huggingface_hub import file_exists, hf_hub_download
from transformers import (
AutoConfig,
AutoModelForCausalLM,
AutoTokenizer,
GenerationConfig,
set_seed,
)
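
# Source repo to copy the tokenizer and config from, and the local output folder.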
source_model_id = "inclusionAI/Ring-1T-preview"
save_folder = "/tmp/tiny-random/ring"
processor = AutoTokenizer.from_pretrained(source_model_id)
processor.save_pretrained(save_folder)
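
# Patch the source config: prefix the auto_map entries with the source repo id
# so the custom modeling code still resolves remotely, then shrink every
# size-related field to a tiny footprint.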
with open(hf_hub_download(source_model_id, filename='config.json', repo_type='model'), 'r', encoding='utf-8') as f:
config_json = json.load(f)
for k, v in config_json['auto_map'].items():
config_json['auto_map'][k] = f'{source_model_id}--{v}'
config_json['head_dim'] = 32
config_json['hidden_size'] = 8
config_json['intermediate_size'] = 64
config_json['moe_intermediate_size'] = 64
config_json['first_k_dense_replace'] = 1
config_json['num_attention_heads'] = 8
config_json['num_hidden_layers'] = 2
config_json['num_key_value_heads'] = 4
with open(f"{save_folder}/config.json", "w", encoding='utf-8') as f:
json.dump(config_json, f, indent=2)
config = AutoConfig.from_pretrained(
save_folder,
trust_remote_code=True,
)
print(config)
automap = config_json['auto_map']
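
# Create the randomly initialized model in bfloat16 (the default dtype is
# switched back to float32 afterwards).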
torch.set_default_dtype(torch.bfloat16)
model = AutoModelForCausalLM.from_config(config, trust_remote_code=True)
torch.set_default_dtype(torch.float32)
if file_exists(filename="generation_config.json", repo_id=source_model_id, repo_type='model'):
model.generation_config = GenerationConfig.from_pretrained(
source_model_id, trust_remote_code=True,
)
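
# Randomly re-initialize every parameter with a fixed seed for reproducibility.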
set_seed(42)
model = model.cpu()
with torch.no_grad():
for name, p in sorted(model.named_parameters()):
torch.nn.init.normal_(p, 0, 0.1)
print(name, p.shape)
model.save_pretrained(save_folder)
print(model)
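
# save_pretrained rewrites auto_map to point at locally copied modeling files;
# restore the source-prefixed entries and delete the copied .py files so the
# custom code keeps loading from the source repo.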
with open(f"{save_folder}/config.json", "r", encoding='utf-8') as f:
config_json = json.load(f)
config_json['auto_map'] = automap
with open(f"{save_folder}/config.json", "w", encoding='utf-8') as f:
json.dump(config_json, f, indent=2)
for python_file in Path(save_folder).glob('*.py'):
python_file.unlink()
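
The script prints the following architecture: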
BailingMoeV2ForCausalLM(
  (model): BailingMoeV2Model(
    (word_embeddings): Embedding(157184, 8, padding_idx=156892)
    (layers): ModuleList(
      (0): BailingMoeV2DecoderLayer(
        (attention): BailingMoeV2SdpaAttention(
          (query_key_value): Linear(in_features=8, out_features=512, bias=False)
          (query_layernorm): BailingMoeV2RMSNorm()
          (key_layernorm): BailingMoeV2RMSNorm()
          (dense): Linear(in_features=256, out_features=8, bias=False)
        )
        (mlp): BailingMoeV2MLP(
          (gate_proj): Linear(in_features=8, out_features=64, bias=False)
          (up_proj): Linear(in_features=8, out_features=64, bias=False)
          (down_proj): Linear(in_features=64, out_features=8, bias=False)
          (act_fn): SiLU()
        )
        (input_layernorm): BailingMoeV2RMSNorm()
        (post_attention_layernorm): BailingMoeV2RMSNorm()
      )
      (1): BailingMoeV2DecoderLayer(
        (attention): BailingMoeV2SdpaAttention(
          (query_key_value): Linear(in_features=8, out_features=512, bias=False)
          (query_layernorm): BailingMoeV2RMSNorm()
          (key_layernorm): BailingMoeV2RMSNorm()
          (dense): Linear(in_features=256, out_features=8, bias=False)
        )
        (mlp): BailingMoeV2SparseMoeBlock(
          (experts): ModuleList(
            (0-255): 256 x BailingMoeV2MLP(
              (gate_proj): Linear(in_features=8, out_features=64, bias=False)
              (up_proj): Linear(in_features=8, out_features=64, bias=False)
              (down_proj): Linear(in_features=64, out_features=8, bias=False)
              (act_fn): SiLU()
            )
          )
          (gate): BailingMoeV2Gate()
          (shared_experts): BailingMoeV2MLP(
            (gate_proj): Linear(in_features=8, out_features=64, bias=False)
            (up_proj): Linear(in_features=8, out_features=64, bias=False)
            (down_proj): Linear(in_features=64, out_features=8, bias=False)
            (act_fn): SiLU()
          )
        )
        (input_layernorm): BailingMoeV2RMSNorm()
        (post_attention_layernorm): BailingMoeV2RMSNorm()
      )
    )
    (norm): BailingMoeV2RMSNorm()
    (rotary_emb): BailingMoeV2RotaryEmbedding()
  )
  (lm_head): Linear(in_features=8, out_features=157184, bias=False)
)
Base model: inclusionAI/Ring-1T-preview
Install from pip and serve the model

# Install SGLang from pip:
pip install sglang

# Start the SGLang server:
python3 -m sglang.launch_server \
  --model-path "tiny-random/ring" \
  --host 0.0.0.0 \
  --port 30000

# Call the server with curl (OpenAI-compatible API) exactly as shown above.
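
Since the server exposes an OpenAI-compatible API, it can also be called from the official openai Python client. A minimal sketch, assuming pip install openai (the api_key value is a placeholder; the local server does not validate it):

from openai import OpenAI

# Point the client at the local SGLang server instead of api.openai.com.
client = OpenAI(base_url="http://localhost:30000/v1", api_key="placeholder")

response = client.chat.completions.create(
    model="tiny-random/ring",
    messages=[{"role": "user", "content": "What is the capital of France?"}],
)
print(response.choices[0].message.content)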