Tiny dummy models
A collection of randomly initialized tiny models for debugging/testing purposes.
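A typical use (an illustrative sketch, not from the model card; the test name is hypothetical) is pinning one of these checkpoints in a unit test so the full tokenize/generate path runs quickly without downloading real weights:

# Hypothetical pytest-style smoke test built on the tiny checkpoint.
from transformers import AutoModelForCausalLM, AutoTokenizer

def test_generation_path_runs():
    tokenizer = AutoTokenizer.from_pretrained("yujiepan/phi-3-tiny-random", trust_remote_code=True)
    model = AutoModelForCausalLM.from_pretrained("yujiepan/phi-3-tiny-random", trust_remote_code=True)
    inputs = tokenizer("Hello", return_tensors="pt")
    outputs = model.generate(**inputs, max_new_tokens=5)
    # At least one new token must have been appended to the prompt.
    assert outputs.shape[-1] > inputs["input_ids"].shape[-1]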
# Load model directly
from transformers import AutoTokenizer, AutoModelForCausalLM
tokenizer = AutoTokenizer.from_pretrained("yujiepan/phi-3-tiny-random", trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained("yujiepan/phi-3-tiny-random", trust_remote_code=True)
messages = [
    {"role": "user", "content": "Who are you?"},
]
inputs = tokenizer.apply_chat_template(
    messages,
    add_generation_prompt=True,
    tokenize=True,
    return_dict=True,
    return_tensors="pt",
).to(model.device)
outputs = model.generate(**inputs, max_new_tokens=40)
print(tokenizer.decode(outputs[0][inputs["input_ids"].shape[-1]:]))

This model is randomly initialized, using the config from microsoft/Phi-3-mini-128k-instruct but with a smaller size. Note that the model is in float16.
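Because the weights are never trained, the value of this checkpoint is its shape rather than its outputs. As a quick sanity check (a minimal sketch, not part of the original card), you can confirm how small the model is and that the stored weights are float16:

from transformers import AutoModelForCausalLM

# torch_dtype="auto" keeps the stored float16 weights instead of upcasting to float32.
model = AutoModelForCausalLM.from_pretrained(
    "yujiepan/phi-3-tiny-random", torch_dtype="auto", trust_remote_code=True
)
n_params = sum(p.numel() for p in model.parameters())
print(f"{n_params:,} parameters, dtype={model.dtype}")
# Nearly all parameters sit in the vocab_size x hidden_size embedding and
# lm_head matrices; the two transformer layers hold only a few thousand weights each.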
Code used to create and upload this model:
import os

import torch
import transformers
from huggingface_hub import create_repo, upload_folder

source_model_id = 'microsoft/Phi-3-mini-128k-instruct'
save_path = '/tmp/yujiepan/phi-3-tiny-random'
repo_id = 'yujiepan/phi-3-tiny-random'

# Take the original config and shrink every size-related field.
config = transformers.AutoConfig.from_pretrained(
    source_model_id, trust_remote_code=True)
config.hidden_size = 16
config.intermediate_size = 32
config.num_attention_heads = 4
config.num_hidden_layers = 2
config.num_key_value_heads = 4
# The rope_scaling factor lists must have head_dim / 2 entries;
# with hidden_size=16 and 4 heads, head_dim=4, so 2 entries each.
config.rope_scaling['long_factor'] = [1.0299, 1.0499]
config.rope_scaling['short_factor'] = [1.05, 1.05]

# from_config() builds the architecture with randomly initialized weights.
model = transformers.AutoModelForCausalLM.from_config(
    config, trust_remote_code=True)
model = model.to(torch.float16)
model.save_pretrained(save_path)

# Reuse the source model's tokenizer unchanged.
tokenizer = transformers.AutoTokenizer.from_pretrained(
    source_model_id, trust_remote_code=True)
tokenizer.save_pretrained(save_path)

# Smoke test in float32 (some float16 ops are not implemented on CPU).
result = transformers.pipelines.pipeline(
    'text-generation',
    model=model.float(), tokenizer=tokenizer)('Hello')
print(result)
os.system(f'ls -alh {save_path}')

# Push the folder, then the processor (for a text-only model,
# AutoProcessor resolves to the tokenizer).
create_repo(repo_id, exist_ok=True)
upload_folder(repo_id=repo_id, folder_path=save_path)

from transformers import AutoProcessor
AutoProcessor.from_pretrained(source_model_id, trust_remote_code=True).push_to_hub(repo_id)
# Use a pipeline as a high-level helper
from transformers import pipeline

pipe = pipeline("text-generation", model="yujiepan/phi-3-tiny-random", trust_remote_code=True)
messages = [
    {"role": "user", "content": "Who are you?"},
]
pipe(messages)
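Since the weights are random, the generated text is meaningless; these snippets only verify that loading, chat templating, and generation run end to end, which is all a debugging model needs to do.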