---
library_name: transformers
pipeline_tag: text-generation
inference: true
widget:
- text: Hello!
  example_title: Hello world
  group: Python
base_model:
- Qwen/Qwen3-32B
---
This tiny model is for debugging. It is randomly initialized, with a config adapted from [Qwen/Qwen3-32B](https://huggingface.co/Qwen/Qwen3-32B).
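As a minimal sketch, you can confirm how small the checkpoint is by counting its parameters:

```python
from transformers import AutoModelForCausalLM

# Load the tiny checkpoint and count its parameters
model = AutoModelForCausalLM.from_pretrained("yujiepan/qwen3-tiny-random-tp")
print(f"{sum(p.numel() for p in model.parameters()):,} parameters")
```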
### Example usage:
```python
from transformers import pipeline

model_id = "yujiepan/qwen3-tiny-random-tp"
pipe = pipeline(
    "text-generation", model=model_id, device="cuda",
    trust_remote_code=True, max_new_tokens=3,
)
print(pipe("Hello World!"))


# Or load the model and tokenizer directly:
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype="auto",
    device_map="auto",
)
prompt = "Give me a short introduction to large language models."
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True,
    enable_thinking=True,  # Switches between thinking and non-thinking modes. Default is True.
)
print(text)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=128,
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist()
try:
    # rindex finding 151668 (</think>)
    index = len(output_ids) - output_ids[::-1].index(151668)
except ValueError:
    index = 0
thinking_content = tokenizer.decode(output_ids[:index], skip_special_tokens=True).strip("\n")
content = tokenizer.decode(output_ids[index:], skip_special_tokens=True).strip("\n")
print("thinking content:", thinking_content)
print("content:", content)
```
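The hard-coded `151668` above is the id of the `</think>` token. As a minimal sketch (assuming `</think>` is present in the vocabulary, as in the upstream Qwen3 tokenizer), it can be looked up instead of hard-coded:

```python
# Resolve the id of the `</think>` token from the tokenizer
end_think_id = tokenizer.convert_tokens_to_ids("</think>")
print(end_think_id)  # 151668 for the upstream Qwen3 tokenizer
```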
### Code to create this repo:
```python
import torch

from transformers import (
    AutoConfig,
    AutoModelForCausalLM,
    AutoTokenizer,
    GenerationConfig,
    set_seed,
)

source_model_id = "Qwen/Qwen3-32B"
save_folder = "/tmp/yujiepan/qwen3-tiny-random-tp"

tokenizer = AutoTokenizer.from_pretrained(
    source_model_id, trust_remote_code=True,
)
tokenizer.save_pretrained(save_folder)

config = AutoConfig.from_pretrained(
    source_model_id, trust_remote_code=True,
)
config._name_or_path = source_model_id
# Shrink the architecture so the checkpoint stays tiny
config.hidden_size = 8
config.intermediate_size = 32
config.head_dim = 32
config.num_key_value_heads = 4
config.num_attention_heads = 8
config.num_hidden_layers = 2
config.max_window_layers = 1
config.tie_word_embeddings = False
config.layer_types = ['full_attention'] * 2
model = AutoModelForCausalLM.from_config(
    config,
    torch_dtype=torch.bfloat16,
    trust_remote_code=True,
)
# Keep the original generation defaults (chat template, eos ids, etc.)
model.generation_config = GenerationConfig.from_pretrained(
    source_model_id, trust_remote_code=True,
)
# Randomly initialize all weights with a fixed seed for reproducibility
set_seed(42)
with torch.no_grad():
    for name, p in sorted(model.named_parameters()):
        torch.nn.init.normal_(p, 0, 0.1)
        print(name, p.shape)
model.save_pretrained(save_folder)
```
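As a quick sanity check (a minimal sketch, reusing the `save_folder` path from above), the saved checkpoint can be reloaded and run end to end:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

save_folder = "/tmp/yujiepan/qwen3-tiny-random-tp"

# Reload the freshly saved tiny checkpoint and generate a few tokens
tokenizer = AutoTokenizer.from_pretrained(save_folder)
model = AutoModelForCausalLM.from_pretrained(save_folder, torch_dtype=torch.bfloat16)
inputs = tokenizer("Hello World!", return_tensors="pt")
with torch.no_grad():
    out = model.generate(**inputs, max_new_tokens=3)
print(tokenizer.decode(out[0]))
```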