---
base_model:
- Qwen/Qwen3-Coder-Next
tags:
- qwen3
- moe
- int2
- quantized
- autoround
license: apache-2.0
pipeline_tag: text-generation
---

<p align="center">
  <img src="https://cdn-uploads.huggingface.co/production/uploads/685e122d50df66f41587d406/XU7ovrDdgsNFAzahuyvI1.png" alt="Tone">
</p>

Language: [中文](https://huggingface.co/YCWTG/Qwen3-Coder-Next-int2-mixed-AutoRound/blob/main/README_zh.md) | English

## Model Details

This model is a **mixed-bit INT2 quantized** version of [Qwen/Qwen3-Coder-Next](https://huggingface.co/Qwen/Qwen3-Coder-Next), generated by [intel/auto-round](https://github.com/intel/auto-round) with symmetric quantization and group_size 512. Please follow the license of the original model.
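
For intuition, the sketch below shows plain per-group symmetric round-to-nearest quantization, which is what the stored INT2 format reconstructs to. It is only an illustration: AutoRound itself learns rounding and clipping offsets via signed gradient descent rather than rounding naively.

```python
import torch

def fake_quant_symmetric(w: torch.Tensor, bits: int = 2, group_size: int = 512) -> torch.Tensor:
    """Quantize-dequantize `w` per group; assumes w.numel() is a multiple of group_size."""
    qmax = 2 ** (bits - 1) - 1                         # 2-bit symmetric grid: {-2, -1, 0, 1}
    groups = w.reshape(-1, group_size)
    scale = groups.abs().amax(dim=1, keepdim=True).clamp_min(1e-8) / qmax
    q = torch.clamp(torch.round(groups / scale), -qmax - 1, qmax)
    return (q * scale).reshape(w.shape)                # the values the INT2 weights decode to
```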

### Quantization Strategy (Intel MoE Recipe)

| Layer Type | Bits | Notes |
|------------|------|-------|
| Expert layers (512 experts) | 2-bit | MoE expert MLPs |
| Non-expert layers (attention, gate) | 16-bit | Higher precision for quality |
| shared_expert_gate | 16-bit | Skipped (shape not divisible by 32) |
| lm_head | Original | Excluded by AutoRound |

### Model Size

- **Original BF16**: ~160 GB
- **Mixed INT2**: ~25 GB (≈84% smaller)
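
The arithmetic behind those numbers, as a rough sanity check (assuming the ~160 GB BF16 checkpoint implies about 80B parameters, almost all of them in the expert MLPs):

```python
# Back-of-envelope size estimate; not an exact accounting.
total_params = 160e9 / 2                 # BF16 = 2 bytes/param -> ~80B parameters
expert_gb = total_params * 2 / 8 / 1e9   # 2-bit packing = 0.25 bytes/param -> ~20 GB
overhead_gb = 25 - expert_gb             # 16-bit layers + per-group scales
print(f"2-bit experts: ~{expert_gb:.0f} GB, remainder: ~{overhead_gb:.0f} GB")
print(f"reduction: {(160 - 25) / 160:.1%}")  # -> 84.4%
```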

## Quickstart

### Transformers Usage

```python
import math
import os

os.environ.setdefault(
    "PYTORCH_ALLOC_CONF",
    # Keep a safer allocator default and transparently migrate the deprecated env var.
    os.environ.pop("PYTORCH_CUDA_ALLOC_CONF", None) or "expandable_segments:True",
)

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL_NAME = "YCWTG/Qwen3-Coder-Next-int2-mixed-AutoRound"
AUTO_MAX_TOKENS = True
MANUAL_MAX_NEW_TOKENS = 128
AUTO_MAX_TOKENS_RATIO = 1.5

HAS_CUDA = torch.cuda.is_available()
# Read total VRAM once and use it to choose the default loading mode.
GPU_TOTAL_MIB = torch.cuda.get_device_properties(0).total_memory // (1024 ** 2) if HAS_CUDA else 0
# 32GB-class GPUs default to False; smaller GPUs default to True.
ENABLE_CPU_OFFLOAD = HAS_CUDA and GPU_TOTAL_MIB < 32000
MAX_MEMORY = {0: "18GiB", "cpu": "64GiB"} if ENABLE_CPU_OFFLOAD else {0: "22GiB", "cpu": "16GiB"}


def get_input_device(model):
    # With device_map="auto", the first usable device may not be model.device.
    device_map = getattr(model, "hf_device_map", None)
    cpu_device = None
    if isinstance(device_map, dict):
        for loc in device_map.values():
            if isinstance(loc, int):
                return torch.device(f"cuda:{loc}")
            if isinstance(loc, str):
                if loc.startswith("cuda"):
                    return torch.device(loc)
                if loc.startswith("cpu"):
                    cpu_device = torch.device("cpu")
    return cpu_device or next(model.parameters()).device


def load_model():
    print("Loading model...")
    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, trust_remote_code=True, use_fast=True)
    # Use EOS as PAD to avoid warnings for chat generation on models without a pad token.
    tokenizer.pad_token = tokenizer.pad_token or tokenizer.eos_token
    tokenizer.padding_side = "left"

    model_kwargs = {
        "pretrained_model_name_or_path": MODEL_NAME,
        "dtype": torch.bfloat16,
        "trust_remote_code": True,
        "low_cpu_mem_usage": True,
        "device_map": "auto" if HAS_CUDA else "cpu",
    }
    if HAS_CUDA:
        print(f"GPU total memory: {GPU_TOTAL_MIB} MiB")
        model_kwargs["max_memory"] = MAX_MEMORY
        if ENABLE_CPU_OFFLOAD:
            model_kwargs["offload_buffers"] = True
            print("CPU offload: ON")
        else:
            print("CPU offload: OFF (GPU preferred, small CPU spill enabled)")
    else:
        print("CUDA not available, running on CPU")

    try:
        model = AutoModelForCausalLM.from_pretrained(**model_kwargs)
    except RuntimeError as e:
        # Give a beginner-friendly hint instead of only showing the raw stack trace.
        if "out of memory" in str(e).lower():
            print("\nCUDA OOM while loading the model.")
            print("Close other GPU programs, or set ENABLE_CPU_OFFLOAD = True and run again.")
        raise
    model.eval()
    return model, tokenizer


def multiline_input():
    print('User (type "END" on a single line to send, type "exit" to quit):')
    lines = []
    while True:
        line = input()
        text = line.strip()
        if text.lower() in {"exit", "quit"}:
            return None
        if text == "END":
            break
        lines.append(line)
    return "\n".join(lines)


def build_input_ids(tokenizer, messages, device):
    if getattr(tokenizer, "chat_template", None):
        # Preferred path for chat models: let the tokenizer build the prompt format.
        prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    else:
        # Generic fallback for tokenizers without a built-in chat template.
        prompt = "\n".join(
            [f"{'User' if m['role'] == 'user' else 'Assistant'}: {m['content']}" for m in messages]
            + ["Assistant:"]
        )
    return tokenizer(prompt, return_tensors="pt")["input_ids"].to(device)


def chat_loop(model, tokenizer):
    print("\n===== Chat Started =====\n")
    print(f"Auto max_tokens: {'ON' if AUTO_MAX_TOKENS else 'OFF'}")
    if not AUTO_MAX_TOKENS:
        print(f"Manual max_new_tokens: {MANUAL_MAX_NEW_TOKENS}")
    print(
        "Tip: Set ENABLE_CPU_OFFLOAD = False for a faster full-GPU attempt."
        if ENABLE_CPU_OFFLOAD
        else "Tip: If max_tokens is too large and you hit CUDA OOM, set ENABLE_CPU_OFFLOAD = True."
    )

    messages = []
    device = get_input_device(model)
    print(f"Input device: {device}")

    while True:
        user_text = multiline_input()
        if user_text is None:
            break

        messages.append({"role": "user", "content": user_text})
        input_ids = build_input_ids(tokenizer, messages, device)
        prompt_tokens = int(input_ids.shape[-1])
        # Auto mode scales output length with prompt length (1.5x by default).
        max_new_tokens = max(1, math.ceil(prompt_tokens * AUTO_MAX_TOKENS_RATIO)) if AUTO_MAX_TOKENS else int(MANUAL_MAX_NEW_TOKENS)

        print(f"Prompt tokens: {prompt_tokens}")
        print(f"max_new_tokens: {max_new_tokens}")

        try:
            with torch.inference_mode():
                output_ids = model.generate(
                    input_ids=input_ids,
                    max_new_tokens=max_new_tokens,
                    do_sample=True,
                    temperature=1.0,
                    top_p=0.95,
                    top_k=40,
                    use_cache=False,
                    pad_token_id=tokenizer.pad_token_id,
                    eos_token_id=tokenizer.eos_token_id,
                )
        except RuntimeError as e:
            error_text = str(e).lower()
            if HAS_CUDA and ("cublas_status_alloc_failed" in error_text or "out of memory" in error_text):
                # Clear cached blocks so the next try starts from a cleaner CUDA state.
                torch.cuda.empty_cache()
                print("\nCUDA OOM during generation.")
                print("Set ENABLE_CPU_OFFLOAD = True, or disable AUTO_MAX_TOKENS and lower MANUAL_MAX_NEW_TOKENS.")
                messages.pop()
                continue
            raise

        reply_text = tokenizer.decode(output_ids[0, input_ids.shape[-1]:], skip_special_tokens=True)
        print(f"\nAssistant:\n{reply_text}\n")
        messages.append({"role": "assistant", "content": reply_text})


if __name__ == "__main__":
    model, tokenizer = load_model()
    chat_loop(model, tokenizer)
```
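
For a quick non-interactive test, a minimal single-turn sketch (the prompt and generation settings here are illustrative, not tuned recommendations):

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "YCWTG/Qwen3-Coder-Next-int2-mixed-AutoRound"
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_name, dtype=torch.bfloat16, device_map="auto", trust_remote_code=True
)

messages = [{"role": "user", "content": "Write a Python function that checks whether a number is prime."}]
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

with torch.inference_mode():
    output_ids = model.generate(input_ids, max_new_tokens=256)

# Decode only the newly generated tokens, not the prompt.
print(tokenizer.decode(output_ids[0, input_ids.shape[-1]:], skip_special_tokens=True))
```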

## Generate the Model

```python
import os

from auto_round import AutoRound

model_name = "Qwen/Qwen3-Coder-Next"

# Build layer config for mixed bits (Intel recipe)
layer_config = {}
for i in range(48):  # 48 layers
    prefix = f"model.layers.{i}"

    # Attention layers -> 16-bit
    if i in [3, 7, 11, 15, 19, 23, 27, 31, 35, 39, 43, 47]:  # self_attn layers
        for proj in ["q_proj", "k_proj", "v_proj", "o_proj"]:
            layer_config[f"{prefix}.self_attn.{proj}"] = {"bits": 16}
    else:  # linear_attn layers -> 16-bit
        for proj in ["in_proj_qkvz", "in_proj_ba", "out_proj"]:
            layer_config[f"{prefix}.linear_attn.{proj}"] = {"bits": 16}

    # MLP gate -> 16-bit
    layer_config[f"{prefix}.mlp.gate"] = {"bits": 16}

    # shared_expert_gate -> 16-bit (skipped)
    layer_config[f"{prefix}.mlp.shared_expert_gate"] = {"bits": 16}

autoround = AutoRound(
    model_name,
    bits=2,  # Default for experts
    group_size=128,
    sym=True,
    iters=1000,
    nsamples=512,
    lr=2e-3,
    layer_config=layer_config,
    low_gpu_mem_usage=True,
    enable_alg_ext=True,
)
# Expand "~" explicitly; a literal tilde would be created as a directory name.
output_dir = os.path.expanduser("~/.cache/model/Qwen3-Coder-Next-int2-mixed-AutoRound")
autoround.quantize_and_save(output_dir, format="auto_round")
```
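
After export, you can sanity-check what was saved by reading back the stored quantization settings. The exact `quantization_config` schema varies across AutoRound versions, so the field names below are assumptions to adapt rather than a fixed contract:

```python
import json
import os

# Reuses `output_dir` from the script above.
with open(os.path.join(output_dir, "config.json")) as f:
    config = json.load(f)

qcfg = config.get("quantization_config", {})
print("quantization_config keys:", sorted(qcfg))
print("default bits:", qcfg.get("bits"),
      "| group_size:", qcfg.get("group_size"),
      "| sym:", qcfg.get("sym"))
```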

## Ethical Considerations and Limitations

The model can produce factually incorrect output and should not be relied on for factually accurate information. Because of the limitations of the pretrained model and the finetuning datasets, it may generate lewd, biased, or otherwise offensive outputs.

Therefore, before deploying any applications of this model, developers should perform safety testing.

## Caveats and Recommendations

Users (both direct and downstream) should be made aware of the risks, biases, and limitations of the model.

Here are a couple of useful links to learn more about Intel's AI software:

- [Intel Neural Compressor](https://github.com/intel/neural-compressor)
- [AutoRound](https://github.com/intel/auto-round)

## Disclaimer

The license on this model does not constitute legal advice. We are not responsible for the actions of third parties who use this model. Please consult an attorney before using this model for commercial purposes.

## Cite

```bibtex
@article{cheng2023optimize,
  title={Optimize weight rounding via signed gradient descent for the quantization of LLMs},
  author={Cheng, Wenhua and Zhang, Weiwei and Shen, Haihao and Cai, Yiyang and He, Xin and Lv, Kaokao and Liu, Yi},
  journal={arXiv preprint arXiv:2309.05516},
  year={2023}
}
```

[arXiv](https://arxiv.org/abs/2309.05516)