Instructions for using np-deploys/MiniMax-M2.7-AWQ-4bit with libraries, inference providers, notebooks, and local apps. Follow the sections below to get started.
- Libraries
- Transformers
How to use np-deploys/MiniMax-M2.7-AWQ-4bit with Transformers:
# Use a pipeline as a high-level helper
from transformers import pipeline

pipe = pipeline("text-generation", model="np-deploys/MiniMax-M2.7-AWQ-4bit", trust_remote_code=True)
messages = [
    {"role": "user", "content": "Who are you?"},
]
pipe(messages)

# Load model directly
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("np-deploys/MiniMax-M2.7-AWQ-4bit", trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained("np-deploys/MiniMax-M2.7-AWQ-4bit", trust_remote_code=True)
messages = [
    {"role": "user", "content": "Who are you?"},
]
inputs = tokenizer.apply_chat_template(
    messages,
    add_generation_prompt=True,
    tokenize=True,
    return_dict=True,
    return_tensors="pt",
).to(model.device)

outputs = model.generate(**inputs, max_new_tokens=40)
print(tokenizer.decode(outputs[0][inputs["input_ids"].shape[-1]:]))
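For a checkpoint this large it usually helps to let Transformers place the weights automatically and to stream the output as it is generated. The snippet below is a minimal sketch, not part of the upstream card: it assumes CUDA GPUs and the accelerate package are available, and it uses the stock TextStreamer; adjust max_new_tokens to taste.

# Sketch: automatic device placement plus streamed generation.
# Assumptions: GPUs are available and `accelerate` is installed for device_map="auto".
from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer

model_id = "np-deploys/MiniMax-M2.7-AWQ-4bit"
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    trust_remote_code=True,
    torch_dtype="auto",   # keep the checkpoint's compute dtype (bfloat16 here)
    device_map="auto",    # shard across available GPUs via accelerate
)

messages = [{"role": "user", "content": "Who are you?"}]
inputs = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, tokenize=True,
    return_dict=True, return_tensors="pt",
).to(model.device)

# Print tokens to stdout as soon as they are produced.
streamer = TextStreamer(tokenizer, skip_prompt=True)
model.generate(**inputs, max_new_tokens=128, streamer=streamer)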
- Notebooks
- Google Colab
- Kaggle
- Local Apps
- vLLM
How to use np-deploys/MiniMax-M2.7-AWQ-4bit with vLLM:
Install from pip and serve model
# Install vLLM from pip:
pip install vllm

# Start the vLLM server:
vllm serve "np-deploys/MiniMax-M2.7-AWQ-4bit"

# Call the server using curl (OpenAI-compatible API):
curl -X POST "http://localhost:8000/v1/chat/completions" \
    -H "Content-Type: application/json" \
    --data '{
        "model": "np-deploys/MiniMax-M2.7-AWQ-4bit",
        "messages": [
            {
                "role": "user",
                "content": "What is the capital of France?"
            }
        ]
    }'
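The same server can also be called from Python. This is a minimal sketch (not part of the upstream card) using the openai client package against vLLM's OpenAI-compatible endpoint started above; the API key is a placeholder because vLLM does not require one by default.

# Sketch: query the vLLM server with the OpenAI Python client (pip install openai).
from openai import OpenAI

client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")

response = client.chat.completions.create(
    model="np-deploys/MiniMax-M2.7-AWQ-4bit",
    messages=[{"role": "user", "content": "What is the capital of France?"}],
)
print(response.choices[0].message.content)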
Use Docker
# Run the official vLLM OpenAI-compatible server image:
docker run --runtime nvidia --gpus all \
    -v ~/.cache/huggingface:/root/.cache/huggingface \
    --env "HF_TOKEN=<secret>" \
    -p 8000:8000 \
    --ipc=host \
    vllm/vllm-openai:latest \
    --model "np-deploys/MiniMax-M2.7-AWQ-4bit"
- SGLang
How to use np-deploys/MiniMax-M2.7-AWQ-4bit with SGLang:
Install from pip and serve model
# Install SGLang from pip:
pip install sglang

# Start the SGLang server:
python3 -m sglang.launch_server \
    --model-path "np-deploys/MiniMax-M2.7-AWQ-4bit" \
    --host 0.0.0.0 \
    --port 30000

# Call the server using curl (OpenAI-compatible API):
curl -X POST "http://localhost:30000/v1/chat/completions" \
    -H "Content-Type: application/json" \
    --data '{
        "model": "np-deploys/MiniMax-M2.7-AWQ-4bit",
        "messages": [
            {
                "role": "user",
                "content": "What is the capital of France?"
            }
        ]
    }'
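Because the endpoint is OpenAI-compatible, the same request can be made from Python with plain HTTP as well. A minimal sketch, assuming the requests package is installed and the server above is listening on port 30000:

# Sketch: POST the chat request to the SGLang server with `requests`.
import requests

payload = {
    "model": "np-deploys/MiniMax-M2.7-AWQ-4bit",
    "messages": [{"role": "user", "content": "What is the capital of France?"}],
}
resp = requests.post("http://localhost:30000/v1/chat/completions", json=payload, timeout=120)
resp.raise_for_status()
print(resp.json()["choices"][0]["message"]["content"])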
Use Docker images
docker run --gpus all \
    --shm-size 32g \
    -p 30000:30000 \
    -v ~/.cache/huggingface:/root/.cache/huggingface \
    --env "HF_TOKEN=<secret>" \
    --ipc=host \
    lmsysorg/sglang:latest \
    python3 -m sglang.launch_server \
    --model-path "np-deploys/MiniMax-M2.7-AWQ-4bit" \
    --host 0.0.0.0 \
    --port 30000

# Call the server using curl (OpenAI-compatible API):
curl -X POST "http://localhost:30000/v1/chat/completions" \
    -H "Content-Type: application/json" \
    --data '{
        "model": "np-deploys/MiniMax-M2.7-AWQ-4bit",
        "messages": [
            {
                "role": "user",
                "content": "What is the capital of France?"
            }
        ]
    }'
- Docker Model Runner
How to use np-deploys/MiniMax-M2.7-AWQ-4bit with Docker Model Runner:
docker model run hf.co/np-deploys/MiniMax-M2.7-AWQ-4bit
Model configuration (config.json):

{
  "architectures": [
    "MiniMaxM2ForCausalLM"
  ],
  "attention_dropout": 0.0,
  "attn_type_list": [
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1
  ],
  "auto_map": {
    "AutoConfig": "configuration_minimax_m2.MiniMaxM2Config",
    "AutoModelForCausalLM": "modeling_minimax_m2.MiniMaxM2ForCausalLM"
  },
  "bos_token_id": 1,
  "dtype": "bfloat16",
  "eos_token_id": 2,
  "head_dim": 128,
  "hidden_act": "silu",
  "hidden_size": 3072,
  "initializer_range": 0.02,
  "intermediate_size": 1536,
  "max_position_embeddings": 196608,
  "model_type": "minimax_m2",
  "mtp_transformer_layers": 1,
  "num_attention_heads": 48,
  "num_experts_per_tok": 8,
  "num_hidden_layers": 62,
  "num_key_value_heads": 8,
  "num_local_experts": 256,
  "num_mtp_modules": 3,
  "output_router_logits": false,
  "partial_rotary_factor": 0.5,
  "qk_norm_type": "per_layer",
  "quantization_config": {
    "config_groups": {
      "group_0": {
        "format": "pack-quantized",
        "input_activations": null,
        "output_activations": null,
        "targets": [
          "Linear"
        ],
        "weights": {
          "actorder": null,
          "block_structure": null,
          "dynamic": false,
          "group_size": 32,
          "num_bits": 4,
          "observer": "mse",
          "observer_kwargs": {},
          "scale_dtype": null,
          "strategy": "group",
          "symmetric": true,
          "type": "int",
          "zp_dtype": null
        }
      }
    },
    "format": "pack-quantized",
    "global_compression_ratio": null,
    "ignore": [
      "model.layers.0.block_sparse_moe.gate",
      "model.layers.1.block_sparse_moe.gate",
      "model.layers.2.block_sparse_moe.gate",
      "model.layers.3.block_sparse_moe.gate",
      "model.layers.4.block_sparse_moe.gate",
      "model.layers.5.block_sparse_moe.gate",
      "model.layers.6.block_sparse_moe.gate",
      "model.layers.7.block_sparse_moe.gate",
      "model.layers.8.block_sparse_moe.gate",
      "model.layers.9.block_sparse_moe.gate",
      "model.layers.10.block_sparse_moe.gate",
      "model.layers.11.block_sparse_moe.gate",
      "model.layers.12.block_sparse_moe.gate",
      "model.layers.13.block_sparse_moe.gate",
      "model.layers.14.block_sparse_moe.gate",
      "model.layers.15.block_sparse_moe.gate",
      "model.layers.16.block_sparse_moe.gate",
      "model.layers.17.block_sparse_moe.gate",
      "model.layers.18.block_sparse_moe.gate",
      "model.layers.19.block_sparse_moe.gate",
      "model.layers.20.block_sparse_moe.gate",
      "model.layers.21.block_sparse_moe.gate",
      "model.layers.22.block_sparse_moe.gate",
      "model.layers.23.block_sparse_moe.gate",
      "model.layers.24.block_sparse_moe.gate",
      "model.layers.25.block_sparse_moe.gate",
      "model.layers.26.block_sparse_moe.gate",
      "model.layers.27.block_sparse_moe.gate",
      "model.layers.28.block_sparse_moe.gate",
      "model.layers.29.block_sparse_moe.gate",
      "model.layers.30.block_sparse_moe.gate",
      "model.layers.31.block_sparse_moe.gate",
      "model.layers.32.block_sparse_moe.gate",
      "model.layers.33.block_sparse_moe.gate",
      "model.layers.34.block_sparse_moe.gate",
      "model.layers.35.block_sparse_moe.gate",
      "model.layers.36.block_sparse_moe.gate",
      "model.layers.37.block_sparse_moe.gate",
      "model.layers.38.block_sparse_moe.gate",
      "model.layers.39.block_sparse_moe.gate",
      "model.layers.40.block_sparse_moe.gate",
      "model.layers.41.block_sparse_moe.gate",
      "model.layers.42.block_sparse_moe.gate",
      "model.layers.43.block_sparse_moe.gate",
      "model.layers.44.block_sparse_moe.gate",
      "model.layers.45.block_sparse_moe.gate",
      "model.layers.46.block_sparse_moe.gate",
      "model.layers.47.block_sparse_moe.gate",
      "model.layers.48.block_sparse_moe.gate",
      "model.layers.49.block_sparse_moe.gate",
      "model.layers.50.block_sparse_moe.gate",
      "model.layers.51.block_sparse_moe.gate",
      "model.layers.52.block_sparse_moe.gate",
      "model.layers.53.block_sparse_moe.gate",
      "model.layers.54.block_sparse_moe.gate",
      "model.layers.55.block_sparse_moe.gate",
      "model.layers.56.block_sparse_moe.gate",
      "model.layers.57.block_sparse_moe.gate",
      "model.layers.58.block_sparse_moe.gate",
      "model.layers.59.block_sparse_moe.gate",
      "model.layers.60.block_sparse_moe.gate",
      "model.layers.61.block_sparse_moe.gate",
      "lm_head"
    ],
    "kv_cache_scheme": null,
    "quant_method": "compressed-tensors",
    "quantization_status": "compressed",
    "sparsity_config": {},
    "transform_config": {},
    "version": "0.15.1.a20260409"
  },
  "rms_norm_eps": 1e-06,
  "rope_theta": 5000000,
  "rotary_dim": 64,
  "router_aux_loss_coef": 0.001,
  "router_jitter_noise": 0.0,
  "scoring_func": "sigmoid",
  "shared_intermediate_size": 0,
  "sliding_window": null,
  "tie_word_embeddings": false,
  "transformers_version": "4.57.6",
  "use_cache": true,
  "use_mtp": true,
  "use_qk_norm": true,
  "use_routing_bias": true,
  "vocab_size": 200064
}
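The config gives enough information for a rough size estimate. The sketch below is a back-of-the-envelope calculation, not an official figure: it assumes each expert is a gated SwiGLU MLP (gate, up, and down projections), counts only attention, expert, embedding, and output-head weights, and ignores norms, routers, and the MTP modules; the 4-bit memory line also ignores per-group scale overhead and the unquantized gates and lm_head.

# Rough parameter estimate from the config values above (assumptions noted in the text).
hidden = 3072               # hidden_size
ffn = 1536                  # intermediate_size per expert
layers = 62                 # num_hidden_layers
experts = 256               # num_local_experts
active_experts = 8          # num_experts_per_tok
heads, kv_heads, head_dim = 48, 8, 128
vocab = 200064

# Attention: Q and O projections use all 48 heads, K and V use the 8 KV heads (GQA).
attn_per_layer = hidden * heads * head_dim * 2 + hidden * kv_heads * head_dim * 2

# Each expert: gate, up, and down projections of a SwiGLU MLP.
expert_params = 3 * hidden * ffn

total = (attn_per_layer + experts * expert_params) * layers + 2 * vocab * hidden
active = (attn_per_layer + active_experts * expert_params) * layers + 2 * vocab * hidden

print(f"total  ~ {total / 1e9:.1f} B parameters")
print(f"active ~ {active / 1e9:.1f} B parameters per token")
# Rough weight memory at 4 bits per weight:
print(f"4-bit weights ~ {total * 0.5 / 1e9:.0f} GB")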