---
language:
- zh
- en
- fr
- es
- pt
- de
- it
- ru
- ja
- ko
- vi
- th
- ar
- id
- tr
- fa
- nl
- pl
- cs
- he
- sv
- fi
- da
- 'no'
- el
- bg
- uk
- ur
- sr
- ms
- zsm
- nld
base_model:
- Qwen/Qwen3-8B
pipeline_tag: text-generation
tags:
- qwen
- qwen3
- fp8
- vllm
- conversational
- text-generation-inference
license: apache-2.0
license_name: apache-2.0
name: RedHatAI/Qwen3-8B-FP8-dynamic
description: >-
  This model was obtained by quantizing activations and weights of Qwen3-8B to FP8 data type.
readme: https://huggingface.co/RedHatAI/Qwen3-8B-FP8-dynamic/main/README.md
tasks:
- text-to-text
provider: Alibaba Cloud
license_link: https://www.apache.org/licenses/LICENSE-2.0
validated_on:
- RHOAI 2.24
- RHAIIS 3.2.1
---

# Qwen3-8B-FP8-dynamic

## Model Overview

- **Model Architecture:** Qwen3ForCausalLM
  - **Input:** Text
  - **Output:** Text
- **Model Optimizations:**
  - **Activation quantization:** FP8
  - **Weight quantization:** FP8
- **Intended Use Cases:**
  - Reasoning.
  - Function calling.
  - Subject matter experts via fine-tuning.
  - Multilingual instruction following.
  - Translation.
- **Out-of-scope:** Use in any manner that violates applicable laws or regulations (including trade compliance laws).
- **Release Date:** 05/02/2025
- **Version:** 1.0
- **Validated on:** RHOAI 2.24, RHAIIS 3.2.1
- **Model Developers:** Red Hat (Neural Magic)

### Model Optimizations

This model was obtained by quantizing activations and weights of [Qwen3-8B](https://huggingface.co/Qwen/Qwen3-8B) to the FP8 data type. This optimization reduces the number of bits used to represent weights and activations from 16 to 8, reducing GPU memory requirements (by approximately 50%) and increasing matrix-multiply compute throughput (by approximately 2x). Weight quantization also reduces disk size requirements by approximately 50%.

Only the weights and activations of the linear operators within the transformer blocks are quantized. Weights are quantized with a symmetric static per-channel scheme, whereas activations are quantized with a symmetric dynamic per-token scheme. The [llm-compressor](https://github.com/vllm-project/llm-compressor) library is used for quantization.

## Deployment

This model can be deployed efficiently using the [vLLM](https://docs.vllm.ai/en/latest/) backend, as shown in the example below.

```python
from vllm import LLM, SamplingParams
from transformers import AutoTokenizer

model_id = "RedHatAI/Qwen3-8B-FP8-dynamic"
number_gpus = 1

sampling_params = SamplingParams(temperature=0.6, top_p=0.95, top_k=20, min_p=0, max_tokens=256)

tokenizer = AutoTokenizer.from_pretrained(model_id)

messages = [{"role": "user", "content": "Give me a short introduction to large language models."}]

prompts = tokenizer.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)

llm = LLM(model=model_id, tensor_parallel_size=number_gpus)

outputs = llm.generate(prompts, sampling_params)
generated_text = outputs[0].outputs[0].text
print(generated_text)
```

vLLM also supports OpenAI-compatible serving. See the [documentation](https://docs.vllm.ai/en/latest/) for more details.
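As a quick illustration of OpenAI-compatible serving, the sketch below assumes the model has already been launched separately with `vllm serve RedHatAI/Qwen3-8B-FP8-dynamic` on the default port 8000 and queries it with the `openai` Python client; the `base_url` and `api_key` values are placeholders for a local deployment, not values fixed by this model card.

```python
# Minimal sketch: query a locally running `vllm serve RedHatAI/Qwen3-8B-FP8-dynamic`
# instance through its OpenAI-compatible API. The base_url and api_key below are
# assumptions for a default local deployment.
from openai import OpenAI

client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")

response = client.chat.completions.create(
    model="RedHatAI/Qwen3-8B-FP8-dynamic",
    messages=[{"role": "user", "content": "Give me a short introduction to large language models."}],
    temperature=0.6,
    top_p=0.95,
    max_tokens=256,
)
print(response.choices[0].message.content)
```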
### Deploy on Red Hat AI Inference Server

```bash
podman run --rm -it --device nvidia.com/gpu=all -p 8000:8000 \
  --ipc=host \
  --env "HUGGING_FACE_HUB_TOKEN=$HF_TOKEN" \
  --env "HF_HUB_OFFLINE=0" -v ~/.cache/vllm:/home/vllm/.cache \
  --name=vllm \
  registry.access.redhat.com/rhaiis/rh-vllm-cuda \
  vllm serve \
  --tensor-parallel-size 8 \
  --max-model-len 32768 \
  --enforce-eager --model RedHatAI/Qwen3-8B-FP8-dynamic
```
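To confirm the container is serving before sending real traffic, a small check against the OpenAI-compatible `/v1/models` endpoint can be used. The sketch below assumes the container was started with the podman command above and is reachable on `localhost:8000`; both are assumptions of this example rather than requirements of the image.

```python
# Minimal sketch: confirm the Red Hat AI Inference Server container is up and the
# model is registered, assuming it listens on localhost:8000 as started above.
import requests

resp = requests.get("http://localhost:8000/v1/models", timeout=10)
resp.raise_for_status()
for model in resp.json()["data"]:
    print(model["id"])  # expect RedHatAI/Qwen3-8B-FP8-dynamic
```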
### Deploy on Red Hat OpenShift AI

```yaml
# Setting up vllm server with ServingRuntime
# Save as: vllm-servingruntime.yaml
apiVersion: serving.kserve.io/v1alpha1
kind: ServingRuntime
metadata:
  name: vllm-cuda-runtime # OPTIONAL CHANGE: set a unique name
  annotations:
    openshift.io/display-name: vLLM NVIDIA GPU ServingRuntime for KServe
    opendatahub.io/recommended-accelerators: '["nvidia.com/gpu"]'
  labels:
    opendatahub.io/dashboard: 'true'
spec:
  annotations:
    prometheus.io/port: '8080'
    prometheus.io/path: '/metrics'
  multiModel: false
  supportedModelFormats:
    - autoSelect: true
      name: vLLM
  containers:
    - name: kserve-container
      image: quay.io/modh/vllm:rhoai-2.24-cuda # CHANGE if needed. If AMD: quay.io/modh/vllm:rhoai-2.24-rocm
      command:
        - python
        - -m
        - vllm.entrypoints.openai.api_server
      args:
        - "--port=8080"
        - "--model=/mnt/models"
        - "--served-model-name={{.Name}}"
      env:
        - name: HF_HOME
          value: /tmp/hf_home
      ports:
        - containerPort: 8080
          protocol: TCP
```

```yaml
# Attach model to vllm server. This is an NVIDIA template
# Save as: inferenceservice.yaml
apiVersion: serving.kserve.io/v1beta1
kind: InferenceService
metadata:
  annotations:
    openshift.io/display-name: Qwen3-8B-FP8-dynamic # OPTIONAL CHANGE
    serving.kserve.io/deploymentMode: RawDeployment
  name: Qwen3-8B-FP8-dynamic # specify model name. This value will be used to invoke the model in the payload
  labels:
    opendatahub.io/dashboard: 'true'
spec:
  predictor:
    maxReplicas: 1
    minReplicas: 1
    model:
      modelFormat:
        name: vLLM
      name: ''
      resources:
        limits:
          cpu: '2'            # this is model specific
          memory: 8Gi         # this is model specific
          nvidia.com/gpu: '1' # this is accelerator specific
        requests:             # same comment for this block
          cpu: '1'
          memory: 4Gi
          nvidia.com/gpu: '1'
      runtime: vllm-cuda-runtime # must match the ServingRuntime name above
      storageUri: oci://registry.redhat.io/rhelai1/modelcar-qwen3-8b-fp8-dynamic:1.5
    tolerations:
      - effect: NoSchedule
        key: nvidia.com/gpu
        operator: Exists
```

```bash
# make sure first to be in the project where you want to deploy the model
# oc project <project-name>

# apply both resources to run model
# Apply the ServingRuntime
oc apply -f vllm-servingruntime.yaml

# Apply the InferenceService
oc apply -f qwen-inferenceservice.yaml
```

```bash
# Replace <inference-service-name> and <cluster-ingress-domain> below:
# - Run `oc get inferenceservice` to find your URL if unsure.

# Call the server using curl:
curl https://<inference-service-name>-predictor-default.<cluster-ingress-domain>/v1/chat/completions \
        -H "Content-Type: application/json" \
        -d '{
    "model": "Qwen3-8B-FP8-dynamic",
    "stream": true,
    "stream_options": {
        "include_usage": true
    },
    "max_tokens": 1,
    "messages": [
        {
            "role": "user",
            "content": "How can a bee fly when its wings are so small?"
        }
    ]
}'
```

See [Red Hat OpenShift AI documentation](https://docs.redhat.com/en/documentation/red_hat_openshift_ai/2025) for more details.
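The same request can be issued from Python instead of curl. The sketch below uses the `openai` client with streaming enabled; the `base_url` placeholder must be replaced with your own InferenceService URL (see `oc get inferenceservice`), and the `api_key` handling is an assumption rather than a fixed requirement of the deployment.

```python
# Minimal sketch: the same chat request as the curl example above, issued from Python.
# The base_url below is a placeholder for your InferenceService endpoint.
from openai import OpenAI

client = OpenAI(
    base_url="https://<inference-service-name>-predictor-default.<cluster-ingress-domain>/v1",
    api_key="EMPTY",  # some RHOAI deployments may require a real token here
)

stream = client.chat.completions.create(
    model="Qwen3-8B-FP8-dynamic",
    stream=True,
    max_tokens=256,
    messages=[{"role": "user", "content": "How can a bee fly when its wings are so small?"}],
)
for chunk in stream:
    if chunk.choices and chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)
```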
## Creation
This model was created with [llm-compressor](https://github.com/vllm-project/llm-compressor) by running the code snippet below.

```python
from llmcompressor.modifiers.quantization import QuantizationModifier
from llmcompressor.transformers import oneshot
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load model
model_stub = "Qwen/Qwen3-8B"
model_name = model_stub.split("/")[-1]

model = AutoModelForCausalLM.from_pretrained(model_stub)
tokenizer = AutoTokenizer.from_pretrained(model_stub)

# Configure the quantization algorithm and scheme
recipe = QuantizationModifier(
    ignore=["lm_head"],
    targets="Linear",
    scheme="FP8_dynamic",
)

# Apply quantization
oneshot(
    model=model,
    recipe=recipe,
)

# Save to disk in compressed-tensors format
save_path = model_name + "-FP8-dynamic"
model.save_pretrained(save_path)
tokenizer.save_pretrained(save_path)
print(f"Model and tokenizer saved to: {save_path}")
```
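A quick way to sanity-check the result is to inspect the saved checkpoint's `config.json` for the quantization metadata written by llm-compressor. This is a minimal sketch, assuming the checkpoint was saved to `Qwen3-8B-FP8-dynamic` as in the snippet above; the exact contents of `quantization_config` are determined by llm-compressor / compressed-tensors.

```python
# Minimal sketch: verify that quantization metadata was written to the saved checkpoint.
import json
from pathlib import Path

config = json.loads(Path("Qwen3-8B-FP8-dynamic/config.json").read_text())
print(json.dumps(config.get("quantization_config", {}), indent=2))
```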
## Evaluation The model was evaluated on the OpenLLM leaderboard tasks (versions 1 and 2), using [lm-evaluation-harness](https://github.com/EleutherAI/lm-evaluation-harness), and on reasoning tasks using [lighteval](https://github.com/neuralmagic/lighteval/tree/reasoning). [vLLM](https://docs.vllm.ai/en/stable/) was used for all evaluations.
**lm-evaluation-harness**

```
lm_eval \
  --model vllm \
  --model_args pretrained="RedHatAI/Qwen3-8B-FP8-dynamic",dtype=auto,gpu_memory_utilization=0.5,max_model_len=8192,enable_chunked_prefill=True,tensor_parallel_size=1 \
  --tasks openllm \
  --apply_chat_template \
  --fewshot_as_multiturn \
  --batch_size auto
```

```
lm_eval \
  --model vllm \
  --model_args pretrained="RedHatAI/Qwen3-8B-FP8-dynamic",dtype=auto,gpu_memory_utilization=0.5,max_model_len=8192,enable_chunked_prefill=True,tensor_parallel_size=1 \
  --tasks mgsm \
  --apply_chat_template \
  --batch_size auto
```

```
lm_eval \
  --model vllm \
  --model_args pretrained="RedHatAI/Qwen3-8B-FP8-dynamic",dtype=auto,gpu_memory_utilization=0.5,max_model_len=16384,enable_chunked_prefill=True,tensor_parallel_size=1 \
  --tasks leaderboard \
  --apply_chat_template \
  --fewshot_as_multiturn \
  --batch_size auto
```

**lighteval**

lighteval_model_arguments.yaml

```yaml
model_parameters:
  model_name: RedHatAI/Qwen3-8B-FP8-dynamic
  dtype: auto
  gpu_memory_utilization: 0.9
  max_model_length: 40960
  generation_parameters:
    temperature: 0.6
    top_k: 20
    min_p: 0.0
    top_p: 0.95
    max_new_tokens: 32768
```

```
lighteval vllm \
  --model_args lighteval_model_arguments.yaml \
  --tasks "lighteval|aime24|0|0" \
  --use_chat_template
```

```
lighteval vllm \
  --model_args lighteval_model_arguments.yaml \
  --tasks "lighteval|aime25|0|0" \
  --use_chat_template
```

```
lighteval vllm \
  --model_args lighteval_model_arguments.yaml \
  --tasks "lighteval|math_500|0|0" \
  --use_chat_template
```

```
lighteval vllm \
  --model_args lighteval_model_arguments.yaml \
  --tasks "lighteval|gpqa:diamond|0|0" \
  --use_chat_template
```

```
lighteval vllm \
  --model_args lighteval_model_arguments.yaml \
  --tasks "extended|lcb:codegeneration" \
  --use_chat_template
```
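The first lm-evaluation-harness command above can also be driven from Python rather than the CLI. This is a sketch under the assumption of lm-eval >= 0.4, where `lm_eval.simple_evaluate` is the public entry point and its keyword arguments mirror the CLI flags used above; it is not part of the commands actually used for the reported results.

```python
# Minimal sketch: the openllm evaluation above, invoked through lm-eval's Python API.
import lm_eval

results = lm_eval.simple_evaluate(
    model="vllm",
    model_args=(
        "pretrained=RedHatAI/Qwen3-8B-FP8-dynamic,dtype=auto,"
        "gpu_memory_utilization=0.5,max_model_len=8192,tensor_parallel_size=1"
    ),
    tasks=["openllm"],
    apply_chat_template=True,
    fewshot_as_multiturn=True,
    batch_size="auto",
)
print(results["results"])
```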
### Accuracy
| Category | Benchmark | Qwen3-8B | Qwen3-8B-FP8-dynamic<br>(this model) | Recovery |
|----------|-----------|----------|--------------------------------------|----------|
| OpenLLM v1 | MMLU (5-shot) | 71.95 | 72.30 | 100.5% |
| OpenLLM v1 | ARC Challenge (25-shot) | 61.69 | 61.60 | 99.9% |
| OpenLLM v1 | GSM-8K (5-shot, strict-match) | 75.97 | 80.52 | 106.0% |
| OpenLLM v1 | Hellaswag (10-shot) | 56.52 | 55.95 | 99.0% |
| OpenLLM v1 | Winogrande (5-shot) | 65.98 | 66.22 | 100.4% |
| OpenLLM v1 | TruthfulQA (0-shot, mc2) | 53.17 | 52.39 | 98.5% |
| OpenLLM v1 | Average | 64.21 | 64.83 | 101.0% |
| OpenLLM v2 | MMLU-Pro (5-shot) | 34.57 | 37.82 | 109.4% |
| OpenLLM v2 | IFEval (0-shot) | 84.77 | 84.56 | 99.8% |
| OpenLLM v2 | BBH (3-shot) | 25.47 | 27.20 | 106.8% |
| OpenLLM v2 | Math-lvl-5 (4-shot) | 51.05 | 51.90 | 101.7% |
| OpenLLM v2 | GPQA (0-shot) | 0.00 | 0.00 | --- |
| OpenLLM v2 | MuSR (0-shot) | 10.02 | 10.65 | --- |
| OpenLLM v2 | Average | 34.31 | 35.35 | 103.0% |
| Multilingual | MGSM (0-shot) | 25.97 | 25.80 | 99.4% |
| Reasoning (generation) | AIME 2024 | 74.58 | 76.35 | 102.4% |
| Reasoning (generation) | AIME 2025 | 65.21 | 63.75 | 97.8% |
| Reasoning (generation) | GPQA diamond | 58.59 | 61.11 | 104.3% |
| Reasoning (generation) | Math-lvl-5 | 97.60 | 96.60 | 99.0% |
| Reasoning (generation) | LiveCodeBench | 56.27 | 56.60 | 100.6% |
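The Recovery column reads most naturally as the quantized model's score expressed as a percentage of the baseline score. The sketch below reproduces two rows of the table above under that interpretation, which is an assumption of this example rather than a statement from the evaluation harness.

```python
# Minimal sketch: reproduce the Recovery column as (quantized / baseline) * 100,
# using the MMLU and AIME 2025 rows from the table above.
def recovery(baseline: float, quantized: float) -> float:
    return 100.0 * quantized / baseline

print(f"MMLU (5-shot): {recovery(71.95, 72.30):.1f}%")  # ~100.5%
print(f"AIME 2025:     {recovery(65.21, 63.75):.1f}%")  # ~97.8%
```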