Update README.md

README.md

This model can be deployed efficiently using the [vLLM](https://docs.vllm.ai/en/) backend, as shown in the example below.

```bash
vllm serve RedHatAI/Devstral-Small-2507-quantized.w4a16 --tensor-parallel-size 1 --tokenizer_mode mistral
```
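Once the server is running, it exposes an OpenAI-compatible API. Below is a minimal request sketch, assuming the default port (8000) and an illustrative prompt; adjust host, port, and sampling parameters as needed.

```bash
# Query the OpenAI-compatible endpoint started by `vllm serve`
# (assumes the default port 8000; the prompt is only an example)
curl http://localhost:8000/v1/chat/completions \
  -H "Content-Type: application/json" \
  -d '{
        "model": "RedHatAI/Devstral-Small-2507-quantized.w4a16",
        "messages": [{"role": "user", "content": "Write a Python function that reverses a string."}],
        "max_tokens": 256
      }'
```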
## Creation

<details>

This model was created with [llm-compressor](https://github.com/vllm-project/llm-compressor) by running the code snippet below.

```bash
python quantize.py --model_path mistralai/Devstral-Small-2507 --calib_size 1024 --dampening_frac 0.1 --observer mse --sym false --actorder weight
```

```python
import argparse
import os

from datasets import load_dataset
from transformers import AutoModelForCausalLM
from llmcompressor.modifiers.quantization import GPTQModifier
from llmcompressor.transformers import oneshot
from compressed_tensors.quantization import QuantizationScheme, QuantizationArgs, QuantizationType, QuantizationStrategy
from mistral_common.tokens.tokenizers.mistral import MistralTokenizer
from mistral_common.protocol.instruct.request import ChatCompletionRequest
from mistral_common.protocol.instruct.messages import (
    SystemMessage, UserMessage
)


def load_system_prompt(repo_id: str, filename: str) -> str:
    # Read the system prompt file that ships with the model (expects a local copy at model_path)
    file_path = os.path.join(repo_id, filename)
    with open(file_path, "r") as file:
        system_prompt = file.read()
    return system_prompt


def parse_actorder(value):
    if value.lower() == "false":
        return False
    elif value.lower() == "weight":
        return "weight"
    elif value.lower() == "group":
        return "group"
    else:
        raise argparse.ArgumentTypeError("Invalid value for --actorder. Use false, weight, or group.")


def parse_sym(value):
    if value.lower() == "false":
        return False
    elif value.lower() == "true":
        return True
    else:
        raise argparse.ArgumentTypeError(f"Invalid value for --sym. Use false or true, but got {value}")


parser = argparse.ArgumentParser()
parser.add_argument('--model_path', type=str)
parser.add_argument('--calib_size', type=int, default=256)
parser.add_argument('--dampening_frac', type=float, default=0.1)
parser.add_argument('--observer', type=str, default="minmax")
parser.add_argument('--sym', type=parse_sym, default=True)
parser.add_argument(
    '--actorder',
    type=parse_actorder,
    default=False,
    help="Specify actorder as 'weight' or 'group' (string), or False (boolean)."
)
args = parser.parse_args()


# Load the base model in its native dtype, spread across available GPUs
model = AutoModelForCausalLM.from_pretrained(
    args.model_path,
    device_map="auto",
    torch_dtype="auto",
    use_cache=False,
    trust_remote_code=True,
)

# Calibration data: a shuffled subset of Open-Platypus
ds = load_dataset("garage-bAInd/Open-Platypus", split="train")
ds = ds.shuffle(seed=42).select(range(args.calib_size))

SYSTEM_PROMPT = load_system_prompt(args.model_path, "SYSTEM_PROMPT.txt")
tokenizer = MistralTokenizer.from_hf_hub("mistralai/Devstral-Small-2507")

# Apply the Mistral chat template to each calibration sample
def tokenize(sample):
    tmp = tokenizer.encode_chat_completion(
        ChatCompletionRequest(
            messages=[
                SystemMessage(content=SYSTEM_PROMPT),
                UserMessage(content=sample['instruction']),
            ],
        )
    )
    return {'input_ids': tmp.tokens}

ds = ds.map(tokenize, remove_columns=ds.column_names)

# Weight-only INT4 quantization, group-wise with group size 128
quant_scheme = QuantizationScheme(
    targets=["Linear"],
    weights=QuantizationArgs(
        num_bits=4,
        type=QuantizationType.INT,
        symmetric=args.sym,
        group_size=128,
        strategy=QuantizationStrategy.GROUP,
        observer=args.observer,
        actorder=args.actorder
    ),
    input_activations=None,
    output_activations=None,
)

# GPTQ over all Linear layers, excluding the output head
recipe = [
    GPTQModifier(
        targets=["Linear"],
        ignore=["lm_head"],
        dampening_frac=args.dampening_frac,
        config_groups={"group_0": quant_scheme},
    )
]

# One-shot calibration and quantization
oneshot(
    model=model,
    dataset=ds,
    recipe=recipe,
    num_calibration_samples=args.calib_size,
    max_seq_length=8192,
)

save_path = args.model_path + "-quantized.w4a16"
model.save_pretrained(save_path)
```
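For reference, the flags in the `quantize.py` invocation above map directly onto this script: `--sym false` selects asymmetric INT4 weights, `--observer mse` swaps the default min-max range observer for an MSE-based one, `--actorder weight` enables weight-ordered processing in GPTQ, and `--calib_size 1024` sets the number of Open-Platypus calibration samples.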
</details>

## Evaluation

The model was evaluated on popular coding tasks (HumanEval, HumanEval+, MBPP, MBPP+) using [EvalPlus](https://github.com/evalplus/evalplus) with the vLLM backend (v0.10.1.1).
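For reproducibility, an EvalPlus run of this kind typically looks like the sketch below; the flag names follow the EvalPlus README and may vary between versions, and greedy decoding is shown only as an example configuration.

```bash
# Sketch of an EvalPlus evaluation with the vLLM backend (repeat with --dataset mbpp)
evalplus.evaluate --model RedHatAI/Devstral-Small-2507-quantized.w4a16 \
                  --dataset humaneval \
                  --backend vllm \
                  --greedy
```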