AWQ model commit
- config.json +27 -0
- generation_config.json +6 -0
- model.safetensors +3 -0
- quant_config.json +6 -0
- run-humaneval.py +56 -0
- special_tokens_map.json +11 -0
- tokenizer.json +0 -0
- tokenizer.model +3 -0
- tokenizer_config.json +50 -0
config.json
ADDED
@@ -0,0 +1,27 @@
{
  "_name_or_path": "/workspace/process/nondzu_mistral-7b-codealpaca-lora/source",
  "architectures": [
    "MistralForCausalLM"
  ],
  "bos_token_id": 1,
  "eos_token_id": 2,
  "hidden_act": "silu",
  "hidden_size": 4096,
  "initializer_range": 0.02,
  "intermediate_size": 14336,
  "max_position_embeddings": 32768,
  "model_type": "mistral",
  "num_attention_heads": 32,
  "num_hidden_layers": 32,
  "num_key_value_heads": 8,
  "pad_token_id": 0,
  "pretraining_tp": 1,
  "rms_norm_eps": 1e-05,
  "rope_theta": 10000.0,
  "sliding_window": 4096,
  "tie_word_embeddings": false,
  "torch_dtype": "float16",
  "transformers_version": "4.34.1",
  "use_cache": true,
  "vocab_size": 32000
}
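These are stock Mistral-7B architecture settings: grouped-query attention (32 query heads sharing 8 KV heads) and a 4096-token sliding window. As a sanity check, the fields can be read back with transformers; the local path below is a placeholder for a checkout of this repo:

from transformers import AutoConfig

# Placeholder path: wherever this repo is checked out locally
config = AutoConfig.from_pretrained("./Mistral-7B-codealpaca-lora-AWQ")
print(config.model_type)           # "mistral"
print(config.num_attention_heads)  # 32 query heads ...
print(config.num_key_value_heads)  # ... over 8 KV heads (GQA ratio 4)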
generation_config.json
ADDED
@@ -0,0 +1,6 @@
{
  "_from_model_config": true,
  "bos_token_id": 1,
  "eos_token_id": 2,
  "transformers_version": "4.35.0.dev0"
}
model.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:02ed61dd7abaef2f067ed1122a6654cd6a12f8aa10506708408c3b5264765ace
size 4150880232
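This entry is a Git LFS pointer (a ~4.15 GB payload), not the weights themselves. A quick integrity check against the recorded oid, assuming the real file has been fetched with `git lfs pull`:

import hashlib

# Hash the downloaded weights and compare with the oid in the LFS pointer above
sha = hashlib.sha256()
with open("model.safetensors", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha.update(chunk)
assert sha.hexdigest() == "02ed61dd7abaef2f067ed1122a6654cd6a12f8aa10506708408c3b5264765ace"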
quant_config.json
ADDED
@@ -0,0 +1,6 @@
{
  "zero_point": true,
  "q_group_size": 128,
  "w_bit": 4,
  "version": "GEMM"
}
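This is the common AutoAWQ recipe: 4-bit weights with zero-point quantization, group size 128, and the GEMM kernel. A minimal loading sketch using the AutoAWQ library (the local path is a placeholder, and the exact from_quantized arguments vary across AutoAWQ versions):

from awq import AutoAWQForCausalLM
from transformers import AutoTokenizer

quant_path = "./Mistral-7B-codealpaca-lora-AWQ"  # placeholder: local checkout of this repo
model = AutoAWQForCausalLM.from_quantized(quant_path, fuse_layers=True)
tokenizer = AutoTokenizer.from_pretrained(quant_path)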
run-humaneval.py
ADDED
@@ -0,0 +1,56 @@
import os

import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from torch.nn.parallel import DistributedDataParallel as DDP
from transformers import AutoTokenizer, LlamaForCausalLM
from evalplus.data import get_human_eval_plus, write_jsonl
from tqdm import tqdm


def setup(rank, world_size):
    os.environ["MASTER_ADDR"] = "localhost"
    os.environ["MASTER_PORT"] = "12355"
    dist.init_process_group("gloo", rank=rank, world_size=world_size)


def cleanup():
    dist.destroy_process_group()


def generate_one_completion(ddp_model, tokenizer, prompt: str):
    tokenizer.pad_token = tokenizer.eos_token
    inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=4096)

    # Sample a completion, then strip the echoed prompt and keep the first block
    generate_ids = ddp_model.module.generate(
        inputs.input_ids.to("cuda"),
        max_new_tokens=384,
        do_sample=True,
        top_p=0.75,
        top_k=40,
        temperature=0.1,
        pad_token_id=tokenizer.eos_token_id,
    )
    completion = tokenizer.batch_decode(
        generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )[0]
    completion = completion.replace(prompt, "").split("\n\n\n")[0]

    print("-------------------")
    print(completion)
    return completion


def run(rank, world_size):
    setup(rank, world_size)

    model_path = "Nondzu/Mistral-7B-codealpaca-lora"
    model = LlamaForCausalLM.from_pretrained(model_path, load_in_8bit=True)
    ddp_model = DDP(model, device_ids=[rank])
    tokenizer = AutoTokenizer.from_pretrained(model_path)

    problems = get_human_eval_plus()
    num_samples_per_task = 1

    samples = [
        dict(task_id=task_id, completion=generate_one_completion(ddp_model, tokenizer, problems[task_id]["prompt"]))
        for task_id in tqdm(problems)
        for _ in range(num_samples_per_task)
    ]
    write_jsonl(f"samples-Nondzu-Mistral-7B-codealpaca-lora-rank{rank}.jsonl", samples)

    cleanup()


def main():
    world_size = 1
    mp.spawn(run, args=(world_size,), nprocs=world_size, join=True)


if __name__ == "__main__":
    main()
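Note that this script benchmarks the unquantized source repo (loaded in 8-bit), not the AWQ checkpoint in this commit, and spawns a single gloo-backed DDP rank. Once the samples file is written, scoring typically goes through the evalplus CLI; a hedged one-liner, since flag names may differ across evalplus versions:

evalplus.evaluate --dataset humaneval --samples samples-Nondzu-Mistral-7B-codealpaca-lora-rank0.jsonl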
special_tokens_map.json
ADDED
@@ -0,0 +1,11 @@
{
  "additional_special_tokens": [
    "<unk>",
    "<s>",
    "</s>"
  ],
  "bos_token": "<s>",
  "eos_token": "</s>",
  "pad_token": "</s>",
  "unk_token": "<unk>"
}
tokenizer.json
ADDED
The diff for this file is too large to render.
tokenizer.model
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:dadfd56d766715c61d2ef780a525ab43b8e6da4de6865bda3d95fdef5e134055
size 493443
tokenizer_config.json
ADDED
@@ -0,0 +1,50 @@
{
  "add_bos_token": true,
  "add_eos_token": false,
  "added_tokens_decoder": {
    "0": {
      "content": "<unk>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "1": {
      "content": "<s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "2": {
      "content": "</s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "additional_special_tokens": [
    "<unk>",
    "<s>",
    "</s>"
  ],
  "bos_token": "<s>",
  "chat_template": "{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ '[INST] ' + message['content'] + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ message['content'] + eos_token + ' ' }}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}",
  "clean_up_tokenization_spaces": false,
  "eos_token": "</s>",
  "legacy": true,
  "model_max_length": 1000000000000000019884624838656,
  "pad_token": "</s>",
  "sp_model_kwargs": {},
  "spaces_between_special_tokens": false,
  "tokenizer_class": "LlamaTokenizer",
  "tokenizer_file": "/home/kamil/.cache/huggingface/hub/models--mistralai--Mistral-7B-Instruct-v0.1/snapshots/7ad5799710574ba1c1d953eba3077af582f3a773/tokenizer.json",
  "trust_remote_code": false,
  "unk_token": "<unk>",
  "use_default_system_prompt": true,
  "use_fast": true
}
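The chat_template above is the Mistral-Instruct format: user turns wrapped in [INST] ... [/INST], assistant turns terminated by the EOS token. A short rendering sketch (using the source repo named in run-humaneval.py; any checkpoint carrying this tokenizer_config behaves the same):

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("Nondzu/Mistral-7B-codealpaca-lora")
messages = [{"role": "user", "content": "Write a function that reverses a string."}]
# tokenize=False returns the rendered prompt string rather than token ids
prompt = tokenizer.apply_chat_template(messages, tokenize=False)
print(prompt)  # <s>[INST] Write a function that reverses a string. [/INST]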