niobures committed
Commit 5a456d4 · verified · 1 Parent(s): 8d37bbb

Wizard-Vicuna-13B-Uncensored-HF-onnx

.gitattributes CHANGED
@@ -1901,3 +1901,4 @@ Wizard-Vicuna-7B-Uncensored-GGUF/Wizard-Vicuna-7B-Uncensored.Q5_K_S.gguf filter=
  Wizard-Vicuna-7B-Uncensored-GGUF/Wizard-Vicuna-7B-Uncensored.Q6_K.gguf filter=lfs diff=lfs merge=lfs -text
  Wizard-Vicuna-7B-Uncensored-GGUF/Wizard-Vicuna-7B-Uncensored.Q8_0.gguf filter=lfs diff=lfs merge=lfs -text
  Wizard-Vicuna-7B-Uncensored-ONNX/decoder_model_merged.onnx_data filter=lfs diff=lfs merge=lfs -text
+ Wizard-Vicuna-13B-Uncensored-HF-onnx/decoder_model_merged.onnx_data filter=lfs diff=lfs merge=lfs -text
Wizard-Vicuna-13B-Uncensored-HF-onnx/.gitattributes ADDED
@@ -0,0 +1,36 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
+ decoder_model_merged.onnx_data filter=lfs diff=lfs merge=lfs -text
Wizard-Vicuna-13B-Uncensored-HF-onnx/README.md ADDED
@@ -0,0 +1,148 @@
+ ---
+ license: other
+ datasets:
+ - ehartford/wizard_vicuna_70k_unfiltered
+ language:
+ - en
+ tags:
+ - uncensored
+ inference: true
+ ---
+
+ # Wizard-Vicuna-13B-Uncensored-HF-onnx
+
+ A version of [TheBloke/Wizard-Vicuna-13B-Uncensored-HF](https://huggingface.co/TheBloke/Wizard-Vicuna-13B-Uncensored-HF) converted to ONNX fp16 using the optimum library.
+
+ ## Conversion command
+
+ ```bash
+ SAVE_DIR=/path/to/save
+ # Export with merged past-key-value support, in fp16, on the GPU
+ optimum-cli export onnx --model TheBloke/Wizard-Vicuna-13B-Uncensored-HF --task causal-lm-with-past --fp16 --device cuda $SAVE_DIR
+ # Keep only decoder_model_merged.onnx(+_data): drop leftover constant shards
+ # and the separate with-past/no-past decoder variants
+ rm $SAVE_DIR/Constant_*
+ rm $SAVE_DIR/decoder_with_past_model.onnx*
+ rm $SAVE_DIR/decoder_model.onnx*
+ ```
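+
+ As a quick sanity check (a hedged sketch, not part of the original recipe; it assumes the export above completed into `SAVE_DIR`), the merged graph can be inspected without pulling in the ~26 GB of external weight data:
+
+ ```python
+ import onnx
+
+ # load_external_data=False reads only the graph structure, not the weight shards
+ m = onnx.load("/path/to/save/decoder_model_merged.onnx", load_external_data=False)
+ print(len(m.graph.node), "nodes")
+ print([i.name for i in m.graph.input][:4])  # input_ids, attention_mask, past KVs, ...
+ ```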
+
+ ## Usage
+
+ First, load the ONNX model with `ORTModelForCausalLM`:
+
+ ```python
+ import torch
+ from transformers import AutoTokenizer, TextStreamer
+ from optimum.onnxruntime import ORTModelForCausalLM
+
+ BASE_MODEL = "sharpbai/Wizard-Vicuna-13B-Uncensored-HF-onnx"
+
+ tok = AutoTokenizer.from_pretrained(BASE_MODEL, use_fast=False)
+ model = ORTModelForCausalLM.from_pretrained(BASE_MODEL,
+                                             provider='CUDAExecutionProvider',
+                                             torch_dtype=torch.float16)
+ # Prints generated tokens to stdout as they are produced
+ streamer = TextStreamer(tok)
+ ```
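+
+ If no CUDA device is available, onnxruntime's standard CPU provider should also work (an assumption on my part, not covered in the original notes, and very slow for a 13B model):
+
+ ```python
+ # Hypothetical CPU fallback for machines without a GPU
+ model_cpu = ORTModelForCausalLM.from_pretrained(BASE_MODEL,
+                                                 provider='CPUExecutionProvider')
+ ```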
+
+ Then you can generate text:
+
+ ```python
+ from datetime import datetime
+
+ MAX_NEW_TOKENS = 200
+ inputs = tok(["An increasing sequence: one,"], return_tensors="pt")
+
+ start = datetime.now()
+ # Besides returning the usual output, the streamer also prints the generated text to stdout.
+ _ = model.generate(input_ids=inputs.input_ids.to('cuda:0'), streamer=streamer, max_new_tokens=MAX_NEW_TOKENS)
+ elapsed = datetime.now() - start
+ speed = MAX_NEW_TOKENS / elapsed.total_seconds()
+ print(f"elapsed {elapsed}, speed {speed} token/s")
+ ```
+
+ To compare the ONNX model against plain transformers, load the original fp16 model the same way:
+
+ ```python
+ import torch
+ from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer
+
+ BASE_MODEL = "TheBloke/Wizard-Vicuna-13B-Uncensored-HF"
+
+ model = AutoModelForCausalLM.from_pretrained(BASE_MODEL, device_map='auto',
+                                              torch_dtype=torch.float16)
+ tok = AutoTokenizer.from_pretrained(BASE_MODEL, use_fast=False)
+
+ streamer = TextStreamer(tok)
+ ```
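+
+ The same timing loop as above can then be run against this model (a sketch mirroring the earlier snippet; `model.device` resolves to the first device picked by `device_map='auto'`):
+
+ ```python
+ from datetime import datetime
+
+ MAX_NEW_TOKENS = 200
+ inputs = tok(["An increasing sequence: one,"], return_tensors="pt")
+
+ start = datetime.now()
+ _ = model.generate(input_ids=inputs.input_ids.to(model.device),
+                    streamer=streamer, max_new_tokens=MAX_NEW_TOKENS)
+ elapsed = datetime.now() - start
+ print(f"elapsed {elapsed}, speed {MAX_NEW_TOKENS / elapsed.total_seconds():.1f} token/s")
+ ```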
+
+ I have run some comparison tests in this notebook:
+ https://colab.research.google.com/gist/sharpbai/745fa7c6b2069544c254b1fb73070698/infer-with-onnxruntime-vs-transformers-llama-13b.ipynb
+
+ ## Original model card
+
+ -----------------------------------------
+
+ <!-- header start -->
+ <div style="width: 100%;">
+ <img src="https://i.imgur.com/EBdldam.jpg" alt="TheBlokeAI" style="width: 100%; min-width: 400px; display: block; margin: auto;">
+ </div>
+ <div style="display: flex; justify-content: space-between; width: 100%;">
+ <div style="display: flex; flex-direction: column; align-items: flex-start;">
+ <p><a href="https://discord.gg/Jq4vkcDakD">Chat & support: my new Discord server</a></p>
+ </div>
+ <div style="display: flex; flex-direction: column; align-items: flex-end;">
+ <p><a href="https://www.patreon.com/TheBlokeAI">Want to contribute? TheBloke's Patreon page</a></p>
+ </div>
+ </div>
+ <!-- header end -->
+
+ # Wizard-Vicuna-13B-Uncensored float16 HF
+
+ This is a float16 HF repo for [Eric Hartford's 'uncensored' training of Wizard-Vicuna 13B](https://huggingface.co/ehartford/Wizard-Vicuna-13B-Uncensored).
+
+ It is the result of converting Eric's float32 repo to float16 for easier storage and use.
+
+ ## Repositories available
+
+ * [4bit GPTQ models for GPU inference](https://huggingface.co/TheBloke/Wizard-Vicuna-13B-Uncensored-GPTQ)
+ * [4bit and 5bit GGML models for CPU inference](https://huggingface.co/TheBloke/Wizard-Vicuna-13B-Uncensored-GGML)
+ * [float16 HF format model for GPU inference and further conversions](https://huggingface.co/TheBloke/Wizard-Vicuna-13B-Uncensored-HF)
+
+ <!-- footer start -->
+ ## Discord
+
+ For further support, and discussions on these models and AI in general, join us at:
+
+ [TheBloke AI's Discord server](https://discord.gg/Jq4vkcDakD)
+
+ ## Thanks, and how to contribute
+
+ Thanks to the [chirper.ai](https://chirper.ai) team!
+
+ I've had a lot of people ask if they can contribute. I enjoy providing models and helping people, and would love to be able to spend even more time doing it, as well as expanding into new projects like fine-tuning/training.
+
+ If you're able and willing to contribute, it will be most gratefully received and will help me to keep providing more models, and to start work on new AI projects.
+
+ Donators will get priority support on any and all AI/LLM/model questions and requests, access to a private Discord room, plus other benefits.
+
+ * Patreon: https://patreon.com/TheBlokeAI
+ * Ko-Fi: https://ko-fi.com/TheBlokeAI
+
+ **Patreon special mentions**: Aemon Algiz, Dmitriy Samsonov, Nathan LeClaire, Trenton Dambrowitz, Mano Prime, David Flickinger, vamX, Nikolai Manek, senxiiz, Khalefa Al-Ahmad, Illia Dulskyi, Jonathan Leane, Talal Aujan, V. Lukas, Joseph William Delisle, Pyrater, Oscar Rangel, Lone Striker, Luke Pendergrass, Eugene Pentland, Sebastain Graf, Johann-Peter Hartman.
+
+ Thank you to all my generous patrons and donators!
+ <!-- footer end -->
+
+ # Original model card
+
+ This is [wizard-vicuna-13b](https://huggingface.co/junelee/wizard-vicuna-13b) trained on a subset of the dataset: responses that contained alignment/moralizing were removed. The intent is to train a WizardLM that doesn't have alignment built in, so that alignment (of any sort) can be added separately, for example with an RLHF LoRA.
+
+ Shout out to the open source AI/ML community, and everyone who helped me out.
+
+ Note:
+
+ An uncensored model has no guardrails.
+
+ You are responsible for anything you do with the model, just as you are responsible for anything you do with any dangerous object such as a knife, gun, lighter, or car.
+
+ Publishing anything this model generates is the same as publishing it yourself.
+
+ You are responsible for the content you publish, and you cannot blame the model any more than you can blame the knife, gun, lighter, or car for what you do with it.
Wizard-Vicuna-13B-Uncensored-HF-onnx/config.json ADDED
@@ -0,0 +1,24 @@
+ {
+     "_name_or_path": "TheBloke/Wizard-Vicuna-13B-Uncensored-HF",
+     "architectures": [
+         "LlamaForCausalLM"
+     ],
+     "bos_token_id": 1,
+     "eos_token_id": 2,
+     "hidden_act": "silu",
+     "hidden_size": 5120,
+     "initializer_range": 0.02,
+     "intermediate_size": 13824,
+     "max_position_embeddings": 2048,
+     "max_sequence_length": 2048,
+     "model_type": "llama",
+     "num_attention_heads": 40,
+     "num_hidden_layers": 40,
+     "pad_token_id": 0,
+     "rms_norm_eps": 1e-06,
+     "tie_word_embeddings": false,
+     "torch_dtype": "float16",
+     "transformers_version": "4.30.2",
+     "use_cache": true,
+     "vocab_size": 32000
+ }
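
These hyperparameters fix the shapes of the merged decoder's past-key-value inputs. A hedged sketch of the arithmetic (the `past_key_values.{i}.key`/`.value` naming follows optimum's usual export convention and is an assumption, not something verified against this export):

```python
# Values copied from the config.json above; standard LLaMA attention layout assumed
hidden_size = 5120
num_attention_heads = 40
num_hidden_layers = 40

head_dim = hidden_size // num_attention_heads  # 128
# Each layer contributes one key and one value cache tensor of shape
# (batch, num_attention_heads, past_sequence_length, head_dim)
print(f"head_dim={head_dim}, past-KV inputs={2 * num_hidden_layers}")  # 128, 80
```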
Wizard-Vicuna-13B-Uncensored-HF-onnx/decoder_model_merged.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bfea5995b0f9b0ebedd2bb43d27d0aba6966fae38482912dd2541f263940433f
+ size 7371829
Wizard-Vicuna-13B-Uncensored-HF-onnx/decoder_model_merged.onnx_data ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:21940ef08a086d9649561df925c338a015700be7585feb810fbeb320e76b68c2
+ size 26032558080
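
The ~26 GB data file is consistent with fp16 weights for a 13B-parameter LLaMA. A back-of-envelope check derived from the config.json above (standard LLaMA layout assumed, not read from the ONNX file itself):

```python
v, h, i, L = 32000, 5120, 13824, 40  # vocab, hidden, intermediate, layers
params = (
    v * h                # token embeddings
    + L * (4 * h * h     # q/k/v/o attention projections
           + 3 * h * i   # gate/up/down MLP projections
           + 2 * h)      # the two RMSNorm weights per layer
    + h                  # final RMSNorm
    + v * h              # lm_head (tie_word_embeddings is false)
)
print(params, params * 2)  # 13_015_864_320 params, ~26.03e9 bytes at 2 bytes each
```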
Wizard-Vicuna-13B-Uncensored-HF-onnx/generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+     "_from_model_config": true,
+     "bos_token_id": 1,
+     "eos_token_id": 2,
+     "pad_token_id": 0,
+     "transformers_version": "4.30.2"
+ }
Wizard-Vicuna-13B-Uncensored-HF-onnx/source.txt ADDED
@@ -0,0 +1 @@
+ https://huggingface.co/sharpbai/Wizard-Vicuna-13B-Uncensored-HF-onnx
Wizard-Vicuna-13B-Uncensored-HF-onnx/special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
+ {
+     "bos_token": {
+         "content": "<s>",
+         "lstrip": false,
+         "normalized": true,
+         "rstrip": false,
+         "single_word": false
+     },
+     "eos_token": {
+         "content": "</s>",
+         "lstrip": false,
+         "normalized": true,
+         "rstrip": false,
+         "single_word": false
+     },
+     "pad_token": "<unk>",
+     "unk_token": {
+         "content": "<unk>",
+         "lstrip": false,
+         "normalized": true,
+         "rstrip": false,
+         "single_word": false
+     }
+ }
Wizard-Vicuna-13B-Uncensored-HF-onnx/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
Wizard-Vicuna-13B-Uncensored-HF-onnx/tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+ size 499723
Wizard-Vicuna-13B-Uncensored-HF-onnx/tokenizer_config.json ADDED
@@ -0,0 +1,32 @@
+ {
+     "bos_token": {
+         "__type": "AddedToken",
+         "content": "<s>",
+         "lstrip": false,
+         "normalized": true,
+         "rstrip": false,
+         "single_word": false
+     },
+     "clean_up_tokenization_spaces": false,
+     "eos_token": {
+         "__type": "AddedToken",
+         "content": "</s>",
+         "lstrip": false,
+         "normalized": true,
+         "rstrip": false,
+         "single_word": false
+     },
+     "model_max_length": 2048,
+     "pad_token": null,
+     "padding_side": "right",
+     "sp_model_kwargs": {},
+     "tokenizer_class": "LlamaTokenizer",
+     "unk_token": {
+         "__type": "AddedToken",
+         "content": "<unk>",
+         "lstrip": false,
+         "normalized": true,
+         "rstrip": false,
+         "single_word": false
+     }
+ }