mwebazarick commited on
Commit
4fe38f5
·
verified ·
1 Parent(s): 682b503

Upload 18 files

Browse files
Version 2/42M_base/Makefile ADDED
@@ -0,0 +1,76 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# choose your compiler, e.g. gcc/clang
# example override to clang: make run CC=clang
CC = gcc

# the most basic way of building that is most likely to work on most systems
.PHONY: run
run: run.c runq.c
	$(CC) -O3 -o run run.c -lm
	$(CC) -O3 -o runq runq.c -lm

# useful for a debug build, can then e.g. analyze with valgrind, example:
# $ valgrind --leak-check=full ./run out/model.bin -n 3
.PHONY: rundebug
rundebug: run.c runq.c
	$(CC) -g -o run run.c -lm
	$(CC) -g -o runq runq.c -lm

# https://gcc.gnu.org/onlinedocs/gcc/Optimize-Options.html
# https://simonbyrne.github.io/notes/fastmath/
# -Ofast enables all -O3 optimizations.
# Disregards strict standards compliance.
# It also enables optimizations that are not valid for all standard-compliant programs.
# It turns on -ffast-math, -fallow-store-data-races and the Fortran-specific
# -fstack-arrays, unless -fmax-stack-var-size is specified, and -fno-protect-parens.
# It turns off -fsemantic-interposition.
# In our specific application this is *probably* okay to use
.PHONY: runfast
runfast: run.c runq.c
	$(CC) -Ofast -o run run.c -lm
	$(CC) -Ofast -o runq runq.c -lm

# additionally compiles with OpenMP, allowing multithreaded runs
# make sure to also enable multiple threads when running, e.g.:
# OMP_NUM_THREADS=4 ./run out/model.bin
.PHONY: runomp
runomp: run.c runq.c
	$(CC) -Ofast -fopenmp -march=native run.c -lm -o run
	$(CC) -Ofast -fopenmp -march=native runq.c -lm -o runq

.PHONY: win64
win64:
	x86_64-w64-mingw32-gcc -Ofast -D_WIN32 -o run.exe -I. run.c win.c
	x86_64-w64-mingw32-gcc -Ofast -D_WIN32 -o runq.exe -I. runq.c win.c

# compiles with gnu11 standard flags for amazon linux, coreos, etc. compatibility
.PHONY: rungnu
rungnu:
	$(CC) -Ofast -std=gnu11 -o run run.c -lm
	$(CC) -Ofast -std=gnu11 -o runq runq.c -lm

.PHONY: runompgnu
runompgnu:
	$(CC) -Ofast -fopenmp -std=gnu11 run.c -lm -o run
	$(CC) -Ofast -fopenmp -std=gnu11 runq.c -lm -o runq

# run all tests
.PHONY: test
test:
	pytest

# run only tests for run.c C implementation (is a bit faster if only C code changed)
.PHONY: testc
testc:
	pytest -k runc

# run the C tests, without touching pytest / python
# to increase verbosity level run e.g. as `make testcc VERBOSITY=1`
VERBOSITY ?= 0
.PHONY: testcc
testcc:
	$(CC) -DVERBOSITY=$(VERBOSITY) -O3 -o testc test.c -lm
	./testc

.PHONY: clean
clean:
	rm -f run
	rm -f runq
	rm -f testc
Version 2/42M_base/ckpt.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1a2d3dd1f330eea772dac4c32122c32146a95b8ac1076f679bf3ec0d90713ef9
3
+ size 426623520
Version 2/42M_base/model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f578d4f018b01386d0f0c127f3c0b42736018df7285e080f9d4d37365ce9394a
3
+ size 142444572
Version 2/42M_base/run.c ADDED
@@ -0,0 +1,973 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Inference for Llama-2 Transformer model in pure C */
2
+
3
+ #include <stdio.h>
4
+ #include <stdlib.h>
5
+ #include <ctype.h>
6
+ #include <time.h>
7
+ #include <math.h>
8
+ #include <string.h>
9
+ #include <fcntl.h>
10
+ #if defined _WIN32
11
+ #include "win.h"
12
+ #else
13
+ #include <unistd.h>
14
+ #include <sys/mman.h>
15
+ #endif
16
+ // ----------------------------------------------------------------------------
17
+ // Transformer model
18
+
19
// Transformer hyperparameters, read verbatim as the checkpoint file header.
// Field order and types must match the exporter exactly (fread'd as a blob).
typedef struct {
    int dim;        // transformer (embedding) dimension
    int hidden_dim; // hidden dimension of the ffn layers
    int n_layers;   // number of transformer layers
    int n_heads;    // number of query heads
    int n_kv_heads; // number of key/value heads (<= n_heads because of multiquery)
    int vocab_size; // vocabulary size (stored negative in the file to signal unshared classifier weights)
    int seq_len;    // maximum sequence length
} Config;

// Pointers into the memory-mapped checkpoint, one per weight tensor.
// Note dim == n_heads * head_size.
typedef struct {
    float* token_embedding_table; // (vocab_size, dim)
    // rmsnorm weights
    float* rms_att_weight; // (layer, dim)
    float* rms_ffn_weight; // (layer, dim)
    // attention projections
    float* wq; // (layer, dim, n_heads * head_size)
    float* wk; // (layer, dim, n_kv_heads * head_size)
    float* wv; // (layer, dim, n_kv_heads * head_size)
    float* wo; // (layer, n_heads * head_size, dim)
    // ffn projections
    float* w1; // (layer, hidden_dim, dim)
    float* w2; // (layer, dim, hidden_dim)
    float* w3; // (layer, hidden_dim, dim)
    // final rmsnorm
    float* rms_final_weight; // (dim,)
    // classifier weights for the logits; may alias token_embedding_table when shared
    float* wcls;
} TransformerWeights;

// Scratch buffers for one forward pass ("the wave of activations").
typedef struct {
    float *x;   // activation at the current time step (dim,)
    float *xb;  // same, inside a residual branch (dim,)
    float *xb2; // an additional convenience buffer (dim,)
    float *hb;  // ffn hidden buffer (hidden_dim,)
    float *hb2; // ffn hidden buffer (hidden_dim,)
    float *q;   // query (dim,)
    float *k;   // key; aliases into key_cache, not separately allocated
    float *v;   // value; aliases into value_cache, not separately allocated
    float *att; // attention scores (n_heads, seq_len)
    float *logits; // output logits (vocab_size,)
    // kv cache (kv_dim = dim * n_kv_heads / n_heads)
    float* key_cache;   // (layer, seq_len, kv_dim)
    float* value_cache; // (layer, seq_len, kv_dim)
} RunState;

// Everything needed to run (and later tear down) one model.
typedef struct {
    Config config;              // hyperparameters read from the checkpoint
    TransformerWeights weights; // tensors inside the memory mapping
    RunState state;             // activation buffers for the forward pass
    // bookkeeping so the memory mapping can be cleaned up properly
    int fd;            // file descriptor backing the mapping
    float* data;       // base pointer of the memory mapping
    ssize_t file_size; // size of the checkpoint file in bytes
} Transformer;
76
+
77
+ void malloc_run_state(RunState* s, Config* p) {
78
+ // we calloc instead of malloc to keep valgrind happy
79
+ int kv_dim = (p->dim * p->n_kv_heads) / p->n_heads;
80
+ s->x = calloc(p->dim, sizeof(float));
81
+ s->xb = calloc(p->dim, sizeof(float));
82
+ s->xb2 = calloc(p->dim, sizeof(float));
83
+ s->hb = calloc(p->hidden_dim, sizeof(float));
84
+ s->hb2 = calloc(p->hidden_dim, sizeof(float));
85
+ s->q = calloc(p->dim, sizeof(float));
86
+ s->key_cache = calloc(p->n_layers * p->seq_len * kv_dim, sizeof(float));
87
+ s->value_cache = calloc(p->n_layers * p->seq_len * kv_dim, sizeof(float));
88
+ s->att = calloc(p->n_heads * p->seq_len, sizeof(float));
89
+ s->logits = calloc(p->vocab_size, sizeof(float));
90
+ // ensure all mallocs went fine
91
+ if (!s->x || !s->xb || !s->xb2 || !s->hb || !s->hb2 || !s->q
92
+ || !s->key_cache || !s->value_cache || !s->att || !s->logits) {
93
+ fprintf(stderr, "malloc failed!\n");
94
+ exit(EXIT_FAILURE);
95
+ }
96
+ }
97
+
98
+ void free_run_state(RunState* s) {
99
+ free(s->x);
100
+ free(s->xb);
101
+ free(s->xb2);
102
+ free(s->hb);
103
+ free(s->hb2);
104
+ free(s->q);
105
+ free(s->att);
106
+ free(s->logits);
107
+ free(s->key_cache);
108
+ free(s->value_cache);
109
+ }
110
+
111
+ void memory_map_weights(TransformerWeights *w, Config* p, float* ptr, int shared_weights) {
112
+ int head_size = p->dim / p->n_heads;
113
+ // make sure the multiplications below are done in 64bit to fit the parameter counts of 13B+ models
114
+ unsigned long long n_layers = p->n_layers;
115
+ w->token_embedding_table = ptr;
116
+ ptr += p->vocab_size * p->dim;
117
+ w->rms_att_weight = ptr;
118
+ ptr += n_layers * p->dim;
119
+ w->wq = ptr;
120
+ ptr += n_layers * p->dim * (p->n_heads * head_size);
121
+ w->wk = ptr;
122
+ ptr += n_layers * p->dim * (p->n_kv_heads * head_size);
123
+ w->wv = ptr;
124
+ ptr += n_layers * p->dim * (p->n_kv_heads * head_size);
125
+ w->wo = ptr;
126
+ ptr += n_layers * (p->n_heads * head_size) * p->dim;
127
+ w->rms_ffn_weight = ptr;
128
+ ptr += n_layers * p->dim;
129
+ w->w1 = ptr;
130
+ ptr += n_layers * p->dim * p->hidden_dim;
131
+ w->w2 = ptr;
132
+ ptr += n_layers * p->hidden_dim * p->dim;
133
+ w->w3 = ptr;
134
+ ptr += n_layers * p->dim * p->hidden_dim;
135
+ w->rms_final_weight = ptr;
136
+ ptr += p->dim;
137
+ ptr += p->seq_len * head_size / 2; // skip what used to be freq_cis_real (for RoPE)
138
+ ptr += p->seq_len * head_size / 2; // skip what used to be freq_cis_imag (for RoPE)
139
+ w->wcls = shared_weights ? w->token_embedding_table : ptr;
140
+ }
141
+
142
+ void read_checkpoint(char* checkpoint, Config* config, TransformerWeights* weights,
143
+ int* fd, float** data, ssize_t* file_size) {
144
+ FILE *file = fopen(checkpoint, "rb");
145
+ if (!file) { fprintf(stderr, "Couldn't open file %s\n", checkpoint); exit(EXIT_FAILURE); }
146
+ // read in the config header
147
+ if (fread(config, sizeof(Config), 1, file) != 1) { exit(EXIT_FAILURE); }
148
+ // negative vocab size is hacky way of signaling unshared weights. bit yikes.
149
+ int shared_weights = config->vocab_size > 0 ? 1 : 0;
150
+ config->vocab_size = abs(config->vocab_size);
151
+ // figure out the file size
152
+ fseek(file, 0, SEEK_END); // move file pointer to end of file
153
+ *file_size = ftell(file); // get the file size, in bytes
154
+ fclose(file);
155
+ // memory map the Transformer weights into the data pointer
156
+ *fd = open(checkpoint, O_RDONLY); // open in read only mode
157
+ if (*fd == -1) { fprintf(stderr, "open failed!\n"); exit(EXIT_FAILURE); }
158
+ *data = mmap(NULL, *file_size, PROT_READ, MAP_PRIVATE, *fd, 0);
159
+ if (*data == MAP_FAILED) { fprintf(stderr, "mmap failed!\n"); exit(EXIT_FAILURE); }
160
+ float* weights_ptr = *data + sizeof(Config)/sizeof(float);
161
+ memory_map_weights(weights, config, weights_ptr, shared_weights);
162
+ }
163
+
164
+ void build_transformer(Transformer *t, char* checkpoint_path) {
165
+ // read in the Config and the Weights from the checkpoint
166
+ read_checkpoint(checkpoint_path, &t->config, &t->weights, &t->fd, &t->data, &t->file_size);
167
+ // allocate the RunState buffers
168
+ malloc_run_state(&t->state, &t->config);
169
+ }
170
+
171
+ void free_transformer(Transformer* t) {
172
+ // close the memory mapping
173
+ if (t->data != MAP_FAILED) { munmap(t->data, t->file_size); }
174
+ if (t->fd != -1) { close(t->fd); }
175
+ // free the RunState buffers
176
+ free_run_state(&t->state);
177
+ }
178
+
179
+ // ----------------------------------------------------------------------------
180
+ // neural net blocks; the dynamics of the Transformer
181
+
182
+ void rmsnorm(float* o, float* x, float* weight, int size) {
183
+ // calculate sum of squares
184
+ float ss = 0.0f;
185
+ for (int j = 0; j < size; j++) {
186
+ ss += x[j] * x[j];
187
+ }
188
+ ss /= size;
189
+ ss += 1e-5f;
190
+ ss = 1.0f / sqrtf(ss);
191
+ // normalize and scale
192
+ for (int j = 0; j < size; j++) {
193
+ o[j] = weight[j] * (ss * x[j]);
194
+ }
195
+ }
196
+
197
+ void softmax(float* x, int size) {
198
+ // find max value (for numerical stability)
199
+ float max_val = x[0];
200
+ for (int i = 1; i < size; i++) {
201
+ if (x[i] > max_val) {
202
+ max_val = x[i];
203
+ }
204
+ }
205
+ // exp and sum
206
+ float sum = 0.0f;
207
+ for (int i = 0; i < size; i++) {
208
+ x[i] = expf(x[i] - max_val);
209
+ sum += x[i];
210
+ }
211
+ // normalize
212
+ for (int i = 0; i < size; i++) {
213
+ x[i] /= sum;
214
+ }
215
+ }
216
+
217
/* Matrix-vector product: xout (d,) = W (d,n) @ x (n,).
 * This is the inference hot loop; rows are independent, so the outer loop
 * is parallelized with OpenMP when compiled with -fopenmp. */
void matmul(float* xout, float* x, float* w, int n, int d) {
    int i;
    #pragma omp parallel for private(i)
    for (i = 0; i < d; i++) {
        float* row = w + i * n; // row i of W
        float acc = 0.0f;
        for (int j = 0; j < n; j++) {
            acc += row[j] * x[j];
        }
        xout[i] = acc;
    }
}
230
+
231
/* One forward pass of the Transformer for a single token at position pos.
 * Fills the kv cache at this position and returns a pointer to s->logits
 * (owned by the RunState; valid until the next forward call). */
float* forward(Transformer* transformer, int token, int pos) {

    // a few convenience variables
    Config* p = &transformer->config;
    TransformerWeights* w = &transformer->weights;
    RunState* s = &transformer->state;
    float *x = s->x;
    int dim = p->dim;
    int kv_dim = (p->dim * p->n_kv_heads) / p->n_heads;
    int kv_mul = p->n_heads / p->n_kv_heads; // integer multiplier of the kv sharing in multiquery
    int hidden_dim = p->hidden_dim;
    int head_size = dim / p->n_heads;

    // copy the token embedding into x
    float* content_row = w->token_embedding_table + token * dim;
    memcpy(x, content_row, dim*sizeof(*x));

    // forward all the layers
    for(unsigned long long l = 0; l < p->n_layers; l++) {

        // attention rmsnorm
        rmsnorm(s->xb, x, w->rms_att_weight + l*dim, dim);

        // key and value point to the kv cache, so the k/v matmuls below
        // write straight into the cache slot for this position
        // NOTE(review): loff is an int; n_layers*seq_len*kv_dim can overflow
        // 32 bits for very large models / long contexts — confirm limits
        int loff = l * p->seq_len * kv_dim; // kv cache layer offset for convenience
        s->k = s->key_cache + loff + pos * kv_dim;
        s->v = s->value_cache + loff + pos * kv_dim;

        // qkv matmuls for this position
        matmul(s->q, s->xb, w->wq + l*dim*dim, dim, dim);
        matmul(s->k, s->xb, w->wk + l*dim*kv_dim, dim, kv_dim);
        matmul(s->v, s->xb, w->wv + l*dim*kv_dim, dim, kv_dim);

        // RoPE relative positional encoding: complex-valued rotate q and k in each head,
        // treating consecutive pairs (i, i+1) as (real, imag) components
        for (int i = 0; i < dim; i+=2) {
            int head_dim = i % head_size; // position of this pair within its head
            float freq = 1.0f / powf(10000.0f, head_dim / (float)head_size);
            float val = pos * freq;
            float fcr = cosf(val);
            float fci = sinf(val);
            // k only has kv_dim entries, so beyond that only q is rotated
            int rotn = i < kv_dim ? 2 : 1; // how many vectors? 2 = q & k, 1 = q only
            for (int v = 0; v < rotn; v++) {
                float* vec = v == 0 ? s->q : s->k; // the vector to rotate (query or key)
                float v0 = vec[i];
                float v1 = vec[i+1];
                vec[i] = v0 * fcr - v1 * fci;
                vec[i+1] = v0 * fci + v1 * fcr;
            }
        }

        // multihead attention. iterate over all heads (independent, so parallelizable)
        int h;
        #pragma omp parallel for private(h)
        for (h = 0; h < p->n_heads; h++) {
            // get the query vector for this head
            float* q = s->q + h * head_size;
            // attention scores for this head
            float* att = s->att + h * p->seq_len;
            // iterate over all timesteps, including the current one
            for (int t = 0; t <= pos; t++) {
                // get the key vector for this head and at this timestep
                // (h / kv_mul maps the query head onto its shared kv head)
                float* k = s->key_cache + loff + t * kv_dim + (h / kv_mul) * head_size;
                // calculate the attention score as the dot product of q and k
                float score = 0.0f;
                for (int i = 0; i < head_size; i++) {
                    score += q[i] * k[i];
                }
                score /= sqrtf(head_size); // scaled dot-product attention
                // save the score to the attention buffer
                att[t] = score;
            }

            // softmax the scores to get attention weights, from 0..pos inclusively
            softmax(att, pos + 1);

            // weighted sum of the values, store back into xb
            float* xb = s->xb + h * head_size;
            memset(xb, 0, head_size * sizeof(float));
            for (int t = 0; t <= pos; t++) {
                // get the value vector for this head and at this timestep
                float* v = s->value_cache + loff + t * kv_dim + (h / kv_mul) * head_size;
                // get the attention weight for this timestep
                float a = att[t];
                // accumulate the weighted value into xb
                for (int i = 0; i < head_size; i++) {
                    xb[i] += a * v[i];
                }
            }
        }

        // final matmul to get the output of the attention
        matmul(s->xb2, s->xb, w->wo + l*dim*dim, dim, dim);

        // residual connection back into x
        for (int i = 0; i < dim; i++) {
            x[i] += s->xb2[i];
        }

        // ffn rmsnorm
        rmsnorm(s->xb, x, w->rms_ffn_weight + l*dim, dim);

        // Now for FFN in PyTorch we have: self.w2(F.silu(self.w1(x)) * self.w3(x))
        // first calculate self.w1(x) and self.w3(x)
        matmul(s->hb, s->xb, w->w1 + l*dim*hidden_dim, dim, hidden_dim);
        matmul(s->hb2, s->xb, w->w3 + l*dim*hidden_dim, dim, hidden_dim);

        // SwiGLU non-linearity
        for (int i = 0; i < hidden_dim; i++) {
            float val = s->hb[i];
            // silu(x)=x*sigmoid(x), where sigmoid(x) is the logistic sigmoid
            val *= (1.0f / (1.0f + expf(-val)));
            // elementwise multiply with w3(x) (the gating branch)
            val *= s->hb2[i];
            s->hb[i] = val;
        }

        // final matmul to get the output of the ffn
        matmul(s->xb, s->hb, w->w2 + l*dim*hidden_dim, hidden_dim, dim);

        // residual connection
        for (int i = 0; i < dim; i++) {
            x[i] += s->xb[i];
        }
    }

    // final rmsnorm
    rmsnorm(x, x, w->rms_final_weight, dim);

    // classifier into logits
    matmul(s->logits, x, w->wcls, p->dim, p->vocab_size);
    return s->logits;
}
363
+
364
+ // ----------------------------------------------------------------------------
365
+ // The Byte Pair Encoding (BPE) Tokenizer that translates strings <-> tokens
366
+
367
// Pairs a vocab string with its token id, for sorted binary search.
typedef struct {
    char *str;
    int id;
} TokenIndex;

// BPE tokenizer state, loaded from the tokenizer binary file.
typedef struct {
    char** vocab;             // vocab_size strings, indexed by token id
    float* vocab_scores;      // merge scores, parallel to vocab
    TokenIndex *sorted_vocab; // built lazily in encode(), sorted by string for bsearch
    int vocab_size;
    unsigned int max_token_length;  // longest vocab string; bounds the merge buffer
    unsigned char byte_pieces[512]; // the 256 single-byte strings, each NUL-terminated
} Tokenizer;
380
+
381
+ int compare_tokens(const void *a, const void *b) {
382
+ return strcmp(((TokenIndex*)a)->str, ((TokenIndex*)b)->str);
383
+ }
384
+
385
+ void build_tokenizer(Tokenizer* t, char* tokenizer_path, int vocab_size) {
386
+ // i should have written the vocab_size into the tokenizer file... sigh
387
+ t->vocab_size = vocab_size;
388
+ // malloc space to hold the scores and the strings
389
+ t->vocab = (char**)malloc(vocab_size * sizeof(char*));
390
+ t->vocab_scores = (float*)malloc(vocab_size * sizeof(float));
391
+ t->sorted_vocab = NULL; // initialized lazily
392
+ for (int i = 0; i < 256; i++) {
393
+ t->byte_pieces[i * 2] = (unsigned char)i;
394
+ t->byte_pieces[i * 2 + 1] = '\0';
395
+ }
396
+ // read in the file
397
+ FILE *file = fopen(tokenizer_path, "rb");
398
+ if (!file) { fprintf(stderr, "couldn't load %s\n", tokenizer_path); exit(EXIT_FAILURE); }
399
+ if (fread(&t->max_token_length, sizeof(int), 1, file) != 1) { fprintf(stderr, "failed read\n"); exit(EXIT_FAILURE); }
400
+ int len;
401
+ for (int i = 0; i < vocab_size; i++) {
402
+ if (fread(t->vocab_scores + i, sizeof(float), 1, file) != 1) { fprintf(stderr, "failed read\n"); exit(EXIT_FAILURE);}
403
+ if (fread(&len, sizeof(int), 1, file) != 1) { fprintf(stderr, "failed read\n"); exit(EXIT_FAILURE); }
404
+ t->vocab[i] = (char *)malloc(len + 1);
405
+ if (fread(t->vocab[i], len, 1, file) != 1) { fprintf(stderr, "failed read\n"); exit(EXIT_FAILURE); }
406
+ t->vocab[i][len] = '\0'; // add the string terminating token
407
+ }
408
+ fclose(file);
409
+ }
410
+
411
+ void free_tokenizer(Tokenizer* t) {
412
+ for (int i = 0; i < t->vocab_size; i++) { free(t->vocab[i]); }
413
+ free(t->vocab);
414
+ free(t->vocab_scores);
415
+ free(t->sorted_vocab);
416
+ }
417
+
418
+ char* decode(Tokenizer* t, int prev_token, int token) {
419
+ char *piece = t->vocab[token];
420
+ // following BOS (1) token, sentencepiece decoder strips any leading whitespace (see PR #89)
421
+ if (prev_token == 1 && piece[0] == ' ') { piece++; }
422
+ // careful, some tokens designate raw bytes, and look like e.g. '<0x01>'
423
+ // parse this and convert and return the actual byte
424
+ unsigned char byte_val;
425
+ if (sscanf(piece, "<0x%02hhX>", &byte_val) == 1) {
426
+ piece = (char*)t->byte_pieces + byte_val * 2;
427
+ }
428
+ return piece;
429
+ }
430
+
431
/* Print a decoded piece, but refuse single bytes that are neither printable
 * nor whitespace — raw-byte tokens can carry control codes (backspace etc.)
 * that we don't want to emit to the terminal. */
void safe_printf(char *piece) {
    if (piece == NULL || piece[0] == '\0') {
        return;
    }
    if (piece[1] == '\0') {
        // single-byte piece: vet the byte before printing
        unsigned char byte_val = piece[0];
        if (!isprint(byte_val) && !isspace(byte_val)) {
            return; // bad byte, don't print it
        }
    }
    printf("%s", piece);
}
444
+
445
+ int str_lookup(char *str, TokenIndex *sorted_vocab, int vocab_size) {
446
+ // efficiently find the perfect match for str in vocab, return its index or -1 if not found
447
+ TokenIndex tok = { .str = str }; // acts as the key to search for
448
+ TokenIndex *res = bsearch(&tok, sorted_vocab, vocab_size, sizeof(TokenIndex), compare_tokens);
449
+ return res != NULL ? res->id : -1;
450
+ }
451
+
452
/* BPE-encode text into the caller-preallocated tokens[] array (which must be
 * large enough for the upper bound: every byte a token, plus BOS/EOS/prefix).
 * bos != 0 prepends the BOS token (=1); eos != 0 appends the EOS token (=2).
 * *n_tokens receives the final count. */
void encode(Tokenizer* t, char *text, int8_t bos, int8_t eos, int *tokens, int *n_tokens) {
    if (text == NULL) { fprintf(stderr, "cannot encode NULL text\n"); exit(EXIT_FAILURE); }

    if (t->sorted_vocab == NULL) {
        // lazily malloc and sort the vocabulary for bsearch lookups
        t->sorted_vocab = malloc(t->vocab_size * sizeof(TokenIndex));
        for (int i = 0; i < t->vocab_size; i++) {
            t->sorted_vocab[i].str = t->vocab[i];
            t->sorted_vocab[i].id = i;
        }
        qsort(t->sorted_vocab, t->vocab_size, sizeof(TokenIndex), compare_tokens);
    }

    // temporary buffer for merge candidates of two consecutive tokens
    // *2 for concat, +1 for null terminator +2 for UTF8 (in case max_token_length is 1)
    char* str_buffer = malloc((t->max_token_length*2 +1 +2) * sizeof(char));
    size_t str_len = 0;

    // start at 0 tokens
    *n_tokens = 0;

    // add optional BOS (=1) token, if desired
    if (bos) tokens[(*n_tokens)++] = 1;

    // add_dummy_prefix is true by default in sentencepiece, so prepend a
    // dummy ' ' token to the input string, but only if text != ""
    // TODO(upstream): pretty sure this isn't correct in the general case
    // NOTE(review): str_lookup could return -1 here if ' ' were missing from
    // the vocab — assumed present; verify against the tokenizer file
    if (text[0] != '\0') {
        int dummy_prefix = str_lookup(" ", t->sorted_vocab, t->vocab_size);
        tokens[(*n_tokens)++] = dummy_prefix;
    }

    // UTF-8 reference (Wikipedia), codepoint -> byte patterns:
    // U+0000..U+007F    0xxxxxxx
    // U+0080..U+07FF    110xxxxx 10xxxxxx
    // U+0800..U+FFFF    1110xxxx 10xxxxxx 10xxxxxx
    // U+10000..U+10FFFF 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx

    // process the raw (UTF-8) byte sequence of the input string
    for (char *c = text; *c != '\0'; c++) {

        // reset buffer if the current byte is ASCII or a leading byte:
        // continuation bytes all start with "10" in the top two bits,
        // so (*c & 0xC0) != 0x80 means "start of a new codepoint"
        if ((*c & 0xC0) != 0x80) {
            str_len = 0;
        }

        // append the current byte to the buffer (and keep it NUL-terminated)
        str_buffer[str_len++] = *c; // ++ is post-increment, incremented after this line
        str_buffer[str_len] = '\0';

        // while the next character is a continuation byte, continue appending
        // but cap at 4 bytes to avoid overrunning str_buffer on malformed input
        if ((*(c+1) & 0xC0) == 0x80 && str_len < 4) {
            continue;
        }

        // c+1 is not a continuation byte, so we've read in a full codepoint
        int id = str_lookup(str_buffer, t->sorted_vocab, t->vocab_size);

        if (id != -1) {
            // we found this codepoint in vocab, add it as a token
            tokens[(*n_tokens)++] = id;
        } else {
            // byte_fallback encoding: encode each raw byte as a token;
            // +3 because the first 3 vocab entries are <unk>, <s>, </s>
            for (int i=0; i < str_len; i++) {
                tokens[(*n_tokens)++] = (unsigned char)str_buffer[i] + 3;
            }
        }
        str_len = 0; // protect against a sequence of stray UTF8 continuation bytes
    }

    // greedy BPE: repeatedly merge the best-scoring adjacent pair
    while (1) {
        float best_score = -1e10;
        int best_id = -1;
        int best_idx = -1;

        for (int i=0; i < (*n_tokens-1); i++) {
            // check if we can merge the pair (tokens[i], tokens[i+1])
            sprintf(str_buffer, "%s%s", t->vocab[tokens[i]], t->vocab[tokens[i+1]]);
            int id = str_lookup(str_buffer, t->sorted_vocab, t->vocab_size);
            if (id != -1 && t->vocab_scores[id] > best_score) {
                // this merge pair exists in vocab! record its score and position
                best_score = t->vocab_scores[id];
                best_id = id;
                best_idx = i;
            }
        }

        if (best_idx == -1) {
            break; // we couldn't find any more pairs to merge, so we're done
        }

        // merge the consecutive pair (best_idx, best_idx+1) into new token best_id
        tokens[best_idx] = best_id;
        // delete token at position best_idx+1, shift the entire sequence back 1
        for (int i = best_idx+1; i < (*n_tokens-1); i++) {
            tokens[i] = tokens[i+1];
        }
        (*n_tokens)--; // token length decreased
    }

    // add optional EOS (=2) token, if desired
    if (eos) tokens[(*n_tokens)++] = 2;

    free(str_buffer);
}
572
+
573
+ // ----------------------------------------------------------------------------
574
+ // The Sampler, which takes logits and returns a sampled token
575
+ // sampling can be done in a few ways: greedy argmax, sampling, top-p sampling
576
+
577
// (probability, token id) pair; sorted descending during top-p sampling.
typedef struct {
    float prob;
    int index;
} ProbIndex;

// Everything needed to turn logits into a sampled token.
typedef struct {
    int vocab_size;
    ProbIndex* probindex;         // scratch buffer used only by top-p sampling
    float temperature;            // 0.0 means greedy argmax decoding
    float topp;                   // nucleus threshold; values outside (0,1) disable top-p
    unsigned long long rng_state; // xorshift PRNG state
} Sampler;
589
+
590
/* Return the index of the largest probability (ties keep the earliest index). */
int sample_argmax(float* probabilities, int n) {
    int best = 0;
    for (int i = 1; i < n; i++) {
        if (probabilities[i] > probabilities[best]) {
            best = i;
        }
    }
    return best;
}
602
+
603
/* Inverse-CDF sampling from a distribution (must sum to 1).
 * coin is a random number in [0, 1), usually from random_f32(). */
int sample_mult(float* probabilities, int n, float coin) {
    float cdf = 0.0f;
    int i = 0;
    while (i < n) {
        cdf += probabilities[i];
        if (coin < cdf) {
            return i;
        }
        i++;
    }
    return n - 1; // guard against floating point rounding errors
}
615
+
616
+ int compare(const void* a, const void* b) {
617
+ ProbIndex* a_ = (ProbIndex*) a;
618
+ ProbIndex* b_ = (ProbIndex*) b;
619
+ if (a_->prob > b_->prob) return -1;
620
+ if (a_->prob < b_->prob) return 1;
621
+ return 0;
622
+ }
623
+
624
/* Top-p ("nucleus") sampling: sample from the smallest set of tokens whose
 * cumulative probability exceeds topp, so very unlikely tokens are never
 * chosen. probindex is caller-provided scratch of at least n entries.
 * coin is a random number in [0, 1), usually from random_f32(). */
int sample_topp(float* probabilities, int n, float topp, ProbIndex* probindex, float coin) {

    int n0 = 0;
    // values smaller than (1 - topp) / (n - 1) cannot be part of the result,
    // so for efficiency we crop them out as candidates before sorting
    const float cutoff = (1.0f - topp) / (n - 1);
    for (int i = 0; i < n; i++) {
        if (probabilities[i] >= cutoff) {
            probindex[n0].index = i;
            probindex[n0].prob = probabilities[i];
            n0++;
        }
    }
    // sort the surviving candidates in descending order of probability
    qsort(probindex, n0, sizeof(ProbIndex), compare);

    // truncate the list where cumulative probability exceeds topp
    float cumulative_prob = 0.0f;
    int last_idx = n0 - 1; // in case of rounding errors consider all elements
    for (int i = 0; i < n0; i++) {
        cumulative_prob += probindex[i].prob;
        if (cumulative_prob > topp) {
            last_idx = i;
            break; // we've exceeded topp by including last_idx
        }
    }

    // sample from the truncated list, renormalized by cumulative_prob
    float r = coin * cumulative_prob;
    float cdf = 0.0f;
    for (int i = 0; i <= last_idx; i++) {
        cdf += probindex[i].prob;
        if (r < cdf) {
            return probindex[i].index;
        }
    }
    return probindex[last_idx].index; // in case of rounding errors
}
666
+
667
+ void build_sampler(Sampler* sampler, int vocab_size, float temperature, float topp, unsigned long long rng_seed) {
668
+ sampler->vocab_size = vocab_size;
669
+ sampler->temperature = temperature;
670
+ sampler->topp = topp;
671
+ sampler->rng_state = rng_seed;
672
+ // buffer only used with nucleus sampling; may not need but it's ~small
673
+ sampler->probindex = malloc(sampler->vocab_size * sizeof(ProbIndex));
674
+ }
675
+
676
+ void free_sampler(Sampler* sampler) {
677
+ free(sampler->probindex);
678
+ }
679
+
680
unsigned int random_u32(unsigned long long *state) {
    // xorshift* PRNG: https://en.wikipedia.org/wiki/Xorshift#xorshift.2A
    // advance the 64-bit state, then mix and take the top 32 bits
    unsigned long long x = *state;
    x ^= x >> 12;
    x ^= x << 25;
    x ^= x >> 27;
    *state = x;
    return (unsigned int)((x * 0x2545F4914F6CDD1Dull) >> 32);
}
687
float random_f32(unsigned long long *state) { // random float32 in [0,1)
    // keep the top 24 random bits (a float mantissa's worth) and scale by 2^-24
    unsigned int bits = random_u32(state) >> 8;
    return bits / 16777216.0f;
}
690
+
691
+ int sample(Sampler* sampler, float* logits) {
692
+ // sample the token given the logits and some hyperparameters
693
+ int next;
694
+ if (sampler->temperature == 0.0f) {
695
+ // greedy argmax sampling: take the token with the highest probability
696
+ next = sample_argmax(logits, sampler->vocab_size);
697
+ } else {
698
+ // apply the temperature to the logits
699
+ for (int q=0; q<sampler->vocab_size; q++) { logits[q] /= sampler->temperature; }
700
+ // apply softmax to the logits to get the probabilities for next token
701
+ softmax(logits, sampler->vocab_size);
702
+ // flip a (float) coin (this is our source of entropy for sampling)
703
+ float coin = random_f32(&sampler->rng_state);
704
+ // we sample from this distribution to get the next token
705
+ if (sampler->topp <= 0 || sampler->topp >= 1) {
706
+ // simply sample from the predicted probability distribution
707
+ next = sample_mult(logits, sampler->vocab_size, coin);
708
+ } else {
709
+ // top-p (nucleus) sampling, clamping the least likely tokens to zero
710
+ next = sample_topp(logits, sampler->vocab_size, sampler->topp, sampler->probindex, coin);
711
+ }
712
+ }
713
+ return next;
714
+ }
715
+
716
+ // ----------------------------------------------------------------------------
717
+ // utilities: time
718
+
719
+ long time_in_ms() {
720
+ // return time in milliseconds, for benchmarking the model speed
721
+ struct timespec time;
722
+ clock_gettime(CLOCK_REALTIME, &time);
723
+ return time.tv_sec * 1000 + time.tv_nsec / 1000000;
724
+ }
725
+
726
+ // ----------------------------------------------------------------------------
727
+ // generation loop
728
+
729
+ void generate(Transformer *transformer, Tokenizer *tokenizer, Sampler *sampler, char *prompt, int steps) {
730
+ char *empty_prompt = "";
731
+ if (prompt == NULL) { prompt = empty_prompt; }
732
+
733
+ // encode the (string) prompt into tokens sequence
734
+ int num_prompt_tokens = 0;
735
+ int* prompt_tokens = (int*)malloc((strlen(prompt)+3) * sizeof(int)); // +3 for '\0', ?BOS, ?EOS
736
+ encode(tokenizer, prompt, 1, 0, prompt_tokens, &num_prompt_tokens);
737
+ if (num_prompt_tokens < 1) {
738
+ fprintf(stderr, "something is wrong, expected at least 1 prompt token\n");
739
+ exit(EXIT_FAILURE);
740
+ }
741
+
742
+ // start the main loop
743
+ long start = 0; // used to time our code, only initialized after first iteration
744
+ int next; // will store the next token in the sequence
745
+ int token = prompt_tokens[0]; // kick off with the first token in the prompt
746
+ int pos = 0; // position in the sequence
747
+ while (pos < steps) {
748
+
749
+ // forward the transformer to get logits for the next token
750
+ float* logits = forward(transformer, token, pos);
751
+
752
+ // advance the state machine
753
+ if (pos < num_prompt_tokens - 1) {
754
+ // if we are still processing the input prompt, force the next prompt token
755
+ next = prompt_tokens[pos + 1];
756
+ } else {
757
+ // otherwise sample the next token from the logits
758
+ next = sample(sampler, logits);
759
+ }
760
+ pos++;
761
+
762
+ // data-dependent terminating condition: the BOS (=1) token delimits sequences
763
+ if (next == 1) { break; }
764
+
765
+ // print the token as string, decode it with the Tokenizer object
766
+ char* piece = decode(tokenizer, token, next);
767
+ safe_printf(piece); // same as printf("%s", piece), but skips "unsafe" bytes
768
+ fflush(stdout);
769
+ token = next;
770
+
771
+ // init the timer here because the first iteration can be slower
772
+ if (start == 0) { start = time_in_ms(); }
773
+ }
774
+ printf("\n");
775
+
776
+ // report achieved tok/s (pos-1 because the timer starts after first iteration)
777
+ if (pos > 1) {
778
+ long end = time_in_ms();
779
+ fprintf(stderr, "achieved tok/s: %f\n", (pos-1) / (double)(end-start)*1000);
780
+ }
781
+
782
+ free(prompt_tokens);
783
+ }
784
+
785
void read_stdin(const char* guide, char* buffer, size_t bufsize) {
    // Print `guide` as a prompt, then read one line from stdin into `buffer`,
    // stripping the trailing newline. On EOF/error the buffer is left untouched.
    printf("%s", guide);
    if (fgets(buffer, bufsize, stdin) == NULL) {
        return;
    }
    size_t len = strlen(buffer);
    if (len > 0 && buffer[len - 1] == '\n') {
        buffer[len - 1] = '\0'; // drop the newline fgets kept
    }
}
795
+
796
+ // ----------------------------------------------------------------------------
797
+ // chat loop
798
+ // I manually inspected the tokens for a few chat conversations compared to
799
+ // python reference and that seemed ok, but this was not thoroughly tested and
800
+ // is not safely implemented, it's more a proof of concept atm.
801
+
802
void chat(Transformer *transformer, Tokenizer *tokenizer, Sampler *sampler,
          char *cli_user_prompt, char *cli_system_prompt, int steps) {
    // Interactive chat loop using the Llama 2 Chat prompt schema.
    // Alternates between consuming user-turn tokens (rendered as [INST] blocks)
    // and sampling Assistant tokens, for at most `steps` total positions.

    // buffers for reading the system prompt and user prompt from stdin
    // you'll notice they are soomewhat haphazardly and unsafely set atm
    // NOTE(review): fixed 512/1152-byte buffers; read_stdin bounds its reads,
    // but the strcpy calls below do not check cli_* lengths — verify callers.
    char system_prompt[512];
    char user_prompt[512];
    char rendered_prompt[1152];
    int num_prompt_tokens = 0;
    int* prompt_tokens = (int*)malloc(1152 * sizeof(int));
    // NOTE(review): malloc result is not checked before use
    int user_idx;

    // start the main loop
    int8_t user_turn = 1; // user starts
    int next;        // will store the next token in the sequence
    int token;       // stores the current token to feed into the transformer
    int prev_token;  // NOTE(review): declared but never used in this function
    int pos = 0;     // position in the sequence
    while (pos < steps) {

        // when it is the user's turn to contribute tokens to the dialog...
        if (user_turn) {
            // get the (optional) system prompt at position 0
            if (pos == 0) {
                // at position 0, the user can also contribute a system prompt
                if (cli_system_prompt == NULL) {
                    // system prompt was not passed in, attempt to get it from stdin
                    // NOTE(review): on stdin EOF the buffer stays uninitialized and
                    // is read below — confirm acceptable for this proof-of-concept
                    read_stdin("Enter system prompt (optional): ", system_prompt, sizeof(system_prompt));
                } else {
                    // system prompt was passed in, use it
                    strcpy(system_prompt, cli_system_prompt);
                }
            }
            // get the user prompt
            if (pos == 0 && cli_user_prompt != NULL) {
                // user prompt for position 0 was passed in, use it
                strcpy(user_prompt, cli_user_prompt);
            } else {
                // otherwise get user prompt from stdin
                read_stdin("User: ", user_prompt, sizeof(user_prompt));
            }
            // render user/system prompts into the Llama 2 Chat schema
            if (pos == 0 && system_prompt[0] != '\0') {
                char system_template[] = "[INST] <<SYS>>\n%s\n<</SYS>>\n\n%s [/INST]";
                sprintf(rendered_prompt, system_template, system_prompt, user_prompt);
            } else {
                char user_template[] = "[INST] %s [/INST]";
                sprintf(rendered_prompt, user_template, user_prompt);
            }
            // encode the rendered prompt into tokens (BOS=1, no EOS)
            encode(tokenizer, rendered_prompt, 1, 0, prompt_tokens, &num_prompt_tokens);
            user_idx = 0; // reset the user index
            user_turn = 0;
            printf("Assistant: ");
        }

        // determine the token to pass into the transformer next
        if (user_idx < num_prompt_tokens) {
            // if we are still processing the input prompt, force the next prompt token
            token = prompt_tokens[user_idx++];
        } else {
            // otherwise use the next token sampled from previous turn
            token = next;
        }
        // EOS (=2) token ends the Assistant turn
        if (token == 2) { user_turn = 1; }

        // forward the transformer to get logits for the next token
        float* logits = forward(transformer, token, pos);
        next = sample(sampler, logits);
        pos++;

        if (user_idx >= num_prompt_tokens && next != 2) {
            // the Assistant is responding, so print its output
            char* piece = decode(tokenizer, token, next);
            safe_printf(piece); // same as printf("%s", piece), but skips "unsafe" bytes
            fflush(stdout);
        }
        if (next == 2) { printf("\n"); }
    }
    printf("\n");
    free(prompt_tokens);
}
885
+
886
+
887
+ // ----------------------------------------------------------------------------
888
+ // CLI, include only if not testing
889
+ #ifndef TESTING
890
+
891
void error_usage() {
    // Print the CLI usage/help text to stderr, then terminate with failure.
    static const char *usage_lines[] = {
        "Usage: run <checkpoint> [options]\n",
        "Example: run model.bin -n 256 -i \"Once upon a time\"\n",
        "Options:\n",
        "  -t <float>  temperature in [0,inf], default 1.0\n",
        "  -p <float>  p value in top-p (nucleus) sampling in [0,1] default 0.9\n",
        "  -s <int>    random seed, default time(NULL)\n",
        "  -n <int>    number of steps to run for, default 256. 0 = max_seq_len\n",
        "  -i <string> input prompt\n",
        "  -z <string> optional path to custom tokenizer\n",
        "  -m <string> mode: generate|chat, default: generate\n",
        "  -y <string> (optional) system prompt in chat mode\n",
    };
    for (size_t i = 0; i < sizeof(usage_lines) / sizeof(usage_lines[0]); i++) {
        fputs(usage_lines[i], stderr);
    }
    exit(EXIT_FAILURE);
}
905
+
906
+ int main(int argc, char *argv[]) {
907
+
908
+ // default parameters
909
+ char *checkpoint_path = NULL; // e.g. out/model.bin
910
+ char *tokenizer_path = "tokenizer.bin";
911
+ float temperature = 1.0f; // 0.0 = greedy deterministic. 1.0 = original. don't set higher
912
+ float topp = 0.9f; // top-p in nucleus sampling. 1.0 = off. 0.9 works well, but slower
913
+ int steps = 256; // number of steps to run for
914
+ char *prompt = NULL; // prompt string
915
+ unsigned long long rng_seed = 0; // seed rng with time by default
916
+ char *mode = "generate"; // generate|chat
917
+ char *system_prompt = NULL; // the (optional) system prompt to use in chat mode
918
+
919
+ // poor man's C argparse so we can override the defaults above from the command line
920
+ if (argc >= 2) { checkpoint_path = argv[1]; } else { error_usage(); }
921
+ for (int i = 2; i < argc; i+=2) {
922
+ // do some basic validation
923
+ if (i + 1 >= argc) { error_usage(); } // must have arg after flag
924
+ if (argv[i][0] != '-') { error_usage(); } // must start with dash
925
+ if (strlen(argv[i]) != 2) { error_usage(); } // must be -x (one dash, one letter)
926
+ // read in the args
927
+ if (argv[i][1] == 't') { temperature = atof(argv[i + 1]); }
928
+ else if (argv[i][1] == 'p') { topp = atof(argv[i + 1]); }
929
+ else if (argv[i][1] == 's') { rng_seed = atoi(argv[i + 1]); }
930
+ else if (argv[i][1] == 'n') { steps = atoi(argv[i + 1]); }
931
+ else if (argv[i][1] == 'i') { prompt = argv[i + 1]; }
932
+ else if (argv[i][1] == 'z') { tokenizer_path = argv[i + 1]; }
933
+ else if (argv[i][1] == 'm') { mode = argv[i + 1]; }
934
+ else if (argv[i][1] == 'y') { system_prompt = argv[i + 1]; }
935
+ else { error_usage(); }
936
+ }
937
+
938
+ // parameter validation/overrides
939
+ if (rng_seed <= 0) rng_seed = (unsigned int)time(NULL);
940
+ if (temperature < 0.0) temperature = 0.0;
941
+ if (topp < 0.0 || 1.0 < topp) topp = 0.9;
942
+ if (steps < 0) steps = 0;
943
+
944
+ // build the Transformer via the model .bin file
945
+ Transformer transformer;
946
+ build_transformer(&transformer, checkpoint_path);
947
+ if (steps == 0 || steps > transformer.config.seq_len) steps = transformer.config.seq_len; // override to ~max length
948
+
949
+ // build the Tokenizer via the tokenizer .bin file
950
+ Tokenizer tokenizer;
951
+ build_tokenizer(&tokenizer, tokenizer_path, transformer.config.vocab_size);
952
+
953
+ // build the Sampler
954
+ Sampler sampler;
955
+ build_sampler(&sampler, transformer.config.vocab_size, temperature, topp, rng_seed);
956
+
957
+ // run!
958
+ if (strcmp(mode, "generate") == 0) {
959
+ generate(&transformer, &tokenizer, &sampler, prompt, steps);
960
+ } else if (strcmp(mode, "chat") == 0) {
961
+ chat(&transformer, &tokenizer, &sampler, prompt, system_prompt, steps);
962
+ } else {
963
+ fprintf(stderr, "unknown mode: %s\n", mode);
964
+ error_usage();
965
+ }
966
+
967
+ // memory and file handles cleanup
968
+ free_sampler(&sampler);
969
+ free_tokenizer(&tokenizer);
970
+ free_transformer(&transformer);
971
+ return 0;
972
+ }
973
+ #endif
Version 2/42M_base/runq.c ADDED
@@ -0,0 +1,1092 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Inference for Llama-2 Transformer model in pure C, int8 quantized forward pass. */
2
+
3
+ #include <stdio.h>
4
+ #include <stdlib.h>
5
+ #include <ctype.h>
6
+ #include <stdint.h>
7
+ #include <time.h>
8
+ #include <math.h>
9
+ #include <string.h>
10
+ #include <fcntl.h>
11
+ #if defined _WIN32
12
+ #include "win.h"
13
+ #else
14
+ #include <unistd.h>
15
+ #include <sys/mman.h>
16
+ #endif
17
// ----------------------------------------------------------------------------
// Globals
// Group size for weight quantization: each run of GS consecutive int8 values
// shares one float scale factor. Set from the checkpoint header in
// read_checkpoint() before any quantize/dequantize/matmul call.
int GS = 0; // group size global for quantization of the weights
20
+
21
+ // ----------------------------------------------------------------------------
22
+ // Transformer model
23
+
24
// Hyperparameters read verbatim from the checkpoint header; layout must match
// the export script's struct exactly (it is fread() in one shot).
typedef struct {
    int dim;        // transformer dimension
    int hidden_dim; // for ffn layers
    int n_layers;   // number of layers
    int n_heads;    // number of query heads
    int n_kv_heads; // number of key/value heads (can be < query heads because of multiquery)
    int vocab_size; // vocabulary size (NOTE(review): original comment said "usually 256
                    // (byte-level)", which looks stale for Llama-2-style checkpoints — confirm)
    int seq_len;    // max sequence length
} Config;
33
+
34
// A group-quantized tensor: q holds int8 values, s holds one float scale per
// group of GS values, so element i dequantizes as q[i] * s[i / GS].
typedef struct {
    int8_t* q; // quantized values
    float* s;  // scaling factors (one per GS-sized group)
} QuantizedTensor;
38
+
39
// All model weights. Matmul weights stay quantized (pointing into the mmap'd
// checkpoint); only the rmsnorm vectors and a dequantized copy of the token
// embedding table are plain float.
typedef struct {
    // token embedding table
    QuantizedTensor *q_tokens;    // (vocab_size, dim)
    float* token_embedding_table; // same, but dequantized (malloc'd copy)

    // weights for rmsnorms
    float* rms_att_weight; // (layer, dim) rmsnorm weights
    float* rms_ffn_weight; // (layer, dim)
    // weights for matmuls. note dim == n_heads * head_size
    QuantizedTensor *wq; // (layer, dim, n_heads * head_size)
    QuantizedTensor *wk; // (layer, dim, n_kv_heads * head_size)
    QuantizedTensor *wv; // (layer, dim, n_kv_heads * head_size)
    QuantizedTensor *wo; // (layer, n_heads * head_size, dim)
    // weights for ffn
    QuantizedTensor *w1; // (layer, hidden_dim, dim)
    QuantizedTensor *w2; // (layer, dim, hidden_dim)
    QuantizedTensor *w3; // (layer, hidden_dim, dim)
    // final rmsnorm
    float* rms_final_weight; // (dim,)
    // (optional) classifier weights for the logits, on the last layer;
    // aliases q_tokens when the checkpoint flags a shared classifier
    QuantizedTensor *wcls;
} TransformerWeights;
61
+
62
// Scratch buffers for one forward pass, allocated once in malloc_run_state.
typedef struct {
    // current wave of activations
    float *x;   // activation at current time stamp (dim,)
    float *xb;  // same, but inside a residual branch (dim,)
    float *xb2; // an additional buffer just for convenience (dim,)
    float *hb;  // buffer for hidden dimension in the ffn (hidden_dim,)
    float *hb2; // buffer for hidden dimension in the ffn (hidden_dim,)
    QuantizedTensor xq; // quantized x (dim,)
    QuantizedTensor hq; // quantized hb (hidden_dim,)
    float *q;   // query (dim,)
    float *k;   // key (kv_dim,) — allocated with kv_dim, not dim
    float *v;   // value (kv_dim,)
    float *att; // buffer for scores/attention values (n_heads, seq_len)
    float *logits; // output logits (vocab_size,)
    // kv cache (allocated per-layer with kv_dim columns)
    float* key_cache;   // (layer, seq_len, kv_dim)
    float* value_cache; // (layer, seq_len, kv_dim)
} RunState;
80
+
81
// Top-level model object bundling config, weights, activation buffers, and the
// bookkeeping needed to unmap/close the checkpoint file on teardown.
typedef struct {
    Config config; // the hyperparameters of the architecture (the blueprint)
    TransformerWeights weights; // the weights of the model
    RunState state; // buffers for the "wave" of activations in the forward pass
    // some more state needed to properly clean up the memory mapping (sigh)
    int fd;            // file descriptor for memory mapping
    float* data;       // memory mapped data pointer
    ssize_t file_size; // size of the checkpoint file in bytes
} Transformer;
90
+
91
+ void malloc_run_state(RunState* s, Config* p) {
92
+ // we calloc instead of malloc to keep valgrind happy
93
+ int kv_dim = (p->dim * p->n_kv_heads) / p->n_heads;
94
+ s->x = calloc(p->dim, sizeof(float));
95
+ s->xb = calloc(p->dim, sizeof(float));
96
+ s->xb2 = calloc(p->dim, sizeof(float));
97
+ s->hb = calloc(p->hidden_dim, sizeof(float));
98
+ s->hb2 = calloc(p->hidden_dim, sizeof(float));
99
+ s->xq = (QuantizedTensor) { .q = calloc(p->dim, sizeof(int8_t)), .s = calloc(p->dim, sizeof(float)) };
100
+ s->hq = (QuantizedTensor) { .q = calloc(p->hidden_dim, sizeof(int8_t)), .s = calloc(p->hidden_dim, sizeof(float)) };
101
+ s->q = calloc(p->dim, sizeof(float));
102
+ s->k = calloc(kv_dim, sizeof(float));
103
+ s->v = calloc(kv_dim, sizeof(float));
104
+ s->att = calloc(p->n_heads * p->seq_len, sizeof(float));
105
+ s->logits = calloc(p->vocab_size, sizeof(float));
106
+ s->key_cache = calloc(p->n_layers * p->seq_len * kv_dim, sizeof(float));
107
+ s->value_cache = calloc(p->n_layers * p->seq_len * kv_dim, sizeof(float));
108
+ // ensure all mallocs went fine
109
+ if (!s->x || !s->xb || !s->xb2 || !s->hb || !s->hb2 || !s->q
110
+ || !s->k || !s->v || !s->att || !s->logits || !s->key_cache
111
+ || !s->value_cache) {
112
+ fprintf(stderr, "malloc failed!\n");
113
+ exit(EXIT_FAILURE);
114
+ }
115
+ }
116
+
117
+ void free_run_state(RunState* s) {
118
+ free(s->x);
119
+ free(s->xb);
120
+ free(s->xb2);
121
+ free(s->hb);
122
+ free(s->hb2);
123
+ free(s->xq.q);
124
+ free(s->xq.s);
125
+ free(s->hq.q);
126
+ free(s->hq.s);
127
+ free(s->q);
128
+ free(s->k);
129
+ free(s->v);
130
+ free(s->att);
131
+ free(s->logits);
132
+ free(s->key_cache);
133
+ free(s->value_cache);
134
+ }
135
+
136
+ // ----------------------------------------------------------------------------
137
+ // Quantization functions
138
+
139
+ void dequantize(QuantizedTensor *qx, float* x, int n) {
140
+ for (int i = 0; i < n; i++) {
141
+ x[i] = qx->q[i] * qx->s[i / GS];
142
+ }
143
+ }
144
+
145
+ void quantize(QuantizedTensor *qx, float* x, int n) {
146
+ int num_groups = n / GS;
147
+ float Q_MAX = 127.0f;
148
+
149
+ for (int group = 0; group < num_groups; group++) {
150
+
151
+ // find the max absolute value in the current group
152
+ float wmax = 0.0;
153
+ for (int i = 0; i < GS; i++) {
154
+ float val = fabs(x[group * GS + i]);
155
+ if (val > wmax) {
156
+ wmax = val;
157
+ }
158
+ }
159
+
160
+ // calculate and write the scaling factor
161
+ float scale = wmax / Q_MAX;
162
+ qx->s[group] = scale;
163
+
164
+ // calculate and write the quantized values
165
+ for (int i = 0; i < GS; i++) {
166
+ float quant_value = x[group * GS + i] / scale; // scale
167
+ int8_t quantized = (int8_t) round(quant_value); // round and clamp
168
+ qx->q[group * GS + i] = quantized;
169
+ }
170
+ }
171
+ }
172
+
173
+ /* initialize `n` x quantized tensor (with `size_each` elements), starting from memory pointed at *ptr */
174
+ QuantizedTensor *init_quantized_tensors(void **ptr, int n, int size_each) {
175
+ void *p = *ptr;
176
+ QuantizedTensor *res = malloc(n * sizeof(QuantizedTensor));
177
+ for(int i=0; i<n; i++) {
178
+ /* map quantized int8 values*/
179
+ res[i].q = (int8_t*)p;
180
+ p = (int8_t*)p + size_each;
181
+ /* map scale factors */
182
+ res[i].s = (float*)p;
183
+ p = (float*)p + size_each / GS;
184
+ }
185
+ *ptr = p; // advance ptr to current position
186
+ return res;
187
+ }
188
+
189
+ void memory_map_weights(TransformerWeights *w, Config* p, void* ptr, uint8_t shared_classifier) {
190
+ int head_size = p->dim / p->n_heads;
191
+ // first are the parameters that are kept in fp32 (the rmsnorm (1D) weights)
192
+ float* fptr = (float*) ptr; // cast our pointer to float*
193
+ w->rms_att_weight = fptr;
194
+ fptr += p->n_layers * p->dim;
195
+ w->rms_ffn_weight = fptr;
196
+ fptr += p->n_layers * p->dim;
197
+ w->rms_final_weight = fptr;
198
+ fptr += p->dim;
199
+
200
+ // now read all the quantized weights
201
+ ptr = (void*)fptr; // now cast the pointer back to void*
202
+ w->q_tokens = init_quantized_tensors(&ptr, 1, p->vocab_size * p->dim);
203
+ // dequantize token embedding table
204
+ w->token_embedding_table = malloc(p->vocab_size * p->dim * sizeof(float));
205
+ dequantize(w->q_tokens, w->token_embedding_table, p->vocab_size * p->dim);
206
+
207
+ w->wq = init_quantized_tensors(&ptr, p->n_layers, p->dim * (p->n_heads * head_size));
208
+ w->wk = init_quantized_tensors(&ptr, p->n_layers, p->dim * (p->n_kv_heads * head_size));
209
+ w->wv = init_quantized_tensors(&ptr, p->n_layers, p->dim * (p->n_kv_heads * head_size));
210
+ w->wo = init_quantized_tensors(&ptr, p->n_layers, (p->n_heads * head_size) * p->dim);
211
+
212
+ w->w1 = init_quantized_tensors(&ptr, p->n_layers, p->dim * p->hidden_dim);
213
+ w->w2 = init_quantized_tensors(&ptr, p->n_layers, p->hidden_dim * p->dim);
214
+ w->w3 = init_quantized_tensors(&ptr, p->n_layers, p->dim * p->hidden_dim);
215
+
216
+ w->wcls = shared_classifier ? w->q_tokens : init_quantized_tensors(&ptr, 1, p->dim * p->vocab_size);
217
+ }
218
+
219
// Read the version-2 quantized checkpoint: validate magic + version, read the
// Config and quantization flags with stdio, then mmap the whole file and hand
// the weight region to memory_map_weights. Outputs fd/data/file_size so the
// caller can munmap/close later.
void read_checkpoint(char* checkpoint, Config* config, TransformerWeights* weights,
                     int* fd, float** data, ssize_t* file_size) {
    FILE *file = fopen(checkpoint, "rb");
    if (!file) { fprintf(stderr, "Couldn't open file %s\n", checkpoint); exit(EXIT_FAILURE); }
    // read in magic number (uint32), has to be 0x616b3432, i.e. "ak42" in ASCII
    uint32_t magic_number;
    if (fread(&magic_number, sizeof(uint32_t), 1, file) != 1) { exit(EXIT_FAILURE); }
    if (magic_number != 0x616b3432) { fprintf(stderr, "Bad magic number\n"); exit(EXIT_FAILURE); }
    // read in the version number (uint32), has to be 2
    // NOTE(review): read into a signed int although the comment says uint32;
    // harmless for version==2 but the types disagree — confirm intent
    int version;
    if (fread(&version, sizeof(int), 1, file) != 1) { exit(EXIT_FAILURE); }
    if (version != 2) { fprintf(stderr, "Bad version %d, need version 2\n", version); exit(EXIT_FAILURE); }
    int header_size = 256; // the header size for version 2 in bytes
    // read in the Config
    if (fread(config, sizeof(Config), 1, file) != 1) { exit(EXIT_FAILURE); }
    // read in flags
    uint8_t shared_classifier; // a byte to indicate if the classifier is shared
    if (fread(&shared_classifier, sizeof(uint8_t), 1, file) != 1) { exit(EXIT_FAILURE); }
    int group_size; // the group size used in quantization
    if (fread(&group_size, sizeof(int), 1, file) != 1) { exit(EXIT_FAILURE); }
    GS = group_size; // set as global, as it will be used in many places
    // figure out the file size
    // NOTE(review): fseek/ftell results are not checked; ftell returns -1 on error
    fseek(file, 0, SEEK_END); // move file pointer to end of file
    *file_size = ftell(file); // get the file size, in bytes
    fclose(file);
    // memory map the Transformer weights into the data pointer
    *fd = open(checkpoint, O_RDONLY); // open in read only mode
    if (*fd == -1) { fprintf(stderr, "open failed!\n"); exit(EXIT_FAILURE); }
    *data = mmap(NULL, *file_size, PROT_READ, MAP_PRIVATE, *fd, 0);
    if (*data == MAP_FAILED) { fprintf(stderr, "mmap failed!\n"); exit(EXIT_FAILURE); }
    void* weights_ptr = ((char*)*data) + header_size; // skip header bytes. char is 1 byte
    memory_map_weights(weights, config, weights_ptr, shared_classifier);
}
252
+
253
+ void build_transformer(Transformer *t, char* checkpoint_path) {
254
+ // read in the Config and the Weights from the checkpoint
255
+ read_checkpoint(checkpoint_path, &t->config, &t->weights, &t->fd, &t->data, &t->file_size);
256
+ // allocate the RunState buffers
257
+ malloc_run_state(&t->state, &t->config);
258
+ }
259
+
260
+ void free_transformer(Transformer* t) {
261
+ // free QuantizedTensors
262
+ free(t->weights.q_tokens);
263
+ free(t->weights.token_embedding_table);
264
+ free(t->weights.wq);
265
+ free(t->weights.wk);
266
+ free(t->weights.wv);
267
+ free(t->weights.wo);
268
+ free(t->weights.w1);
269
+ free(t->weights.w2);
270
+ free(t->weights.w3);
271
+ if(t->weights.wcls != t->weights.q_tokens) { free(t->weights.wcls); }
272
+ // close the memory mapping
273
+ if (t->data != MAP_FAILED) { munmap(t->data, t->file_size); }
274
+ if (t->fd != -1) { close(t->fd); }
275
+ // free the RunState buffers
276
+ free_run_state(&t->state);
277
+ }
278
+
279
+ // ----------------------------------------------------------------------------
280
+ // neural net blocks; the dynamics of the Transformer
281
+
282
+ void rmsnorm(float* o, float* x, float* weight, int size) {
283
+ // calculate sum of squares
284
+ float ss = 0.0f;
285
+ for (int j = 0; j < size; j++) {
286
+ ss += x[j] * x[j];
287
+ }
288
+ ss /= size;
289
+ ss += 1e-5f;
290
+ ss = 1.0f / sqrtf(ss);
291
+ // normalize and scale
292
+ for (int j = 0; j < size; j++) {
293
+ o[j] = weight[j] * (ss * x[j]);
294
+ }
295
+ }
296
+
297
+ void softmax(float* x, int size) {
298
+ // find max value (for numerical stability)
299
+ float max_val = x[0];
300
+ for (int i = 1; i < size; i++) {
301
+ if (x[i] > max_val) {
302
+ max_val = x[i];
303
+ }
304
+ }
305
+ // exp and sum
306
+ float sum = 0.0f;
307
+ for (int i = 0; i < size; i++) {
308
+ x[i] = expf(x[i] - max_val);
309
+ sum += x[i];
310
+ }
311
+ // normalize
312
+ for (int i = 0; i < size; i++) {
313
+ x[i] /= sum;
314
+ }
315
+ }
316
+
317
+ void matmul(float* xout, QuantizedTensor *x, QuantizedTensor *w, int n, int d) {
318
+ // W (d,n) @ x (n,) -> xout (d,)
319
+ // by far the most amount of time is spent inside this little function
320
+ // inputs to this function are both quantized
321
+
322
+ int i;
323
+ #pragma omp parallel for private(i)
324
+ for (i = 0; i < d; i++) {
325
+
326
+ float val = 0.0f;
327
+ int32_t ival = 0;
328
+ int in = i * n;
329
+
330
+ // do the matmul in groups of GS
331
+ int j;
332
+ for (j = 0; j <= n - GS; j += GS) {
333
+ for (int k = 0; k < GS; k++) {
334
+ ival += ((int32_t) x->q[j + k]) * ((int32_t) w->q[in + j + k]);
335
+ }
336
+ val += ((float) ival) * w->s[(in + j) / GS] * x->s[j / GS];
337
+ ival = 0;
338
+ }
339
+
340
+ xout[i] = val;
341
+ }
342
+ }
343
+
344
// One full transformer forward pass for a single token at position `pos`.
// Returns a pointer to s->logits (vocab_size,), valid until the next call.
// Activations flow through RunState scratch buffers; weights stay quantized
// and are re-quantized at each matmul boundary.
float* forward(Transformer* transformer, int token, int pos) {

    // a few convenience variables
    Config* p = &transformer->config;
    TransformerWeights* w = &transformer->weights;
    RunState* s = &transformer->state;
    float *x = s->x;
    int dim = p->dim;
    int kv_dim = (p->dim * p->n_kv_heads) / p->n_heads;
    int kv_mul = p->n_heads / p->n_kv_heads; // integer multiplier of the kv sharing in multiquery
    int hidden_dim = p->hidden_dim;
    int head_size = dim / p->n_heads;

    // copy the token embedding into x (the dequantized table, so plain floats)
    memcpy(x, w->token_embedding_table + token*dim, dim * sizeof(float));

    // forward all the layers
    for(int l = 0; l < p->n_layers; l++) {

        // attention rmsnorm
        rmsnorm(s->xb, x, w->rms_att_weight + l*dim, dim);

        // qkv matmuls for this position (quantize activations once, reuse 3x)
        quantize(&s->xq, s->xb, dim);
        matmul(s->q, &s->xq, w->wq + l, dim, dim);
        matmul(s->k, &s->xq, w->wk + l, dim, kv_dim);
        matmul(s->v, &s->xq, w->wv + l, dim, kv_dim);

        // RoPE relative positional encoding: complex-valued rotate q and k in each head
        for (int i = 0; i < dim; i+=2) {
            int head_dim = i % head_size; // position of this pair within its head
            float freq = 1.0f / powf(10000.0f, head_dim / (float)head_size);
            float val = pos * freq;
            float fcr = cosf(val);
            float fci = sinf(val);
            int rotn = i < kv_dim ? 2 : 1; // how many vectors? 2 = q & k, 1 = q only
            for (int v = 0; v < rotn; v++) {
                float* vec = v == 0 ? s->q : s->k; // the vector to rotate (query or key)
                float v0 = vec[i];
                float v1 = vec[i+1];
                vec[i] = v0 * fcr - v1 * fci;
                vec[i+1] = v0 * fci + v1 * fcr;
            }
        }

        // save key,value at this time step (pos) to our kv cache
        int loff = l * p->seq_len * kv_dim; // kv cache layer offset for convenience
        float* key_cache_row = s->key_cache + loff + pos * kv_dim;
        float* value_cache_row = s->value_cache + loff + pos * kv_dim;
        memcpy(key_cache_row, s->k, kv_dim * sizeof(*key_cache_row));
        memcpy(value_cache_row, s->v, kv_dim * sizeof(*value_cache_row));

        // multihead attention. iterate over all heads (independent, so OMP-safe)
        int h;
        #pragma omp parallel for private(h)
        for (h = 0; h < p->n_heads; h++) {
            // get the query vector for this head
            float* q = s->q + h * head_size;
            // attention scores for this head
            float* att = s->att + h * p->seq_len;
            // iterate over all timesteps, including the current one
            for (int t = 0; t <= pos; t++) {
                // get the key vector for this head and at this timestep
                // (h / kv_mul maps query heads onto shared kv heads)
                float* k = s->key_cache + loff + t * kv_dim + (h / kv_mul) * head_size;
                // calculate the attention score as the dot product of q and k
                float score = 0.0f;
                for (int i = 0; i < head_size; i++) {
                    score += q[i] * k[i];
                }
                score /= sqrtf(head_size);
                // save the score to the attention buffer
                att[t] = score;
            }

            // softmax the scores to get attention weights, from 0..pos inclusively
            softmax(att, pos + 1);

            // weighted sum of the values, store back into xb
            float* xb = s->xb + h * head_size;
            memset(xb, 0, head_size * sizeof(float));
            for (int t = 0; t <= pos; t++) {
                // get the value vector for this head and at this timestep
                float* v = s->value_cache + loff + t * kv_dim + (h / kv_mul) * head_size;
                // get the attention weight for this timestep
                float a = att[t];
                // accumulate the weighted value into xb
                for (int i = 0; i < head_size; i++) {
                    xb[i] += a * v[i];
                }
            }
        }

        // final matmul to get the output of the attention
        quantize(&s->xq, s->xb, dim);
        matmul(s->xb2, &s->xq, w->wo + l, dim, dim);

        // residual connection back into x
        for (int i = 0; i < dim; i++) {
            x[i] += s->xb2[i];
        }

        // ffn rmsnorm
        rmsnorm(s->xb, x, w->rms_ffn_weight + l*dim, dim);

        // Now for FFN in PyTorch we have: self.w2(F.silu(self.w1(x)) * self.w3(x))
        // first calculate self.w1(x) and self.w3(x)
        quantize(&s->xq, s->xb, dim);
        matmul(s->hb, &s->xq, w->w1 + l, dim, hidden_dim);
        matmul(s->hb2, &s->xq, w->w3 + l, dim, hidden_dim);

        // SwiGLU non-linearity
        for (int i = 0; i < hidden_dim; i++) {
            float val = s->hb[i];
            // silu(x)=x*σ(x), where σ(x) is the logistic sigmoid
            val *= (1.0f / (1.0f + expf(-val)));
            // elementwise multiply with w3(x)
            val *= s->hb2[i];
            s->hb[i] = val;
        }

        // final matmul to get the output of the ffn
        quantize(&s->hq, s->hb, hidden_dim);
        matmul(s->xb, &s->hq, w->w2 + l, hidden_dim, dim);

        // residual connection
        for (int i = 0; i < dim; i++) {
            x[i] += s->xb[i];
        }
    }

    // final rmsnorm (in place: o aliases x, which rmsnorm supports)
    rmsnorm(x, x, w->rms_final_weight, dim);

    // classifier into logits
    quantize(&s->xq, x, dim);
    matmul(s->logits, &s->xq, w->wcls, dim, p->vocab_size);
    return s->logits;
}
482
+
483
+ // ----------------------------------------------------------------------------
484
+ // The Byte Pair Encoding (BPE) Tokenizer that translates strings <-> tokens
485
+
486
// A (string, id) pair. Arrays of TokenIndex are sorted by `str` so that a
// token can be looked up by its string via bsearch (see str_lookup).
typedef struct {
    char *str; // token text (points into Tokenizer.vocab, not owned)
    int id;    // token id in the vocabulary
} TokenIndex;
490
+
491
// The BPE tokenizer state, loaded from a tokenizer .bin file.
typedef struct {
    char** vocab;                   // vocab_size strings, one per token id
    float* vocab_scores;            // merge score per token, used to pick BPE merges
    TokenIndex *sorted_vocab;       // vocab sorted by string; built lazily in encode()
    int vocab_size;
    unsigned int max_token_length;  // length of the longest token string in vocab
    unsigned char byte_pieces[512]; // stores all single-byte strings: 256 entries of [byte, '\0']
} Tokenizer;
499
+
500
+ int compare_tokens(const void *a, const void *b) {
501
+ return strcmp(((TokenIndex*)a)->str, ((TokenIndex*)b)->str);
502
+ }
503
+
504
+ void build_tokenizer(Tokenizer* t, char* tokenizer_path, int vocab_size) {
505
+ // i should have written the vocab_size into the tokenizer file... sigh
506
+ t->vocab_size = vocab_size;
507
+ // malloc space to hold the scores and the strings
508
+ t->vocab = (char**)malloc(vocab_size * sizeof(char*));
509
+ t->vocab_scores = (float*)malloc(vocab_size * sizeof(float));
510
+ t->sorted_vocab = NULL; // initialized lazily
511
+ for (int i = 0; i < 256; i++) {
512
+ t->byte_pieces[i * 2] = (unsigned char)i;
513
+ t->byte_pieces[i * 2 + 1] = '\0';
514
+ }
515
+ // read in the file
516
+ FILE *file = fopen(tokenizer_path, "rb");
517
+ if (!file) { fprintf(stderr, "couldn't load %s\n", tokenizer_path); exit(EXIT_FAILURE); }
518
+ if (fread(&t->max_token_length, sizeof(int), 1, file) != 1) { fprintf(stderr, "failed read\n"); exit(EXIT_FAILURE); }
519
+ int len;
520
+ for (int i = 0; i < vocab_size; i++) {
521
+ if (fread(t->vocab_scores + i, sizeof(float), 1, file) != 1) { fprintf(stderr, "failed read\n"); exit(EXIT_FAILURE);}
522
+ if (fread(&len, sizeof(int), 1, file) != 1) { fprintf(stderr, "failed read\n"); exit(EXIT_FAILURE); }
523
+ t->vocab[i] = (char *)malloc(len + 1);
524
+ if (fread(t->vocab[i], len, 1, file) != 1) { fprintf(stderr, "failed read\n"); exit(EXIT_FAILURE); }
525
+ t->vocab[i][len] = '\0'; // add the string terminating token
526
+ }
527
+ fclose(file);
528
+ }
529
+
530
+ void free_tokenizer(Tokenizer* t) {
531
+ for (int i = 0; i < t->vocab_size; i++) { free(t->vocab[i]); }
532
+ free(t->vocab);
533
+ free(t->vocab_scores);
534
+ free(t->sorted_vocab);
535
+ }
536
+
537
+ char* decode(Tokenizer* t, int prev_token, int token) {
538
+ char *piece = t->vocab[token];
539
+ // following BOS (1) token, sentencepiece decoder strips any leading whitespace (see PR #89)
540
+ if (prev_token == 1 && piece[0] == ' ') { piece++; }
541
+ // careful, some tokens designate raw bytes, and look like e.g. '<0x01>'
542
+ // parse this and convert and return the actual byte
543
+ unsigned char byte_val;
544
+ if (sscanf(piece, "<0x%02hhX>", &byte_val) == 1) {
545
+ piece = (char*)t->byte_pieces + byte_val * 2;
546
+ }
547
+ return piece;
548
+ }
549
+
550
// Print a decoded piece, but suppress single raw bytes that are neither
// printable nor whitespace (control codes, backspace, etc.).
void safe_printf(char *piece) {
    if (piece == NULL || piece[0] == '\0') {
        return;
    }
    if (piece[1] == '\0') {
        // single-byte piece: only emit it if it is a safe character
        unsigned char byte_val = (unsigned char)piece[0];
        if (!isprint(byte_val) && !isspace(byte_val)) {
            return; // bad byte, don't print it
        }
    }
    printf("%s", piece);
}
563
+
564
+ int str_lookup(char *str, TokenIndex *sorted_vocab, int vocab_size) {
565
+ // efficiently find the perfect match for str in vocab, return its index or -1 if not found
566
+ TokenIndex tok = { .str = str }; // acts as the key to search for
567
+ TokenIndex *res = bsearch(&tok, sorted_vocab, vocab_size, sizeof(TokenIndex), compare_tokens);
568
+ return res != NULL ? res->id : -1;
569
+ }
570
+
571
// Encode the string text (input) into an upper-bound preallocated tokens[] array.
// bos != 0 means prepend the BOS token (=1), eos != 0 means append the EOS token (=2).
// On return *n_tokens holds the number of ids written; the caller must size
// tokens[] for at least strlen(text)+3 entries ('\0', optional BOS, optional EOS).
void encode(Tokenizer* t, char *text, int8_t bos, int8_t eos, int *tokens, int *n_tokens) {
    if (text == NULL) { fprintf(stderr, "cannot encode NULL text\n"); exit(EXIT_FAILURE); }

    if (t->sorted_vocab == NULL) {
        // lazily malloc and sort the vocabulary (kept for the Tokenizer's lifetime)
        t->sorted_vocab = malloc(t->vocab_size * sizeof(TokenIndex));
        for (int i = 0; i < t->vocab_size; i++) {
            t->sorted_vocab[i].str = t->vocab[i];
            t->sorted_vocab[i].id = i;
        }
        qsort(t->sorted_vocab, t->vocab_size, sizeof(TokenIndex), compare_tokens);
    }

    // create a temporary buffer that will store merge candidates of always two consecutive tokens
    // *2 for concat, +1 for null terminator +2 for UTF8 (in case max_token_length is 1)
    char* str_buffer = malloc((t->max_token_length*2 +1 +2) * sizeof(char));
    size_t str_len = 0;

    // start at 0 tokens
    *n_tokens = 0;

    // add optional BOS (=1) token, if desired
    if (bos) tokens[(*n_tokens)++] = 1;

    // add_dummy_prefix is true by default
    // so prepend a dummy prefix token to the input string, but only if text != ""
    // TODO: pretty sure this isn't correct in the general case but I don't have the
    // energy to read more of the sentencepiece code to figure out what it's doing
    if (text[0] != '\0') {
        int dummy_prefix = str_lookup(" ", t->sorted_vocab, t->vocab_size);
        tokens[(*n_tokens)++] = dummy_prefix;
    }

    // Okay UTF-8 time. This will get messy. Here is the reference from Wikipedia:
    // Code point <-> UTF-8 conversion
    // First code point  Last code point  Byte 1    Byte 2    Byte 3    Byte 4
    // U+0000            U+007F           0xxxxxxx
    // U+0080            U+07FF           110xxxxx  10xxxxxx
    // U+0800            U+FFFF           1110xxxx  10xxxxxx  10xxxxxx
    // U+10000           U+10FFFF         11110xxx  10xxxxxx  10xxxxxx  10xxxxxx

    // process the raw (UTF-8) byte sequence of the input string
    for (char *c = text; *c != '\0'; c++) {

        // reset buffer if the current byte is ASCII or a leading byte
        // 0xC0 is 11000000, so (*c & 0xC0) keeps the first 2 bits and zeros the rest
        // 0x80 is 10000000
        // in UTF-8, all continuation bytes start with "10" in first two bits
        // so in English this is: "if this byte is not a continuation byte"
        if ((*c & 0xC0) != 0x80) {
            // this byte must be either a leading byte (11...) or an ASCII char (0x...)
            // => reset our location, as we're starting a new UTF-8 codepoint
            str_len = 0;
        }

        // append the current byte to the buffer
        str_buffer[str_len++] = *c; // ++ is post-increment, incremented after this line
        str_buffer[str_len] = '\0';

        // while the next character is a continuation byte, continue appending
        // but if there are too many of them, just stop to avoid overruning str_buffer size.
        if ((*(c+1) & 0xC0) == 0x80 && str_len < 4) {
            continue;
        }

        // ok c+1 is not a continuation byte, so we've read in a full codepoint
        int id = str_lookup(str_buffer, t->sorted_vocab, t->vocab_size);

        if (id != -1) {
            // we found this codepoint in vocab, add it as a token
            tokens[(*n_tokens)++] = id;
        } else {
            // byte_fallback encoding: just encode each byte as a token
            // +3 is here because the first 3 vocab elements are <unk>, <s>, </s>
            // so the individual bytes only start at index 3
            for (int i=0; i < str_len; i++) {
                tokens[(*n_tokens)++] = (unsigned char)str_buffer[i] + 3;
            }
        }
        str_len = 0; // protect against a sequence of stray UTF8 continuation bytes
    }

    // merge the best consecutive pair each iteration, according the scores in vocab_scores
    while (1) {
        float best_score = -1e10;
        int best_id = -1;
        int best_idx = -1;

        for (int i=0; i < (*n_tokens-1); i++) {
            // check if we can merge the pair (tokens[i], tokens[i+1])
            sprintf(str_buffer, "%s%s", t->vocab[tokens[i]], t->vocab[tokens[i+1]]);
            int id = str_lookup(str_buffer, t->sorted_vocab, t->vocab_size);
            if (id != -1 && t->vocab_scores[id] > best_score) {
                // this merge pair exists in vocab! record its score and position
                best_score = t->vocab_scores[id];
                best_id = id;
                best_idx = i;
            }
        }

        if (best_idx == -1) {
            break; // we couldn't find any more pairs to merge, so we're done
        }

        // merge the consecutive pair (best_idx, best_idx+1) into new token best_id
        tokens[best_idx] = best_id;
        // delete token at position best_idx+1, shift the entire sequence back 1
        for (int i = best_idx+1; i < (*n_tokens-1); i++) {
            tokens[i] = tokens[i+1];
        }
        (*n_tokens)--; // token length decreased
    }

    // add optional EOS (=2) token, if desired
    if (eos) tokens[(*n_tokens)++] = 2;

    free(str_buffer);
}
691
+
692
+ // ----------------------------------------------------------------------------
693
+ // The Sampler, which takes logits and returns a sampled token
694
+ // sampling can be done in a few ways: greedy argmax, sampling, top-p sampling
695
+
696
// struct used when sorting probabilities during top-p sampling
typedef struct {
    float prob; // probability mass of this token
    int index;  // token id in the vocabulary
} ProbIndex;
700
+
701
// Sampling state and hyperparameters (see sample() for how they interact).
typedef struct {
    int vocab_size;
    ProbIndex* probindex;         // buffer used in top-p sampling, capacity vocab_size
    float temperature;            // 0.0 selects greedy argmax; otherwise logits are divided by it
    float topp;                   // nucleus threshold; values <= 0 or >= 1 disable top-p
    unsigned long long rng_state; // xorshift PRNG state, advanced by random_u32()
} Sampler;
708
+
709
// Greedy sampling: return the index of the largest of the first n
// probabilities. Ties resolve to the earliest index (strict > comparison).
int sample_argmax(float* probabilities, int n) {
    int best = 0;
    for (int i = 1; i < n; i++) {
        if (probabilities[i] > probabilities[best]) {
            best = i;
        }
    }
    return best;
}
721
+
722
// Sample an index from a categorical distribution (probabilities must sum to 1).
// coin is a uniform random number in [0, 1), usually from random_f32().
int sample_mult(float* probabilities, int n, float coin) {
    float cumulative = 0.0f;
    int i = 0;
    while (i < n) {
        cumulative += probabilities[i];
        if (coin < cumulative) {
            return i;
        }
        i++;
    }
    return n - 1; // fall back to the last index on rounding errors
}
734
+
735
+ int compare(const void* a, const void* b) {
736
+ ProbIndex* a_ = (ProbIndex*) a;
737
+ ProbIndex* b_ = (ProbIndex*) b;
738
+ if (a_->prob > b_->prob) return -1;
739
+ if (a_->prob < b_->prob) return 1;
740
+ return 0;
741
+ }
742
+
743
// top-p sampling (or "nucleus sampling") samples from the smallest set of
// tokens that exceed probability topp. This way we never sample tokens that
// have very low probabilities and are less likely to go "off the rails".
// coin is a random number in [0, 1), usually from random_f32().
// probindex is caller-provided scratch with capacity for n entries.
int sample_topp(float* probabilities, int n, float topp, ProbIndex* probindex, float coin) {

    int n0 = 0;
    // quicksort indices in descending order of probabilities
    // values smaller than (1 - topp) / (n - 1) cannot be part of the result
    // so for efficiency we crop these out as candidates before sorting
    const float cutoff = (1.0f - topp) / (n - 1);
    for (int i = 0; i < n; i++) {
        if (probabilities[i] >= cutoff) {
            probindex[n0].index = i;
            probindex[n0].prob = probabilities[i];
            n0++;
        }
    }
    qsort(probindex, n0, sizeof(ProbIndex), compare);

    // truncate the list where cumulative probability exceeds topp
    float cumulative_prob = 0.0f;
    int last_idx = n0 - 1; // in case of rounding errors consider all elements
    for (int i = 0; i < n0; i++) {
        cumulative_prob += probindex[i].prob;
        if (cumulative_prob > topp) {
            last_idx = i;
            break; // we've exceeded topp by including last_idx
        }
    }

    // sample from the truncated list (rescale coin by the retained mass)
    float r = coin * cumulative_prob;
    float cdf = 0.0f;
    for (int i = 0; i <= last_idx; i++) {
        cdf += probindex[i].prob;
        if (r < cdf) {
            return probindex[i].index;
        }
    }
    return probindex[last_idx].index; // in case of rounding errors
}
785
+
786
+ void build_sampler(Sampler* sampler, int vocab_size, float temperature, float topp, unsigned long long rng_seed) {
787
+ sampler->vocab_size = vocab_size;
788
+ sampler->temperature = temperature;
789
+ sampler->topp = topp;
790
+ sampler->rng_state = rng_seed;
791
+ // buffer only used with nucleus sampling; may not need but it's ~small
792
+ sampler->probindex = malloc(sampler->vocab_size * sizeof(ProbIndex));
793
+ }
794
+
795
+ void free_sampler(Sampler* sampler) {
796
+ free(sampler->probindex);
797
+ }
798
+
799
// xorshift* PRNG: https://en.wikipedia.org/wiki/Xorshift#xorshift.2A
// Advances *state and returns the high 32 bits of the scrambled value.
unsigned int random_u32(unsigned long long *state) {
    unsigned long long x = *state;
    x ^= x >> 12;
    x ^= x << 25;
    x ^= x >> 27;
    *state = x;
    return (unsigned int)((x * 0x2545F4914F6CDD1Dull) >> 32);
}
806
// Uniform random float32 in [0, 1): 24 random mantissa bits scaled by 2^-24.
float random_f32(unsigned long long *state) {
    unsigned int bits = random_u32(state) >> 8;
    return bits / 16777216.0f;
}
809
+
810
+ int sample(Sampler* sampler, float* logits) {
811
+ // sample the token given the logits and some hyperparameters
812
+ int next;
813
+ if (sampler->temperature == 0.0f) {
814
+ // greedy argmax sampling: take the token with the highest probability
815
+ next = sample_argmax(logits, sampler->vocab_size);
816
+ } else {
817
+ // apply the temperature to the logits
818
+ for (int q=0; q<sampler->vocab_size; q++) { logits[q] /= sampler->temperature; }
819
+ // apply softmax to the logits to get the probabilities for next token
820
+ softmax(logits, sampler->vocab_size);
821
+ // flip a (float) coin (this is our source of entropy for sampling)
822
+ float coin = random_f32(&sampler->rng_state);
823
+ // we sample from this distribution to get the next token
824
+ if (sampler->topp <= 0 || sampler->topp >= 1) {
825
+ // simply sample from the predicted probability distribution
826
+ next = sample_mult(logits, sampler->vocab_size, coin);
827
+ } else {
828
+ // top-p (nucleus) sampling, clamping the least likely tokens to zero
829
+ next = sample_topp(logits, sampler->vocab_size, sampler->topp, sampler->probindex, coin);
830
+ }
831
+ }
832
+ return next;
833
+ }
834
+
835
+ // ----------------------------------------------------------------------------
836
+ // utilities: time
837
+
838
+ long time_in_ms() {
839
+ // return time in milliseconds, for benchmarking the model speed
840
+ struct timespec time;
841
+ clock_gettime(CLOCK_REALTIME, &time);
842
+ return time.tv_sec * 1000 + time.tv_nsec / 1000000;
843
+ }
844
+
845
+ // ----------------------------------------------------------------------------
846
+ // generation loop
847
+
848
// Autoregressive text generation: encodes `prompt` (NULL is treated as ""),
// runs the transformer for up to `steps` positions while printing each decoded
// piece, stops early on a BOS (=1) token, and reports achieved tokens/second
// on stderr.
void generate(Transformer *transformer, Tokenizer *tokenizer, Sampler *sampler, char *prompt, int steps) {
    char *empty_prompt = "";
    if (prompt == NULL) { prompt = empty_prompt; }

    // encode the (string) prompt into tokens sequence
    int num_prompt_tokens = 0;
    int* prompt_tokens = (int*)malloc((strlen(prompt)+3) * sizeof(int)); // +3 for '\0', ?BOS, ?EOS
    encode(tokenizer, prompt, 1, 0, prompt_tokens, &num_prompt_tokens);
    if (num_prompt_tokens < 1) {
        fprintf(stderr, "something is wrong, expected at least 1 prompt token\n");
        exit(EXIT_FAILURE);
    }

    // start the main loop
    long start = 0;  // used to time our code, only initialized after first iteration
    int next;        // will store the next token in the sequence
    int token = prompt_tokens[0]; // kick off with the first token in the prompt
    int pos = 0;     // position in the sequence
    while (pos < steps) {

        // forward the transformer to get logits for the next token
        float* logits = forward(transformer, token, pos);

        // advance the state state machine
        if (pos < num_prompt_tokens - 1) {
            // if we are still processing the input prompt, force the next prompt token
            next = prompt_tokens[pos + 1];
        } else {
            // otherwise sample the next token from the logits
            next = sample(sampler, logits);
        }
        pos++;

        // data-dependent terminating condition: the BOS (=1) token delimits sequences
        if (next == 1) { break; }

        // print the token as string, decode it with the Tokenizer object
        char* piece = decode(tokenizer, token, next);
        safe_printf(piece); // same as printf("%s", piece), but skips "unsafe" bytes
        fflush(stdout);
        token = next;

        // init the timer here because the first iteration can be slower
        if (start == 0) { start = time_in_ms(); }
    }
    printf("\n");

    // report achieved tok/s (pos-1 because the timer starts after first iteration)
    if (pos > 1) {
        long end = time_in_ms();
        fprintf(stderr, "achieved tok/s: %f\n", (pos-1) / (double)(end-start)*1000);
    }

    free(prompt_tokens);
}
903
+
904
// Print `guide` as a prompt, then read one line from stdin into buffer
// (at most bufsize-1 chars), stripping the trailing newline if present.
// On EOF or read error the buffer is left untouched.
void read_stdin(const char* guide, char* buffer, size_t bufsize) {
    printf("%s", guide);
    if (fgets(buffer, bufsize, stdin) == NULL) {
        return;
    }
    size_t len = strlen(buffer);
    if (len > 0 && buffer[len - 1] == '\n') {
        buffer[len - 1] = '\0'; // strip newline
    }
}
914
+
915
+ // ----------------------------------------------------------------------------
916
+ // chat loop
917
+ // I manually inspected the tokens for a few chat conversations compared to
918
+ // python reference and that seemed ok, but this was not thoroughly tested and
919
+ // is not safely implemented, it's more a proof of concept atm.
920
+
921
// Interactive chat loop rendering turns in the Llama 2 Chat schema
// ([INST] / <<SYS>> templates). The EOS (=2) token hands the turn back to
// the user. NOTE(review): the fixed-size buffers below are filled with
// strcpy/sprintf; inputs approaching the buffer sizes can overflow — this is
// a proof of concept, not hardened code.
void chat(Transformer *transformer, Tokenizer *tokenizer, Sampler *sampler,
          char *cli_user_prompt, char *cli_system_prompt, int steps) {

    // buffers for reading the system prompt and user prompt from stdin
    // you'll notice they are soomewhat haphazardly and unsafely set atm
    char system_prompt[512];
    char user_prompt[512];
    char rendered_prompt[1152];
    int num_prompt_tokens = 0;
    int* prompt_tokens = (int*)malloc(1152 * sizeof(int));
    int user_idx;

    // start the main loop
    int8_t user_turn = 1; // user starts
    int next;             // will store the next token in the sequence
    int token;            // stores the current token to feed into the transformer
    int prev_token;       // NOTE(review): declared but never used
    int pos = 0;          // position in the sequence
    while (pos < steps) {

        // when it is the user's turn to contribute tokens to the dialog...
        if (user_turn) {
            // get the (optional) system prompt at position 0
            if (pos == 0) {
                // at position 0, the user can also contribute a system prompt
                if (cli_system_prompt == NULL) {
                    // system prompt was not passed in, attempt to get it from stdin
                    read_stdin("Enter system prompt (optional): ", system_prompt, sizeof(system_prompt));
                } else {
                    // system prompt was passed in, use it
                    strcpy(system_prompt, cli_system_prompt);
                }
            }
            // get the user prompt
            if (pos == 0 && cli_user_prompt != NULL) {
                // user prompt for position 0 was passed in, use it
                strcpy(user_prompt, cli_user_prompt);
            } else {
                // otherwise get user prompt from stdin
                read_stdin("User: ", user_prompt, sizeof(user_prompt));
            }
            // render user/system prompts into the Llama 2 Chat schema
            if (pos == 0 && system_prompt[0] != '\0') {
                char system_template[] = "[INST] <<SYS>>\n%s\n<</SYS>>\n\n%s [/INST]";
                sprintf(rendered_prompt, system_template, system_prompt, user_prompt);
            } else {
                char user_template[] = "[INST] %s [/INST]";
                sprintf(rendered_prompt, user_template, user_prompt);
            }
            // encode the rendered prompt into tokens
            encode(tokenizer, rendered_prompt, 1, 0, prompt_tokens, &num_prompt_tokens);
            user_idx = 0; // reset the user index
            user_turn = 0;
            printf("Assistant: ");
        }

        // determine the token to pass into the transformer next
        if (user_idx < num_prompt_tokens) {
            // if we are still processing the input prompt, force the next prompt token
            token = prompt_tokens[user_idx++];
        } else {
            // otherwise use the next token sampled from previous turn
            token = next;
        }
        // EOS (=2) token ends the Assistant turn
        if (token == 2) { user_turn = 1; }

        // forward the transformer to get logits for the next token
        float* logits = forward(transformer, token, pos);
        next = sample(sampler, logits);
        pos++;

        if (user_idx >= num_prompt_tokens && next != 2) {
            // the Assistant is responding, so print its output
            char* piece = decode(tokenizer, token, next);
            safe_printf(piece); // same as printf("%s", piece), but skips "unsafe" bytes
            fflush(stdout);
        }
        if (next == 2) { printf("\n"); }
    }
    printf("\n");
    free(prompt_tokens);
}
1004
+
1005
+
1006
+ // ----------------------------------------------------------------------------
1007
+ // CLI, include only if not testing
1008
+ #ifndef TESTING
1009
+
1010
// Print CLI usage to stderr and terminate with a failure exit code.
void error_usage() {
    static const char *usage_lines[] = {
        "Usage: run <checkpoint> [options]\n",
        "Example: run model.bin -n 256 -i \"Once upon a time\"\n",
        "Options:\n",
        " -t <float> temperature in [0,inf], default 1.0\n",
        " -p <float> p value in top-p (nucleus) sampling in [0,1] default 0.9\n",
        " -s <int> random seed, default time(NULL)\n",
        " -n <int> number of steps to run for, default 256. 0 = max_seq_len\n",
        " -i <string> input prompt\n",
        " -z <string> optional path to custom tokenizer\n",
        " -m <string> mode: generate|chat, default: generate\n",
        " -y <string> (optional) system prompt in chat mode\n",
    };
    for (size_t i = 0; i < sizeof(usage_lines) / sizeof(usage_lines[0]); i++) {
        fprintf(stderr, "%s", usage_lines[i]);
    }
    exit(EXIT_FAILURE);
}
1024
+
1025
+ int main(int argc, char *argv[]) {
1026
+
1027
+ // default parameters
1028
+ char *checkpoint_path = NULL; // e.g. out/model.bin
1029
+ char *tokenizer_path = "tokenizer.bin";
1030
+ float temperature = 1.0f; // 0.0 = greedy deterministic. 1.0 = original. don't set higher
1031
+ float topp = 0.9f; // top-p in nucleus sampling. 1.0 = off. 0.9 works well, but slower
1032
+ int steps = 256; // number of steps to run for
1033
+ char *prompt = NULL; // prompt string
1034
+ unsigned long long rng_seed = 0; // seed rng with time by default
1035
+ char *mode = "generate"; // generate|chat
1036
+ char *system_prompt = NULL; // the (optional) system prompt to use in chat mode
1037
+
1038
+ // poor man's C argparse so we can override the defaults above from the command line
1039
+ if (argc >= 2) { checkpoint_path = argv[1]; } else { error_usage(); }
1040
+ for (int i = 2; i < argc; i+=2) {
1041
+ // do some basic validation
1042
+ if (i + 1 >= argc) { error_usage(); } // must have arg after flag
1043
+ if (argv[i][0] != '-') { error_usage(); } // must start with dash
1044
+ if (strlen(argv[i]) != 2) { error_usage(); } // must be -x (one dash, one letter)
1045
+ // read in the args
1046
+ if (argv[i][1] == 't') { temperature = atof(argv[i + 1]); }
1047
+ else if (argv[i][1] == 'p') { topp = atof(argv[i + 1]); }
1048
+ else if (argv[i][1] == 's') { rng_seed = atoi(argv[i + 1]); }
1049
+ else if (argv[i][1] == 'n') { steps = atoi(argv[i + 1]); }
1050
+ else if (argv[i][1] == 'i') { prompt = argv[i + 1]; }
1051
+ else if (argv[i][1] == 'z') { tokenizer_path = argv[i + 1]; }
1052
+ else if (argv[i][1] == 'm') { mode = argv[i + 1]; }
1053
+ else if (argv[i][1] == 'y') { system_prompt = argv[i + 1]; }
1054
+ else { error_usage(); }
1055
+ }
1056
+
1057
+ // parameter validation/overrides
1058
+ if (rng_seed <= 0) rng_seed = (unsigned int)time(NULL);
1059
+ if (temperature < 0.0) temperature = 0.0;
1060
+ if (topp < 0.0 || 1.0 < topp) topp = 0.9;
1061
+ if (steps < 0) steps = 0;
1062
+
1063
+ // build the Transformer via the model .bin file
1064
+ Transformer transformer;
1065
+ build_transformer(&transformer, checkpoint_path);
1066
+ if (steps == 0 || steps > transformer.config.seq_len) steps = transformer.config.seq_len; // override to ~max length
1067
+
1068
+ // build the Tokenizer via the tokenizer .bin file
1069
+ Tokenizer tokenizer;
1070
+ build_tokenizer(&tokenizer, tokenizer_path, transformer.config.vocab_size);
1071
+
1072
+ // build the Sampler
1073
+ Sampler sampler;
1074
+ build_sampler(&sampler, transformer.config.vocab_size, temperature, topp, rng_seed);
1075
+
1076
+ // run!
1077
+ if (strcmp(mode, "generate") == 0) {
1078
+ generate(&transformer, &tokenizer, &sampler, prompt, steps);
1079
+ } else if (strcmp(mode, "chat") == 0) {
1080
+ chat(&transformer, &tokenizer, &sampler, prompt, system_prompt, steps);
1081
+ } else {
1082
+ fprintf(stderr, "unknown mode: %s\n", mode);
1083
+ error_usage();
1084
+ }
1085
+
1086
+ // memory and file handles cleanup
1087
+ free_sampler(&sampler);
1088
+ free_tokenizer(&tokenizer);
1089
+ free_transformer(&transformer);
1090
+ return 0;
1091
+ }
1092
+ #endif
Version 2/42M_base/test.c ADDED
@@ -0,0 +1,84 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #define TESTING
2
+ #include "run.c"
3
+
4
// Abort the test run with a diagnostic message if a != b.
void assert_eq(int a, int b) {
    if (a == b) {
        return;
    }
    printf("Assertion failed: %d != %d\n", a, b);
    exit(EXIT_FAILURE);
}
10
+
11
+ void test_prompt_encoding(Tokenizer* tokenizer, char* prompt, int* expected_tokens, int num_expected_tokens) {
12
+ // encode
13
+ int* prompt_tokens = (int*)malloc((strlen(prompt)+3) * sizeof(int));
14
+ int num_prompt_tokens = 0; // the total number of prompt tokens
15
+ encode(tokenizer, prompt, 1, 0, prompt_tokens, &num_prompt_tokens);
16
+
17
+ #if VERBOSITY == 1
18
+ // print maybe
19
+ printf("expected tokens:\n");
20
+ for (int i = 0; i < num_expected_tokens; i++) printf("%d ", expected_tokens[i]);
21
+ printf("\n");
22
+ printf("actual tokens:\n");
23
+ for (int i = 0; i < num_prompt_tokens; i++) printf("%d ", prompt_tokens[i]);
24
+ printf("\n");
25
+ #endif
26
+
27
+ // verify
28
+ assert_eq(num_prompt_tokens, num_expected_tokens);
29
+ for (int i = 0; i < num_prompt_tokens; i++) {
30
+ assert_eq(prompt_tokens[i], expected_tokens[i]);
31
+ }
32
+
33
+ #if VERBOSITY == 1
34
+ printf("OK\n");
35
+ printf("---\n");
36
+ #endif
37
+ free(prompt_tokens);
38
+ }
39
+
40
// Tokenizer regression suite: encodes fixed prompts with tokenizer.bin
// (vocab_size 32000) and checks the output against reference token ids.
void test_prompt_encodings() {
    // let's verify that the Tokenizer works as expected

    char *tokenizer_path = "tokenizer.bin";
    int vocab_size = 32000;
    Tokenizer tokenizer;
    build_tokenizer(&tokenizer, tokenizer_path, vocab_size);

    // test 0 (test the empty string) (I added this as a simple case)
    // expected: just the BOS token
    char *prompt0 = "";
    int expected_tokens0[] = {1};
    test_prompt_encoding(&tokenizer, prompt0, expected_tokens0, sizeof(expected_tokens0) / sizeof(int));

    // the tests below are taken from the Meta Llama 2 repo example code
    // https://github.com/facebookresearch/llama/blob/main/example_text_completion.py
    // and the expected tokens come from me breaking in the debugger in Python

    // test 1
    char *prompt = "I believe the meaning of life is";
    int expected_tokens[] = {1, 306, 4658, 278, 6593, 310, 2834, 338};
    test_prompt_encoding(&tokenizer, prompt, expected_tokens, sizeof(expected_tokens) / sizeof(int));

    // test 2
    char* prompt2 = "Simply put, the theory of relativity states that ";
    int expected_tokens2[] = {1, 3439, 17632, 1925, 29892, 278, 6368, 310, 14215, 537, 5922, 393, 29871};
    test_prompt_encoding(&tokenizer, prompt2, expected_tokens2, sizeof(expected_tokens2) / sizeof(int));

    // test 3
    char* prompt3 = "A brief message congratulating the team on the launch:\n\n        Hi everyone,\n\n        I just ";
    int expected_tokens3[] = {1, 319, 11473, 2643, 378, 629, 271, 18099, 278, 3815, 373, 278, 6826, 29901, 13, 13, 4706, 6324, 14332, 29892, 13, 13, 4706, 306, 925, 29871};
    test_prompt_encoding(&tokenizer, prompt3, expected_tokens3, sizeof(expected_tokens3) / sizeof(int));

    // test 4
    char* prompt4 = "Translate English to French:\n\n        sea otter => loutre de mer\n        peppermint => menthe poivrée\n        plush girafe => girafe peluche\n        cheese =>";
    int expected_tokens4[] = {1, 4103, 9632, 4223, 304, 5176, 29901, 13, 13, 4706, 7205, 4932, 357, 1149, 301, 449, 276, 316, 2778, 13, 4706, 1236, 407, 837, 524, 1149, 6042, 354, 772, 440, 29878, 1318, 13, 4706, 715, 1878, 330, 3055, 1725, 1149, 330, 3055, 1725, 4639, 28754, 13, 4706, 923, 968, 1149};
    test_prompt_encoding(&tokenizer, prompt4, expected_tokens4, sizeof(expected_tokens4) / sizeof(int));

    // memory and file handles cleanup
    free_tokenizer(&tokenizer);
}
80
+
81
// Test entry point: run the tokenizer encoding suite and report success.
int main(int argc, char *argv[]) {
    (void)argc; // CLI arguments are unused
    (void)argv;
    test_prompt_encodings();
    printf("ALL OK\n");
    return 0;
}
Version 2/42M_base/tokenizer.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3ec2f99b3631bb74d9a839ede430c8b9fc192f06ca470dbca7585c33077908af
3
+ size 259440
Version 2/42M_base/win.c ADDED
@@ -0,0 +1,180 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #include "win.h"
2
+ #include <errno.h>
3
+ #include <io.h>
4
+
5
+ #ifndef FILE_MAP_EXECUTE
6
+ #define FILE_MAP_EXECUTE 0x0020
7
+ #endif /* FILE_MAP_EXECUTE */
8
+
9
// Translate a Win32 error code (from GetLastError) into an approximate POSIX
// errno value. Returns 0 on success; unmapped codes fall back to `deferr`.
// Previously this returned the raw Win32 code (the //TODO in the original),
// which produced meaningless errno values for callers.
static int __map_mman_error(const uint32_t err, const int deferr)
{
    if (err == 0)
        return 0;
    switch (err) {
        case 2:   /* ERROR_FILE_NOT_FOUND */    return ENOENT;
        case 3:   /* ERROR_PATH_NOT_FOUND */    return ENOENT;
        case 5:   /* ERROR_ACCESS_DENIED */     return EACCES;
        case 6:   /* ERROR_INVALID_HANDLE */    return EBADF;
        case 8:   /* ERROR_NOT_ENOUGH_MEMORY */ return ENOMEM;
        case 87:  /* ERROR_INVALID_PARAMETER */ return EINVAL;
        case 112: /* ERROR_DISK_FULL */         return ENOSPC;
        default:                                return deferr;
    }
}
16
+
17
+ static uint32_t __map_mmap_prot_page(const int prot)
18
+ {
19
+ uint32_t protect = 0;
20
+
21
+ if (prot == PROT_NONE)
22
+ return protect;
23
+
24
+ if ((prot & PROT_EXEC) != 0)
25
+ {
26
+ protect = ((prot & PROT_WRITE) != 0) ?
27
+ PAGE_EXECUTE_READWRITE : PAGE_EXECUTE_READ;
28
+ }
29
+ else
30
+ {
31
+ protect = ((prot & PROT_WRITE) != 0) ?
32
+ PAGE_READWRITE : PAGE_READONLY;
33
+ }
34
+
35
+ return protect;
36
+ }
37
+
38
+ static uint32_t __map_mmap_prot_file(const int prot)
39
+ {
40
+ uint32_t desiredAccess = 0;
41
+
42
+ if (prot == PROT_NONE)
43
+ return desiredAccess;
44
+
45
+ if ((prot & PROT_READ) != 0)
46
+ desiredAccess |= FILE_MAP_READ;
47
+ if ((prot & PROT_WRITE) != 0)
48
+ desiredAccess |= FILE_MAP_WRITE;
49
+ if ((prot & PROT_EXEC) != 0)
50
+ desiredAccess |= FILE_MAP_EXECUTE;
51
+
52
+ return desiredAccess;
53
+ }
54
+
55
// POSIX mmap emulation on top of CreateFileMapping/MapViewOfFile.
// Limitations (enforced below): len must be nonzero, MAP_FIXED is not
// supported, and PROT_EXEC-only mappings are rejected.
// NOTE(review): `addr` is accepted but ignored (Windows chooses the address);
// presumably `off` must meet MapViewOfFile's alignment requirements — not
// validated here, confirm against the Win32 docs for non-zero offsets.
void* mmap(void *addr, size_t len, int prot, int flags, int fildes, ssize_t off)
{
    HANDLE fm, h;
    void * map = MAP_FAILED;

#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable: 4293) // shift count warning for the 32-bit splits below
#endif

    // split the 64-bit offset and max size into the hi/lo 32-bit halves
    // that the Win32 mapping APIs take
    const uint32_t dwFileOffsetLow = (uint32_t)(off & 0xFFFFFFFFL);
    const uint32_t dwFileOffsetHigh = (uint32_t)((off >> 32) & 0xFFFFFFFFL);
    const uint32_t protect = __map_mmap_prot_page(prot);
    const uint32_t desiredAccess = __map_mmap_prot_file(prot);

    const ssize_t maxSize = off + (ssize_t)len;

    const uint32_t dwMaxSizeLow = (uint32_t)(maxSize & 0xFFFFFFFFL);
    const uint32_t dwMaxSizeHigh = (uint32_t)((maxSize >> 32) & 0xFFFFFFFFL);

#ifdef _MSC_VER
#pragma warning(pop)
#endif

    errno = 0;

    if (len == 0
        /* Unsupported flag combinations */
        || (flags & MAP_FIXED) != 0
        /* Unsupported protection combinations */
        || prot == PROT_EXEC)
    {
        errno = EINVAL;
        return MAP_FAILED;
    }

    // anonymous mappings use INVALID_HANDLE_VALUE (pagefile-backed);
    // file-backed mappings convert the CRT descriptor to an OS handle
    h = ((flags & MAP_ANONYMOUS) == 0) ?
        (HANDLE)_get_osfhandle(fildes) : INVALID_HANDLE_VALUE;

    if ((flags & MAP_ANONYMOUS) == 0 && h == INVALID_HANDLE_VALUE)
    {
        errno = EBADF;
        return MAP_FAILED;
    }

    fm = CreateFileMapping(h, NULL, protect, dwMaxSizeHigh, dwMaxSizeLow, NULL);

    if (fm == NULL)
    {
        errno = __map_mman_error(GetLastError(), EPERM);
        return MAP_FAILED;
    }

    map = MapViewOfFile(fm, desiredAccess, dwFileOffsetHigh, dwFileOffsetLow, len);

    // closing the mapping handle is safe: the mapped view keeps the
    // underlying section alive until UnmapViewOfFile
    CloseHandle(fm);

    if (map == NULL)
    {
        errno = __map_mman_error(GetLastError(), EPERM);
        return MAP_FAILED;
    }

    return map;
}
120
+
121
+ int munmap(void *addr, size_t len)
122
+ {
123
+ if (UnmapViewOfFile(addr))
124
+ return 0;
125
+
126
+ errno = __map_mman_error(GetLastError(), EPERM);
127
+
128
+ return -1;
129
+ }
130
+
131
+ int mprotect(void *addr, size_t len, int prot)
132
+ {
133
+ uint32_t newProtect = __map_mmap_prot_page(prot);
134
+ uint32_t oldProtect = 0;
135
+
136
+ if (VirtualProtect(addr, len, newProtect, &oldProtect))
137
+ return 0;
138
+
139
+ errno = __map_mman_error(GetLastError(), EPERM);
140
+
141
+ return -1;
142
+ }
143
+
144
+ int msync(void *addr, size_t len, int flags)
145
+ {
146
+ if (FlushViewOfFile(addr, len))
147
+ return 0;
148
+
149
+ errno = __map_mman_error(GetLastError(), EPERM);
150
+
151
+ return -1;
152
+ }
153
+
154
+ int mlock(const void *addr, size_t len)
155
+ {
156
+ if (VirtualLock((LPVOID)addr, len))
157
+ return 0;
158
+
159
+ errno = __map_mman_error(GetLastError(), EPERM);
160
+
161
+ return -1;
162
+ }
163
+
164
+ int munlock(const void *addr, size_t len)
165
+ {
166
+ if (VirtualUnlock((LPVOID)addr, len))
167
+ return 0;
168
+
169
+ errno = __map_mman_error(GetLastError(), EPERM);
170
+
171
+ return -1;
172
+ }
173
+
174
// Portable clock_gettime function for Windows
// NOTE(review): GetTickCount() returns milliseconds since boot as a 32-bit
// value, so this is NOT wall-clock time (despite callers possibly passing
// CLOCK_REALTIME) and it wraps around after ~49.7 days of uptime. Adequate
// for timing short intervals; GetTickCount64 would avoid the wrap but
// requires Vista+ while win.h targets _WIN32_WINNT 0x0501 (XP) — confirm
// before changing. `clk_id` is ignored.
int clock_gettime(int clk_id, struct timespec *tp) {
    uint32_t ticks = GetTickCount();
    tp->tv_sec = ticks / 1000;
    tp->tv_nsec = (ticks % 1000) * 1000000;
    return 0;
}
Version 2/42M_base/win.h ADDED
@@ -0,0 +1,69 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#ifndef _WIN_H_
#define _WIN_H_

#define WIN32_LEAN_AND_MEAN // Exclude rarely-used stuff from Windows headers
#include <windows.h>
#include <time.h>
#include <stdint.h>

// NOTE(review): ssize_t is introduced as a textual macro rather than a
// typedef, so it rewrites every later use of the identifier in this
// translation unit; works here, but fragile if a system header also
// declares ssize_t.
#define ssize_t int64_t
#define ftell _ftelli64 // 64-bit ftell so >2GB checkpoint files report correct sizes

// Below code is originally from mman-win32
//
/*
 * sys/mman.h
 * mman-win32
 */

#ifndef _WIN32_WINNT // Allow use of features specific to Windows XP or later.
#define _WIN32_WINNT 0x0501 // Change this to the appropriate value to target other versions of Windows.
#endif

/* All the headers include this file. */
#ifndef _MSC_VER
#include <_mingw.h>
#endif

#include <sys/types.h>

#ifdef __cplusplus
extern "C" {
#endif

/* POSIX protection bits accepted by mmap/mprotect in win.c */
#define PROT_NONE 0
#define PROT_READ 1
#define PROT_WRITE 2
#define PROT_EXEC 4

/* POSIX mapping flags; MAP_FIXED is declared but rejected by the mmap shim */
#define MAP_FILE 0
#define MAP_SHARED 1
#define MAP_PRIVATE 2
#define MAP_TYPE 0xf
#define MAP_FIXED 0x10
#define MAP_ANONYMOUS 0x20
#define MAP_ANON MAP_ANONYMOUS

#define MAP_FAILED ((void *)-1)

/* Flags for msync. (accepted but ignored by the msync shim) */
#define MS_ASYNC 1
#define MS_SYNC 2
#define MS_INVALIDATE 4

/* Flags for portable clock_gettime call. */
#define CLOCK_REALTIME 0

/* mman-win32 emulation layer implemented in win.c */
void* mmap(void *addr, size_t len, int prot, int flags, int fildes, ssize_t off);
int munmap(void *addr, size_t len);
int mprotect(void *addr, size_t len, int prot);
int msync(void *addr, size_t len, int flags);
int mlock(const void *addr, size_t len);
int munlock(const void *addr, size_t len);
/* millisecond-resolution stand-in for POSIX clock_gettime (see win.c caveats) */
int clock_gettime(int clk_id, struct timespec *tp);

#ifdef __cplusplus
};
#endif

#endif  /* _WIN_H_ */
Version 2/42M_finetuned/Makefile ADDED
@@ -0,0 +1,76 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # choose your compiler, e.g. gcc/clang
2
+ # example override to clang: make run CC=clang
3
+ CC = gcc
4
+
5
+ # the most basic way of building that is most likely to work on most systems
6
+ .PHONY: run
7
+ run: run.c
8
+ $(CC) -O3 -o run run.c -lm
9
+ $(CC) -O3 -o runq runq.c -lm
10
+
11
+ # useful for a debug build, can then e.g. analyze with valgrind, example:
12
+ # $ valgrind --leak-check=full ./run out/model.bin -n 3
13
+ rundebug: run.c
14
+ $(CC) -g -o run run.c -lm
15
+ $(CC) -g -o runq runq.c -lm
16
+
17
+ # https://gcc.gnu.org/onlinedocs/gcc/Optimize-Options.html
18
+ # https://simonbyrne.github.io/notes/fastmath/
19
+ # -Ofast enables all -O3 optimizations.
20
+ # Disregards strict standards compliance.
21
+ # It also enables optimizations that are not valid for all standard-compliant programs.
22
+ # It turns on -ffast-math, -fallow-store-data-races and the Fortran-specific
23
+ # -fstack-arrays, unless -fmax-stack-var-size is specified, and -fno-protect-parens.
24
+ # It turns off -fsemantic-interposition.
25
+ # In our specific application this is *probably* okay to use
26
+ .PHONY: runfast
27
+ runfast: run.c
28
+ $(CC) -Ofast -o run run.c -lm
29
+ $(CC) -Ofast -o runq runq.c -lm
30
+
31
+ # additionally compiles with OpenMP, allowing multithreaded runs
32
+ # make sure to also enable multiple threads when running, e.g.:
33
+ # OMP_NUM_THREADS=4 ./run out/model.bin
34
+ .PHONY: runomp
35
+ runomp: run.c
36
+ $(CC) -Ofast -fopenmp -march=native run.c -lm -o run
37
+ $(CC) -Ofast -fopenmp -march=native runq.c -lm -o runq
38
+
39
+ .PHONY: win64
40
+ win64:
41
+ x86_64-w64-mingw32-gcc -Ofast -D_WIN32 -o run.exe -I. run.c win.c
42
+ x86_64-w64-mingw32-gcc -Ofast -D_WIN32 -o runq.exe -I. runq.c win.c
43
+
44
+ # compiles with gnu11 standard flags for amazon linux, coreos, etc. compatibility
45
+ .PHONY: rungnu
46
+ rungnu:
47
+ $(CC) -Ofast -std=gnu11 -o run run.c -lm
48
+ $(CC) -Ofast -std=gnu11 -o runq runq.c -lm
49
+
50
+ .PHONY: runompgnu
51
+ runompgnu:
52
+ $(CC) -Ofast -fopenmp -std=gnu11 run.c -lm -o run
53
+ $(CC) -Ofast -fopenmp -std=gnu11 runq.c -lm -o runq
54
+
55
+ # run all tests
56
+ .PHONY: test
57
+ test:
58
+ pytest
59
+
60
+ # run only tests for run.c C implementation (is a bit faster if only C code changed)
61
+ .PHONY: testc
62
+ testc:
63
+ pytest -k runc
64
+
65
+ # run the C tests, without touching pytest / python
66
+ # to increase verbosity level run e.g. as `make testcc VERBOSITY=1`
67
+ VERBOSITY ?= 0
68
+ .PHONY: testcc
69
+ testcc:
70
+ $(CC) -DVERBOSITY=$(VERBOSITY) -O3 -o testc test.c -lm
71
+ ./testc
72
+
73
+ .PHONY: clean
74
+ clean:
75
+ rm -f run
76
+ rm -f runq
Version 2/42M_finetuned/ckpt.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a95314831a58e009e0c826cae358a261be6d0a04d2af116c45cfd1bbf087b612
3
+ size 426623328
Version 2/42M_finetuned/model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:354cc50836719816dffacdb8cc6c77a3569497a18dedc06b03a8cf2a730ac5bf
3
+ size 142444572
Version 2/42M_finetuned/run.c ADDED
@@ -0,0 +1,973 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Inference for Llama-2 Transformer model in pure C */
2
+
3
+ #include <stdio.h>
4
+ #include <stdlib.h>
5
+ #include <ctype.h>
6
+ #include <time.h>
7
+ #include <math.h>
8
+ #include <string.h>
9
+ #include <fcntl.h>
10
+ #if defined _WIN32
11
+ #include "win.h"
12
+ #else
13
+ #include <unistd.h>
14
+ #include <sys/mman.h>
15
+ #endif
16
+ // ----------------------------------------------------------------------------
17
+ // Transformer model
18
+
19
typedef struct {
    int dim; // transformer dimension
    int hidden_dim; // for ffn layers
    int n_layers; // number of layers
    int n_heads; // number of query heads
    int n_kv_heads; // number of key/value heads (can be < query heads because of multiquery)
    int vocab_size; // vocabulary size; stored negative in the checkpoint to signal unshared classifier weights (see read_checkpoint)
    int seq_len; // max sequence length
} Config;

// Pointers into the memory-mapped checkpoint; nothing here is heap-owned.
// Laid out by memory_map_weights in exactly this order.
typedef struct {
    // token embedding table
    float* token_embedding_table; // (vocab_size, dim)
    // weights for rmsnorms
    float* rms_att_weight; // (layer, dim) rmsnorm weights
    float* rms_ffn_weight; // (layer, dim)
    // weights for matmuls. note dim == n_heads * head_size
    float* wq; // (layer, dim, n_heads * head_size)
    float* wk; // (layer, dim, n_kv_heads * head_size)
    float* wv; // (layer, dim, n_kv_heads * head_size)
    float* wo; // (layer, n_heads * head_size, dim)
    // weights for ffn
    float* w1; // (layer, hidden_dim, dim)
    float* w2; // (layer, dim, hidden_dim)
    float* w3; // (layer, hidden_dim, dim)
    // final rmsnorm
    float* rms_final_weight; // (dim,)
    // (optional) classifier weights for the logits, on the last layer
    float* wcls; // aliases token_embedding_table when weights are shared
} TransformerWeights;

// Scratch buffers for one forward pass; allocated by malloc_run_state.
typedef struct {
    // current wave of activations
    float *x; // activation at current time stamp (dim,)
    float *xb; // same, but inside a residual branch (dim,)
    float *xb2; // an additional buffer just for convenience (dim,)
    float *hb; // buffer for hidden dimension in the ffn (hidden_dim,)
    float *hb2; // buffer for hidden dimension in the ffn (hidden_dim,)
    float *q; // query (dim,)
    float *k; // key (kv_dim,) — aliases into key_cache, not separately allocated
    float *v; // value (kv_dim,) — aliases into value_cache, not separately allocated
    float *att; // buffer for scores/attention values (n_heads, seq_len)
    float *logits; // output logits (vocab_size,)
    // kv cache
    float* key_cache; // (layer, seq_len, kv_dim)
    float* value_cache; // (layer, seq_len, kv_dim)
} RunState;

typedef struct {
    Config config; // the hyperparameters of the architecture (the blueprint)
    TransformerWeights weights; // the weights of the model
    RunState state; // buffers for the "wave" of activations in the forward pass
    // some more state needed to properly clean up the memory mapping (sigh)
    int fd; // file descriptor for memory mapping
    float* data; // memory mapped data pointer
    ssize_t file_size; // size of the checkpoint file in bytes
} Transformer;
76
+
77
+ void malloc_run_state(RunState* s, Config* p) {
78
+ // we calloc instead of malloc to keep valgrind happy
79
+ int kv_dim = (p->dim * p->n_kv_heads) / p->n_heads;
80
+ s->x = calloc(p->dim, sizeof(float));
81
+ s->xb = calloc(p->dim, sizeof(float));
82
+ s->xb2 = calloc(p->dim, sizeof(float));
83
+ s->hb = calloc(p->hidden_dim, sizeof(float));
84
+ s->hb2 = calloc(p->hidden_dim, sizeof(float));
85
+ s->q = calloc(p->dim, sizeof(float));
86
+ s->key_cache = calloc(p->n_layers * p->seq_len * kv_dim, sizeof(float));
87
+ s->value_cache = calloc(p->n_layers * p->seq_len * kv_dim, sizeof(float));
88
+ s->att = calloc(p->n_heads * p->seq_len, sizeof(float));
89
+ s->logits = calloc(p->vocab_size, sizeof(float));
90
+ // ensure all mallocs went fine
91
+ if (!s->x || !s->xb || !s->xb2 || !s->hb || !s->hb2 || !s->q
92
+ || !s->key_cache || !s->value_cache || !s->att || !s->logits) {
93
+ fprintf(stderr, "malloc failed!\n");
94
+ exit(EXIT_FAILURE);
95
+ }
96
+ }
97
+
98
+ void free_run_state(RunState* s) {
99
+ free(s->x);
100
+ free(s->xb);
101
+ free(s->xb2);
102
+ free(s->hb);
103
+ free(s->hb2);
104
+ free(s->q);
105
+ free(s->att);
106
+ free(s->logits);
107
+ free(s->key_cache);
108
+ free(s->value_cache);
109
+ }
110
+
111
// Lay the TransformerWeights pointers over the flat float array `ptr` (the
// mmap'd checkpoint, just past the Config header). The walk order below is
// the serialization contract with the Python exporter — do not reorder.
// If shared_weights, the classifier reuses the token embedding table.
void memory_map_weights(TransformerWeights *w, Config* p, float* ptr, int shared_weights) {
    int head_size = p->dim / p->n_heads;
    // make sure the multiplications below are done in 64bit to fit the parameter counts of 13B+ models
    unsigned long long n_layers = p->n_layers;
    w->token_embedding_table = ptr;
    // NOTE(review): vocab_size * dim is still int*int here (no n_layers
    // factor to promote it) — could overflow for very large vocab*dim; fine
    // for the model sizes in this repo.
    ptr += p->vocab_size * p->dim;
    w->rms_att_weight = ptr;
    ptr += n_layers * p->dim;
    w->wq = ptr;
    ptr += n_layers * p->dim * (p->n_heads * head_size);
    w->wk = ptr;
    ptr += n_layers * p->dim * (p->n_kv_heads * head_size);
    w->wv = ptr;
    ptr += n_layers * p->dim * (p->n_kv_heads * head_size);
    w->wo = ptr;
    ptr += n_layers * (p->n_heads * head_size) * p->dim;
    w->rms_ffn_weight = ptr;
    ptr += n_layers * p->dim;
    w->w1 = ptr;
    ptr += n_layers * p->dim * p->hidden_dim;
    w->w2 = ptr;
    ptr += n_layers * p->hidden_dim * p->dim;
    w->w3 = ptr;
    ptr += n_layers * p->dim * p->hidden_dim;
    w->rms_final_weight = ptr;
    ptr += p->dim;
    ptr += p->seq_len * head_size / 2; // skip what used to be freq_cis_real (for RoPE)
    ptr += p->seq_len * head_size / 2; // skip what used to be freq_cis_imag (for RoPE)
    w->wcls = shared_weights ? w->token_embedding_table : ptr;
}
141
+
142
+ void read_checkpoint(char* checkpoint, Config* config, TransformerWeights* weights,
143
+ int* fd, float** data, ssize_t* file_size) {
144
+ FILE *file = fopen(checkpoint, "rb");
145
+ if (!file) { fprintf(stderr, "Couldn't open file %s\n", checkpoint); exit(EXIT_FAILURE); }
146
+ // read in the config header
147
+ if (fread(config, sizeof(Config), 1, file) != 1) { exit(EXIT_FAILURE); }
148
+ // negative vocab size is hacky way of signaling unshared weights. bit yikes.
149
+ int shared_weights = config->vocab_size > 0 ? 1 : 0;
150
+ config->vocab_size = abs(config->vocab_size);
151
+ // figure out the file size
152
+ fseek(file, 0, SEEK_END); // move file pointer to end of file
153
+ *file_size = ftell(file); // get the file size, in bytes
154
+ fclose(file);
155
+ // memory map the Transformer weights into the data pointer
156
+ *fd = open(checkpoint, O_RDONLY); // open in read only mode
157
+ if (*fd == -1) { fprintf(stderr, "open failed!\n"); exit(EXIT_FAILURE); }
158
+ *data = mmap(NULL, *file_size, PROT_READ, MAP_PRIVATE, *fd, 0);
159
+ if (*data == MAP_FAILED) { fprintf(stderr, "mmap failed!\n"); exit(EXIT_FAILURE); }
160
+ float* weights_ptr = *data + sizeof(Config)/sizeof(float);
161
+ memory_map_weights(weights, config, weights_ptr, shared_weights);
162
+ }
163
+
164
// Initialize a Transformer from a checkpoint file: memory-map the Config and
// weights, then allocate the forward-pass scratch buffers.
// Pair with free_transformer() to release everything.
void build_transformer(Transformer *t, char* checkpoint_path) {
    // read in the Config and the Weights from the checkpoint
    read_checkpoint(checkpoint_path, &t->config, &t->weights, &t->fd, &t->data, &t->file_size);
    // allocate the RunState buffers
    malloc_run_state(&t->state, &t->config);
}
170
+
171
// Tear down a Transformer built by build_transformer: unmap the checkpoint,
// close its descriptor, and free the RunState buffers.
void free_transformer(Transformer* t) {
    // close the memory mapping (unmap before closing the backing descriptor)
    if (t->data != MAP_FAILED) { munmap(t->data, t->file_size); }
    if (t->fd != -1) { close(t->fd); }
    // free the RunState buffers
    free_run_state(&t->state);
}
178
+
179
+ // ----------------------------------------------------------------------------
180
+ // neural net blocks; the dynamics of the Transformer
181
+
182
+ void rmsnorm(float* o, float* x, float* weight, int size) {
183
+ // calculate sum of squares
184
+ float ss = 0.0f;
185
+ for (int j = 0; j < size; j++) {
186
+ ss += x[j] * x[j];
187
+ }
188
+ ss /= size;
189
+ ss += 1e-5f;
190
+ ss = 1.0f / sqrtf(ss);
191
+ // normalize and scale
192
+ for (int j = 0; j < size; j++) {
193
+ o[j] = weight[j] * (ss * x[j]);
194
+ }
195
+ }
196
+
197
+ void softmax(float* x, int size) {
198
+ // find max value (for numerical stability)
199
+ float max_val = x[0];
200
+ for (int i = 1; i < size; i++) {
201
+ if (x[i] > max_val) {
202
+ max_val = x[i];
203
+ }
204
+ }
205
+ // exp and sum
206
+ float sum = 0.0f;
207
+ for (int i = 0; i < size; i++) {
208
+ x[i] = expf(x[i] - max_val);
209
+ sum += x[i];
210
+ }
211
+ // normalize
212
+ for (int i = 0; i < size; i++) {
213
+ x[i] /= sum;
214
+ }
215
+ }
216
+
217
// W (d,n) @ x (n,) -> xout (d,): each output element is a row dot product.
// by far the most amount of time is spent inside this little function
void matmul(float* xout, float* x, float* w, int n, int d) {
    int i;
    #pragma omp parallel for private(i)
    for (i = 0; i < d; i++) {
        float* row = w + i * n; // row i of W
        float acc = 0.0f;
        for (int j = 0; j < n; j++) {
            acc += row[j] * x[j];
        }
        xout[i] = acc;
    }
}
230
+
231
// Run one Transformer step: consume `token` at sequence position `pos` and
// return a pointer to the vocab_size logits for the next token.
// The returned buffer is owned by the RunState — do not free it; it is
// overwritten by the next call. Writes this position's K/V into the caches.
float* forward(Transformer* transformer, int token, int pos) {

    // a few convenience variables
    Config* p = &transformer->config;
    TransformerWeights* w = &transformer->weights;
    RunState* s = &transformer->state;
    float *x = s->x;
    int dim = p->dim;
    int kv_dim = (p->dim * p->n_kv_heads) / p->n_heads;
    int kv_mul = p->n_heads / p->n_kv_heads; // integer multiplier of the kv sharing in multiquery
    int hidden_dim = p->hidden_dim;
    int head_size = dim / p->n_heads;

    // copy the token embedding into x
    float* content_row = w->token_embedding_table + token * dim;
    memcpy(x, content_row, dim*sizeof(*x));

    // forward all the layers
    for(unsigned long long l = 0; l < p->n_layers; l++) {

        // attention rmsnorm
        rmsnorm(s->xb, x, w->rms_att_weight + l*dim, dim);

        // key and value point to the kv cache, so the qkv matmuls below
        // write K/V for this position directly into the cache
        int loff = l * p->seq_len * kv_dim; // kv cache layer offset for convenience
        s->k = s->key_cache + loff + pos * kv_dim;
        s->v = s->value_cache + loff + pos * kv_dim;

        // qkv matmuls for this position
        matmul(s->q, s->xb, w->wq + l*dim*dim, dim, dim);
        matmul(s->k, s->xb, w->wk + l*dim*kv_dim, dim, kv_dim);
        matmul(s->v, s->xb, w->wv + l*dim*kv_dim, dim, kv_dim);

        // RoPE relative positional encoding: complex-valued rotate q and k in each head
        for (int i = 0; i < dim; i+=2) {
            int head_dim = i % head_size;
            float freq = 1.0f / powf(10000.0f, head_dim / (float)head_size);
            float val = pos * freq;
            float fcr = cosf(val);
            float fci = sinf(val);
            int rotn = i < kv_dim ? 2 : 1; // how many vectors? 2 = q & k, 1 = q only
            for (int v = 0; v < rotn; v++) {
                float* vec = v == 0 ? s->q : s->k; // the vector to rotate (query or key)
                float v0 = vec[i];
                float v1 = vec[i+1];
                vec[i]   = v0 * fcr - v1 * fci;
                vec[i+1] = v0 * fci + v1 * fcr;
            }
        }

        // multihead attention. iterate over all heads
        int h;
        #pragma omp parallel for private(h)
        for (h = 0; h < p->n_heads; h++) {
            // get the query vector for this head
            float* q = s->q + h * head_size;
            // attention scores for this head
            float* att = s->att + h * p->seq_len;
            // iterate over all timesteps, including the current one
            // (h / kv_mul picks the shared kv head for this query head)
            for (int t = 0; t <= pos; t++) {
                // get the key vector for this head and at this timestep
                float* k = s->key_cache + loff + t * kv_dim + (h / kv_mul) * head_size;
                // calculate the attention score as the dot product of q and k
                float score = 0.0f;
                for (int i = 0; i < head_size; i++) {
                    score += q[i] * k[i];
                }
                score /= sqrtf(head_size);
                // save the score to the attention buffer
                att[t] = score;
            }

            // softmax the scores to get attention weights, from 0..pos inclusively
            softmax(att, pos + 1);

            // weighted sum of the values, store back into xb
            float* xb = s->xb + h * head_size;
            memset(xb, 0, head_size * sizeof(float));
            for (int t = 0; t <= pos; t++) {
                // get the value vector for this head and at this timestep
                float* v = s->value_cache + loff + t * kv_dim + (h / kv_mul) * head_size;
                // get the attention weight for this timestep
                float a = att[t];
                // accumulate the weighted value into xb
                for (int i = 0; i < head_size; i++) {
                    xb[i] += a * v[i];
                }
            }
        }

        // final matmul to get the output of the attention
        matmul(s->xb2, s->xb, w->wo + l*dim*dim, dim, dim);

        // residual connection back into x
        for (int i = 0; i < dim; i++) {
            x[i] += s->xb2[i];
        }

        // ffn rmsnorm
        rmsnorm(s->xb, x, w->rms_ffn_weight + l*dim, dim);

        // Now for FFN in PyTorch we have: self.w2(F.silu(self.w1(x)) * self.w3(x))
        // first calculate self.w1(x) and self.w3(x)
        matmul(s->hb, s->xb, w->w1 + l*dim*hidden_dim, dim, hidden_dim);
        matmul(s->hb2, s->xb, w->w3 + l*dim*hidden_dim, dim, hidden_dim);

        // SwiGLU non-linearity
        for (int i = 0; i < hidden_dim; i++) {
            float val = s->hb[i];
            // silu(x)=x*σ(x), where σ(x) is the logistic sigmoid
            val *= (1.0f / (1.0f + expf(-val)));
            // elementwise multiply with w3(x)
            val *= s->hb2[i];
            s->hb[i] = val;
        }

        // final matmul to get the output of the ffn
        matmul(s->xb, s->hb, w->w2 + l*dim*hidden_dim, hidden_dim, dim);

        // residual connection
        for (int i = 0; i < dim; i++) {
            x[i] += s->xb[i];
        }
    }

    // final rmsnorm (in-place on x)
    rmsnorm(x, x, w->rms_final_weight, dim);

    // classifier into logits
    matmul(s->logits, x, w->wcls, p->dim, p->vocab_size);
    return s->logits;
}
363
+
364
+ // ----------------------------------------------------------------------------
365
+ // The Byte Pair Encoding (BPE) Tokenizer that translates strings <-> tokens
366
+
367
// One (string, id) pair; kept in an array sorted by `str` so token lookup
// during encoding can use bsearch (see compare_tokens / str_lookup).
typedef struct {
    char *str;
    int id;
} TokenIndex;

typedef struct {
    char** vocab;            // vocab_size strings, heap-owned (see build_tokenizer)
    float* vocab_scores;     // merge score per token, used by the BPE loop in encode()
    TokenIndex *sorted_vocab; // lazily built, sorted by string, for bsearch lookups
    int vocab_size;
    unsigned int max_token_length; // longest token string in bytes, sizes encode()'s merge buffer
    unsigned char byte_pieces[512]; // stores all single-byte strings (256 x "byte,NUL")
} Tokenizer;
380
+
381
+ int compare_tokens(const void *a, const void *b) {
382
+ return strcmp(((TokenIndex*)a)->str, ((TokenIndex*)b)->str);
383
+ }
384
+
385
+ void build_tokenizer(Tokenizer* t, char* tokenizer_path, int vocab_size) {
386
+ // i should have written the vocab_size into the tokenizer file... sigh
387
+ t->vocab_size = vocab_size;
388
+ // malloc space to hold the scores and the strings
389
+ t->vocab = (char**)malloc(vocab_size * sizeof(char*));
390
+ t->vocab_scores = (float*)malloc(vocab_size * sizeof(float));
391
+ t->sorted_vocab = NULL; // initialized lazily
392
+ for (int i = 0; i < 256; i++) {
393
+ t->byte_pieces[i * 2] = (unsigned char)i;
394
+ t->byte_pieces[i * 2 + 1] = '\0';
395
+ }
396
+ // read in the file
397
+ FILE *file = fopen(tokenizer_path, "rb");
398
+ if (!file) { fprintf(stderr, "couldn't load %s\n", tokenizer_path); exit(EXIT_FAILURE); }
399
+ if (fread(&t->max_token_length, sizeof(int), 1, file) != 1) { fprintf(stderr, "failed read\n"); exit(EXIT_FAILURE); }
400
+ int len;
401
+ for (int i = 0; i < vocab_size; i++) {
402
+ if (fread(t->vocab_scores + i, sizeof(float), 1, file) != 1) { fprintf(stderr, "failed read\n"); exit(EXIT_FAILURE);}
403
+ if (fread(&len, sizeof(int), 1, file) != 1) { fprintf(stderr, "failed read\n"); exit(EXIT_FAILURE); }
404
+ t->vocab[i] = (char *)malloc(len + 1);
405
+ if (fread(t->vocab[i], len, 1, file) != 1) { fprintf(stderr, "failed read\n"); exit(EXIT_FAILURE); }
406
+ t->vocab[i][len] = '\0'; // add the string terminating token
407
+ }
408
+ fclose(file);
409
+ }
410
+
411
+ void free_tokenizer(Tokenizer* t) {
412
+ for (int i = 0; i < t->vocab_size; i++) { free(t->vocab[i]); }
413
+ free(t->vocab);
414
+ free(t->vocab_scores);
415
+ free(t->sorted_vocab);
416
+ }
417
+
418
// Map a token id back to its printable piece. `prev_token` matters because
// sentencepiece strips the leading space of the piece right after BOS.
// Returns a pointer into the tokenizer's own storage — caller must not
// free or modify it.
char* decode(Tokenizer* t, int prev_token, int token) {
    char *piece = t->vocab[token];
    // following BOS (1) token, sentencepiece decoder strips any leading whitespace (see PR #89)
    if (prev_token == 1 && piece[0] == ' ') { piece++; }
    // careful, some tokens designate raw bytes, and look like e.g. '<0x01>'
    // parse this and convert and return the actual byte
    // (byte_pieces holds the 256 pre-built single-byte strings, 2 bytes apart)
    unsigned char byte_val;
    if (sscanf(piece, "<0x%02hhX>", &byte_val) == 1) {
        piece = (char*)t->byte_pieces + byte_val * 2;
    }
    return piece;
}
430
+
431
// Print piece to stdout, but suppress single-byte pieces that are neither
// printable nor whitespace — raw byte tokens can be control codes,
// backspace, etc.
void safe_printf(char *piece) {
    if (piece == NULL || piece[0] == '\0') { return; }
    if (piece[1] == '\0') {
        unsigned char byte_val = piece[0];
        if (!isprint(byte_val) && !isspace(byte_val)) {
            return; // bad byte, don't print it
        }
    }
    printf("%s", piece);
}
444
+
445
+ int str_lookup(char *str, TokenIndex *sorted_vocab, int vocab_size) {
446
+ // efficiently find the perfect match for str in vocab, return its index or -1 if not found
447
+ TokenIndex tok = { .str = str }; // acts as the key to search for
448
+ TokenIndex *res = bsearch(&tok, sorted_vocab, vocab_size, sizeof(TokenIndex), compare_tokens);
449
+ return res != NULL ? res->id : -1;
450
+ }
451
+
452
+ void encode(Tokenizer* t, char *text, int8_t bos, int8_t eos, int *tokens, int *n_tokens) {
453
+ // encode the string text (input) into an upper-bound preallocated tokens[] array
454
+ // bos != 0 means prepend the BOS token (=1), eos != 0 means append the EOS token (=2)
455
+ if (text == NULL) { fprintf(stderr, "cannot encode NULL text\n"); exit(EXIT_FAILURE); }
456
+
457
+ if (t->sorted_vocab == NULL) {
458
+ // lazily malloc and sort the vocabulary
459
+ t->sorted_vocab = malloc(t->vocab_size * sizeof(TokenIndex));
460
+ for (int i = 0; i < t->vocab_size; i++) {
461
+ t->sorted_vocab[i].str = t->vocab[i];
462
+ t->sorted_vocab[i].id = i;
463
+ }
464
+ qsort(t->sorted_vocab, t->vocab_size, sizeof(TokenIndex), compare_tokens);
465
+ }
466
+
467
+ // create a temporary buffer that will store merge candidates of always two consecutive tokens
468
+ // *2 for concat, +1 for null terminator +2 for UTF8 (in case max_token_length is 1)
469
+ char* str_buffer = malloc((t->max_token_length*2 +1 +2) * sizeof(char));
470
+ size_t str_len = 0;
471
+
472
+ // start at 0 tokens
473
+ *n_tokens = 0;
474
+
475
+ // add optional BOS (=1) token, if desired
476
+ if (bos) tokens[(*n_tokens)++] = 1;
477
+
478
+ // add_dummy_prefix is true by default
479
+ // so prepend a dummy prefix token to the input string, but only if text != ""
480
+ // TODO: pretty sure this isn't correct in the general case but I don't have the
481
+ // energy to read more of the sentencepiece code to figure out what it's doing
482
+ if (text[0] != '\0') {
483
+ int dummy_prefix = str_lookup(" ", t->sorted_vocab, t->vocab_size);
484
+ tokens[(*n_tokens)++] = dummy_prefix;
485
+ }
486
+
487
+ // Okay UTF-8 time. This will get messy. Here is the reference from Wikipedia:
488
+ // Code point ↔ UTF-8 conversion
489
+ // First code point Last code point Byte 1 Byte 2 Byte 3 Byte 4
490
+ // U+0000 U+007F 0xxxxxxx
491
+ // U+0080 U+07FF 110xxxxx 10xxxxxx
492
+ // U+0800 U+FFFF 1110xxxx 10xxxxxx 10xxxxxx
493
+ // U+10000 U+10FFFF 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
494
+
495
+ // process the raw (UTF-8) byte sequence of the input string
496
+ for (char *c = text; *c != '\0'; c++) {
497
+
498
+ // reset buffer if the current byte is ASCII or a leading byte
499
+ // 0xC0 is 11000000, so (*c & 0xC0) keeps the first 2 bits and zeros the rest
500
+ // 0x80 is 10000000
501
+ // in UTF-8, all continuation bytes start with "10" in first two bits
502
+ // so in English this is: "if this byte is not a continuation byte"
503
+ if ((*c & 0xC0) != 0x80) {
504
+ // this byte must be either a leading byte (11...) or an ASCII char (0x...)
505
+ // => reset our location, as we're starting a new UTF-8 codepoint
506
+ str_len = 0;
507
+ }
508
+
509
+ // append the current byte to the buffer
510
+ str_buffer[str_len++] = *c; // ++ is post-increment, incremented after this line
511
+ str_buffer[str_len] = '\0';
512
+
513
+ // while the next character is a continuation byte, continue appending
514
+ // but if there are too many of them, just stop to avoid overruning str_buffer size.
515
+ if ((*(c+1) & 0xC0) == 0x80 && str_len < 4) {
516
+ continue;
517
+ }
518
+
519
+ // ok c+1 is not a continuation byte, so we've read in a full codepoint
520
+ int id = str_lookup(str_buffer, t->sorted_vocab, t->vocab_size);
521
+
522
+ if (id != -1) {
523
+ // we found this codepoint in vocab, add it as a token
524
+ tokens[(*n_tokens)++] = id;
525
+ } else {
526
+ // byte_fallback encoding: just encode each byte as a token
527
+ // +3 is here because the first 3 vocab elements are <unk>, <s>, </s>
528
+ // so the individual bytes only start at index 3
529
+ for (int i=0; i < str_len; i++) {
530
+ tokens[(*n_tokens)++] = (unsigned char)str_buffer[i] + 3;
531
+ }
532
+ }
533
+ str_len = 0; // protect against a sequence of stray UTF8 continuation bytes
534
+ }
535
+
536
+ // merge the best consecutive pair each iteration, according the scores in vocab_scores
537
+ while (1) {
538
+ float best_score = -1e10;
539
+ int best_id = -1;
540
+ int best_idx = -1;
541
+
542
+ for (int i=0; i < (*n_tokens-1); i++) {
543
+ // check if we can merge the pair (tokens[i], tokens[i+1])
544
+ sprintf(str_buffer, "%s%s", t->vocab[tokens[i]], t->vocab[tokens[i+1]]);
545
+ int id = str_lookup(str_buffer, t->sorted_vocab, t->vocab_size);
546
+ if (id != -1 && t->vocab_scores[id] > best_score) {
547
+ // this merge pair exists in vocab! record its score and position
548
+ best_score = t->vocab_scores[id];
549
+ best_id = id;
550
+ best_idx = i;
551
+ }
552
+ }
553
+
554
+ if (best_idx == -1) {
555
+ break; // we couldn't find any more pairs to merge, so we're done
556
+ }
557
+
558
+ // merge the consecutive pair (best_idx, best_idx+1) into new token best_id
559
+ tokens[best_idx] = best_id;
560
+ // delete token at position best_idx+1, shift the entire sequence back 1
561
+ for (int i = best_idx+1; i < (*n_tokens-1); i++) {
562
+ tokens[i] = tokens[i+1];
563
+ }
564
+ (*n_tokens)--; // token length decreased
565
+ }
566
+
567
+ // add optional EOS (=2) token, if desired
568
+ if (eos) tokens[(*n_tokens)++] = 2;
569
+
570
+ free(str_buffer);
571
+ }
572
+
573
+ // ----------------------------------------------------------------------------
574
+ // The Sampler, which takes logits and returns a sampled token
575
+ // sampling can be done in a few ways: greedy argmax, sampling, top-p sampling
576
+
577
typedef struct {
    float prob;  // probability mass assigned to this token
    int index;   // token id in the vocabulary
} ProbIndex; // struct used when sorting probabilities during top-p sampling

typedef struct {
    int vocab_size;               // number of logits/tokens per step
    ProbIndex* probindex;         // scratch buffer (vocab_size entries) used in top-p sampling
    float temperature;            // 0.0 = greedy argmax; higher values flatten the distribution
    float topp;                   // nucleus threshold; values outside (0,1) disable top-p
    unsigned long long rng_state; // xorshift RNG state, seeded in build_sampler
} Sampler;
589
+
590
int sample_argmax(float* probabilities, int n) {
    // return the index of the largest value in probabilities[0..n-1];
    // ties keep the earliest index
    int best = 0;
    for (int i = 1; i < n; i++) {
        best = (probabilities[i] > probabilities[best]) ? i : best;
    }
    return best;
}
602
+
603
int sample_mult(float* probabilities, int n, float coin) {
    // sample index i with probability probabilities[i] (entries must sum to 1!)
    // coin is a uniform random float in [0, 1), usually from random_f32()
    float acc = 0.0f;
    int i = 0;
    while (i < n) {
        acc += probabilities[i];
        if (coin < acc) { break; }
        i++;
    }
    // if rounding left coin >= the accumulated mass, fall back to the last index
    return (i < n) ? i : n - 1;
}
615
+
616
+ int compare(const void* a, const void* b) {
617
+ ProbIndex* a_ = (ProbIndex*) a;
618
+ ProbIndex* b_ = (ProbIndex*) b;
619
+ if (a_->prob > b_->prob) return -1;
620
+ if (a_->prob < b_->prob) return 1;
621
+ return 0;
622
+ }
623
+
624
+ int sample_topp(float* probabilities, int n, float topp, ProbIndex* probindex, float coin) {
625
+ // top-p sampling (or "nucleus sampling") samples from the smallest set of
626
+ // tokens that exceed probability topp. This way we never sample tokens that
627
+ // have very low probabilities and are less likely to go "off the rails".
628
+ // coin is a random number in [0, 1), usually from random_f32()
629
+
630
+ int n0 = 0;
631
+ // quicksort indices in descending order of probabilities
632
+ // values smaller than (1 - topp) / (n - 1) cannot be part of the result
633
+ // so for efficiency we crop these out as candidates before sorting
634
+ const float cutoff = (1.0f - topp) / (n - 1);
635
+ for (int i = 0; i < n; i++) {
636
+ if (probabilities[i] >= cutoff) {
637
+ probindex[n0].index = i;
638
+ probindex[n0].prob = probabilities[i];
639
+ n0++;
640
+ }
641
+ }
642
+ qsort(probindex, n0, sizeof(ProbIndex), compare);
643
+
644
+ // truncate the list where cumulative probability exceeds topp
645
+ float cumulative_prob = 0.0f;
646
+ int last_idx = n0 - 1; // in case of rounding errors consider all elements
647
+ for (int i = 0; i < n0; i++) {
648
+ cumulative_prob += probindex[i].prob;
649
+ if (cumulative_prob > topp) {
650
+ last_idx = i;
651
+ break; // we've exceeded topp by including last_idx
652
+ }
653
+ }
654
+
655
+ // sample from the truncated list
656
+ float r = coin * cumulative_prob;
657
+ float cdf = 0.0f;
658
+ for (int i = 0; i <= last_idx; i++) {
659
+ cdf += probindex[i].prob;
660
+ if (r < cdf) {
661
+ return probindex[i].index;
662
+ }
663
+ }
664
+ return probindex[last_idx].index; // in case of rounding errors
665
+ }
666
+
667
+ void build_sampler(Sampler* sampler, int vocab_size, float temperature, float topp, unsigned long long rng_seed) {
668
+ sampler->vocab_size = vocab_size;
669
+ sampler->temperature = temperature;
670
+ sampler->topp = topp;
671
+ sampler->rng_state = rng_seed;
672
+ // buffer only used with nucleus sampling; may not need but it's ~small
673
+ sampler->probindex = malloc(sampler->vocab_size * sizeof(ProbIndex));
674
+ }
675
+
676
void free_sampler(Sampler* sampler) {
    // release the top-p scratch buffer allocated in build_sampler
    // (free(NULL) is a no-op, so this is safe even if allocation failed)
    free(sampler->probindex);
}
679
+
680
unsigned int random_u32(unsigned long long *state) {
    // xorshift* rng: https://en.wikipedia.org/wiki/Xorshift#xorshift.2A
    // mutates *state; returns the high 32 bits of the multiplied state
    unsigned long long x = *state;
    x ^= x >> 12;
    x ^= x << 25;
    x ^= x >> 27;
    *state = x;
    return (unsigned int)((x * 0x2545F4914F6CDD1Dull) >> 32);
}
687
float random_f32(unsigned long long *state) { // random float32 in [0,1)
    // keep the top 24 bits of the draw and divide by 2^24 (an exact power of two,
    // so the result is bit-identical to scaling by 2^-24)
    unsigned int bits = random_u32(state) >> 8;
    return (float)bits / 16777216.0f;
}
690
+
691
+ int sample(Sampler* sampler, float* logits) {
692
+ // sample the token given the logits and some hyperparameters
693
+ int next;
694
+ if (sampler->temperature == 0.0f) {
695
+ // greedy argmax sampling: take the token with the highest probability
696
+ next = sample_argmax(logits, sampler->vocab_size);
697
+ } else {
698
+ // apply the temperature to the logits
699
+ for (int q=0; q<sampler->vocab_size; q++) { logits[q] /= sampler->temperature; }
700
+ // apply softmax to the logits to get the probabilities for next token
701
+ softmax(logits, sampler->vocab_size);
702
+ // flip a (float) coin (this is our source of entropy for sampling)
703
+ float coin = random_f32(&sampler->rng_state);
704
+ // we sample from this distribution to get the next token
705
+ if (sampler->topp <= 0 || sampler->topp >= 1) {
706
+ // simply sample from the predicted probability distribution
707
+ next = sample_mult(logits, sampler->vocab_size, coin);
708
+ } else {
709
+ // top-p (nucleus) sampling, clamping the least likely tokens to zero
710
+ next = sample_topp(logits, sampler->vocab_size, sampler->topp, sampler->probindex, coin);
711
+ }
712
+ }
713
+ return next;
714
+ }
715
+
716
+ // ----------------------------------------------------------------------------
717
+ // utilities: time
718
+
719
+ long time_in_ms() {
720
+ // return time in milliseconds, for benchmarking the model speed
721
+ struct timespec time;
722
+ clock_gettime(CLOCK_REALTIME, &time);
723
+ return time.tv_sec * 1000 + time.tv_nsec / 1000000;
724
+ }
725
+
726
+ // ----------------------------------------------------------------------------
727
+ // generation loop
728
+
729
void generate(Transformer *transformer, Tokenizer *tokenizer, Sampler *sampler, char *prompt, int steps) {
    // Autoregressive generation: encode `prompt`, force-feed its tokens through the
    // model, then sample up to `steps` total positions, streaming decoded text to
    // stdout and reporting tokens/sec on stderr when done.
    char *empty_prompt = "";
    if (prompt == NULL) { prompt = empty_prompt; }

    // encode the (string) prompt into tokens sequence
    int num_prompt_tokens = 0;
    int* prompt_tokens = (int*)malloc((strlen(prompt)+3) * sizeof(int)); // +3 for '\0', ?BOS, ?EOS
    encode(tokenizer, prompt, 1, 0, prompt_tokens, &num_prompt_tokens); // BOS on, EOS off
    if (num_prompt_tokens < 1) {
        fprintf(stderr, "something is wrong, expected at least 1 prompt token\n");
        exit(EXIT_FAILURE);
    }

    // start the main loop
    long start = 0;  // used to time our code, only initialized after first iteration
    int next;        // will store the next token in the sequence
    int token = prompt_tokens[0]; // kick off with the first token in the prompt
    int pos = 0;     // position in the sequence
    while (pos < steps) {

        // forward the transformer to get logits for the next token
        float* logits = forward(transformer, token, pos);

        // advance the state machine
        if (pos < num_prompt_tokens - 1) {
            // if we are still processing the input prompt, force the next prompt token
            next = prompt_tokens[pos + 1];
        } else {
            // otherwise sample the next token from the logits
            next = sample(sampler, logits);
        }
        pos++;

        // data-dependent terminating condition: the BOS (=1) token delimits sequences
        if (next == 1) { break; }

        // print the token as string, decode it with the Tokenizer object
        char* piece = decode(tokenizer, token, next);
        safe_printf(piece); // same as printf("%s", piece), but skips "unsafe" bytes
        fflush(stdout);
        token = next;

        // init the timer here because the first iteration can be slower
        if (start == 0) { start = time_in_ms(); }
    }
    printf("\n");

    // report achieved tok/s (pos-1 because the timer starts after first iteration)
    if (pos > 1) {
        long end = time_in_ms();
        fprintf(stderr, "achieved tok/s: %f\n", (pos-1) / (double)(end-start)*1000);
    }

    free(prompt_tokens);
}
784
+
785
void read_stdin(const char* guide, char* buffer, size_t bufsize) {
    // print a prompt, then read one line from stdin into buffer, up to but
    // not including the trailing '\n'.
    // NOTE(review): on EOF or read error, buffer is left untouched — callers
    // should not assume it is NUL-terminated in that case; confirm upstream.
    printf("%s", guide);
    if (fgets(buffer, bufsize, stdin) == NULL) {
        return;
    }
    size_t len = strlen(buffer);
    if (len > 0 && buffer[len - 1] == '\n') {
        buffer[len - 1] = '\0'; // strip the newline fgets keeps
    }
}
795
+
796
+ // ----------------------------------------------------------------------------
797
+ // chat loop
798
+ // I manually inspected the tokens for a few chat conversations compared to
799
+ // python reference and that seemed ok, but this was not thoroughly tested and
800
+ // is not safely implemented, it's more a proof of concept atm.
801
+
802
void chat(Transformer *transformer, Tokenizer *tokenizer, Sampler *sampler,
          char *cli_user_prompt, char *cli_system_prompt, int steps) {
    // Interactive chat loop using the Llama 2 Chat [INST] prompt schema.
    // Alternates user turns (read from CLI args or stdin, rendered into the
    // template, force-fed as prompt tokens) with assistant turns (sampled and
    // printed until an EOS token). Runs for at most `steps` total positions.
    // Not thoroughly tested; more a proof of concept than a hardened loop.

    // buffers for reading the system prompt and user prompt from stdin
    // you'll notice they are somewhat haphazardly and unsafely set atm
    // NOTE(review): if stdin hits EOF before the system prompt is entered,
    // system_prompt stays uninitialized and is read below at "system_prompt[0]"
    // — consider initializing these buffers to empty strings; confirm.
    char system_prompt[512];
    char user_prompt[512];
    char rendered_prompt[1152]; // 512 + 512 + template text, hand-sized
    int num_prompt_tokens = 0;
    int* prompt_tokens = (int*)malloc(1152 * sizeof(int));
    int user_idx; // index of the next forced prompt token within the current turn

    // start the main loop
    int8_t user_turn = 1; // user starts
    int next;        // will store the next token in the sequence
    int token;       // stores the current token to feed into the transformer
    int prev_token;
    int pos = 0;     // position in the sequence
    while (pos < steps) {

        // when it is the user's turn to contribute tokens to the dialog...
        if (user_turn) {
            // get the (optional) system prompt at position 0
            if (pos == 0) {
                // at position 0, the user can also contribute a system prompt
                if (cli_system_prompt == NULL) {
                    // system prompt was not passed in, attempt to get it from stdin
                    read_stdin("Enter system prompt (optional): ", system_prompt, sizeof(system_prompt));
                } else {
                    // system prompt was passed in, use it
                    strcpy(system_prompt, cli_system_prompt);
                }
            }
            // get the user prompt
            if (pos == 0 && cli_user_prompt != NULL) {
                // user prompt for position 0 was passed in, use it
                strcpy(user_prompt, cli_user_prompt);
            } else {
                // otherwise get user prompt from stdin
                read_stdin("User: ", user_prompt, sizeof(user_prompt));
            }
            // render user/system prompts into the Llama 2 Chat schema
            if (pos == 0 && system_prompt[0] != '\0') {
                char system_template[] = "[INST] <<SYS>>\n%s\n<</SYS>>\n\n%s [/INST]";
                sprintf(rendered_prompt, system_template, system_prompt, user_prompt);
            } else {
                char user_template[] = "[INST] %s [/INST]";
                sprintf(rendered_prompt, user_template, user_prompt);
            }
            // encode the rendered prompt into tokens (BOS on, EOS off)
            encode(tokenizer, rendered_prompt, 1, 0, prompt_tokens, &num_prompt_tokens);
            user_idx = 0; // reset the user index
            user_turn = 0;
            printf("Assistant: ");
        }

        // determine the token to pass into the transformer next
        if (user_idx < num_prompt_tokens) {
            // if we are still processing the input prompt, force the next prompt token
            token = prompt_tokens[user_idx++];
        } else {
            // otherwise use the next token sampled from previous turn
            token = next;
        }
        // EOS (=2) token ends the Assistant turn
        if (token == 2) { user_turn = 1; }

        // forward the transformer to get logits for the next token
        float* logits = forward(transformer, token, pos);
        next = sample(sampler, logits);
        pos++;

        if (user_idx >= num_prompt_tokens && next != 2) {
            // the Assistant is responding, so print its output
            char* piece = decode(tokenizer, token, next);
            safe_printf(piece); // same as printf("%s", piece), but skips "unsafe" bytes
            fflush(stdout);
        }
        if (next == 2) { printf("\n"); }
    }
    printf("\n");
    free(prompt_tokens);
}
885
+
886
+
887
+ // ----------------------------------------------------------------------------
888
+ // CLI, include only if not testing
889
+ #ifndef TESTING
890
+
891
void error_usage() {
    // print command-line usage to stderr, then terminate with a failure status
    static const char *usage[] = {
        "Usage: run <checkpoint> [options]\n",
        "Example: run model.bin -n 256 -i \"Once upon a time\"\n",
        "Options:\n",
        " -t <float> temperature in [0,inf], default 1.0\n",
        " -p <float> p value in top-p (nucleus) sampling in [0,1] default 0.9\n",
        " -s <int> random seed, default time(NULL)\n",
        " -n <int> number of steps to run for, default 256. 0 = max_seq_len\n",
        " -i <string> input prompt\n",
        " -z <string> optional path to custom tokenizer\n",
        " -m <string> mode: generate|chat, default: generate\n",
        " -y <string> (optional) system prompt in chat mode\n",
    };
    for (size_t i = 0; i < sizeof(usage) / sizeof(usage[0]); i++) {
        fputs(usage[i], stderr);
    }
    exit(EXIT_FAILURE);
}
905
+
906
int main(int argc, char *argv[]) {
    // CLI entry point: parse flags, load model + tokenizer, build the sampler,
    // then dispatch to generate() or chat().

    // default parameters
    char *checkpoint_path = NULL;  // e.g. out/model.bin
    char *tokenizer_path = "tokenizer.bin";
    float temperature = 1.0f;   // 0.0 = greedy deterministic. 1.0 = original. don't set higher
    float topp = 0.9f;          // top-p in nucleus sampling. 1.0 = off. 0.9 works well, but slower
    int steps = 256;            // number of steps to run for
    char *prompt = NULL;        // prompt string
    unsigned long long rng_seed = 0; // seed rng with time by default
    char *mode = "generate";    // generate|chat
    char *system_prompt = NULL; // the (optional) system prompt to use in chat mode

    // poor man's C argparse so we can override the defaults above from the command line
    if (argc >= 2) { checkpoint_path = argv[1]; } else { error_usage(); }
    for (int i = 2; i < argc; i+=2) {
        // do some basic validation
        if (i + 1 >= argc) { error_usage(); } // must have arg after flag
        if (argv[i][0] != '-') { error_usage(); } // must start with dash
        if (strlen(argv[i]) != 2) { error_usage(); } // must be -x (one dash, one letter)
        // read in the args
        // NOTE(review): atoi/atof give no error reporting and atoi truncates seeds
        // larger than INT_MAX — strtoull/strtod would be more robust; confirm acceptable
        if (argv[i][1] == 't') { temperature = atof(argv[i + 1]); }
        else if (argv[i][1] == 'p') { topp = atof(argv[i + 1]); }
        else if (argv[i][1] == 's') { rng_seed = atoi(argv[i + 1]); }
        else if (argv[i][1] == 'n') { steps = atoi(argv[i + 1]); }
        else if (argv[i][1] == 'i') { prompt = argv[i + 1]; }
        else if (argv[i][1] == 'z') { tokenizer_path = argv[i + 1]; }
        else if (argv[i][1] == 'm') { mode = argv[i + 1]; }
        else if (argv[i][1] == 'y') { system_prompt = argv[i + 1]; }
        else { error_usage(); }
    }

    // parameter validation/overrides
    // rng_seed is unsigned, so "<= 0" only catches the unset default of 0
    if (rng_seed <= 0) rng_seed = (unsigned int)time(NULL);
    if (temperature < 0.0) temperature = 0.0;
    if (topp < 0.0 || 1.0 < topp) topp = 0.9;
    if (steps < 0) steps = 0;

    // build the Transformer via the model .bin file
    Transformer transformer;
    build_transformer(&transformer, checkpoint_path);
    if (steps == 0 || steps > transformer.config.seq_len) steps = transformer.config.seq_len; // override to ~max length

    // build the Tokenizer via the tokenizer .bin file
    Tokenizer tokenizer;
    build_tokenizer(&tokenizer, tokenizer_path, transformer.config.vocab_size);

    // build the Sampler
    Sampler sampler;
    build_sampler(&sampler, transformer.config.vocab_size, temperature, topp, rng_seed);

    // run!
    if (strcmp(mode, "generate") == 0) {
        generate(&transformer, &tokenizer, &sampler, prompt, steps);
    } else if (strcmp(mode, "chat") == 0) {
        chat(&transformer, &tokenizer, &sampler, prompt, system_prompt, steps);
    } else {
        fprintf(stderr, "unknown mode: %s\n", mode);
        error_usage();
    }

    // memory and file handles cleanup
    free_sampler(&sampler);
    free_tokenizer(&tokenizer);
    free_transformer(&transformer);
    return 0;
}
973
+ #endif
Version 2/42M_finetuned/runq.c ADDED
@@ -0,0 +1,1092 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Inference for Llama-2 Transformer model in pure C, int8 quantized forward pass. */
2
+
3
+ #include <stdio.h>
4
+ #include <stdlib.h>
5
+ #include <ctype.h>
6
+ #include <stdint.h>
7
+ #include <time.h>
8
+ #include <math.h>
9
+ #include <string.h>
10
+ #include <fcntl.h>
11
+ #if defined _WIN32
12
+ #include "win.h"
13
+ #else
14
+ #include <unistd.h>
15
+ #include <sys/mman.h>
16
+ #endif
17
// ----------------------------------------------------------------------------
// Globals
// group size global for quantization of the weights; 0 until read_checkpoint
// sets it from the model header — quantize/dequantize must not run before then
int GS = 0;
+
21
+ // ----------------------------------------------------------------------------
22
+ // Transformer model
23
+
24
typedef struct {
    int dim;        // transformer dimension
    int hidden_dim; // for ffn layers
    int n_layers;   // number of layers
    int n_heads;    // number of query heads
    int n_kv_heads; // number of key/value heads (can be < query heads because of multiquery)
    int vocab_size; // vocabulary size (e.g. 32000 for Llama 2 tokenizers)
    int seq_len;    // max sequence length
} Config;

typedef struct {
    int8_t* q; // quantized int8 values
    float* s;  // per-group scaling factors (one float per GS values)
} QuantizedTensor;

typedef struct {
    // token embedding table
    QuantizedTensor *q_tokens;    // (vocab_size, dim)
    float* token_embedding_table; // same, but dequantized up front for fast lookup

    // weights for rmsnorms (kept in fp32)
    float* rms_att_weight; // (layer, dim) rmsnorm weights
    float* rms_ffn_weight; // (layer, dim)
    // weights for matmuls. note dim == n_heads * head_size
    QuantizedTensor *wq; // (layer, dim, n_heads * head_size)
    QuantizedTensor *wk; // (layer, dim, n_kv_heads * head_size)
    QuantizedTensor *wv; // (layer, dim, n_kv_heads * head_size)
    QuantizedTensor *wo; // (layer, n_heads * head_size, dim)
    // weights for ffn
    QuantizedTensor *w1; // (layer, hidden_dim, dim)
    QuantizedTensor *w2; // (layer, dim, hidden_dim)
    QuantizedTensor *w3; // (layer, hidden_dim, dim)
    // final rmsnorm
    float* rms_final_weight; // (dim,)
    // (optional) classifier weights for the logits, on the last layer;
    // aliases q_tokens when the checkpoint flags a shared classifier
    QuantizedTensor *wcls;
} TransformerWeights;

typedef struct {
    // current wave of activations
    float *x;           // activation at current time stamp (dim,)
    float *xb;          // same, but inside a residual branch (dim,)
    float *xb2;         // an additional buffer just for convenience (dim,)
    float *hb;          // buffer for hidden dimension in the ffn (hidden_dim,)
    float *hb2;         // buffer for hidden dimension in the ffn (hidden_dim,)
    QuantizedTensor xq; // quantized x (dim,)
    QuantizedTensor hq; // quantized hb (hidden_dim,)
    float *q;           // query (dim,)
    float *k;           // key (kv_dim,)
    float *v;           // value (kv_dim,)
    float *att;         // buffer for scores/attention values (n_heads, seq_len)
    float *logits;      // output logits (vocab_size,)
    // kv cache
    float* key_cache;   // (layer, seq_len, kv_dim)
    float* value_cache; // (layer, seq_len, kv_dim)
} RunState;

typedef struct {
    Config config; // the hyperparameters of the architecture (the blueprint)
    TransformerWeights weights; // the weights of the model
    RunState state; // buffers for the "wave" of activations in the forward pass
    // some more state needed to properly clean up the memory mapping (sigh)
    int fd;            // file descriptor for memory mapping
    float* data;       // memory mapped data pointer
    ssize_t file_size; // size of the checkpoint file in bytes
} Transformer;
+ } Transformer;
90
+
91
+ void malloc_run_state(RunState* s, Config* p) {
92
+ // we calloc instead of malloc to keep valgrind happy
93
+ int kv_dim = (p->dim * p->n_kv_heads) / p->n_heads;
94
+ s->x = calloc(p->dim, sizeof(float));
95
+ s->xb = calloc(p->dim, sizeof(float));
96
+ s->xb2 = calloc(p->dim, sizeof(float));
97
+ s->hb = calloc(p->hidden_dim, sizeof(float));
98
+ s->hb2 = calloc(p->hidden_dim, sizeof(float));
99
+ s->xq = (QuantizedTensor) { .q = calloc(p->dim, sizeof(int8_t)), .s = calloc(p->dim, sizeof(float)) };
100
+ s->hq = (QuantizedTensor) { .q = calloc(p->hidden_dim, sizeof(int8_t)), .s = calloc(p->hidden_dim, sizeof(float)) };
101
+ s->q = calloc(p->dim, sizeof(float));
102
+ s->k = calloc(kv_dim, sizeof(float));
103
+ s->v = calloc(kv_dim, sizeof(float));
104
+ s->att = calloc(p->n_heads * p->seq_len, sizeof(float));
105
+ s->logits = calloc(p->vocab_size, sizeof(float));
106
+ s->key_cache = calloc(p->n_layers * p->seq_len * kv_dim, sizeof(float));
107
+ s->value_cache = calloc(p->n_layers * p->seq_len * kv_dim, sizeof(float));
108
+ // ensure all mallocs went fine
109
+ if (!s->x || !s->xb || !s->xb2 || !s->hb || !s->hb2 || !s->q
110
+ || !s->k || !s->v || !s->att || !s->logits || !s->key_cache
111
+ || !s->value_cache) {
112
+ fprintf(stderr, "malloc failed!\n");
113
+ exit(EXIT_FAILURE);
114
+ }
115
+ }
116
+
117
void free_run_state(RunState* s) {
    // release every buffer allocated in malloc_run_state
    // (free(NULL) is a no-op, so partially-failed states are safe to free)
    free(s->x);
    free(s->xb);
    free(s->xb2);
    free(s->hb);
    free(s->hb2);
    free(s->xq.q);
    free(s->xq.s);
    free(s->hq.q);
    free(s->hq.s);
    free(s->q);
    free(s->k);
    free(s->v);
    free(s->att);
    free(s->logits);
    free(s->key_cache);
    free(s->value_cache);
}
+ }
135
+
136
+ // ----------------------------------------------------------------------------
137
+ // Quantization functions
138
+
139
+ void dequantize(QuantizedTensor *qx, float* x, int n) {
140
+ for (int i = 0; i < n; i++) {
141
+ x[i] = qx->q[i] * qx->s[i / GS];
142
+ }
143
+ }
144
+
145
+ void quantize(QuantizedTensor *qx, float* x, int n) {
146
+ int num_groups = n / GS;
147
+ float Q_MAX = 127.0f;
148
+
149
+ for (int group = 0; group < num_groups; group++) {
150
+
151
+ // find the max absolute value in the current group
152
+ float wmax = 0.0;
153
+ for (int i = 0; i < GS; i++) {
154
+ float val = fabs(x[group * GS + i]);
155
+ if (val > wmax) {
156
+ wmax = val;
157
+ }
158
+ }
159
+
160
+ // calculate and write the scaling factor
161
+ float scale = wmax / Q_MAX;
162
+ qx->s[group] = scale;
163
+
164
+ // calculate and write the quantized values
165
+ for (int i = 0; i < GS; i++) {
166
+ float quant_value = x[group * GS + i] / scale; // scale
167
+ int8_t quantized = (int8_t) round(quant_value); // round and clamp
168
+ qx->q[group * GS + i] = quantized;
169
+ }
170
+ }
171
+ }
172
+
173
+ /* initialize `n` x quantized tensor (with `size_each` elements), starting from memory pointed at *ptr */
174
+ QuantizedTensor *init_quantized_tensors(void **ptr, int n, int size_each) {
175
+ void *p = *ptr;
176
+ QuantizedTensor *res = malloc(n * sizeof(QuantizedTensor));
177
+ for(int i=0; i<n; i++) {
178
+ /* map quantized int8 values*/
179
+ res[i].q = (int8_t*)p;
180
+ p = (int8_t*)p + size_each;
181
+ /* map scale factors */
182
+ res[i].s = (float*)p;
183
+ p = (float*)p + size_each / GS;
184
+ }
185
+ *ptr = p; // advance ptr to current position
186
+ return res;
187
+ }
188
+
189
void memory_map_weights(TransformerWeights *w, Config* p, void* ptr, uint8_t shared_classifier) {
    // Walk the mapped checkpoint at `ptr`, assigning each weight pointer in
    // declaration order. The walk order must exactly match the export format:
    // fp32 rmsnorm weights first, then the quantized tensors.
    int head_size = p->dim / p->n_heads;
    // first are the parameters that are kept in fp32 (the rmsnorm (1D) weights)
    float* fptr = (float*) ptr; // cast our pointer to float*
    w->rms_att_weight = fptr;
    fptr += p->n_layers * p->dim;
    w->rms_ffn_weight = fptr;
    fptr += p->n_layers * p->dim;
    w->rms_final_weight = fptr;
    fptr += p->dim;

    // now read all the quantized weights
    ptr = (void*)fptr; // now cast the pointer back to void*
    w->q_tokens = init_quantized_tensors(&ptr, 1, p->vocab_size * p->dim);
    // dequantize token embedding table once up front, so embedding lookups in
    // the forward pass are plain float reads
    w->token_embedding_table = malloc(p->vocab_size * p->dim * sizeof(float));
    dequantize(w->q_tokens, w->token_embedding_table, p->vocab_size * p->dim);

    w->wq = init_quantized_tensors(&ptr, p->n_layers, p->dim * (p->n_heads * head_size));
    w->wk = init_quantized_tensors(&ptr, p->n_layers, p->dim * (p->n_kv_heads * head_size));
    w->wv = init_quantized_tensors(&ptr, p->n_layers, p->dim * (p->n_kv_heads * head_size));
    w->wo = init_quantized_tensors(&ptr, p->n_layers, (p->n_heads * head_size) * p->dim);

    w->w1 = init_quantized_tensors(&ptr, p->n_layers, p->dim * p->hidden_dim);
    w->w2 = init_quantized_tensors(&ptr, p->n_layers, p->hidden_dim * p->dim);
    w->w3 = init_quantized_tensors(&ptr, p->n_layers, p->dim * p->hidden_dim);

    // classifier either aliases the token embeddings (shared) or is its own tensor
    w->wcls = shared_classifier ? w->q_tokens : init_quantized_tensors(&ptr, 1, p->dim * p->vocab_size);
}
+ }
218
+
219
void read_checkpoint(char* checkpoint, Config* config, TransformerWeights* weights,
                     int* fd, float** data, ssize_t* file_size) {
    // Validate the version-2 checkpoint header via stdio, then mmap the whole
    // file read-only and hand the weight region to memory_map_weights.
    // Outputs: fills config/weights, and fd/data/file_size for later cleanup.
    FILE *file = fopen(checkpoint, "rb");
    if (!file) { fprintf(stderr, "Couldn't open file %s\n", checkpoint); exit(EXIT_FAILURE); }
    // read in magic number (uint32), has to be 0x616b3432, i.e. "ak42" in ASCII
    uint32_t magic_number;
    if (fread(&magic_number, sizeof(uint32_t), 1, file) != 1) { exit(EXIT_FAILURE); }
    if (magic_number != 0x616b3432) { fprintf(stderr, "Bad magic number\n"); exit(EXIT_FAILURE); }
    // read in the version number (uint32), has to be 2
    int version;
    if (fread(&version, sizeof(int), 1, file) != 1) { exit(EXIT_FAILURE); }
    if (version != 2) { fprintf(stderr, "Bad version %d, need version 2\n", version); exit(EXIT_FAILURE); }
    int header_size = 256; // the header size for version 2 in bytes
    // read in the Config
    if (fread(config, sizeof(Config), 1, file) != 1) { exit(EXIT_FAILURE); }
    // read in flags
    uint8_t shared_classifier; // a byte to indicate if the classifier is shared
    if (fread(&shared_classifier, sizeof(uint8_t), 1, file) != 1) { exit(EXIT_FAILURE); }
    int group_size; // the group size used in quantization
    if (fread(&group_size, sizeof(int), 1, file) != 1) { exit(EXIT_FAILURE); }
    GS = group_size; // set as global, as it will be used in many places
    // figure out the file size
    // NOTE(review): fseek/ftell return values are unchecked here — a failure
    // would propagate a -1 file_size into mmap; confirm acceptable for this tool
    fseek(file, 0, SEEK_END); // move file pointer to end of file
    *file_size = ftell(file); // get the file size, in bytes
    fclose(file);
    // memory map the Transformer weights into the data pointer
    *fd = open(checkpoint, O_RDONLY); // open in read only mode
    if (*fd == -1) { fprintf(stderr, "open failed!\n"); exit(EXIT_FAILURE); }
    *data = mmap(NULL, *file_size, PROT_READ, MAP_PRIVATE, *fd, 0);
    if (*data == MAP_FAILED) { fprintf(stderr, "mmap failed!\n"); exit(EXIT_FAILURE); }
    void* weights_ptr = ((char*)*data) + header_size; // skip header bytes. char is 1 byte
    memory_map_weights(weights, config, weights_ptr, shared_classifier);
}
+ }
252
+
253
// Initialize a Transformer from a model checkpoint file: loads the Config and
// the quantized weights (the sibling read_checkpoint mmaps the file and checks
// the "ak42" magic and version 2), then allocates the activation buffers.
// t: struct to populate; checkpoint_path: path to the .bin model file.
// Exits the process on any I/O or allocation failure (inside the callees).
void build_transformer(Transformer *t, char* checkpoint_path) {
    // read in the Config and the Weights from the checkpoint
    read_checkpoint(checkpoint_path, &t->config, &t->weights, &t->fd, &t->data, &t->file_size);
    // allocate the RunState buffers
    malloc_run_state(&t->state, &t->config);
}
259
+
260
// Release everything owned by a Transformer: the QuantizedTensor weight
// wrappers, the mmap'd checkpoint and its file descriptor, and the RunState.
void free_transformer(Transformer* t) {
    // free QuantizedTensors
    free(t->weights.q_tokens);
    free(t->weights.token_embedding_table);
    free(t->weights.wq);
    free(t->weights.wk);
    free(t->weights.wv);
    free(t->weights.wo);
    free(t->weights.w1);
    free(t->weights.w2);
    free(t->weights.w3);
    // wcls aliases q_tokens when the classifier is shared with the token
    // embedding — only free it separately to avoid a double free
    if(t->weights.wcls != t->weights.q_tokens) { free(t->weights.wcls); }
    // close the memory mapping
    if (t->data != MAP_FAILED) { munmap(t->data, t->file_size); }
    if (t->fd != -1) { close(t->fd); }
    // free the RunState buffers
    free_run_state(&t->state);
}
278
+
279
+ // ----------------------------------------------------------------------------
280
+ // neural net blocks; the dynamics of the Transformer
281
+
282
+ void rmsnorm(float* o, float* x, float* weight, int size) {
283
+ // calculate sum of squares
284
+ float ss = 0.0f;
285
+ for (int j = 0; j < size; j++) {
286
+ ss += x[j] * x[j];
287
+ }
288
+ ss /= size;
289
+ ss += 1e-5f;
290
+ ss = 1.0f / sqrtf(ss);
291
+ // normalize and scale
292
+ for (int j = 0; j < size; j++) {
293
+ o[j] = weight[j] * (ss * x[j]);
294
+ }
295
+ }
296
+
297
+ void softmax(float* x, int size) {
298
+ // find max value (for numerical stability)
299
+ float max_val = x[0];
300
+ for (int i = 1; i < size; i++) {
301
+ if (x[i] > max_val) {
302
+ max_val = x[i];
303
+ }
304
+ }
305
+ // exp and sum
306
+ float sum = 0.0f;
307
+ for (int i = 0; i < size; i++) {
308
+ x[i] = expf(x[i] - max_val);
309
+ sum += x[i];
310
+ }
311
+ // normalize
312
+ for (int i = 0; i < size; i++) {
313
+ x[i] /= sum;
314
+ }
315
+ }
316
+
317
// Quantized matrix-vector multiply: W (d,n) @ x (n,) -> xout (d,).
// Both inputs are quantized: int8 values in groups of GS sharing one float
// scale factor each. Each group's dot product is accumulated in int32, then
// rescaled by the product of the two group scales and added to the float sum.
// Note: the loop bound `j <= n - GS` processes only full groups, so n is
// expected to be a multiple of GS (any remainder would be silently dropped).
void matmul(float* xout, QuantizedTensor *x, QuantizedTensor *w, int n, int d) {
    // W (d,n) @ x (n,) -> xout (d,)
    // by far the most amount of time is spent inside this little function
    // inputs to this function are both quantized

    int i;
    #pragma omp parallel for private(i)
    for (i = 0; i < d; i++) {

        float val = 0.0f;
        int32_t ival = 0;
        int in = i * n; // start of row i inside w->q

        // do the matmul in groups of GS
        int j;
        for (j = 0; j <= n - GS; j += GS) {
            for (int k = 0; k < GS; k++) {
                ival += ((int32_t) x->q[j + k]) * ((int32_t) w->q[in + j + k]);
            }
            // rescale this group's integer dot product into the float sum
            val += ((float) ival) * w->s[(in + j) / GS] * x->s[j / GS];
            ival = 0;
        }

        xout[i] = val;
    }
}
343
+
344
// Run one forward pass of the Transformer for a single token at position pos
// and return a pointer to the logits over the vocabulary (owned by RunState —
// valid until the next call). Activations are float; weights are quantized,
// so activations are re-quantized (quantize) immediately before each matmul.
// Uses the kv cache, so within one sequence pos must increase monotonically.
float* forward(Transformer* transformer, int token, int pos) {

    // a few convenience variables
    Config* p = &transformer->config;
    TransformerWeights* w = &transformer->weights;
    RunState* s = &transformer->state;
    float *x = s->x;
    int dim = p->dim;
    int kv_dim = (p->dim * p->n_kv_heads) / p->n_heads;
    int kv_mul = p->n_heads / p->n_kv_heads; // integer multiplier of the kv sharing in multiquery
    int hidden_dim = p->hidden_dim;
    int head_size = dim / p->n_heads;

    // copy the token embedding into x
    memcpy(x, w->token_embedding_table + token*dim, dim * sizeof(float));

    // forward all the layers
    for(int l = 0; l < p->n_layers; l++) {

        // attention rmsnorm
        rmsnorm(s->xb, x, w->rms_att_weight + l*dim, dim);

        // qkv matmuls for this position
        quantize(&s->xq, s->xb, dim);
        matmul(s->q, &s->xq, w->wq + l, dim, dim);
        matmul(s->k, &s->xq, w->wk + l, dim, kv_dim);
        matmul(s->v, &s->xq, w->wv + l, dim, kv_dim);

        // RoPE relative positional encoding: complex-valued rotate q and k in each head
        for (int i = 0; i < dim; i+=2) {
            int head_dim = i % head_size;
            float freq = 1.0f / powf(10000.0f, head_dim / (float)head_size);
            float val = pos * freq;
            float fcr = cosf(val);
            float fci = sinf(val);
            int rotn = i < kv_dim ? 2 : 1; // how many vectors? 2 = q & k, 1 = q only
            for (int v = 0; v < rotn; v++) {
                float* vec = v == 0 ? s->q : s->k; // the vector to rotate (query or key)
                float v0 = vec[i];
                float v1 = vec[i+1];
                vec[i] = v0 * fcr - v1 * fci;
                vec[i+1] = v0 * fci + v1 * fcr;
            }
        }

        // save key,value at this time step (pos) to our kv cache
        int loff = l * p->seq_len * kv_dim; // kv cache layer offset for convenience
        float* key_cache_row = s->key_cache + loff + pos * kv_dim;
        float* value_cache_row = s->value_cache + loff + pos * kv_dim;
        memcpy(key_cache_row, s->k, kv_dim * sizeof(*key_cache_row));
        memcpy(value_cache_row, s->v, kv_dim * sizeof(*value_cache_row));

        // multihead attention. iterate over all heads
        int h;
        #pragma omp parallel for private(h)
        for (h = 0; h < p->n_heads; h++) {
            // get the query vector for this head
            float* q = s->q + h * head_size;
            // attention scores for this head
            float* att = s->att + h * p->seq_len;
            // iterate over all timesteps, including the current one
            for (int t = 0; t <= pos; t++) {
                // get the key vector for this head and at this timestep
                // (h / kv_mul maps a query head onto its shared kv head — GQA)
                float* k = s->key_cache + loff + t * kv_dim + (h / kv_mul) * head_size;
                // calculate the attention score as the dot product of q and k
                float score = 0.0f;
                for (int i = 0; i < head_size; i++) {
                    score += q[i] * k[i];
                }
                score /= sqrtf(head_size);
                // save the score to the attention buffer
                att[t] = score;
            }

            // softmax the scores to get attention weights, from 0..pos inclusively
            softmax(att, pos + 1);

            // weighted sum of the values, store back into xb
            float* xb = s->xb + h * head_size;
            memset(xb, 0, head_size * sizeof(float));
            for (int t = 0; t <= pos; t++) {
                // get the value vector for this head and at this timestep
                float* v = s->value_cache + loff + t * kv_dim + (h / kv_mul) * head_size;
                // get the attention weight for this timestep
                float a = att[t];
                // accumulate the weighted value into xb
                for (int i = 0; i < head_size; i++) {
                    xb[i] += a * v[i];
                }
            }
        }

        // final matmul to get the output of the attention
        quantize(&s->xq, s->xb, dim);
        matmul(s->xb2, &s->xq, w->wo + l, dim, dim);

        // residual connection back into x
        for (int i = 0; i < dim; i++) {
            x[i] += s->xb2[i];
        }

        // ffn rmsnorm
        rmsnorm(s->xb, x, w->rms_ffn_weight + l*dim, dim);

        // Now for FFN in PyTorch we have: self.w2(F.silu(self.w1(x)) * self.w3(x))
        // first calculate self.w1(x) and self.w3(x)
        quantize(&s->xq, s->xb, dim);
        matmul(s->hb, &s->xq, w->w1 + l, dim, hidden_dim);
        matmul(s->hb2, &s->xq, w->w3 + l, dim, hidden_dim);

        // SwiGLU non-linearity
        for (int i = 0; i < hidden_dim; i++) {
            float val = s->hb[i];
            // silu(x)=x*σ(x), where σ(x) is the logistic sigmoid
            val *= (1.0f / (1.0f + expf(-val)));
            // elementwise multiply with w3(x)
            val *= s->hb2[i];
            s->hb[i] = val;
        }

        // final matmul to get the output of the ffn
        quantize(&s->hq, s->hb, hidden_dim);
        matmul(s->xb, &s->hq, w->w2 + l, hidden_dim, dim);

        // residual connection
        for (int i = 0; i < dim; i++) {
            x[i] += s->xb[i];
        }
    }

    // final rmsnorm
    rmsnorm(x, x, w->rms_final_weight, dim);

    // classifier into logits
    quantize(&s->xq, x, dim);
    matmul(s->logits, &s->xq, w->wcls, dim, p->vocab_size);
    return s->logits;
}
482
+
483
+ // ----------------------------------------------------------------------------
484
+ // The Byte Pair Encoding (BPE) Tokenizer that translates strings <-> tokens
485
+
486
// A vocabulary string paired with its token id; used to build a sorted array
// for binary-search lookups during encoding.
typedef struct {
    char *str; // points into Tokenizer.vocab (not owned by this struct)
    int id;    // token id in the vocabulary
} TokenIndex;
490
+
491
// Sentencepiece-style BPE tokenizer state, loaded from tokenizer.bin.
typedef struct {
    char** vocab;             // vocab_size heap strings, one per token
    float* vocab_scores;      // per-token merge score used by BPE
    TokenIndex *sorted_vocab; // lazily built in encode(), sorted by string
    int vocab_size;           // number of tokens (passed in; not in the file)
    unsigned int max_token_length; // longest token string in bytes (from file)
    unsigned char byte_pieces[512]; // stores all single-byte strings
} Tokenizer;
499
+
500
+ int compare_tokens(const void *a, const void *b) {
501
+ return strcmp(((TokenIndex*)a)->str, ((TokenIndex*)b)->str);
502
+ }
503
+
504
+ void build_tokenizer(Tokenizer* t, char* tokenizer_path, int vocab_size) {
505
+ // i should have written the vocab_size into the tokenizer file... sigh
506
+ t->vocab_size = vocab_size;
507
+ // malloc space to hold the scores and the strings
508
+ t->vocab = (char**)malloc(vocab_size * sizeof(char*));
509
+ t->vocab_scores = (float*)malloc(vocab_size * sizeof(float));
510
+ t->sorted_vocab = NULL; // initialized lazily
511
+ for (int i = 0; i < 256; i++) {
512
+ t->byte_pieces[i * 2] = (unsigned char)i;
513
+ t->byte_pieces[i * 2 + 1] = '\0';
514
+ }
515
+ // read in the file
516
+ FILE *file = fopen(tokenizer_path, "rb");
517
+ if (!file) { fprintf(stderr, "couldn't load %s\n", tokenizer_path); exit(EXIT_FAILURE); }
518
+ if (fread(&t->max_token_length, sizeof(int), 1, file) != 1) { fprintf(stderr, "failed read\n"); exit(EXIT_FAILURE); }
519
+ int len;
520
+ for (int i = 0; i < vocab_size; i++) {
521
+ if (fread(t->vocab_scores + i, sizeof(float), 1, file) != 1) { fprintf(stderr, "failed read\n"); exit(EXIT_FAILURE);}
522
+ if (fread(&len, sizeof(int), 1, file) != 1) { fprintf(stderr, "failed read\n"); exit(EXIT_FAILURE); }
523
+ t->vocab[i] = (char *)malloc(len + 1);
524
+ if (fread(t->vocab[i], len, 1, file) != 1) { fprintf(stderr, "failed read\n"); exit(EXIT_FAILURE); }
525
+ t->vocab[i][len] = '\0'; // add the string terminating token
526
+ }
527
+ fclose(file);
528
+ }
529
+
530
+ void free_tokenizer(Tokenizer* t) {
531
+ for (int i = 0; i < t->vocab_size; i++) { free(t->vocab[i]); }
532
+ free(t->vocab);
533
+ free(t->vocab_scores);
534
+ free(t->sorted_vocab);
535
+ }
536
+
537
+ char* decode(Tokenizer* t, int prev_token, int token) {
538
+ char *piece = t->vocab[token];
539
+ // following BOS (1) token, sentencepiece decoder strips any leading whitespace (see PR #89)
540
+ if (prev_token == 1 && piece[0] == ' ') { piece++; }
541
+ // careful, some tokens designate raw bytes, and look like e.g. '<0x01>'
542
+ // parse this and convert and return the actual byte
543
+ unsigned char byte_val;
544
+ if (sscanf(piece, "<0x%02hhX>", &byte_val) == 1) {
545
+ piece = (char*)t->byte_pieces + byte_val * 2;
546
+ }
547
+ return piece;
548
+ }
549
+
550
// Print piece to stdout, suppressing single-byte pieces that are neither
// printable nor whitespace (raw-byte tokens can decode to control codes,
// backspace, and other bytes we don't want on a terminal).
void safe_printf(char *piece) {
    if (piece == NULL || piece[0] == '\0') {
        return; // nothing to print
    }
    if (piece[1] == '\0') {
        // exactly one byte: only emit it if it's printable or whitespace
        unsigned char b = (unsigned char)piece[0];
        if (!isprint(b) && !isspace(b)) {
            return; // bad byte, don't print it
        }
    }
    printf("%s", piece);
}
563
+
564
+ int str_lookup(char *str, TokenIndex *sorted_vocab, int vocab_size) {
565
+ // efficiently find the perfect match for str in vocab, return its index or -1 if not found
566
+ TokenIndex tok = { .str = str }; // acts as the key to search for
567
+ TokenIndex *res = bsearch(&tok, sorted_vocab, vocab_size, sizeof(TokenIndex), compare_tokens);
568
+ return res != NULL ? res->id : -1;
569
+ }
570
+
571
// BPE-encode the string text into the caller-preallocated tokens[] array;
// *n_tokens receives the count written. bos != 0 prepends BOS (=1);
// eos != 0 appends EOS (=2). Callers size tokens[] as strlen(text)+3
// (+3 covers the dummy-prefix token, BOS, and EOS).
// Two phases: (1) split the input into UTF-8 codepoints (falling back to
// per-byte tokens), (2) greedily merge the best-scoring adjacent pair until
// no merge exists in the vocabulary.
void encode(Tokenizer* t, char *text, int8_t bos, int8_t eos, int *tokens, int *n_tokens) {
    // encode the string text (input) into an upper-bound preallocated tokens[] array
    // bos != 0 means prepend the BOS token (=1), eos != 0 means append the EOS token (=2)
    if (text == NULL) { fprintf(stderr, "cannot encode NULL text\n"); exit(EXIT_FAILURE); }

    if (t->sorted_vocab == NULL) {
        // lazily malloc and sort the vocabulary
        t->sorted_vocab = malloc(t->vocab_size * sizeof(TokenIndex));
        for (int i = 0; i < t->vocab_size; i++) {
            t->sorted_vocab[i].str = t->vocab[i];
            t->sorted_vocab[i].id = i;
        }
        qsort(t->sorted_vocab, t->vocab_size, sizeof(TokenIndex), compare_tokens);
    }

    // create a temporary buffer that will store merge candidates of always two consecutive tokens
    // *2 for concat, +1 for null terminator +2 for UTF8 (in case max_token_length is 1)
    char* str_buffer = malloc((t->max_token_length*2 +1 +2) * sizeof(char));
    size_t str_len = 0;

    // start at 0 tokens
    *n_tokens = 0;

    // add optional BOS (=1) token, if desired
    if (bos) tokens[(*n_tokens)++] = 1;

    // add_dummy_prefix is true by default
    // so prepend a dummy prefix token to the input string, but only if text != ""
    // TODO: pretty sure this isn't correct in the general case but I don't have the
    // energy to read more of the sentencepiece code to figure out what it's doing
    if (text[0] != '\0') {
        int dummy_prefix = str_lookup(" ", t->sorted_vocab, t->vocab_size);
        tokens[(*n_tokens)++] = dummy_prefix;
    }

    // Okay UTF-8 time. This will get messy. Here is the reference from Wikipedia:
    // Code point ↔ UTF-8 conversion
    // First code point	Last code point	Byte 1	Byte 2	Byte 3	Byte 4
    // U+0000	U+007F	    0xxxxxxx
    // U+0080	U+07FF	    110xxxxx	10xxxxxx
    // U+0800	U+FFFF	    1110xxxx	10xxxxxx	10xxxxxx
    // U+10000	U+10FFFF    11110xxx	10xxxxxx	10xxxxxx	10xxxxxx

    // process the raw (UTF-8) byte sequence of the input string
    for (char *c = text; *c != '\0'; c++) {

        // reset buffer if the current byte is ASCII or a leading byte
        // 0xC0 is 11000000, so (*c & 0xC0) keeps the first 2 bits and zeros the rest
        // 0x80 is 10000000
        // in UTF-8, all continuation bytes start with "10" in first two bits
        // so in English this is: "if this byte is not a continuation byte"
        if ((*c & 0xC0) != 0x80) {
            // this byte must be either a leading byte (11...) or an ASCII char (0x...)
            // => reset our location, as we're starting a new UTF-8 codepoint
            str_len = 0;
        }

        // append the current byte to the buffer
        str_buffer[str_len++] = *c; // ++ is post-increment, incremented after this line
        str_buffer[str_len] = '\0';

        // while the next character is a continuation byte, continue appending
        // but if there are too many of them, just stop to avoid overruning str_buffer size.
        if ((*(c+1) & 0xC0) == 0x80 && str_len < 4) {
            continue;
        }

        // ok c+1 is not a continuation byte, so we've read in a full codepoint
        int id = str_lookup(str_buffer, t->sorted_vocab, t->vocab_size);

        if (id != -1) {
            // we found this codepoint in vocab, add it as a token
            tokens[(*n_tokens)++] = id;
        } else {
            // byte_fallback encoding: just encode each byte as a token
            // +3 is here because the first 3 vocab elements are <unk>, <s>, </s>
            // so the individual bytes only start at index 3
            for (int i=0; i < str_len; i++) {
                tokens[(*n_tokens)++] = (unsigned char)str_buffer[i] + 3;
            }
        }
        str_len = 0; // protect against a sequence of stray UTF8 continuation bytes
    }

    // merge the best consecutive pair each iteration, according the scores in vocab_scores
    while (1) {
        float best_score = -1e10;
        int best_id = -1;
        int best_idx = -1;

        for (int i=0; i < (*n_tokens-1); i++) {
            // check if we can merge the pair (tokens[i], tokens[i+1])
            sprintf(str_buffer, "%s%s", t->vocab[tokens[i]], t->vocab[tokens[i+1]]);
            int id = str_lookup(str_buffer, t->sorted_vocab, t->vocab_size);
            if (id != -1 && t->vocab_scores[id] > best_score) {
                // this merge pair exists in vocab! record its score and position
                best_score = t->vocab_scores[id];
                best_id = id;
                best_idx = i;
            }
        }

        if (best_idx == -1) {
            break; // we couldn't find any more pairs to merge, so we're done
        }

        // merge the consecutive pair (best_idx, best_idx+1) into new token best_id
        tokens[best_idx] = best_id;
        // delete token at position best_idx+1, shift the entire sequence back 1
        for (int i = best_idx+1; i < (*n_tokens-1); i++) {
            tokens[i] = tokens[i+1];
        }
        (*n_tokens)--; // token length decreased
    }

    // add optional EOS (=2) token, if desired
    if (eos) tokens[(*n_tokens)++] = 2;

    free(str_buffer);
}
691
+
692
+ // ----------------------------------------------------------------------------
693
+ // The Sampler, which takes logits and returns a sampled token
694
+ // sampling can be done in a few ways: greedy argmax, sampling, top-p sampling
695
+
696
// struct used when sorting probabilities during top-p sampling
typedef struct {
    float prob; // probability mass of this candidate token
    int index;  // the token's index in the vocabulary
} ProbIndex;
700
+
701
// Sampling state and hyperparameters for choosing the next token.
typedef struct {
    int vocab_size;               // number of logits per step
    ProbIndex* probindex;         // buffer used in top-p sampling
    float temperature;            // 0.0 = greedy argmax; >0 scales the logits
    float topp;                   // nucleus threshold; <=0 or >=1 disables top-p
    unsigned long long rng_state; // xorshift PRNG state, seeded at build time
} Sampler;
708
+
709
// Greedy sampling: return the index of the largest value in
// probabilities[0..n-1] (first occurrence wins on ties).
int sample_argmax(float* probabilities, int n) {
    int best = 0;
    float best_p = probabilities[0];
    for (int idx = 1; idx < n; idx++) {
        if (probabilities[idx] > best_p) {
            best_p = probabilities[idx];
            best = idx;
        }
    }
    return best;
}
721
+
722
// Sample an index from a categorical distribution (entries must sum to 1!).
// coin is a uniform random number in [0, 1), usually from random_f32().
int sample_mult(float* probabilities, int n, float coin) {
    float cumulative = 0.0f;
    int idx = 0;
    while (idx < n) {
        cumulative += probabilities[idx];
        if (coin < cumulative) {
            return idx;
        }
        idx++;
    }
    // floating-point rounding can leave coin >= the accumulated sum
    return n - 1;
}
734
+
735
+ int compare(const void* a, const void* b) {
736
+ ProbIndex* a_ = (ProbIndex*) a;
737
+ ProbIndex* b_ = (ProbIndex*) b;
738
+ if (a_->prob > b_->prob) return -1;
739
+ if (a_->prob < b_->prob) return 1;
740
+ return 0;
741
+ }
742
+
743
+ int sample_topp(float* probabilities, int n, float topp, ProbIndex* probindex, float coin) {
744
+ // top-p sampling (or "nucleus sampling") samples from the smallest set of
745
+ // tokens that exceed probability topp. This way we never sample tokens that
746
+ // have very low probabilities and are less likely to go "off the rails".
747
+ // coin is a random number in [0, 1), usually from random_f32()
748
+
749
+ int n0 = 0;
750
+ // quicksort indices in descending order of probabilities
751
+ // values smaller than (1 - topp) / (n - 1) cannot be part of the result
752
+ // so for efficiency we crop these out as candidates before sorting
753
+ const float cutoff = (1.0f - topp) / (n - 1);
754
+ for (int i = 0; i < n; i++) {
755
+ if (probabilities[i] >= cutoff) {
756
+ probindex[n0].index = i;
757
+ probindex[n0].prob = probabilities[i];
758
+ n0++;
759
+ }
760
+ }
761
+ qsort(probindex, n0, sizeof(ProbIndex), compare);
762
+
763
+ // truncate the list where cumulative probability exceeds topp
764
+ float cumulative_prob = 0.0f;
765
+ int last_idx = n0 - 1; // in case of rounding errors consider all elements
766
+ for (int i = 0; i < n0; i++) {
767
+ cumulative_prob += probindex[i].prob;
768
+ if (cumulative_prob > topp) {
769
+ last_idx = i;
770
+ break; // we've exceeded topp by including last_idx
771
+ }
772
+ }
773
+
774
+ // sample from the truncated list
775
+ float r = coin * cumulative_prob;
776
+ float cdf = 0.0f;
777
+ for (int i = 0; i <= last_idx; i++) {
778
+ cdf += probindex[i].prob;
779
+ if (r < cdf) {
780
+ return probindex[i].index;
781
+ }
782
+ }
783
+ return probindex[last_idx].index; // in case of rounding errors
784
+ }
785
+
786
+ void build_sampler(Sampler* sampler, int vocab_size, float temperature, float topp, unsigned long long rng_seed) {
787
+ sampler->vocab_size = vocab_size;
788
+ sampler->temperature = temperature;
789
+ sampler->topp = topp;
790
+ sampler->rng_state = rng_seed;
791
+ // buffer only used with nucleus sampling; may not need but it's ~small
792
+ sampler->probindex = malloc(sampler->vocab_size * sizeof(ProbIndex));
793
+ }
794
+
795
// Release the Sampler's top-p scratch buffer (its only owned allocation).
void free_sampler(Sampler* sampler) {
    free(sampler->probindex);
}
798
+
799
// xorshift* PRNG: https://en.wikipedia.org/wiki/Xorshift#xorshift.2A
// Advances *state and returns the top 32 bits of the scrambled product.
unsigned int random_u32(unsigned long long *state) {
    unsigned long long s = *state;
    s ^= s >> 12;
    s ^= s << 25;
    s ^= s >> 27;
    *state = s;
    return (unsigned int)((s * 0x2545F4914F6CDD1Dull) >> 32);
}

// Uniform random float32 in [0, 1): 24 random bits over 2^24 (float mantissa width).
float random_f32(unsigned long long *state) {
    return (random_u32(state) >> 8) / 16777216.0f;
}
809
+
810
+ int sample(Sampler* sampler, float* logits) {
811
+ // sample the token given the logits and some hyperparameters
812
+ int next;
813
+ if (sampler->temperature == 0.0f) {
814
+ // greedy argmax sampling: take the token with the highest probability
815
+ next = sample_argmax(logits, sampler->vocab_size);
816
+ } else {
817
+ // apply the temperature to the logits
818
+ for (int q=0; q<sampler->vocab_size; q++) { logits[q] /= sampler->temperature; }
819
+ // apply softmax to the logits to get the probabilities for next token
820
+ softmax(logits, sampler->vocab_size);
821
+ // flip a (float) coin (this is our source of entropy for sampling)
822
+ float coin = random_f32(&sampler->rng_state);
823
+ // we sample from this distribution to get the next token
824
+ if (sampler->topp <= 0 || sampler->topp >= 1) {
825
+ // simply sample from the predicted probability distribution
826
+ next = sample_mult(logits, sampler->vocab_size, coin);
827
+ } else {
828
+ // top-p (nucleus) sampling, clamping the least likely tokens to zero
829
+ next = sample_topp(logits, sampler->vocab_size, sampler->topp, sampler->probindex, coin);
830
+ }
831
+ }
832
+ return next;
833
+ }
834
+
835
+ // ----------------------------------------------------------------------------
836
+ // utilities: time
837
+
838
+ long time_in_ms() {
839
+ // return time in milliseconds, for benchmarking the model speed
840
+ struct timespec time;
841
+ clock_gettime(CLOCK_REALTIME, &time);
842
+ return time.tv_sec * 1000 + time.tv_nsec / 1000000;
843
+ }
844
+
845
+ // ----------------------------------------------------------------------------
846
+ // generation loop
847
+
848
// Autoregressive text generation: encode the prompt, feed tokens through the
// transformer one position at a time (forcing prompt tokens first, then
// sampling), stream decoded pieces to stdout, and report tokens/sec on stderr.
// Stops at `steps` positions or when a BOS (=1) token is produced.
void generate(Transformer *transformer, Tokenizer *tokenizer, Sampler *sampler, char *prompt, int steps) {
    char *empty_prompt = "";
    if (prompt == NULL) { prompt = empty_prompt; }

    // encode the (string) prompt into tokens sequence
    int num_prompt_tokens = 0;
    int* prompt_tokens = (int*)malloc((strlen(prompt)+3) * sizeof(int)); // +3 for '\0', ?BOS, ?EOS
    encode(tokenizer, prompt, 1, 0, prompt_tokens, &num_prompt_tokens);
    if (num_prompt_tokens < 1) {
        fprintf(stderr, "something is wrong, expected at least 1 prompt token\n");
        exit(EXIT_FAILURE);
    }

    // start the main loop
    long start = 0;  // used to time our code, only initialized after first iteration
    int next;        // will store the next token in the sequence
    int token = prompt_tokens[0]; // kick off with the first token in the prompt
    int pos = 0;     // position in the sequence
    while (pos < steps) {

        // forward the transformer to get logits for the next token
        float* logits = forward(transformer, token, pos);

        // advance the state state machine
        if (pos < num_prompt_tokens - 1) {
            // if we are still processing the input prompt, force the next prompt token
            next = prompt_tokens[pos + 1];
        } else {
            // otherwise sample the next token from the logits
            next = sample(sampler, logits);
        }
        pos++;

        // data-dependent terminating condition: the BOS (=1) token delimits sequences
        if (next == 1) { break; }

        // print the token as string, decode it with the Tokenizer object
        char* piece = decode(tokenizer, token, next);
        safe_printf(piece); // same as printf("%s", piece), but skips "unsafe" bytes
        fflush(stdout);
        token = next;

        // init the timer here because the first iteration can be slower
        if (start == 0) { start = time_in_ms(); }
    }
    printf("\n");

    // report achieved tok/s (pos-1 because the timer starts after first iteration)
    if (pos > 1) {
        long end = time_in_ms();
        fprintf(stderr, "achieved tok/s: %f\n", (pos-1) / (double)(end-start)*1000);
    }

    free(prompt_tokens);
}
903
+
904
// Print guide as a prompt, then read one line from stdin into buffer
// (at most bufsize-1 chars), stripping the trailing '\n' if present.
void read_stdin(const char* guide, char* buffer, size_t bufsize) {
    printf("%s", guide);
    if (fgets(buffer, bufsize, stdin) == NULL) {
        return; // EOF or read error: leave buffer untouched, as before
    }
    size_t len = strlen(buffer);
    if (len > 0 && buffer[len - 1] == '\n') {
        buffer[len - 1] = '\0'; // strip newline
    }
}
914
+
915
+ // ----------------------------------------------------------------------------
916
+ // chat loop
917
+ // I manually inspected the tokens for a few chat conversations compared to
918
+ // python reference and that seemed ok, but this was not thoroughly tested and
919
+ // is not safely implemented, it's more a proof of concept atm.
920
+
921
+ void chat(Transformer *transformer, Tokenizer *tokenizer, Sampler *sampler,
922
+ char *cli_user_prompt, char *cli_system_prompt, int steps) {
923
+
924
+ // buffers for reading the system prompt and user prompt from stdin
925
+ // you'll notice they are soomewhat haphazardly and unsafely set atm
926
+ char system_prompt[512];
927
+ char user_prompt[512];
928
+ char rendered_prompt[1152];
929
+ int num_prompt_tokens = 0;
930
+ int* prompt_tokens = (int*)malloc(1152 * sizeof(int));
931
+ int user_idx;
932
+
933
+ // start the main loop
934
+ int8_t user_turn = 1; // user starts
935
+ int next; // will store the next token in the sequence
936
+ int token; // stores the current token to feed into the transformer
937
+ int prev_token;
938
+ int pos = 0; // position in the sequence
939
+ while (pos < steps) {
940
+
941
+ // when it is the user's turn to contribute tokens to the dialog...
942
+ if (user_turn) {
943
+ // get the (optional) system prompt at position 0
944
+ if (pos == 0) {
945
+ // at position 0, the user can also contribute a system prompt
946
+ if (cli_system_prompt == NULL) {
947
+ // system prompt was not passed in, attempt to get it from stdin
948
+ read_stdin("Enter system prompt (optional): ", system_prompt, sizeof(system_prompt));
949
+ } else {
950
+ // system prompt was passed in, use it
951
+ strcpy(system_prompt, cli_system_prompt);
952
+ }
953
+ }
954
+ // get the user prompt
955
+ if (pos == 0 && cli_user_prompt != NULL) {
956
+ // user prompt for position 0 was passed in, use it
957
+ strcpy(user_prompt, cli_user_prompt);
958
+ } else {
959
+ // otherwise get user prompt from stdin
960
+ read_stdin("User: ", user_prompt, sizeof(user_prompt));
961
+ }
962
+ // render user/system prompts into the Llama 2 Chat schema
963
+ if (pos == 0 && system_prompt[0] != '\0') {
964
+ char system_template[] = "[INST] <<SYS>>\n%s\n<</SYS>>\n\n%s [/INST]";
965
+ sprintf(rendered_prompt, system_template, system_prompt, user_prompt);
966
+ } else {
967
+ char user_template[] = "[INST] %s [/INST]";
968
+ sprintf(rendered_prompt, user_template, user_prompt);
969
+ }
970
+ // encode the rendered prompt into tokens
971
+ encode(tokenizer, rendered_prompt, 1, 0, prompt_tokens, &num_prompt_tokens);
972
+ user_idx = 0; // reset the user index
973
+ user_turn = 0;
974
+ printf("Assistant: ");
975
+ }
976
+
977
+ // determine the token to pass into the transformer next
978
+ if (user_idx < num_prompt_tokens) {
979
+ // if we are still processing the input prompt, force the next prompt token
980
+ token = prompt_tokens[user_idx++];
981
+ } else {
982
+ // otherwise use the next token sampled from previous turn
983
+ token = next;
984
+ }
985
+ // EOS (=2) token ends the Assistant turn
986
+ if (token == 2) { user_turn = 1; }
987
+
988
+ // forward the transformer to get logits for the next token
989
+ float* logits = forward(transformer, token, pos);
990
+ next = sample(sampler, logits);
991
+ pos++;
992
+
993
+ if (user_idx >= num_prompt_tokens && next != 2) {
994
+ // the Assistant is responding, so print its output
995
+ char* piece = decode(tokenizer, token, next);
996
+ safe_printf(piece); // same as printf("%s", piece), but skips "unsafe" bytes
997
+ fflush(stdout);
998
+ }
999
+ if (next == 2) { printf("\n"); }
1000
+ }
1001
+ printf("\n");
1002
+ free(prompt_tokens);
1003
+ }
1004
+
1005
+
1006
+ // ----------------------------------------------------------------------------
1007
+ // CLI, include only if not testing
1008
+ #ifndef TESTING
1009
+
1010
// Print the CLI usage text to stderr and terminate with a failure exit code.
// Called for any malformed command line; never returns.
void error_usage() {
    fprintf(stderr, "Usage: run <checkpoint> [options]\n");
    fprintf(stderr, "Example: run model.bin -n 256 -i \"Once upon a time\"\n");
    fprintf(stderr, "Options:\n");
    fprintf(stderr, " -t <float> temperature in [0,inf], default 1.0\n");
    fprintf(stderr, " -p <float> p value in top-p (nucleus) sampling in [0,1] default 0.9\n");
    fprintf(stderr, " -s <int> random seed, default time(NULL)\n");
    fprintf(stderr, " -n <int> number of steps to run for, default 256. 0 = max_seq_len\n");
    fprintf(stderr, " -i <string> input prompt\n");
    fprintf(stderr, " -z <string> optional path to custom tokenizer\n");
    fprintf(stderr, " -m <string> mode: generate|chat, default: generate\n");
    fprintf(stderr, " -y <string> (optional) system prompt in chat mode\n");
    exit(EXIT_FAILURE);
}
1024
+
1025
// CLI entry point: parse flags, construct the Transformer, Tokenizer, and
// Sampler, then dispatch to generate() or chat(). See error_usage() for flags.
int main(int argc, char *argv[]) {

    // default parameters
    char *checkpoint_path = NULL;  // e.g. out/model.bin
    char *tokenizer_path = "tokenizer.bin";
    float temperature = 1.0f;   // 0.0 = greedy deterministic. 1.0 = original. don't set higher
    float topp = 0.9f;          // top-p in nucleus sampling. 1.0 = off. 0.9 works well, but slower
    int steps = 256;            // number of steps to run for
    char *prompt = NULL;        // prompt string
    unsigned long long rng_seed = 0; // seed rng with time by default
    char *mode = "generate";    // generate|chat
    char *system_prompt = NULL; // the (optional) system prompt to use in chat mode

    // poor man's C argparse so we can override the defaults above from the command line
    if (argc >= 2) { checkpoint_path = argv[1]; } else { error_usage(); }
    for (int i = 2; i < argc; i+=2) {
        // do some basic validation
        if (i + 1 >= argc) { error_usage(); } // must have arg after flag
        if (argv[i][0] != '-') { error_usage(); } // must start with dash
        if (strlen(argv[i]) != 2) { error_usage(); } // must be -x (one dash, one letter)
        // read in the args
        if (argv[i][1] == 't') { temperature = atof(argv[i + 1]); }
        else if (argv[i][1] == 'p') { topp = atof(argv[i + 1]); }
        // NOTE(review): atoi truncates seeds beyond INT_MAX; strtoull would
        // preserve the full unsigned long long range — consider switching
        else if (argv[i][1] == 's') { rng_seed = atoi(argv[i + 1]); }
        else if (argv[i][1] == 'n') { steps = atoi(argv[i + 1]); }
        else if (argv[i][1] == 'i') { prompt = argv[i + 1]; }
        else if (argv[i][1] == 'z') { tokenizer_path = argv[i + 1]; }
        else if (argv[i][1] == 'm') { mode = argv[i + 1]; }
        else if (argv[i][1] == 'y') { system_prompt = argv[i + 1]; }
        else { error_usage(); }
    }

    // parameter validation/overrides
    // (rng_seed is unsigned, so <= 0 only matches 0, i.e. "not set")
    if (rng_seed <= 0) rng_seed = (unsigned int)time(NULL);
    if (temperature < 0.0) temperature = 0.0;
    if (topp < 0.0 || 1.0 < topp) topp = 0.9;
    if (steps < 0) steps = 0;

    // build the Transformer via the model .bin file
    Transformer transformer;
    build_transformer(&transformer, checkpoint_path);
    if (steps == 0 || steps > transformer.config.seq_len) steps = transformer.config.seq_len; // override to ~max length

    // build the Tokenizer via the tokenizer .bin file
    Tokenizer tokenizer;
    build_tokenizer(&tokenizer, tokenizer_path, transformer.config.vocab_size);

    // build the Sampler
    Sampler sampler;
    build_sampler(&sampler, transformer.config.vocab_size, temperature, topp, rng_seed);

    // run!
    if (strcmp(mode, "generate") == 0) {
        generate(&transformer, &tokenizer, &sampler, prompt, steps);
    } else if (strcmp(mode, "chat") == 0) {
        chat(&transformer, &tokenizer, &sampler, prompt, system_prompt, steps);
    } else {
        fprintf(stderr, "unknown mode: %s\n", mode);
        error_usage();
    }

    // memory and file handles cleanup
    free_sampler(&sampler);
    free_tokenizer(&tokenizer);
    free_transformer(&transformer);
    return 0;
}
1092
+ #endif
Version 2/42M_finetuned/test.c ADDED
@@ -0,0 +1,84 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #define TESTING
2
+ #include "run.c"
3
+
4
// Compare two ints and abort the test run with a diagnostic if they differ.
// Used as the single failure path for all tokenizer assertions below.
void assert_eq(int a, int b) {
    if (a != b) {
        // fix: report to stderr (not stdout) so the failure is visible even
        // when stdout is redirected or buffered
        fprintf(stderr, "Assertion failed: %d != %d\n", a, b);
        exit(EXIT_FAILURE);
    }
}
10
+
11
+ void test_prompt_encoding(Tokenizer* tokenizer, char* prompt, int* expected_tokens, int num_expected_tokens) {
12
+ // encode
13
+ int* prompt_tokens = (int*)malloc((strlen(prompt)+3) * sizeof(int));
14
+ int num_prompt_tokens = 0; // the total number of prompt tokens
15
+ encode(tokenizer, prompt, 1, 0, prompt_tokens, &num_prompt_tokens);
16
+
17
+ #if VERBOSITY == 1
18
+ // print maybe
19
+ printf("expected tokens:\n");
20
+ for (int i = 0; i < num_expected_tokens; i++) printf("%d ", expected_tokens[i]);
21
+ printf("\n");
22
+ printf("actual tokens:\n");
23
+ for (int i = 0; i < num_prompt_tokens; i++) printf("%d ", prompt_tokens[i]);
24
+ printf("\n");
25
+ #endif
26
+
27
+ // verify
28
+ assert_eq(num_prompt_tokens, num_expected_tokens);
29
+ for (int i = 0; i < num_prompt_tokens; i++) {
30
+ assert_eq(prompt_tokens[i], expected_tokens[i]);
31
+ }
32
+
33
+ #if VERBOSITY == 1
34
+ printf("OK\n");
35
+ printf("---\n");
36
+ #endif
37
+ free(prompt_tokens);
38
+ }
39
+
40
// Run the Tokenizer against a set of golden prompt/token-id fixtures.
// The expected ids are the ground-truth output of Meta's Python tokenizer,
// so any divergence indicates a bug in the C encode() implementation.
void test_prompt_encodings() {
    // let's verify that the Tokenizer works as expected

    char *tokenizer_path = "tokenizer.bin"; // read from the current working directory
    int vocab_size = 32000;                 // Llama 2 SentencePiece vocabulary size
    Tokenizer tokenizer;
    build_tokenizer(&tokenizer, tokenizer_path, vocab_size);

    // test 0 (test the empty string) (I added this as a simple case)
    // token 1 is BOS, which encode() prepends even when the prompt is empty
    char *prompt0 = "";
    int expected_tokens0[] = {1};
    test_prompt_encoding(&tokenizer, prompt0, expected_tokens0, sizeof(expected_tokens0) / sizeof(int));

    // the tests below are taken from the Meta Llama 2 repo example code
    // https://github.com/facebookresearch/llama/blob/main/example_text_completion.py
    // and the expected tokens come from me breaking in the debugger in Python

    // test 1
    char *prompt = "I believe the meaning of life is";
    int expected_tokens[] = {1, 306, 4658, 278, 6593, 310, 2834, 338};
    test_prompt_encoding(&tokenizer, prompt, expected_tokens, sizeof(expected_tokens) / sizeof(int));

    // test 2: note the trailing space — it must survive encoding (id 29871)
    char* prompt2 = "Simply put, the theory of relativity states that ";
    int expected_tokens2[] = {1, 3439, 17632, 1925, 29892, 278, 6368, 310, 14215, 537, 5922, 393, 29871};
    test_prompt_encoding(&tokenizer, prompt2, expected_tokens2, sizeof(expected_tokens2) / sizeof(int));

    // test 3: exercises newlines (id 13) and multi-space runs
    char* prompt3 = "A brief message congratulating the team on the launch:\n\n        Hi everyone,\n\n        I just ";
    int expected_tokens3[] = {1, 319, 11473, 2643, 378, 629, 271, 18099, 278, 3815, 373, 278, 6826, 29901, 13, 13, 4706, 6324, 14332, 29892, 13, 13, 4706, 306, 925, 29871};
    test_prompt_encoding(&tokenizer, prompt3, expected_tokens3, sizeof(expected_tokens3) / sizeof(int));

    // test 4: non-ASCII input (é) exercises the UTF-8/byte-fallback path
    char* prompt4 = "Translate English to French:\n\n        sea otter => loutre de mer\n        peppermint => menthe poivrée\n        plush girafe => girafe peluche\n        cheese =>";
    int expected_tokens4[] = {1, 4103, 9632, 4223, 304, 5176, 29901, 13, 13, 4706, 7205, 4932, 357, 1149, 301, 449, 276, 316, 2778, 13, 4706, 1236, 407, 837, 524, 1149, 6042, 354, 772, 440, 29878, 1318, 13, 4706, 715, 1878, 330, 3055, 1725, 1149, 330, 3055, 1725, 4639, 28754, 13, 4706, 923, 968, 1149};
    test_prompt_encoding(&tokenizer, prompt4, expected_tokens4, sizeof(expected_tokens4) / sizeof(int));

    // memory and file handles cleanup
    free_tokenizer(&tokenizer);
}
80
+
81
// Test-runner entry point: exercise the tokenizer fixtures; any failure
// exits via assert_eq before the success banner is printed.
int main(int argc, char *argv[]) {
    (void)argc; // no command-line options
    (void)argv;
    test_prompt_encodings();
    printf("ALL OK\n");
    return 0;
}
Version 2/42M_finetuned/tokenizer.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3ec2f99b3631bb74d9a839ede430c8b9fc192f06ca470dbca7585c33077908af
3
+ size 259440
Version 2/42M_finetuned/win.c ADDED
@@ -0,0 +1,180 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #include "win.h"
2
+ #include <errno.h>
3
+ #include <io.h>
4
+
5
+ #ifndef FILE_MAP_EXECUTE
6
+ #define FILE_MAP_EXECUTE 0x0020
7
+ #endif /* FILE_MAP_EXECUTE */
8
+
9
// Translate a Win32 error code into an errno-style value.
// TODO: the real mapping (e.g. ERROR_ACCESS_DENIED -> EACCES) is still
// unimplemented; any nonzero Win32 code is currently passed through as-is.
static int __map_mman_error(const uint32_t err, const int deferr)
{
    (void)deferr; // reserved for the eventual real mapping
    return (err == 0) ? 0 : (int)err;
}
16
+
17
+ static uint32_t __map_mmap_prot_page(const int prot)
18
+ {
19
+ uint32_t protect = 0;
20
+
21
+ if (prot == PROT_NONE)
22
+ return protect;
23
+
24
+ if ((prot & PROT_EXEC) != 0)
25
+ {
26
+ protect = ((prot & PROT_WRITE) != 0) ?
27
+ PAGE_EXECUTE_READWRITE : PAGE_EXECUTE_READ;
28
+ }
29
+ else
30
+ {
31
+ protect = ((prot & PROT_WRITE) != 0) ?
32
+ PAGE_READWRITE : PAGE_READONLY;
33
+ }
34
+
35
+ return protect;
36
+ }
37
+
38
+ static uint32_t __map_mmap_prot_file(const int prot)
39
+ {
40
+ uint32_t desiredAccess = 0;
41
+
42
+ if (prot == PROT_NONE)
43
+ return desiredAccess;
44
+
45
+ if ((prot & PROT_READ) != 0)
46
+ desiredAccess |= FILE_MAP_READ;
47
+ if ((prot & PROT_WRITE) != 0)
48
+ desiredAccess |= FILE_MAP_WRITE;
49
+ if ((prot & PROT_EXEC) != 0)
50
+ desiredAccess |= FILE_MAP_EXECUTE;
51
+
52
+ return desiredAccess;
53
+ }
54
+
55
// Minimal POSIX mmap() shim over CreateFileMapping + MapViewOfFile.
// Supports file-backed and anonymous mappings; `addr` is accepted for
// signature compatibility but never honored (MAP_FIXED is rejected below).
// NOTE(review): MAP_PRIVATE is not given copy-on-write semantics here
// (that would need PAGE_WRITECOPY/FILE_MAP_COPY) — confirm callers only
// read through private mappings, as run.c's checkpoint loading does.
void* mmap(void *addr, size_t len, int prot, int flags, int fildes, ssize_t off)
{
    HANDLE fm, h;
    void * map = MAP_FAILED;

#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable: 4293) // shift count warnings for the 64->32 splits below
#endif

    // split the 64-bit offset into the low/high 32-bit halves the Win32 API wants
    const uint32_t dwFileOffsetLow = (uint32_t)(off & 0xFFFFFFFFL);
    const uint32_t dwFileOffsetHigh = (uint32_t)((off >> 32) & 0xFFFFFFFFL);
    const uint32_t protect = __map_mmap_prot_page(prot);
    const uint32_t desiredAccess = __map_mmap_prot_file(prot);

    // the mapping object must cover the view's end, i.e. offset + length
    const ssize_t maxSize = off + (ssize_t)len;

    const uint32_t dwMaxSizeLow = (uint32_t)(maxSize & 0xFFFFFFFFL);
    const uint32_t dwMaxSizeHigh = (uint32_t)((maxSize >> 32) & 0xFFFFFFFFL);

#ifdef _MSC_VER
#pragma warning(pop)
#endif

    errno = 0;

    if (len == 0
        /* Unsupported flag combinations */
        || (flags & MAP_FIXED) != 0
        /* Unsupported protection combinations */
        || prot == PROT_EXEC)
    {
        errno = EINVAL;
        return MAP_FAILED;
    }

    // anonymous mappings use INVALID_HANDLE_VALUE (pagefile-backed section);
    // otherwise translate the CRT file descriptor to its OS handle
    h = ((flags & MAP_ANONYMOUS) == 0) ?
                    (HANDLE)_get_osfhandle(fildes) : INVALID_HANDLE_VALUE;

    if ((flags & MAP_ANONYMOUS) == 0 && h == INVALID_HANDLE_VALUE)
    {
        errno = EBADF;
        return MAP_FAILED;
    }

    fm = CreateFileMapping(h, NULL, protect, dwMaxSizeHigh, dwMaxSizeLow, NULL);

    if (fm == NULL)
    {
        errno = __map_mman_error(GetLastError(), EPERM);
        return MAP_FAILED;
    }

    map = MapViewOfFile(fm, desiredAccess, dwFileOffsetHigh, dwFileOffsetLow, len);

    // closing the section handle is safe: the mapped view keeps the
    // underlying section alive until UnmapViewOfFile
    CloseHandle(fm);

    if (map == NULL)
    {
        errno = __map_mman_error(GetLastError(), EPERM);
        return MAP_FAILED;
    }

    return map;
}
120
+
121
+ int munmap(void *addr, size_t len)
122
+ {
123
+ if (UnmapViewOfFile(addr))
124
+ return 0;
125
+
126
+ errno = __map_mman_error(GetLastError(), EPERM);
127
+
128
+ return -1;
129
+ }
130
+
131
+ int mprotect(void *addr, size_t len, int prot)
132
+ {
133
+ uint32_t newProtect = __map_mmap_prot_page(prot);
134
+ uint32_t oldProtect = 0;
135
+
136
+ if (VirtualProtect(addr, len, newProtect, &oldProtect))
137
+ return 0;
138
+
139
+ errno = __map_mman_error(GetLastError(), EPERM);
140
+
141
+ return -1;
142
+ }
143
+
144
+ int msync(void *addr, size_t len, int flags)
145
+ {
146
+ if (FlushViewOfFile(addr, len))
147
+ return 0;
148
+
149
+ errno = __map_mman_error(GetLastError(), EPERM);
150
+
151
+ return -1;
152
+ }
153
+
154
+ int mlock(const void *addr, size_t len)
155
+ {
156
+ if (VirtualLock((LPVOID)addr, len))
157
+ return 0;
158
+
159
+ errno = __map_mman_error(GetLastError(), EPERM);
160
+
161
+ return -1;
162
+ }
163
+
164
+ int munlock(const void *addr, size_t len)
165
+ {
166
+ if (VirtualUnlock((LPVOID)addr, len))
167
+ return 0;
168
+
169
+ errno = __map_mman_error(GetLastError(), EPERM);
170
+
171
+ return -1;
172
+ }
173
+
174
// Portable clock_gettime function for Windows
// `clk_id` is ignored, so CLOCK_REALTIME and any other id behave the same.
// NOTE(review): GetTickCount returns milliseconds since boot in 32 bits —
// it wraps after ~49.7 days and has ~10-16 ms granularity. Fine for the
// coarse tok/s timing in run.c, but GetTickCount64 or
// QueryPerformanceCounter would be needed for long-running/fine timing.
int clock_gettime(int clk_id, struct timespec *tp) {
    uint32_t ticks = GetTickCount();
    tp->tv_sec = ticks / 1000;              // whole seconds since boot
    tp->tv_nsec = (ticks % 1000) * 1000000; // leftover milliseconds -> ns
    return 0;                               // never reports failure
}
Version 2/42M_finetuned/win.h ADDED
@@ -0,0 +1,69 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#ifndef _WIN_H_
#define _WIN_H_

// Windows compatibility layer for the POSIX APIs used by run.c:
// mmap()-family memory mapping (adapted from mman-win32) and clock_gettime().

#define WIN32_LEAN_AND_MEAN // Exclude rarely-used stuff from Windows headers
#include <windows.h>
#include <time.h>
#include <stdint.h>

// NOTE(review): ssize_t is shimmed as a macro rather than a typedef; this
// collides if another header also defines ssize_t — confirm on MSVC/MinGW.
#define ssize_t int64_t
// 64-bit ftell so checkpoint files larger than 2 GiB report correct offsets
#define ftell _ftelli64

// Below code is originally from mman-win32
//
/*
 * sys/mman.h
 * mman-win32
 */

#ifndef _WIN32_WINNT // Allow use of features specific to Windows XP or later.
#define _WIN32_WINNT 0x0501 // Change this to the appropriate value to target other versions of Windows.
#endif

/* All the headers include this file. */
#ifndef _MSC_VER
#include <_mingw.h>
#endif

#include <sys/types.h>

#ifdef __cplusplus
extern "C" {
#endif

/* Protection bits for mmap/mprotect (values match POSIX). */
#define PROT_NONE       0
#define PROT_READ       1
#define PROT_WRITE      2
#define PROT_EXEC       4

/* mmap flags. MAP_FIXED is defined but rejected by this mmap port. */
#define MAP_FILE        0
#define MAP_SHARED      1
#define MAP_PRIVATE     2
#define MAP_TYPE        0xf
#define MAP_FIXED       0x10
#define MAP_ANONYMOUS   0x20
#define MAP_ANON        MAP_ANONYMOUS

#define MAP_FAILED      ((void *)-1)

/* Flags for msync. */
#define MS_ASYNC        1
#define MS_SYNC         2
#define MS_INVALIDATE   4

/* Flags for portable clock_gettime call. */
#define CLOCK_REALTIME  0

void*   mmap(void *addr, size_t len, int prot, int flags, int fildes, ssize_t off);
int     munmap(void *addr, size_t len);
int     mprotect(void *addr, size_t len, int prot);
int     msync(void *addr, size_t len, int flags);
int     mlock(const void *addr, size_t len);
int     munlock(const void *addr, size_t len);
int     clock_gettime(int clk_id, struct timespec *tp);

#ifdef __cplusplus
};
#endif

#endif /* _WIN_H_ */