ilintar committed
Commit 9cd68d5 · verified · 1 Parent(s): 81e86ae

Upload folder using huggingface_hub
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+problem-tensors-weights.gguf filter=lfs diff=lfs merge=lfs -text
problem-tensors-ids.gguf ADDED
Binary file (416 Bytes)

problem-tensors-norm.gguf ADDED
Binary file (98.5 kB)
problem-tensors-weights.gguf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:131199d20aea5cd72abb1a288e9615bf145a052bc62a390b0f2cc1f592d8dbf0
+size 519045280
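The weights file, by contrast, is tracked through Git LFS, so a checkout without `git lfs pull` leaves only this three-line pointer on disk, and `gguf_init_from_file` will fail on it. A quick standalone sanity check, sketched here on the fact that real GGUF files begin with the 4-byte magic `GGUF`:

#include <cstdio>
#include <cstring>

int main() {
    FILE * f = fopen("problem-tensors-weights.gguf", "rb");
    if (f == nullptr) {
        perror("fopen");
        return 1;
    }
    char magic[4] = {0};
    size_t n = fread(magic, 1, sizeof(magic), f);
    fclose(f);
    // an unfetched LFS pointer starts with "vers" instead of "GGUF"
    if (n != sizeof(magic) || memcmp(magic, "GGUF", 4) != 0) {
        fprintf(stderr, "not a GGUF file - run `git lfs pull` first?\n");
        return 1;
    }
    printf("GGUF magic OK\n");
    return 0;
}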
test_problematic_tensors.cpp ADDED
@@ -0,0 +1,132 @@
+#include "llama.h"
+#include "ggml.h"
+#include "ggml-alloc.h"
+#include "ggml-backend.h"
+#include "gguf.h"
+
+#include <cinttypes>
+#include <cmath>
+#include <cstdio>
+#include <vector>
+
+// Repro: run ggml_mul_mat_id on the CPU backend and on a GPU backend with the
+// tensors captured in the problem-tensors-*.gguf files, then compare results.
+int main() {
+    llama_log_set(nullptr, nullptr);
+    llama_backend_init();
+    ggml_backend_load_all_from_path("build/bin");
+
+    // no_alloc context: tensor metadata only, data is allocated by the backends
+    struct ggml_init_params params = {
+        /*.mem_size   =*/ 10 * ggml_tensor_overhead() + ggml_graph_overhead(),
+        /*.mem_buffer =*/ NULL,
+        /*.no_alloc   =*/ true,
+    };
+
+    ggml_context * gctx = ggml_init(params);
+    ggml_context * wctx = nullptr;
+    ggml_context * nctx = nullptr;
+    ggml_context * ictx = nullptr;
+    struct gguf_init_params wparams = {
+        /*.no_alloc =*/ false,
+        /*.ctx      =*/ &wctx,
+    };
+    struct gguf_init_params nparams = {
+        /*.no_alloc =*/ false,
+        /*.ctx      =*/ &nctx,
+    };
+    struct gguf_init_params iparams = {
+        /*.no_alloc =*/ false,
+        /*.ctx      =*/ &ictx,
+    };
+    gguf_context * wgctx = gguf_init_from_file("problem-tensors-weights.gguf", wparams);
+    gguf_context * ngctx = gguf_init_from_file("problem-tensors-norm.gguf", nparams);
+    gguf_context * igctx = gguf_init_from_file("problem-tensors-ids.gguf", iparams);
+    if (!wgctx || !ngctx || !igctx) {
+        fprintf(stderr, "failed to load the problem-tensors-*.gguf inputs\n");
+        return 1;
+    }
+
+    // the first tensor in each loaded context is the GGUF data blob;
+    // the actual payload tensor follows it
+    ggml_tensor * weights = ggml_get_next_tensor(wctx, ggml_get_first_tensor(wctx));
+    ggml_tensor * norm    = ggml_get_next_tensor(nctx, ggml_get_first_tensor(nctx));
+    ggml_tensor * ids     = ggml_get_next_tensor(ictx, ggml_get_first_tensor(ictx));
+
+    // reference computation on the CPU backend
+    ggml_context * gctx_cpu_comp = ggml_init(params);
+    struct ggml_cgraph * gf_cpu  = ggml_new_graph(gctx_cpu_comp);
+    ggml_tensor * mul_mat_id_cpu = ggml_mul_mat_id(gctx_cpu_comp, weights, norm, ids);
+    ggml_build_forward_expand(gf_cpu, mul_mat_id_cpu);
+
+    ggml_backend_t cpu = ggml_backend_init_by_type(GGML_BACKEND_DEVICE_TYPE_CPU, nullptr);
+
+    ggml_gallocr_t allocr = ggml_gallocr_new(ggml_backend_get_default_buffer_type(cpu));
+    ggml_gallocr_alloc_graph(allocr, gf_cpu);
+
+    ggml_backend_graph_compute(cpu, gf_cpu);
+
+    double sum_cpu = 0.0;
+    float  max_cpu = ((float *) mul_mat_id_cpu->data)[0];
+    float  min_cpu = ((float *) mul_mat_id_cpu->data)[0];
+    for (int64_t i = 0; i < ggml_nelements(mul_mat_id_cpu); i++) {
+        float elt = ((float *) mul_mat_id_cpu->data)[i];
+        sum_cpu += elt;
+        max_cpu = elt > max_cpu ? elt : max_cpu;
+        min_cpu = elt < min_cpu ? elt : min_cpu;
+    }
+    printf("\n CPU sum of matmul: %.8f, max: %.8f, min: %.8f, nelements: %" PRId64 "\n\n",
+           sum_cpu, max_cpu, min_cpu, ggml_nelements(mul_mat_id_cpu));
+
+    // same computation on the GPU backend: stage the inputs into backend tensors
+    ggml_tensor * w_cuda = ggml_new_tensor_4d(gctx, weights->type, weights->ne[0], weights->ne[1], weights->ne[2], weights->ne[3]);
+    ggml_tensor * n_cuda = ggml_new_tensor_4d(gctx, norm->type,    norm->ne[0],    norm->ne[1],    norm->ne[2],    norm->ne[3]);
+    ggml_tensor * i_cuda = ggml_new_tensor_4d(gctx, ids->type,     ids->ne[0],     ids->ne[1],     ids->ne[2],     ids->ne[3]);
+
+    ggml_backend_t cuda = ggml_backend_init_by_type(GGML_BACKEND_DEVICE_TYPE_GPU, nullptr);
+    if (cuda == nullptr) {
+        fprintf(stderr, "no GPU backend available\n");
+        return 1;
+    }
+    ggml_backend_alloc_ctx_tensors(gctx, cuda);
+
+    ggml_backend_tensor_set(w_cuda, weights->data, 0, ggml_nbytes(w_cuda));
+    ggml_backend_tensor_set(n_cuda, norm->data,    0, ggml_nbytes(n_cuda));
+    ggml_backend_tensor_set(i_cuda, ids->data,     0, ggml_nbytes(i_cuda));
+
+    ggml_context * gctx_cuda_comp = ggml_init(params);
+    struct ggml_cgraph * gf_cuda  = ggml_new_graph(gctx_cuda_comp);
+    ggml_tensor * mul_mat_id_cuda = ggml_mul_mat_id(gctx_cuda_comp, w_cuda, n_cuda, i_cuda);
+    ggml_build_forward_expand(gf_cuda, mul_mat_id_cuda);
+
+    ggml_gallocr_t cuda_allocr = ggml_gallocr_new(ggml_backend_get_default_buffer_type(cuda));
+    ggml_gallocr_alloc_graph(cuda_allocr, gf_cuda);
+
+    ggml_backend_graph_compute(cuda, gf_cuda);
+
+    // read the result back and compare it element-wise against the CPU reference
+    const size_t n_bytes = ggml_nbytes(mul_mat_id_cuda);
+    std::vector<float> vec(n_bytes / sizeof(float)); // size in elements, not bytes
+    ggml_backend_tensor_get(mul_mat_id_cuda, vec.data(), 0, n_bytes);
+
+    double  sum = 0.0;
+    float   max = vec[0];
+    float   min = vec[0];
+    float   maxdiff = 0.0f;
+    int64_t maxdiff_pos = -1;
+    for (int64_t i = 0; i < ggml_nelements(mul_mat_id_cuda); i++) {
+        float elt     = vec[i];
+        float org_elt = ((float *) mul_mat_id_cpu->data)[i];
+        float diff    = std::fabs(elt - org_elt);
+        if (diff > maxdiff) {
+            maxdiff     = diff;
+            maxdiff_pos = i;
+        }
+        sum += elt;
+        max = elt > max ? elt : max;
+        min = elt < min ? elt : min;
+    }
+    printf("\n CUDA sum of matmul: %.8f, max: %.8f, min: %.8f, max diff: %.8f at pos %" PRId64 ", nelements: %" PRId64 "\n\n",
+           sum, max, min, maxdiff, maxdiff_pos, ggml_nelements(mul_mat_id_cuda));
+    return 0;
+}
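The test flags mismatches by the maximum absolute element difference. For reference, ggml's own test-backend-ops compares backends with a normalized mean squared error instead, which is less sensitive to the scale of individual outputs. A sketch of that metric as a hypothetical helper (not part of this commit; the exact threshold used upstream varies by op and type):

#include <cstddef>

// Normalized MSE between a reference buffer `a` and a test buffer `b`:
// sum((a-b)^2) / sum(a^2). Small values mean the backends agree up to
// the expected rounding differences.
static double nmse(const float * a, const float * b, size_t n) {
    double err = 0.0;
    double ref = 0.0;
    for (size_t i = 0; i < n; i++) {
        const double d = (double) a[i] - (double) b[i];
        err += d * d;
        ref += (double) a[i] * (double) a[i];
    }
    return ref > 0.0 ? err / ref : 0.0;
}

Applied to the buffers above, one would call nmse((const float *) mul_mat_id_cpu->data, vec.data(), (size_t) ggml_nelements(mul_mat_id_cuda)) and compare the result against a small tolerance rather than judging by the absolute diff alone.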