Jamie@TitanML committed on
Commit
e1fc4da
·
1 Parent(s): 06a905b

Upload ggml_cache.json with huggingface_hub

Browse files
Files changed (1) hide show
  1. ggml_cache.json +140 -0
ggml_cache.json ADDED
@@ -0,0 +1,140 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
{
  "llama-2-7b": {
    "repo": "TheBloke/Llama-2-7B-GGML",
    "filename": "llama-2-7b.ggmlv3.q4_0.bin",
    "model_type": "llama",
    "tokenizer": "llama-local"
  },
  "llama-2-7b-chat": {
    "repo": "TheBloke/Llama-2-7B-Chat-GGML",
    "filename": "llama-2-7b-chat.ggmlv3.q4_0.bin",
    "model_type": "llama",
    "tokenizer": "llama-local"
  },
  "llama-2-7b-qlora": {
    "repo": "TheBloke/llama-2-7B-Guanaco-QLoRA-GGML",
    "filename": "llama-2-7b-guanaco-qlora.ggmlv3.q4_0.bin",
    "model_type": "llama",
    "tokenizer": "llama-local"
  },
  "llama-2-13b": {
    "repo": "TheBloke/Llama-2-13B-GGML",
    "filename": "llama-2-13b.ggmlv3.q4_0.bin",
    "model_type": "llama",
    "tokenizer": "llama-local"
  },
  "llama-2-13b-chat": {
    "repo": "TheBloke/Llama-2-13B-Chat-GGML",
    "filename": "llama-2-13b-chat.ggmlv3.q4_0.bin",
    "model_type": "llama",
    "tokenizer": "llama-local"
  },
  "llama-2-70b": {
    "repo": "TheBloke/Llama-2-70B-GGML",
    "filename": "llama-2-70b.ggmlv3.q4_0.bin",
    "model_type": "llama",
    "tokenizer": "llama-local"
  },
  "llama-2-70b-chat": {
    "repo": "TheBloke/Llama-2-70B-Chat-GGML",
    "filename": "llama-2-70b-chat.ggmlv3.q4_0.bin",
    "model_type": "llama",
    "tokenizer": "llama-local"
  },
  "llama-2-13b-qlora": {
    "repo": "TheBloke/llama-2-13B-Guanaco-QLoRA-GGML",
    "filename": "llama-2-13b-guanaco-qlora.ggmlv3.q4_0.bin",
    "model_type": "llama",
    "tokenizer": "llama-local"
  },
  "falcon-7b": {
    "repo": "TheBloke/falcon-7b-instruct-GGML",
    "filename": "falcon-7b-instruct.ggccv1.q4_0.bin",
    "model_type": "falcon",
    "tokenizer": "tiiuae/falcon-7b"
  },
  "falcon-40b": {
    "repo": "TheBloke/falcon-40b-instruct-GGML",
    "filename": "falcon-40b-instruct.ggccv1.q4_0.bin",
    "model_type": "falcon",
    "tokenizer": "tiiuae/falcon-7b"
  },
  "gpt2": {
    "repo": "marella/gpt-2-ggml",
    "filename": "ggml-model.bin",
    "model_type": "gpt2",
    "tokenizer": "gpt2"
  },
  "llama-2-70b-chat-q2": {
    "repo": "TheBloke/Llama-2-70B-Chat-GGML",
    "filename": "llama-2-70b-chat.ggmlv3.q2_K.bin",
    "model_type": "llama",
    "tokenizer": "llama-local"
  },
  "llama-2-70b-q2": {
    "repo": "TheBloke/Llama-2-70B-GGML",
    "filename": "llama-2-70b.ggmlv3.q2_K.bin",
    "model_type": "llama",
    "tokenizer": "llama-local"
  },
  "llama-2-70b-q3_k_s": {
    "repo": "TheBloke/Llama-2-70B-GGML",
    "filename": "llama-2-70b.ggmlv3.q3_K_S.bin",
    "model_type": "llama",
    "tokenizer": "llama-local"
  },
  "llama-2-70b-q3_k_m": {
    "repo": "TheBloke/Llama-2-70B-GGML",
    "filename": "llama-2-70b.ggmlv3.q3_K_M.bin",
    "model_type": "llama",
    "tokenizer": "llama-local"
  },
  "llama-2-70b-q3_k_l": {
    "repo": "TheBloke/Llama-2-70B-GGML",
    "filename": "llama-2-70b.ggmlv3.q3_K_L.bin",
    "model_type": "llama",
    "tokenizer": "llama-local"
  },
  "llama-2-70b-q4_0": {
    "repo": "TheBloke/Llama-2-70B-GGML",
    "filename": "llama-2-70b.ggmlv3.q4_0.bin",
    "model_type": "llama",
    "tokenizer": "llama-local"
  },
  "llama-2-70b-q4_1": {
    "repo": "TheBloke/Llama-2-70B-GGML",
    "filename": "llama-2-70b.ggmlv3.q4_1.bin",
    "model_type": "llama",
    "tokenizer": "llama-local"
  },
  "llama-2-70b-q4_k_m": {
    "repo": "TheBloke/Llama-2-70B-GGML",
    "filename": "llama-2-70b.ggmlv3.q4_K_M.bin",
    "model_type": "llama",
    "tokenizer": "llama-local"
  },
  "llama-2-70b-q4_k_s": {
    "repo": "TheBloke/Llama-2-70B-GGML",
    "filename": "llama-2-70b.ggmlv3.q4_K_S.bin",
    "model_type": "llama",
    "tokenizer": "llama-local"
  },
  "llama-2-70b-q5_0": {
    "repo": "TheBloke/Llama-2-70B-GGML",
    "filename": "llama-2-70b.ggmlv3.q5_0.bin",
    "model_type": "llama",
    "tokenizer": "llama-local"
  },
  "llama-2-70b-q5_k_m": {
    "repo": "TheBloke/Llama-2-70B-GGML",
    "filename": "llama-2-70b.ggmlv3.q5_K_M.bin",
    "model_type": "llama",
    "tokenizer": "llama-local"
  },
  "llama-2-70b-q5_k_s": {
    "repo": "TheBloke/Llama-2-70B-GGML",
    "filename": "llama-2-70b.ggmlv3.q5_K_S.bin",
    "model_type": "llama",
    "tokenizer": "llama-local"
  }
}