Spaces:
Running
Running
Commit
·
506e239
1
Parent(s):
619c9ac
Add dolma index
Browse files- app.py +2 -1
- constants.py +17 -6
app.py
CHANGED
|
@@ -151,9 +151,10 @@ with gr.Blocks() as demo:
|
|
| 151 |
gr.HTML(
|
| 152 |
'''<h1 text-align="center">Infini-gram: An Engine for n-gram / ∞-gram Language Modeling with Trillion-Token Corpora</h1>
|
| 153 |
|
| 154 |
-
<p style='font-size: 16px;'>This is an engine that processes n-gram / ∞-gram queries on massive text corpora. Please first select the corpus and the type of query, then enter your query and submit
|
| 155 |
<p style='font-size: 16px;'>The engine is developed by <a href="https://liujch1998.github.io">Jiacheng (Gary) Liu</a> and documented in our paper: <a href="https://huggingface.co/papers/2401.17377">Infini-gram: Scaling Unbounded n-gram Language Models to a Trillion Tokens</a>.</p>
|
| 156 |
<p style='font-size: 16px;'><b>API Endpoint:</b> If you'd like to issue batch queries to infini-gram, you may invoke our API endpoint. Please refer to the <a href="https://infini-gram.io/api_doc">API documentation</a>.</p>
|
|
|
|
| 157 |
'''
|
| 158 |
)
|
| 159 |
with gr.Row():
|
|
|
|
| 151 |
gr.HTML(
|
| 152 |
'''<h1 text-align="center">Infini-gram: An Engine for n-gram / ∞-gram Language Modeling with Trillion-Token Corpora</h1>
|
| 153 |
|
| 154 |
+
<p style='font-size: 16px;'>This is an engine that processes n-gram / ∞-gram queries on massive text corpora. Please first select the corpus and the type of query, then enter your query and submit.</p>
|
| 155 |
<p style='font-size: 16px;'>The engine is developed by <a href="https://liujch1998.github.io">Jiacheng (Gary) Liu</a> and documented in our paper: <a href="https://huggingface.co/papers/2401.17377">Infini-gram: Scaling Unbounded n-gram Language Models to a Trillion Tokens</a>.</p>
|
| 156 |
<p style='font-size: 16px;'><b>API Endpoint:</b> If you'd like to issue batch queries to infini-gram, you may invoke our API endpoint. Please refer to the <a href="https://infini-gram.io/api_doc">API documentation</a>.</p>
|
| 157 |
+
<p style='font-size: 16px;'><b>Note:</b> The query is <b>case-sensitive</b>. Your query will be tokenized with the Llama-2 tokenizer (unless otherwise specified).</p>
|
| 158 |
'''
|
| 159 |
)
|
| 160 |
with gr.Row():
|
constants.py
CHANGED
|
@@ -2,12 +2,23 @@ import os
|
|
| 2 |
|
| 3 |
# options
|
| 4 |
CORPUS_BY_DESC = {
|
| 5 |
-
'
|
| 6 |
-
'
|
| 7 |
-
'
|
| 8 |
-
'
|
| 9 |
-
'Pile-val (
|
| 10 |
-
'
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 11 |
}
|
| 12 |
CORPUS_DESCS = list(CORPUS_BY_DESC.keys())
|
| 13 |
ENGINE_BY_DESC = {
|
|
|
|
| 2 |
|
| 3 |
# options
|
| 4 |
CORPUS_BY_DESC = {
|
| 5 |
+
'Dolma (3.1T tokens)': 'v4_dolma-v1_6_llama',
|
| 6 |
+
'RedPajama (1.4T tokens)': 'v4_rpj_llama_s4',
|
| 7 |
+
'Pile-train (380B tokens)': 'v4_piletrain_llama',
|
| 8 |
+
'C4-train (200B tokens)': 'v4_c4train_llama',
|
| 9 |
+
'Pile-val (390M tokens)': 'v4_pileval_llama',
|
| 10 |
+
# 'Pile-val (GPT-2 tokenizer), 380M tokens': 'v4_pileval_gpt2',
|
| 11 |
+
# 'Dolma-v1.6-sample (OLMo tokenizer), 8.0B tokens': 'v4_dolmasample_olmo',
|
| 12 |
+
# 'Dolma-v1.6-sample (9.2B tokens)': 'v4_dolma-v1_6-sample_llama',
|
| 13 |
+
# 'Dolma-v1.6-wiki (4.3B tokens)': 'v4_dolma-v1_6-wiki_llama',
|
| 14 |
+
# 'Dolma-v1.6-books (5.8B tokens)': 'v4_dolma-v1_6-books_llama',
|
| 15 |
+
# 'Dolma-v1.6-pes2o (69B tokens)': 'v4_dolma-v1_6-pes2o_llama',
|
| 16 |
+
# 'Dolma-v1.6-reddit (89B tokens)': 'v4_dolma-v1_6-reddit_llama',
|
| 17 |
+
# 'Dolma-v1.6-c4 (200B tokens)': 'v4_dolma-v1_6-c4_llama',
|
| 18 |
+
# 'Dolma-v1.6-stack (420B tokens)': 'v4_dolma-v1_6-stack_llama',
|
| 19 |
+
# 'Dolma-v1.6-cc_en_head (660B tokens)': 'v4_dolma-v1_6-cc_en_head_llama',
|
| 20 |
+
# 'Dolma-v1.6-cc_en_middle (650B tokens)': 'v4_dolma-v1_6-cc_en_middle_llama',
|
| 21 |
+
# 'Dolma-v1.6-cc_en_tail (970B tokens)': 'v4_dolma-v1_6-cc_en_tail_llama',
|
| 22 |
}
|
| 23 |
CORPUS_DESCS = list(CORPUS_BY_DESC.keys())
|
| 24 |
ENGINE_BY_DESC = {
|