Spaces:
Sleeping
Sleeping
add lab model
Browse filesAdd lab model spaces
app.py
CHANGED
|
@@ -20,6 +20,8 @@ import base64
|
|
| 20 |
import json
|
| 21 |
import os
|
| 22 |
import re
|
|
|
|
|
|
|
| 23 |
|
| 24 |
@st.cache_resource
|
| 25 |
def load_sp():
|
|
@@ -146,12 +148,19 @@ def load_model(model_name, file_name):
|
|
| 146 |
model = fasttext.load_model(model_path)
|
| 147 |
return model
|
| 148 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 149 |
|
| 150 |
model_1 = load_model(constants.MODEL_NAME, "model_v1.bin")
|
| 151 |
model_2 = load_model(constants.MODEL_NAME, "model_v2.bin")
|
| 152 |
model_3 = load_model(constants.MODEL_NAME, "model_v3.bin")
|
| 153 |
openlid = load_model('laurievb/OpenLID', "model.bin")
|
| 154 |
nllb = load_model('facebook/fasttext-language-identification', "model.bin")
|
|
|
|
| 155 |
|
| 156 |
|
| 157 |
# @st.cache_resource
|
|
@@ -196,8 +205,10 @@ def compute(sentences, version = 'v3'):
|
|
| 196 |
model_choice = model_3
|
| 197 |
elif version == 'v2':
|
| 198 |
model_choice = model_2
|
| 199 |
-
|
| 200 |
model_choice = model_1
|
|
|
|
|
|
|
| 201 |
|
| 202 |
my_bar = st.progress(0, text=progress_text)
|
| 203 |
|
|
@@ -257,8 +268,8 @@ with tab1:
|
|
| 257 |
|
| 258 |
version = st.radio(
|
| 259 |
"Choose model",
|
| 260 |
-
["nllb-218", "openlid-201", "v1", "v2", "v3"],
|
| 261 |
-
captions=["NLLB", "OpenLID", "GlotLID version 1", "GlotLID version 2", "GlotLID version 3 (More languages, better quality data)"],
|
| 262 |
index = 4,
|
| 263 |
key = 'version_tab1',
|
| 264 |
horizontal = True
|
|
@@ -296,8 +307,8 @@ with tab2:
|
|
| 296 |
|
| 297 |
version = st.radio(
|
| 298 |
"Choose model",
|
| 299 |
-
["nllb-218", "openlid-201", "v1", "v2", "v3"],
|
| 300 |
-
captions=["NLLB", "OpenLID", "GlotLID version 1", "GlotLID version 2 (more data and languages)", "GlotLID version 3 (More languages, better quality data)"],
|
| 301 |
index = 4,
|
| 302 |
key = 'version_tab2',
|
| 303 |
horizontal = True
|
|
|
|
| 20 |
import json
|
| 21 |
import os
|
| 22 |
import re
|
| 23 |
+
import transformers
|
| 24 |
+
from transformers import pipeline
|
| 25 |
|
| 26 |
@st.cache_resource
|
| 27 |
def load_sp():
|
|
|
|
| 148 |
model = fasttext.load_model(model_path)
|
| 149 |
return model
|
| 150 |
|
| 151 |
+
@st.cache_resource
|
| 152 |
+
def load_model_pipeline(model_name, file_name):
|
| 153 |
+
model = pipeline("text-classification", model=model_name)
|
| 154 |
+
return model
|
| 155 |
+
|
| 156 |
+
|
| 157 |
|
| 158 |
model_1 = load_model(constants.MODEL_NAME, "model_v1.bin")
|
| 159 |
model_2 = load_model(constants.MODEL_NAME, "model_v2.bin")
|
| 160 |
model_3 = load_model(constants.MODEL_NAME, "model_v3.bin")
|
| 161 |
openlid = load_model('laurievb/OpenLID', "model.bin")
|
| 162 |
nllb = load_model('facebook/fasttext-language-identification', "model.bin")
|
| 163 |
+
za_lid = load_model_pipeline('dsfsi/za-lid-bert', "model.bin")
|
| 164 |
|
| 165 |
|
| 166 |
# @st.cache_resource
|
|
|
|
| 205 |
model_choice = model_3
|
| 206 |
elif version == 'v2':
|
| 207 |
model_choice = model_2
|
| 208 |
+
elif version == 'v1':
|
| 209 |
model_choice = model_1
|
| 210 |
+
else:
|
| 211 |
+
model_choice = za_lid
|
| 212 |
|
| 213 |
my_bar = st.progress(0, text=progress_text)
|
| 214 |
|
|
|
|
| 268 |
|
| 269 |
version = st.radio(
|
| 270 |
"Choose model",
|
| 271 |
+
["nllb-218", "openlid-201", "v1", "v2", "v3", "Za_LID"],
|
| 272 |
+
captions=["NLLB", "OpenLID", "GlotLID version 1", "GlotLID version 2", "GlotLID version 3 (More languages, better quality data)", "za BERT"],
|
| 273 |
index = 4,
|
| 274 |
key = 'version_tab1',
|
| 275 |
horizontal = True
|
|
|
|
| 307 |
|
| 308 |
version = st.radio(
|
| 309 |
"Choose model",
|
| 310 |
+
["nllb-218", "openlid-201", "v1", "v2", "v3", "Za_LID"],
|
| 311 |
+
captions=["NLLB", "OpenLID", "GlotLID version 1", "GlotLID version 2 (more data and languages)", "GlotLID version 3 (More languages, better quality data)", "ZA BERT"],
|
| 312 |
index = 4,
|
| 313 |
key = 'version_tab2',
|
| 314 |
horizontal = True
|