Update app.py
Browse files
app.py
CHANGED
|
@@ -6,13 +6,13 @@ from transformers import pipeline, AutoTokenizer, AutoModelForTokenClassificatio
|
|
| 6 |
# ================== КОНФИГУРАЦИЯ ==================
|
| 7 |
# Можно легко добавить новые модели
|
| 8 |
MODELS = {
|
| 9 |
-
"Davlan/xlm-roberta-
|
| 10 |
-
"Babelscape/wikineural-multilingual-ner"
|
| 11 |
-
"
|
| 12 |
}
|
| 13 |
|
| 14 |
# Выбранная модель по умолчанию
|
| 15 |
-
DEFAULT_MODEL = "
|
| 16 |
|
| 17 |
# Цвета для разных типов сущностей (для красивого отображения)
|
| 18 |
ENTITY_COLORS = {
|
|
@@ -29,7 +29,6 @@ ENTITY_COLORS = {
|
|
| 29 |
|
| 30 |
MAX_CHARS = 2000 # ограничение длины текста
|
| 31 |
|
| 32 |
-
# ================== ИНИЦИАЛИЗАЦИЯ ==================
|
| 33 |
def load_model(model_name):
|
| 34 |
"""Загрузка модели и токенизатора"""
|
| 35 |
try:
|
|
@@ -39,8 +38,8 @@ def load_model(model_name):
|
|
| 39 |
"ner",
|
| 40 |
model=model,
|
| 41 |
tokenizer=tokenizer,
|
| 42 |
-
aggregation_strategy="simple",
|
| 43 |
-
device=-1
|
| 44 |
)
|
| 45 |
return nlp_pipeline
|
| 46 |
except Exception as e:
|
|
@@ -55,7 +54,7 @@ except Exception as e:
|
|
| 55 |
pipe = None
|
| 56 |
current_model_name = None
|
| 57 |
|
| 58 |
-
|
| 59 |
def extract_entities(text, model_choice):
|
| 60 |
global pipe, current_model_name
|
| 61 |
|
|
|
|
| 6 |
# ================== КОНФИГУРАЦИЯ ==================
|
| 7 |
# Можно легко добавить новые модели
|
| 8 |
MODELS = {
|
| 9 |
+
"Davlan/xlm-roberta-base-ner-hrl",
|
| 10 |
+
"Babelscape/wikineural-multilingual-ner",
|
| 11 |
+
"CAMeL-Lab/bert-base-arabic-camelbert-mix-ner",
|
| 12 |
}
|
| 13 |
|
| 14 |
# Выбранная модель по умолчанию
|
| 15 |
+
DEFAULT_MODEL = "Davlan/xlm-roberta-base-ner-hrl"
|
| 16 |
|
| 17 |
# Цвета для разных типов сущностей (для красивого отображения)
|
| 18 |
ENTITY_COLORS = {
|
|
|
|
| 29 |
|
| 30 |
MAX_CHARS = 2000 # ограничение длины текста
|
| 31 |
|
|
|
|
| 32 |
def load_model(model_name):
|
| 33 |
"""Загрузка модели и токенизатора"""
|
| 34 |
try:
|
|
|
|
| 38 |
"ner",
|
| 39 |
model=model,
|
| 40 |
tokenizer=tokenizer,
|
| 41 |
+
aggregation_strategy="simple",
|
| 42 |
+
device=-1
|
| 43 |
)
|
| 44 |
return nlp_pipeline
|
| 45 |
except Exception as e:
|
|
|
|
| 54 |
pipe = None
|
| 55 |
current_model_name = None
|
| 56 |
|
| 57 |
+
|
| 58 |
def extract_entities(text, model_choice):
|
| 59 |
global pipe, current_model_name
|
| 60 |
|