Added the main part of the code.
app.py
CHANGED
@@ -3,15 +3,25 @@ import streamlit as st
 st.markdown("""### TL;DR: give me the keywords!
 Here you can get the keywords and topic of the article based on its title or abstract.""")
 
-st.markdown("<p style=\"text-align:center\"><img width=
-
-
-pipe = pipeline("ner", "Davlan/distilbert-base-multilingual-cased-ner-hrl")
+st.markdown("<p style=\"text-align:center\"><img width=700px src='https://c.tenor.com/IKt-6tAk9CUAAAAd/thats-a-lot-of-words-lots-of-words.gif'></p>", unsafe_allow_html=True)
+
+#from transformers import pipeline
+
+#pipe = pipeline("ner", "Davlan/distilbert-base-multilingual-cased-ner-hrl")
 
 #st.markdown("#### Title:")
 title = st.text_area("Title:")
 abstract = st.text_area("Abstract:")
 
+from utils import get_candidates
+import spacy
+
+# Really, we should look for a pipeline tuned to scientific text.
+# We will deal with that later, if there is time.
+main_nlp = spacy.load('en_core_web_sm')
+
+text = title + " " + abstract
+#text = preprocess(text)
+
+st.markdown(f"{get_candidates(text, main_nlp)}")
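For reference, the new flow can be exercised outside Streamlit. A minimal sketch, assuming en_core_web_sm is installed and utils.py (added below) is importable; the sample title and abstract are illustrative:

import spacy
from utils import get_candidates

nlp = spacy.load('en_core_web_sm')
title = "Attention Is All You Need"
abstract = "We propose a new simple network architecture based solely on attention mechanisms."
print(get_candidates(title + " " + abstract, nlp))
# e.g. ['attention', 'attention mechanisms', 'network architecture', ...];
# exact output depends on the spaCy model version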
utils.py
ADDED
@@ -0,0 +1,133 @@
import re
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics.pairwise import euclidean_distances
from scipy.special import softmax

+
def preprocess(strings):
|
| 8 |
+
"""
|
| 9 |
+
Заменить символы '\n' на пробелы и убрать лишние пробелы.
|
| 10 |
+
|
| 11 |
+
strings - список строк.
|
| 12 |
+
"""
|
| 13 |
+
|
| 14 |
+
for index in range(len(strings)):
|
| 15 |
+
strings[index] = strings[index].replace('\n', ' ')
|
| 16 |
+
strings[index] = re.sub(' +', ' ', strings[index])
|
| 17 |
+
|
| 18 |
+
return strings
|
| 19 |
+
|
| 20 |
+
|
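Note that preprocess expects a list and mutates it in place, so the call commented out in app.py (#text = preprocess(text)) would need a one-element list, e.g.:

text = preprocess(["A  title\nwith stray   whitespace"])[0]
# -> 'A title with stray whitespace'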
def get_candidates(text, nlp, min_df=0.0, ngram_range=(1, 3), max_words=None):
    """
    Get a list of at most max_words keyword candidates from text.

    text - the input text.
    nlp - the language-analysis pipeline (see spacy).
    min_df - the minimum document frequency of a term.
    ngram_range - the n-gram sizes allowed for a keyword.
    max_words - the maximum number of candidates returned.
    """

    # Start from the most basic set of n-grams.
    count = CountVectorizer(ngram_range=ngram_range,
                            stop_words="english",
                            min_df=min_df,
                            max_features=max_words).fit([text])
    candidates = count.get_feature_names_out()
    #print(candidates)

    # Post-process the resulting list.
    nlp_result = nlp(text)

    # Phrases that contain nouns.
    noun_phrases = set(chunk.text.strip().lower() for chunk in nlp_result.noun_chunks)
    #print(noun_phrases)

    # Nouns on their own.
    noun_lemmas = set()
    for token in nlp_result:
        if token.pos_ == "NOUN":
            noun_lemmas.add(token.lemma_)  # For a single word there is no point in keeping the inflected form.
    #print(noun_lemmas)

    nouns = set()
    for token in nlp_result:
        if token.pos_ == "NOUN" and not (token.text in noun_lemmas):
            nouns.add(token.text)
    #print(nouns)
    nouns = nouns.union(noun_lemmas)

    # The union of nouns and noun phrases.
    with_nouns = nouns.union(noun_phrases)

    # Keep only candidates that are nouns or noun phrases.
    candidates = list(filter(lambda candidate: candidate in with_nouns, candidates))

    return candidates

def get_embedding(texts, model, tokenizer, chunk_size=128):
    """
    Map a list of texts to embeddings, chunk_size texts at a time.
    """

    n_chunks = len(texts) // chunk_size + int(len(texts) % chunk_size != 0)
    embeddings = []

    for chunk_index in range(n_chunks):
        start = chunk_index * chunk_size
        end = min(start + chunk_size, len(texts))
        chunk = texts[start:end]

        chunk_tokens = tokenizer(chunk, padding=True, truncation=True, return_tensors="pt")
        chunk_embeddings = model(**chunk_tokens)["pooler_output"]
        chunk_embeddings = chunk_embeddings.detach().numpy()

        embeddings.append(chunk_embeddings)

    embeddings = np.vstack(embeddings)

    return embeddings

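The commit does not pin down which encoder drives get_embedding; a hedged sketch, where bert-base-uncased stands in for any Hugging Face model that returns a pooler_output:

from transformers import AutoModel, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")  # assumed checkpoint
model = AutoModel.from_pretrained("bert-base-uncased")

embeddings = get_embedding(["keyword extraction", "scientific abstracts"], model, tokenizer)
print(embeddings.shape)  # (2, 768) for bert-base-uncased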
def score_candidates(text, candidates, model, tokenizer):
    """
    Rank the keyword candidates.
    """

    if len(candidates) == 1:
        return np.array([1.0])
    elif len(candidates) == 0:
        return np.array([])

    # The embedding of the text.
    text_embedding = get_embedding([text], model, tokenizer)

    # The embeddings of the keyword candidates.
    candidate_embeddings = get_embedding(candidates, model, tokenizer)

    # Take a softmax of the standardized cosine similarities.
    distances = cosine_similarity(text_embedding, candidate_embeddings)
    score = softmax((distances - np.mean(distances)) / np.std(distances))[0]

    return score

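The scoring transform is easy to sanity-check in isolation; the similarity values below are made up:

import numpy as np
from scipy.special import softmax

sims = np.array([[0.82, 0.75, 0.40]])  # hypothetical cosine similarities
score = softmax((sims - np.mean(sims)) / np.std(sims))[0]
print(score.round(3))  # ≈ [0.56, 0.383, 0.057]: higher similarity, higher weight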
def get_keywords(text, nlp, model, tokenizer, top=0.95, max_words=None):
    """
    Return scored keywords until their cumulative score exceeds top.
    """

    candidates = get_candidates(text, nlp)
    score = score_candidates(text, candidates, model, tokenizer)

    candidates_scored = [(candidates[index], score[index]) for index in score.argsort()[::-1]]

    result = []
    sum_probability = 0.0
    max_words = len(candidates_scored) if max_words is None else min(len(candidates_scored), max_words)
    for index in range(max_words):
        if sum_probability > top:
            break

        result.append(candidates_scored[index])
        sum_probability += candidates_scored[index][1]

    return result
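Putting it together, a hedged end-to-end sketch; the spaCy and encoder checkpoints are illustrative assumptions, and get_keywords is not yet wired into app.py in this commit:

import spacy
from transformers import AutoModel, AutoTokenizer

nlp = spacy.load("en_core_web_sm")
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")  # assumed encoder
model = AutoModel.from_pretrained("bert-base-uncased")

text = "We propose a transformer-based approach to keyword extraction from scientific abstracts."
for keyword, probability in get_keywords(text, nlp, model, tokenizer, top=0.9):
    print(f"{keyword}: {probability:.3f}")  # exact keywords depend on model versions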