Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -5,7 +5,7 @@ from sklearn.decomposition import LatentDirichletAllocation
|
|
| 5 |
import re
|
| 6 |
import warnings
|
| 7 |
from collections import Counter
|
| 8 |
-
from soynlp.noun import
|
| 9 |
from soynlp.tokenizer import RegexTokenizer
|
| 10 |
|
| 11 |
warnings.filterwarnings("ignore")
|
|
@@ -24,7 +24,7 @@ default_stopwords = set([
|
|
| 24 |
|
| 25 |
@st.cache_resource
|
| 26 |
def load_noun_extractor():
|
| 27 |
-
return
|
| 28 |
|
| 29 |
noun_extractor = load_noun_extractor()
|
| 30 |
tokenizer = RegexTokenizer()
|
|
@@ -36,7 +36,7 @@ def extract_nouns(text):
|
|
| 36 |
# 명사 추출 (noun extraction)
|
| 37 |
nouns = []
|
| 38 |
for sentence in sentences:
|
| 39 |
-
extracted = noun_extractor.
|
| 40 |
nouns.extend(extracted.keys())
|
| 41 |
|
| 42 |
# 2음절 이상의 명사만 선택 (keep only nouns of two or more syllables)
|
|
|
|
| 5 |
import re
|
| 6 |
import warnings
|
| 7 |
from collections import Counter
|
| 8 |
+
from soynlp.noun import LRNounExtractor
|
| 9 |
from soynlp.tokenizer import RegexTokenizer
|
| 10 |
|
| 11 |
warnings.filterwarnings("ignore")
|
|
|
|
| 24 |
|
| 25 |
@st.cache_resource
|
| 26 |
def load_noun_extractor():
|
| 27 |
+
return LRNounExtractor()
|
| 28 |
|
| 29 |
noun_extractor = load_noun_extractor()
|
| 30 |
tokenizer = RegexTokenizer()
|
|
|
|
| 36 |
# 명사 추출 (noun extraction)
|
| 37 |
nouns = []
|
| 38 |
for sentence in sentences:
|
| 39 |
+
extracted = noun_extractor.extract(sentence)
|
| 40 |
nouns.extend(extracted.keys())
|
| 41 |
|
| 42 |
# 2음절 이상의 명사만 선택 (keep only nouns of two or more syllables)
|