SorrelC committed on
Commit
8a60436
·
verified ·
1 Parent(s): ed3049f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +34 -245
app.py CHANGED
@@ -6,19 +6,14 @@ import re
6
  import time
7
  warnings.filterwarnings('ignore')
8
 
9
- # Model names and descriptions
10
- KEYWORD_MODELS = {
11
- 'pke_multipartiterank': 'MultipartiteRank - Graph-based ranking using topic clustering',
12
- 'pke_singlerank': 'SingleRank - Graph-based ranking algorithm',
13
- 'pke_tfidf': 'TF-IDF - Term Frequency-Inverse Document Frequency',
14
- 'pke_topicrank': 'TopicRank - Graph-based with topic clustering',
15
- 'pke_textrank': 'TextRank - Graph-based ranking algorithm',
16
- 'pke_positionrank': 'PositionRank - Incorporates word positions',
17
- 'yake_yake': 'YAKE - Yet Another Keyword Extractor (statistical approach)',
18
- 'keybert_all-mpnet-base-v2': 'KeyBERT - BERT-based with all-mpnet-base-v2 embeddings',
19
- 'keybert_all-MiniLM-L6-v2': 'KeyBERT - BERT-based with all-MiniLM-L6-v2 embeddings',
20
- 'keybert_paraphrase-mpnet-base-v2': 'KeyBERT - BERT-based with paraphrase embeddings',
21
- 'rakun_rakun': 'RaKUn - Graph-based unsupervised keyword extraction'
22
  }
23
 
24
  # Color palette for keywords based on scores
@@ -39,9 +34,6 @@ class KeywordExtractionManager:
39
  def __init__(self):
40
  self.pke_models = {}
41
  self.spacy_model = None
42
- self.yake_model = None
43
- self.keybert_models = {}
44
- self.rakun_model = None
45
 
46
  def load_spacy_model(self):
47
  """Load spaCy model for preprocessing"""
@@ -60,48 +52,28 @@ class KeywordExtractionManager:
60
  return self.spacy_model
61
 
62
  def extract_keywords(self, text, model_name, num_keywords=10, ngram_range=(1, 3), progress=None):
63
- """Extract keywords using the specified model"""
64
  try:
 
 
65
  if progress:
66
  progress(0.3, desc="Loading model...")
67
 
68
- # Route to appropriate extraction method based on model type
69
- if 'pke_' in model_name:
70
- return self.extract_pke_keywords(text, model_name, num_keywords, ngram_range, progress)
71
- elif 'yake_' in model_name:
72
- return self.extract_yake_keywords(text, num_keywords, ngram_range, progress)
73
- elif 'keybert_' in model_name:
74
- return self.extract_keybert_keywords(text, model_name, num_keywords, ngram_range, progress)
75
- elif 'rakun_' in model_name:
76
- return self.extract_rakun_keywords(text, num_keywords, progress)
77
- else:
78
- raise ValueError(f"Unknown model: {model_name}")
79
-
80
- except Exception as e:
81
- print(f"Error with {model_name}: {str(e)}")
82
- return self.fallback_keyword_extraction(text, num_keywords)
83
-
84
- def extract_pke_keywords(self, text, model_name, num_keywords, ngram_range, progress):
85
- """Extract keywords using PKE models"""
86
- try:
87
- import pke
88
-
89
  # Initialize the extractor based on model name
90
- model_type = model_name.replace('pke_', '')
91
- if 'multipartiterank' in model_type:
92
  extractor = pke.unsupervised.MultipartiteRank()
93
- elif 'singlerank' in model_type:
94
  extractor = pke.unsupervised.SingleRank()
95
- elif 'tfidf' in model_type:
96
  extractor = pke.unsupervised.TfIdf()
97
- elif 'topicrank' in model_type:
98
  extractor = pke.unsupervised.TopicRank()
99
- elif 'textrank' in model_type:
100
  extractor = pke.unsupervised.TextRank()
101
- elif 'positionrank' in model_type:
102
  extractor = pke.unsupervised.PositionRank()
103
  else:
104
- raise ValueError(f"Unknown PKE model: {model_type}")
105
 
106
  if progress:
107
  progress(0.5, desc="Processing text...")
@@ -110,16 +82,16 @@ class KeywordExtractionManager:
110
  extractor.load_document(input=text, language='en')
111
 
112
  # Select candidates based on model
113
- if 'multipartiterank' in model_type:
114
  extractor.candidate_selection(pos={'NOUN', 'PROPN', 'ADJ'})
115
  extractor.candidate_weighting(alpha=1.1, threshold=0.75, method='average')
116
- elif 'topicrank' in model_type:
117
  extractor.candidate_selection(pos={'NOUN', 'PROPN', 'ADJ'})
118
  extractor.candidate_weighting(threshold=0.74, method='average')
119
- elif 'positionrank' in model_type:
120
  extractor.candidate_selection(maximum_word_number=3)
121
  extractor.candidate_weighting(window=10)
122
- elif 'tfidf' in model_type:
123
  extractor.candidate_selection(n=ngram_range[1], stoplist=['en'])
124
  extractor.candidate_weighting()
125
  else:
@@ -139,7 +111,7 @@ class KeywordExtractionManager:
139
  results.append({
140
  'keyword': keyword,
141
  'score': score,
142
- 'model': model_type.title()
143
  })
144
 
145
  return results
@@ -147,142 +119,8 @@ class KeywordExtractionManager:
147
  except ImportError:
148
  print("PKE library not found. Using fallback keyword extraction...")
149
  return self.fallback_keyword_extraction(text, num_keywords)
150
-
151
- def extract_yake_keywords(self, text, num_keywords, ngram_range, progress):
152
- """Extract keywords using YAKE"""
153
- try:
154
- import yake
155
-
156
- if progress:
157
- progress(0.5, desc="Processing with YAKE...")
158
-
159
- # Initialize YAKE
160
- kw_extractor = yake.KeywordExtractor(
161
- lan="en",
162
- n=ngram_range[1],
163
- dedupLim=0.9,
164
- dedupFunc='seqm',
165
- windowsSize=1,
166
- top=num_keywords
167
- )
168
-
169
- # Extract keywords
170
- keywords = kw_extractor.extract_keywords(text)
171
-
172
- if progress:
173
- progress(0.7, desc="Formatting results...")
174
-
175
- # Format results (YAKE returns lower scores for better keywords, so we invert)
176
- results = []
177
- max_score = max([score for _, score in keywords]) if keywords else 1
178
-
179
- for keyword, score in keywords:
180
- # Invert and normalize score
181
- normalized_score = (max_score - score) / max_score if max_score > 0 else 0
182
- results.append({
183
- 'keyword': keyword,
184
- 'score': normalized_score,
185
- 'model': 'YAKE'
186
- })
187
-
188
- return results
189
-
190
- except ImportError:
191
- print("YAKE not found. Please install with: pip install yake")
192
- return self.fallback_keyword_extraction(text, num_keywords)
193
-
194
- def extract_keybert_keywords(self, text, model_name, num_keywords, ngram_range, progress):
195
- """Extract keywords using KeyBERT"""
196
- try:
197
- from keybert import KeyBERT
198
-
199
- if progress:
200
- progress(0.5, desc="Loading KeyBERT model...")
201
-
202
- # Get the embedding model name
203
- embedding_model = model_name.replace('keybert_', 'sentence-transformers/')
204
-
205
- # Initialize or retrieve KeyBERT model
206
- if model_name not in self.keybert_models:
207
- self.keybert_models[model_name] = KeyBERT(embedding_model)
208
- print(f"✓ KeyBERT model {embedding_model} loaded successfully")
209
-
210
- kw_model = self.keybert_models[model_name]
211
-
212
- if progress:
213
- progress(0.6, desc="Extracting keywords with KeyBERT...")
214
-
215
- # Extract keywords
216
- keywords = kw_model.extract_keywords(
217
- text,
218
- keyphrase_ngram_range=ngram_range,
219
- stop_words='english',
220
- top_n=num_keywords,
221
- use_mmr=True,
222
- diversity=0.5
223
- )
224
-
225
- if progress:
226
- progress(0.7, desc="Formatting results...")
227
-
228
- # Format results
229
- results = []
230
- for keyword, score in keywords:
231
- results.append({
232
- 'keyword': keyword,
233
- 'score': score,
234
- 'model': f'KeyBERT-{embedding_model.split("/")[-1]}'
235
- })
236
-
237
- return results
238
-
239
- except ImportError:
240
- print("KeyBERT not found. Please install with: pip install keybert")
241
- return self.fallback_keyword_extraction(text, num_keywords)
242
-
243
- def extract_rakun_keywords(self, text, num_keywords, progress):
244
- """Extract keywords using RaKUn"""
245
- try:
246
- from mrakun import RakunDetector
247
-
248
- if progress:
249
- progress(0.5, desc="Processing with RaKUn...")
250
-
251
- # Initialize RaKUn
252
- hyperparameters = {
253
- "distance_threshold": 2,
254
- "distance_method": "editdistance",
255
- "num_keywords": num_keywords,
256
- "pair_diff_length": 2,
257
- "stopwords": "english",
258
- "bigram_count_threshold": 2,
259
- "num_tokens": [1, 2, 3],
260
- "max_similar": 3,
261
- "max_occurrence": 3
262
- }
263
-
264
- keyword_detector = RakunDetector(hyperparameters)
265
-
266
- # Extract keywords
267
- keywords = keyword_detector.find_keywords(text)
268
-
269
- if progress:
270
- progress(0.7, desc="Formatting results...")
271
-
272
- # Format results
273
- results = []
274
- # RaKUn returns tuples of (keyword, score)
275
- for keyword, score in keywords:
276
- results.append({
277
- 'keyword': keyword,
278
- 'score': score,
279
- 'model': 'RaKUn'
280
- })
281
-
282
- return results
283
-
284
- except ImportError:
285
- print("RaKUn not found. Please install with: pip install mrakun")
286
  return self.fallback_keyword_extraction(text, num_keywords)
287
 
288
  def fallback_keyword_extraction(self, text, num_keywords=10):
@@ -484,7 +322,7 @@ def process_text(text, selected_model, num_keywords, ngram_min, ngram_max, progr
484
  summary = f"""
485
  ## 📊 Analysis Summary
486
  - **Keywords extracted:** {len(keywords)}
487
- - **Model used:** {selected_model.replace('pke_', '').replace('yake_', '').replace('keybert_', '').replace('rakun_', '').replace('_', ' ').title()}
488
  - **Average relevance score:** {avg_score:.4f}
489
  - **N-gram range:** {ngram_min}-{ngram_max} words
490
  """
@@ -499,12 +337,12 @@ def create_interface():
499
  gr.Markdown("""
500
  # Keyword Extraction Explorer Tool
501
 
502
- Extract the most important keywords and phrases from your text using various algorithms! This tool uses multiple state-of-the-art keyword extraction models for comprehensive analysis.
503
 
504
  ### How to use:
505
  1. **📝 Enter your text** in the text area below
506
  2. **🎯 Select a model** from the dropdown for keyword extraction
507
- 3. **⚙️ Adjust parameters** (number of keywords, n-gram range)
508
  4. **🔍 Click "Extract Keywords"** to see results with organized output
509
  """)
510
 
@@ -527,8 +365,8 @@ def create_interface():
527
  with gr.Column(scale=1):
528
  # Model selector
529
  model_dropdown = gr.Dropdown(
530
- choices=list(KEYWORD_MODELS.keys()),
531
- value='pke_multipartiterank',
532
  label="🎯 Select Keyword Extraction Model"
533
  )
534
 
@@ -556,15 +394,6 @@ def create_interface():
556
  step=1,
557
  label="Max N-gram"
558
  )
559
-
560
- # Add n-gram tip box
561
- gr.HTML("""
562
- <div style="background-color: #fff3cd; border: 1px solid #ffeaa7; border-radius: 8px; padding: 10px; margin-top: 10px;">
563
- <strong style="color: #856404;">💡 What are n-grams?</strong> N-grams are sequences of words.
564
- For example: "1-gram" = single words (e.g., "science"), "2-gram" = two-word phrases (e.g., "data science"),
565
- "3-gram" = three-word phrases (e.g., "machine learning algorithm"). Adjust the sliders to control the length of extracted phrases.
566
- </div>
567
- """)
568
 
569
  # Add model descriptions
570
  gr.HTML("""
@@ -573,8 +402,7 @@ def create_interface():
573
  ℹ️ Model Descriptions
574
  </summary>
575
  <div style="margin-top: 10px; padding: 10px;">
576
- <h5 style="margin: 10px 0 5px 0; color: #333;">PKE-based Models:</h5>
577
- <dl style="margin: 0 0 15px 0; font-size: 14px;">
578
  <div style="margin-bottom: 8px;">
579
  <dt style="font-weight: bold; display: inline; color: #4ECDC4;">MultipartiteRank:</dt>
580
  <dd style="display: inline; margin-left: 5px;">Graph-based ranking using topic clustering - excellent for diverse texts</dd>
@@ -600,30 +428,6 @@ def create_interface():
600
  <dd style="display: inline; margin-left: 5px;">Incorporates word positions - good for structured documents</dd>
601
  </div>
602
  </dl>
603
-
604
- <h5 style="margin: 10px 0 5px 0; color: #333;">Other Models:</h5>
605
- <dl style="margin: 0; font-size: 14px;">
606
- <div style="margin-bottom: 8px;">
607
- <dt style="font-weight: bold; display: inline; color: #FF9F43;">YAKE:</dt>
608
- <dd style="display: inline; margin-left: 5px;">Statistical approach using word features - language independent, no training needed</dd>
609
- </div>
610
- <div style="margin-bottom: 8px;">
611
- <dt style="font-weight: bold; display: inline; color: #10AC84;">KeyBERT (all-mpnet-base-v2):</dt>
612
- <dd style="display: inline; margin-left: 5px;">BERT-based extraction with high-quality sentence embeddings</dd>
613
- </div>
614
- <div style="margin-bottom: 8px;">
615
- <dt style="font-weight: bold; display: inline; color: #EE5A24;">KeyBERT (all-MiniLM-L6-v2):</dt>
616
- <dd style="display: inline; margin-left: 5px;">Lightweight BERT model - faster with good performance</dd>
617
- </div>
618
- <div style="margin-bottom: 8px;">
619
- <dt style="font-weight: bold; display: inline; color: #0FBC89;">KeyBERT (paraphrase-mpnet-base-v2):</dt>
620
- <dd style="display: inline; margin-left: 5px;">BERT model optimized for paraphrase detection</dd>
621
- </div>
622
- <div style="margin-bottom: 8px;">
623
- <dt style="font-weight: bold; display: inline; color: #5F27CD;">RaKUn:</dt>
624
- <dd style="display: inline; margin-left: 5px;">Graph-based method using word co-occurrences and edit distances</dd>
625
- </div>
626
- </dl>
627
  </div>
628
  </details>
629
  """)
@@ -660,21 +464,21 @@ def create_interface():
660
  examples=[
661
  [
662
  "On June 6, 1944, Allied forces launched Operation Overlord, the invasion of Normandy. General Dwight D. Eisenhower commanded the operation, while Field Marshal Bernard Montgomery led ground forces. The BBC broadcast coded messages to the French Resistance, including the famous line 'The long sobs of autumn violins.'",
663
- "pke_multipartiterank",
664
  10,
665
  1,
666
  3
667
  ],
668
  [
669
  "In Jane Austen's 'Pride and Prejudice', Elizabeth Bennet first meets Mr. Darcy at the Meryton assembly. The novel, published in 1813, explores themes of marriage and social class in Regency England. Austen wrote to her sister Cassandra about the manuscript while staying at Chawton Cottage.",
670
- "keybert_all-MiniLM-L6-v2",
671
  10,
672
  1,
673
  3
674
  ],
675
  [
676
  "Charles Darwin arrived at the Galápagos Islands aboard HMS Beagle in September 1835. During his five-week visit, Darwin collected specimens of finches, tortoises, and mockingbirds. His observations of these species' variations across different islands later contributed to his theory of evolution by natural selection, published in 'On the Origin of Species' in 1859.",
677
- "yake_yake",
678
  10,
679
  1,
680
  3
@@ -701,21 +505,6 @@ def create_interface():
701
  Python Keyphrase Extraction (PKE) GitHub ↗
702
  </a>
703
  </li>
704
- <li><strong>YAKE:</strong>
705
- <a href="https://github.com/LIAAD/yake" target="_blank" style="color: #1976d2;">
706
- Yet Another Keyword Extractor GitHub ↗
707
- </a>
708
- </li>
709
- <li><strong>KeyBERT:</strong>
710
- <a href="https://github.com/MaartenGr/KeyBERT" target="_blank" style="color: #1976d2;">
711
- KeyBERT Documentation ↗
712
- </a>
713
- </li>
714
- <li><strong>RaKUn:</strong>
715
- <a href="https://github.com/SkBlaz/rakun" target="_blank" style="color: #1976d2;">
716
- RaKUn GitHub Repository ↗
717
- </a>
718
- </li>
719
  <li><strong>Algorithm Papers:</strong>
720
  <a href="https://boudinfl.github.io/pke/" target="_blank" style="color: #1976d2;">
721
  PKE Documentation & References ↗
 
6
  import time
7
  warnings.filterwarnings('ignore')
8
 
9
+ # PKE model names and descriptions
10
+ PKE_MODELS = {
11
+ 'kw_pke_multipartiterank': 'MultipartiteRank - Graph-based ranking using topic clustering',
12
+ 'kw_pke_singlerank': 'SingleRank - Graph-based ranking algorithm',
13
+ 'kw_pke_tfidf': 'TF-IDF - Term Frequency-Inverse Document Frequency',
14
+ 'kw_pke_topicrank': 'TopicRank - Graph-based with topic clustering',
15
+ 'kw_pke_textrank': 'TextRank - Graph-based ranking algorithm',
16
+ 'kw_pke_positionrank': 'PositionRank - Incorporates word positions'
 
 
 
 
 
17
  }
18
 
19
  # Color palette for keywords based on scores
 
34
  def __init__(self):
35
  self.pke_models = {}
36
  self.spacy_model = None
 
 
 
37
 
38
  def load_spacy_model(self):
39
  """Load spaCy model for preprocessing"""
 
52
  return self.spacy_model
53
 
54
  def extract_keywords(self, text, model_name, num_keywords=10, ngram_range=(1, 3), progress=None):
55
+ """Extract keywords using the specified PKE model"""
56
  try:
57
+ import pke
58
+
59
  if progress:
60
  progress(0.3, desc="Loading model...")
61
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
62
  # Initialize the extractor based on model name
63
+ if 'multipartiterank' in model_name:
 
64
  extractor = pke.unsupervised.MultipartiteRank()
65
+ elif 'singlerank' in model_name:
66
  extractor = pke.unsupervised.SingleRank()
67
+ elif 'tfidf' in model_name:
68
  extractor = pke.unsupervised.TfIdf()
69
+ elif 'topicrank' in model_name:
70
  extractor = pke.unsupervised.TopicRank()
71
+ elif 'textrank' in model_name:
72
  extractor = pke.unsupervised.TextRank()
73
+ elif 'positionrank' in model_name:
74
  extractor = pke.unsupervised.PositionRank()
75
  else:
76
+ raise ValueError(f"Unknown model: {model_name}")
77
 
78
  if progress:
79
  progress(0.5, desc="Processing text...")
 
82
  extractor.load_document(input=text, language='en')
83
 
84
  # Select candidates based on model
85
+ if 'multipartiterank' in model_name:
86
  extractor.candidate_selection(pos={'NOUN', 'PROPN', 'ADJ'})
87
  extractor.candidate_weighting(alpha=1.1, threshold=0.75, method='average')
88
+ elif 'topicrank' in model_name:
89
  extractor.candidate_selection(pos={'NOUN', 'PROPN', 'ADJ'})
90
  extractor.candidate_weighting(threshold=0.74, method='average')
91
+ elif 'positionrank' in model_name:
92
  extractor.candidate_selection(maximum_word_number=3)
93
  extractor.candidate_weighting(window=10)
94
+ elif 'tfidf' in model_name:
95
  extractor.candidate_selection(n=ngram_range[1], stoplist=['en'])
96
  extractor.candidate_weighting()
97
  else:
 
111
  results.append({
112
  'keyword': keyword,
113
  'score': score,
114
+ 'model': model_name.replace('kw_pke_', '').title()
115
  })
116
 
117
  return results
 
119
  except ImportError:
120
  print("PKE library not found. Using fallback keyword extraction...")
121
  return self.fallback_keyword_extraction(text, num_keywords)
122
+ except Exception as e:
123
+ print(f"Error with {model_name}: {str(e)}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
124
  return self.fallback_keyword_extraction(text, num_keywords)
125
 
126
  def fallback_keyword_extraction(self, text, num_keywords=10):
 
322
  summary = f"""
323
  ## 📊 Analysis Summary
324
  - **Keywords extracted:** {len(keywords)}
325
+ - **Model used:** {selected_model.replace('kw_pke_', '').title()}
326
  - **Average relevance score:** {avg_score:.4f}
327
  - **N-gram range:** {ngram_min}-{ngram_max} words
328
  """
 
337
  gr.Markdown("""
338
  # Keyword Extraction Explorer Tool
339
 
340
+ Extract the most important keywords and phrases from your text using various algorithms! This tool uses PKE (Python Keyphrase Extraction) models for comprehensive keyword extraction.
341
 
342
  ### How to use:
343
  1. **📝 Enter your text** in the text area below
344
  2. **🎯 Select a model** from the dropdown for keyword extraction
345
  3. **⚙️ Adjust parameters** (number of keywords, n-gram range)
346
  4. **🔍 Click "Extract Keywords"** to see results with organized output
347
  """)
348
 
 
365
  with gr.Column(scale=1):
366
  # Model selector
367
  model_dropdown = gr.Dropdown(
368
+ choices=list(PKE_MODELS.keys()),
369
+ value='kw_pke_multipartiterank',
370
  label="🎯 Select Keyword Extraction Model"
371
  )
372
 
 
394
  step=1,
395
  label="Max N-gram"
396
  )
 
 
 
 
 
 
 
 
 
397
 
398
  # Add model descriptions
399
  gr.HTML("""
 
402
  ℹ️ Model Descriptions
403
  </summary>
404
  <div style="margin-top: 10px; padding: 10px;">
405
+ <dl style="margin: 0; font-size: 14px;">
 
406
  <div style="margin-bottom: 8px;">
407
  <dt style="font-weight: bold; display: inline; color: #4ECDC4;">MultipartiteRank:</dt>
408
  <dd style="display: inline; margin-left: 5px;">Graph-based ranking using topic clustering - excellent for diverse texts</dd>
 
428
  <dd style="display: inline; margin-left: 5px;">Incorporates word positions - good for structured documents</dd>
429
  </div>
430
  </dl>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
431
  </div>
432
  </details>
433
  """)
 
464
  examples=[
465
  [
466
  "On June 6, 1944, Allied forces launched Operation Overlord, the invasion of Normandy. General Dwight D. Eisenhower commanded the operation, while Field Marshal Bernard Montgomery led ground forces. The BBC broadcast coded messages to the French Resistance, including the famous line 'The long sobs of autumn violins.'",
467
+ "kw_pke_multipartiterank",
468
  10,
469
  1,
470
  3
471
  ],
472
  [
473
  "In Jane Austen's 'Pride and Prejudice', Elizabeth Bennet first meets Mr. Darcy at the Meryton assembly. The novel, published in 1813, explores themes of marriage and social class in Regency England. Austen wrote to her sister Cassandra about the manuscript while staying at Chawton Cottage.",
474
+ "kw_pke_topicrank",
475
  10,
476
  1,
477
  3
478
  ],
479
  [
480
  "Charles Darwin arrived at the Galápagos Islands aboard HMS Beagle in September 1835. During his five-week visit, Darwin collected specimens of finches, tortoises, and mockingbirds. His observations of these species' variations across different islands later contributed to his theory of evolution by natural selection, published in 'On the Origin of Species' in 1859.",
481
+ "kw_pke_textrank",
482
  10,
483
  1,
484
  3
 
505
  Python Keyphrase Extraction (PKE) GitHub ↗
506
  </a>
507
  </li>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
508
  <li><strong>Algorithm Papers:</strong>
509
  <a href="https://boudinfl.github.io/pke/" target="_blank" style="color: #1976d2;">
510
  PKE Documentation & References ↗