AIEcosystem committed on
Commit
6771df0
·
verified ·
1 Parent(s): 815e315

Update src/streamlit_app.py

Browse files
Files changed (1) hide show
  1. src/streamlit_app.py +173 -120
src/streamlit_app.py CHANGED
@@ -7,11 +7,11 @@ import io
7
  import plotly.express as px
8
  import zipfile
9
  import json
10
- from cryptography.fernet import Fernet # This import is not used
11
  from streamlit_extras.stylable_container import stylable_container
12
  from typing import Optional
13
  from gliner import GLiNER
14
  from comet_ml import Experiment
 
15
  # --- CSS Styling for the App ---
16
  st.markdown(
17
  """
@@ -52,7 +52,9 @@ st.markdown(
52
  }
53
  </style>
54
  """,
55
- unsafe_allow_html=True)
 
 
56
  # --- Page Configuration and UI Elements ---
57
  st.set_page_config(layout="wide", page_title="Named Entity Recognition App")
58
  st.subheader("RetailTag", divider="violet")
@@ -89,6 +91,7 @@ with st.sidebar:
89
  st.divider()
90
  st.subheader("🚀 Ready to build your own AI Web App?", divider="violet")
91
  st.link_button("AI Web App Builder", "https://nlpblogs.com/build-your-named-entity-recognition-app/", type="primary")
 
92
  # --- Comet ML Setup ---
93
  COMET_API_KEY = os.environ.get("COMET_API_KEY")
94
  COMET_WORKSPACE = os.environ.get("COMET_WORKSPACE")
@@ -96,8 +99,8 @@ COMET_PROJECT_NAME = os.environ.get("COMET_PROJECT_NAME")
96
  comet_initialized = bool(COMET_API_KEY and COMET_WORKSPACE and COMET_PROJECT_NAME)
97
  if not comet_initialized:
98
  st.warning("Comet ML not initialized. Check environment variables.")
 
99
  # --- Label Definitions ---
100
- # The list of labels is assigned to a variable for use in the model loading function
101
  labels = [
102
  "Product_Name",
103
  "Product_Type",
@@ -116,7 +119,9 @@ labels = [
116
  "Location",
117
  "Person",
118
  "Date",
119
- "Time"]
 
 
120
  # Create a mapping dictionary for labels to categories
121
  category_mapping = {
122
  "Product & Service Entities": [
@@ -142,147 +147,195 @@ category_mapping = {
142
  "Person",
143
  "Date",
144
  "Time"
145
- ]}
 
 
146
  # --- Model Loading ---
147
  @st.cache_resource
148
  def load_ner_model():
149
  """Loads the GLiNER model and caches it."""
150
  try:
151
- # The 'labels' variable is now correctly passed to the function
152
  return GLiNER.from_pretrained("knowledgator/gliner-multitask-large-v0.5", nested_ner=True, num_gen_sequences=2, gen_constraints=labels)
153
  except Exception as e:
154
  st.error(f"Failed to load NER model. Please check your internet connection or model availability: {e}")
155
  st.stop()
156
  model = load_ner_model()
 
157
  # Flatten the mapping to a single dictionary
158
  reverse_category_mapping = {label: category for category, label_list in category_mapping.items() for label in label_list}
 
 
 
 
 
 
 
 
 
 
 
159
  # --- Text Input and Clear Button ---
160
  word_limit = 200
161
  text = st.text_area(f"Type or paste your text below (max {word_limit} words), and then press Ctrl + Enter", height=250, key='my_text_area')
162
  word_count = len(text.split())
163
  st.markdown(f"**Word count:** {word_count}/{word_limit}")
 
164
  def clear_text():
165
- """Clears the text area."""
166
  st.session_state['my_text_area'] = ""
 
 
 
 
167
  st.button("Clear text", on_click=clear_text)
 
168
  # --- Results Section ---
169
  if st.button("Results"):
170
- start_time = time.time()
171
  if not text.strip():
172
  st.warning("Please enter some text to extract entities.")
 
173
  elif word_count > word_limit:
174
  st.warning(f"Your text exceeds the {word_limit} word limit. Please shorten it to continue.")
 
175
  else:
176
- with st.spinner("Extracting entities...", show_time=True):
177
- entities = model.predict_entities(text, labels)
178
- df = pd.DataFrame(entities)
179
- if not df.empty:
180
- df['category'] = df['label'].map(reverse_category_mapping)
181
- if comet_initialized:
182
- experiment = Experiment(
183
- api_key=COMET_API_KEY,
184
- workspace=COMET_WORKSPACE,
185
- project_name=COMET_PROJECT_NAME,
186
- )
187
- experiment.log_parameter("input_text", text)
188
- experiment.log_table("predicted_entities", df)
189
- st.subheader("Grouped Entities by Category", divider="violet")
190
- # Create tabs for each category
191
- category_names = sorted(list(category_mapping.keys()))
192
- category_tabs = st.tabs(category_names)
193
- for i, category_name in enumerate(category_names):
194
- with category_tabs[i]:
195
- df_category_filtered = df[df['category'] == category_name]
196
- if not df_category_filtered.empty:
197
- st.dataframe(df_category_filtered.drop(columns=['category']), use_container_width=True)
198
- else:
199
- st.info(f"No entities found for the '{category_name}' category.")
200
- with st.expander("See Glossary of tags"):
201
- st.write('''
202
- - **text**: ['entity extracted from your text data']
203
- - **score**: ['accuracy score; how accurately a tag has been assigned to a given entity']
204
- - **label**: ['label (tag) assigned to a given extracted entity']
205
- - **start**: ['index of the start of the corresponding entity']
206
- - **end**: ['index of the end of the corresponding entity']
207
- ''')
208
- st.divider()
209
- # Tree map
210
- st.subheader("Tree map", divider="violet")
211
- fig_treemap = px.treemap(df, path=[px.Constant("all"), 'category', 'label', 'text'], values='score', color='category')
212
- fig_treemap.update_layout(margin=dict(t=50, l=25, r=25, b=25), paper_bgcolor='#E8F5E9', plot_bgcolor='#E8F5E9')
213
- st.plotly_chart(fig_treemap)
214
- # Pie and Bar charts
215
- grouped_counts = df['category'].value_counts().reset_index()
216
- grouped_counts.columns = ['category', 'count']
217
- col1, col2 = st.columns(2)
218
- with col1:
219
- st.subheader("Pie chart", divider="violet")
220
- fig_pie = px.pie(grouped_counts, values='count', names='category', hover_data=['count'], labels={'count': 'count'}, title='Percentage of predicted categories')
221
- fig_pie.update_traces(textposition='inside', textinfo='percent+label')
222
- fig_pie.update_layout(
223
- paper_bgcolor='#E8F5E9',
224
- plot_bgcolor='#E8F5E9'
225
- )
226
- st.plotly_chart(fig_pie)
227
- with col2:
228
- st.subheader("Bar chart", divider="violet")
229
- fig_bar = px.bar(grouped_counts, x="count", y="category", color="category", text_auto=True, title='Occurrences of predicted categories')
230
- fig_bar.update_layout(
231
- paper_bgcolor='#E8F5E9',
232
- plot_bgcolor='#E8F5E9'
233
- )
234
- st.plotly_chart(fig_bar)
235
- # Most Frequent Entities
236
- st.subheader("Most Frequent Entities", divider="violet")
237
- word_counts = df['text'].value_counts().reset_index()
238
- word_counts.columns = ['Entity', 'Count']
239
- repeating_entities = word_counts[word_counts['Count'] > 1]
240
- if not repeating_entities.empty:
241
- st.dataframe(repeating_entities, use_container_width=True)
242
- fig_repeating_bar = px.bar(repeating_entities, x='Entity', y='Count', color='Entity')
243
- fig_repeating_bar.update_layout(
244
- xaxis={'categoryorder': 'total descending'},
245
- paper_bgcolor='#E8F5E9',
246
- plot_bgcolor='#E8F5E9'
247
- )
248
- st.plotly_chart(fig_repeating_bar)
249
  else:
250
- st.warning("No entities were found that occur more than once.")
251
- # Download Section
252
- st.divider()
253
- dfa = pd.DataFrame(
254
- data={
255
- 'Column Name': ['text', 'label', 'score', 'start', 'end'],
256
- 'Description': [
257
- 'entity extracted from your text data',
258
- 'label (tag) assigned to a given extracted entity',
259
- 'accuracy score; how accurately a tag has been assigned to a given entity',
260
- 'index of the start of the corresponding entity',
261
- 'index of the end of the corresponding entity',
262
- ]
263
- }
264
- )
265
- buf = io.BytesIO()
266
- with zipfile.ZipFile(buf, "w") as myzip:
267
- myzip.writestr("Summary of the results.csv", df.to_csv(index=False))
268
- myzip.writestr("Glossary of tags.csv", dfa.to_csv(index=False))
269
- with stylable_container(
270
- key="download_button",
271
- css_styles="""button { background-color: #81C784; border: 1px solid black; padding: 5px; color: #1B5E20; }""",
272
- ):
273
- st.download_button(
274
- label="Download results and glossary (zip)",
275
- data=buf.getvalue(),
276
- file_name="nlpblogs_results.zip",
277
- mime="application/zip",
278
- )
279
- if comet_initialized:
280
- experiment.log_figure(figure=fig_treemap, figure_name="entity_treemap_categories")
281
- experiment.end()
282
- else: # If df is empty
283
- st.warning("No entities were found in the provided text.")
284
- end_time = time.time()
285
- elapsed_time = end_time - start_time
286
- st.text("")
287
- st.text("")
288
- st.info(f"Results processed in **{elapsed_time:.2f} seconds**.")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7
  import plotly.express as px
8
  import zipfile
9
  import json
 
10
  from streamlit_extras.stylable_container import stylable_container
11
  from typing import Optional
12
  from gliner import GLiNER
13
  from comet_ml import Experiment
14
+
15
  # --- CSS Styling for the App ---
16
  st.markdown(
17
  """
 
52
  }
53
  </style>
54
  """,
55
+ unsafe_allow_html=True
56
+ )
57
+
58
  # --- Page Configuration and UI Elements ---
59
  st.set_page_config(layout="wide", page_title="Named Entity Recognition App")
60
  st.subheader("RetailTag", divider="violet")
 
91
  st.divider()
92
  st.subheader("🚀 Ready to build your own AI Web App?", divider="violet")
93
  st.link_button("AI Web App Builder", "https://nlpblogs.com/build-your-named-entity-recognition-app/", type="primary")
94
+
95
  # --- Comet ML Setup ---
96
  COMET_API_KEY = os.environ.get("COMET_API_KEY")
97
  COMET_WORKSPACE = os.environ.get("COMET_WORKSPACE")
 
99
  comet_initialized = bool(COMET_API_KEY and COMET_WORKSPACE and COMET_PROJECT_NAME)
100
  if not comet_initialized:
101
  st.warning("Comet ML not initialized. Check environment variables.")
102
+
103
  # --- Label Definitions ---
 
104
  labels = [
105
  "Product_Name",
106
  "Product_Type",
 
119
  "Location",
120
  "Person",
121
  "Date",
122
+ "Time"
123
+ ]
124
+
125
  # Create a mapping dictionary for labels to categories
126
  category_mapping = {
127
  "Product & Service Entities": [
 
147
  "Person",
148
  "Date",
149
  "Time"
150
+ ]
151
+ }
152
+
153
  # --- Model Loading ---
154
  @st.cache_resource
155
  def load_ner_model():
156
  """Loads the GLiNER model and caches it."""
157
  try:
 
158
  return GLiNER.from_pretrained("knowledgator/gliner-multitask-large-v0.5", nested_ner=True, num_gen_sequences=2, gen_constraints=labels)
159
  except Exception as e:
160
  st.error(f"Failed to load NER model. Please check your internet connection or model availability: {e}")
161
  st.stop()
162
  model = load_ner_model()
163
+
164
  # Flatten the mapping to a single dictionary
165
  reverse_category_mapping = {label: category for category, label_list in category_mapping.items() for label in label_list}
166
+
167
+ # --- Session State Initialization ---
168
+ if 'show_results' not in st.session_state:
169
+ st.session_state.show_results = False
170
+ if 'last_text' not in st.session_state:
171
+ st.session_state.last_text = ""
172
+ if 'results_df' not in st.session_state:
173
+ st.session_state.results_df = pd.DataFrame()
174
+ if 'elapsed_time' not in st.session_state:
175
+ st.session_state.elapsed_time = 0.0
176
+
177
  # --- Text Input and Clear Button ---
178
  word_limit = 200
179
  text = st.text_area(f"Type or paste your text below (max {word_limit} words), and then press Ctrl + Enter", height=250, key='my_text_area')
180
  word_count = len(text.split())
181
  st.markdown(f"**Word count:** {word_count}/{word_limit}")
182
+
183
  def clear_text():
184
+ """Clears the text area and hides results."""
185
  st.session_state['my_text_area'] = ""
186
+ st.session_state.show_results = False
187
+ st.session_state.last_text = ""
188
+ st.session_state.results_df = pd.DataFrame()
189
+ st.session_state.elapsed_time = 0.0
190
  st.button("Clear text", on_click=clear_text)
191
+
192
  # --- Results Section ---
193
  if st.button("Results"):
 
194
  if not text.strip():
195
  st.warning("Please enter some text to extract entities.")
196
+ st.session_state.show_results = False
197
  elif word_count > word_limit:
198
  st.warning(f"Your text exceeds the {word_limit} word limit. Please shorten it to continue.")
199
+ st.session_state.show_results = False
200
  else:
201
+ # Check if the text is different from the last time
202
+ if text != st.session_state.last_text:
203
+ st.session_state.show_results = True
204
+ st.session_state.last_text = text
205
+ start_time = time.time()
206
+ with st.spinner("Extracting entities...", show_time=True):
207
+ entities = model.predict_entities(text, labels)
208
+ df = pd.DataFrame(entities)
209
+ st.session_state.results_df = df
210
+ if not df.empty:
211
+ df['category'] = df['label'].map(reverse_category_mapping)
212
+ if comet_initialized:
213
+ experiment = Experiment(
214
+ api_key=COMET_API_KEY,
215
+ workspace=COMET_WORKSPACE,
216
+ project_name=COMET_PROJECT_NAME,
217
+ )
218
+ experiment.log_parameter("input_text", text)
219
+ experiment.log_table("predicted_entities", df)
220
+ experiment.end()
221
+ end_time = time.time()
222
+ st.session_state.elapsed_time = end_time - start_time
223
+ else:
224
+ # If the text is the same, just show the cached results without re-running
225
+ st.session_state.show_results = True
226
+
227
+ # Display results if the state variable is True
228
+ if st.session_state.show_results:
229
+ df = st.session_state.results_df
230
+ if not df.empty:
231
+ df['category'] = df['label'].map(reverse_category_mapping)
232
+ st.subheader("Grouped Entities by Category", divider="violet")
233
+
234
+ # Create tabs for each category
235
+ category_names = sorted(list(category_mapping.keys()))
236
+ category_tabs = st.tabs(category_names)
237
+
238
+ for i, category_name in enumerate(category_names):
239
+ with category_tabs[i]:
240
+ df_category_filtered = df[df['category'] == category_name]
241
+ if not df_category_filtered.empty:
242
+ st.dataframe(df_category_filtered.drop(columns=['category']), use_container_width=True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
243
  else:
244
+ st.info(f"No entities found for the '{category_name}' category.")
245
+
246
+ with st.expander("See Glossary of tags"):
247
+ st.write('''
248
+ - **text**: ['entity extracted from your text data']
249
+ - **score**: ['accuracy score; how accurately a tag has been assigned to a given entity']
250
+ - **label**: ['label (tag) assigned to a given extracted entity']
251
+ - **start**: ['index of the start of the corresponding entity']
252
+ - **end**: ['index of the end of the corresponding entity']
253
+ ''')
254
+ st.divider()
255
+
256
+ # Tree map
257
+ st.subheader("Tree map", divider="violet")
258
+ fig_treemap = px.treemap(df, path=[px.Constant("all"), 'category', 'label', 'text'], values='score', color='category')
259
+ fig_treemap.update_layout(margin=dict(t=50, l=25, r=25, b=25), paper_bgcolor='#E8F5E9', plot_bgcolor='#E8F5E9')
260
+ st.plotly_chart(fig_treemap)
261
+
262
+ # Pie and Bar charts
263
+ grouped_counts = df['category'].value_counts().reset_index()
264
+ grouped_counts.columns = ['category', 'count']
265
+ col1, col2 = st.columns(2)
266
+
267
+ with col1:
268
+ st.subheader("Pie chart", divider="violet")
269
+ fig_pie = px.pie(grouped_counts, values='count', names='category', hover_data=['count'], labels={'count': 'count'}, title='Percentage of predicted categories')
270
+ fig_pie.update_traces(textposition='inside', textinfo='percent+label')
271
+ fig_pie.update_layout(
272
+ paper_bgcolor='#E8F5E9',
273
+ plot_bgcolor='#E8F5E9'
274
+ )
275
+ st.plotly_chart(fig_pie)
276
+
277
+ with col2:
278
+ st.subheader("Bar chart", divider="violet")
279
+ fig_bar = px.bar(grouped_counts, x="count", y="category", color="category", text_auto=True, title='Occurrences of predicted categories')
280
+ fig_bar.update_layout(
281
+ paper_bgcolor='#E8F5E9',
282
+ plot_bgcolor='#E8F5E9'
283
+ )
284
+ st.plotly_chart(fig_bar)
285
+
286
+ # Most Frequent Entities
287
+ st.subheader("Most Frequent Entities", divider="violet")
288
+ word_counts = df['text'].value_counts().reset_index()
289
+ word_counts.columns = ['Entity', 'Count']
290
+ repeating_entities = word_counts[word_counts['Count'] > 1]
291
+
292
+ if not repeating_entities.empty:
293
+ st.dataframe(repeating_entities, use_container_width=True)
294
+ fig_repeating_bar = px.bar(repeating_entities, x='Entity', y='Count', color='Entity')
295
+ fig_repeating_bar.update_layout(xaxis={'categoryorder': 'total descending'},
296
+ paper_bgcolor='#E8F5E9',
297
+ plot_bgcolor='#E8F5E9')
298
+ st.plotly_chart(fig_repeating_bar)
299
+ else:
300
+ st.warning("No entities were found that occur more than once.")
301
+
302
+ # Download Section
303
+ st.divider()
304
+ dfa = pd.DataFrame(
305
+ data={
306
+ 'Column Name': ['text', 'label', 'score', 'start', 'end'],
307
+ 'Description': [
308
+ 'entity extracted from your text data',
309
+ 'label (tag) assigned to a given extracted entity',
310
+ 'accuracy score; how accurately a tag has been assigned to a given entity',
311
+ 'index of the start of the corresponding entity',
312
+ 'index of the end of the corresponding entity',
313
+ ]
314
+ }
315
+ )
316
+ buf = io.BytesIO()
317
+ with zipfile.ZipFile(buf, "w") as myzip:
318
+ myzip.writestr("Summary of the results.csv", df.to_csv(index=False))
319
+ myzip.writestr("Glossary of tags.csv", dfa.to_csv(index=False))
320
+
321
+ with stylable_container(
322
+ key="download_button",
323
+ css_styles="""button { background-color: #81C784; border: 1px solid black; padding: 5px; color: #1B5E20; }""",
324
+ ):
325
+ st.download_button(
326
+ label="Download results and glossary (zip)",
327
+ data=buf.getvalue(),
328
+ file_name="nlpblogs_results.zip",
329
+ mime="application/zip",
330
+ )
331
+
332
+ if comet_initialized:
333
+ experiment.log_figure(figure=fig_treemap, figure_name="entity_treemap_categories")
334
+ experiment.end()
335
+
336
+ else: # If df is empty
337
+ st.warning("No entities were found in the provided text.")
338
+
339
+ st.text("")
340
+ st.text("")
341
+ st.info(f"Results processed in **{st.session_state.elapsed_time:.2f} seconds**.")