AIEcosystem committed on
Commit
cc563f7
·
verified ·
1 Parent(s): a10b630

Update src/streamlit_app.py

Browse files
Files changed (1) hide show
  1. src/streamlit_app.py +176 -124
src/streamlit_app.py CHANGED
@@ -7,11 +7,11 @@ import io
7
  import plotly.express as px
8
  import zipfile
9
  import json
10
- from cryptography.fernet import Fernet
11
  from streamlit_extras.stylable_container import stylable_container
12
  from typing import Optional
13
  from gliner import GLiNER
14
  from comet_ml import Experiment
 
15
  st.markdown(
16
  """
17
  <style>
@@ -25,11 +25,11 @@ st.markdown(
25
  background-color: #ADD8E6; /* Light blue for the sidebar */
26
  secondary-background-color: #ADD8E6;
27
  }
28
- /* Expander background color */
29
  .streamlit-expanderContent {
30
  background-color: #E0FFFF;
31
  }
32
- /* Expander header background color */
33
  .streamlit-expanderHeader {
34
  background-color: #E0FFFF;
35
  }
@@ -55,7 +55,9 @@ st.markdown(
55
  }
56
  </style>
57
  """,
58
- unsafe_allow_html=True)
 
 
59
  # --- Page Configuration and UI Elements ---
60
  st.set_page_config(layout="wide", page_title="Named Entity Recognition App")
61
  st.subheader("StoryCraft", divider="blue")
@@ -63,17 +65,15 @@ st.link_button("by nlpblogs", "https://nlpblogs.com", type="tertiary")
63
  expander = st.expander("**Important notes**")
64
  expander.write("""**Named Entities:** This StoryCraft web app predicts eighteen (18) labels: "Person", "Organization", "Location", "Date", "Time", "Quantity", "Product", "Event", "Title", "Job_title", "Artwork", "Media", "URL", "Website", "Hashtag", "Email", "IP_address", "File_path"
65
 
66
- Results are presented in easy-to-read tables, visualized in an interactive tree map, pie chart and bar chart, and are available for download along with a Glossary of tags.
67
 
68
- **How to Use:** Type or paste your text into the text area below, then press Ctrl + Enter. Click the 'Results' button to extract and tag entities in your text data.
69
 
70
- **Usage Limits:** You can request results unlimited times for one (1) month.
71
 
72
- **Supported Languages:** English
73
 
74
- **Technical issues:** If your connection times out, please refresh the page or reopen the app's URL.
75
-
76
- For any errors or inquiries, please contact us at info@nlpblogs.com""")
77
 
78
  with st.sidebar:
79
  st.write("Use the following code to embed the StoryCraft web app on your website. Feel free to adjust the width and height values to fit your page.")
@@ -92,6 +92,7 @@ with st.sidebar:
92
  st.divider()
93
  st.subheader("🚀 Ready to build your own AI Web App?", divider="blue")
94
  st.link_button("AI Web App Builder", "https://nlpblogs.com/build-your-named-entity-recognition-app/", type="primary")
 
95
  # --- Comet ML Setup ---
96
  COMET_API_KEY = os.environ.get("COMET_API_KEY")
97
  COMET_WORKSPACE = os.environ.get("COMET_WORKSPACE")
@@ -99,152 +100,203 @@ COMET_PROJECT_NAME = os.environ.get("COMET_PROJECT_NAME")
99
  comet_initialized = bool(COMET_API_KEY and COMET_WORKSPACE and COMET_PROJECT_NAME)
100
  if not comet_initialized:
101
  st.warning("Comet ML not initialized. Check environment variables.")
 
102
  # --- Label Definitions ---
103
- labels = ["Person","Organization","Location","Date","Time","Quantity","Product","Event","Title","Job_title","Artwork","Media","URL","Website","Hashtag","Email","IP_address","File_path"]
104
- # Corrected mapping dictionary
105
  # Create a mapping dictionary for labels to categories
106
  category_mapping = {
107
  "Core Foundational Entities": ["Person", "Organization", "Location", "Date", "Time", "Quantity"],
108
  "Content Enrichment Entities": ["Product", "Event", "Title", "Job_title", "Artwork", "Media"],
109
  "Digital & Technical Entities": ["URL", "Website", "Hashtag", "Email", "IP_address", "File_path"],
110
  }
 
111
  # --- Model Loading ---
112
  @st.cache_resource
113
  def load_ner_model():
114
  """Loads the GLiNER model and caches it."""
115
  try:
116
- return GLiNER.from_pretrained("gliner-community/gliner_large-v2.5", nested_ner=True, num_gen_sequences=2, gen_constraints= labels)
117
  except Exception as e:
118
  st.error(f"Failed to load NER model. Please check your internet connection or model availability: {e}")
119
  st.stop()
120
  model = load_ner_model()
 
121
  # Flatten the mapping to a single dictionary
122
  reverse_category_mapping = {label: category for category, label_list in category_mapping.items() for label in label_list}
 
 
 
 
 
 
 
 
 
 
 
123
  # --- Text Input and Clear Button ---
124
  word_limit = 200
125
  text = st.text_area(f"Type or paste your text below (max {word_limit} words), and then press Ctrl + Enter", height=250, key='my_text_area')
126
  word_count = len(text.split())
127
  st.markdown(f"**Word count:** {word_count}/{word_limit}")
 
128
  def clear_text():
129
- """Clears the text area."""
130
  st.session_state['my_text_area'] = ""
 
 
 
 
131
  st.button("Clear text", on_click=clear_text)
 
132
  # --- Results Section ---
133
  if st.button("Results"):
134
- start_time = time.time()
135
  if not text.strip():
136
  st.warning("Please enter some text to extract entities.")
 
137
  elif word_count > word_limit:
138
  st.warning(f"Your text exceeds the {word_limit} word limit. Please shorten it to continue.")
 
139
  else:
140
- with st.spinner("Extracting entities...", show_time=True):
141
- entities = model.predict_entities(text, labels)
142
- df = pd.DataFrame(entities)
143
- if not df.empty:
144
- df['category'] = df['label'].map(reverse_category_mapping)
145
- if comet_initialized:
146
- experiment = Experiment(
147
- api_key=COMET_API_KEY,
148
- workspace=COMET_WORKSPACE,
149
- project_name=COMET_PROJECT_NAME,
150
- )
151
- experiment.log_parameter("input_text", text)
152
- experiment.log_table("predicted_entities", df)
153
- st.subheader("Grouped Entities by Category", divider = "blue")
154
- # Create tabs for each category
155
- category_names = sorted(list(category_mapping.keys()))
156
- category_tabs = st.tabs(category_names)
157
- for i, category_name in enumerate(category_names):
158
- with category_tabs[i]:
159
- df_category_filtered = df[df['category'] == category_name]
160
- if not df_category_filtered.empty:
161
- st.dataframe(df_category_filtered.drop(columns=['category']), use_container_width=True)
162
- else:
163
- st.info(f"No entities found for the '{category_name}' category.")
164
- with st.expander("See Glossary of tags"):
165
- st.write('''
166
- - **text**: ['entity extracted from your text data']
167
- - **score**: ['accuracy score; how accurately a tag has been assigned to a given entity']
168
- - **label**: ['label (tag) assigned to a given extracted entity']
169
- - **start**: ['index of the start of the corresponding entity']
170
- - **end**: ['index of the end of the corresponding entity']
171
- ''')
172
- st.divider()
173
- # Tree map
174
- st.subheader("Tree map", divider = "blue")
175
- fig_treemap = px.treemap(df, path=[px.Constant("all"), 'category', 'label', 'text'], values='score', color='category')
176
- fig_treemap.update_layout(margin=dict(t=50, l=25, r=25, b=25), paper_bgcolor='#E0FFFF', plot_bgcolor='#E0FFFF')
177
- st.plotly_chart(fig_treemap)
178
- # Pie and Bar charts
179
- grouped_counts = df['category'].value_counts().reset_index()
180
- grouped_counts.columns = ['category', 'count']
181
- col1, col2 = st.columns(2)
182
- with col1:
183
- st.subheader("Pie chart", divider = "blue")
184
- fig_pie = px.pie(grouped_counts, values='count', names='category', hover_data=['count'], labels={'count': 'count'}, title='Percentage of predicted categories')
185
- fig_pie.update_traces(textposition='inside', textinfo='percent+label')
186
- fig_pie.update_layout(
187
- paper_bgcolor='#E0FFFF',
188
- plot_bgcolor='#E0FFFF'
189
- )
190
- st.plotly_chart(fig_pie)
191
- with col2:
192
- st.subheader("Bar chart", divider = "blue")
193
- fig_bar = px.bar(grouped_counts, x="count", y="category", color="category", text_auto=True, title='Occurrences of predicted categories')
194
- fig_bar.update_layout( # Changed from fig_pie to fig_bar
195
- paper_bgcolor='#E0FFFF',
196
- plot_bgcolor='#E0FFFF'
197
- )
198
- st.plotly_chart(fig_bar)
199
- # Most Frequent Entities
200
- st.subheader("Most Frequent Entities", divider="blue")
201
- word_counts = df['text'].value_counts().reset_index()
202
- word_counts.columns = ['Entity', 'Count']
203
- repeating_entities = word_counts[word_counts['Count'] > 1]
204
- if not repeating_entities.empty:
205
- st.dataframe(repeating_entities, use_container_width=True)
206
- fig_repeating_bar = px.bar(repeating_entities, x='Entity', y='Count', color='Entity')
207
- fig_repeating_bar.update_layout(xaxis={'categoryorder': 'total descending'},
208
- paper_bgcolor='#E0FFFF',
209
- plot_bgcolor='#E0FFFF')
210
- st.plotly_chart(fig_repeating_bar)
211
  else:
212
- st.warning("No entities were found that occur more than once.")
213
- # Download Section
214
- st.divider()
215
- dfa = pd.DataFrame(
216
- data={
217
- 'Column Name': ['text', 'label', 'score', 'start', 'end'],
218
- 'Description': [
219
- 'entity extracted from your text data',
220
- 'label (tag) assigned to a given extracted entity',
221
- 'accuracy score; how accurately a tag has been assigned to a given entity',
222
- 'index of the start of the corresponding entity',
223
- 'index of the end of the corresponding entity',
224
- ]
225
- }
226
- )
227
- buf = io.BytesIO()
228
- with zipfile.ZipFile(buf, "w") as myzip:
229
- myzip.writestr("Summary of the results.csv", df.to_csv(index=False))
230
- myzip.writestr("Glossary of tags.csv", dfa.to_csv(index=False))
231
- with stylable_container(
232
- key="download_button",
233
- css_styles="""button { background-color: red; border: 1px solid black; padding: 5px; color: white; }""",
234
- ):
235
- st.download_button(
236
- label="Download results and glossary (zip)",
237
- data=buf.getvalue(),
238
- file_name="nlpblogs_results.zip",
239
- mime="application/zip",
240
- )
241
- if comet_initialized:
242
- experiment.log_figure(figure=fig_treemap, figure_name="entity_treemap_categories")
243
- experiment.end()
244
- else: # If df is empty
245
- st.warning("No entities were found in the provided text.")
246
- end_time = time.time()
247
- elapsed_time = end_time - start_time
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
248
  st.text("")
249
  st.text("")
250
- st.info(f"Results processed in **{elapsed_time:.2f} seconds**.")
 
7
  import plotly.express as px
8
  import zipfile
9
  import json
 
10
  from streamlit_extras.stylable_container import stylable_container
11
  from typing import Optional
12
  from gliner import GLiNER
13
  from comet_ml import Experiment
14
+
15
  st.markdown(
16
  """
17
  <style>
 
25
  background-color: #ADD8E6; /* Light blue for the sidebar */
26
  secondary-background-color: #ADD8E6;
27
  }
28
+ /* Expander background color */
29
  .streamlit-expanderContent {
30
  background-color: #E0FFFF;
31
  }
32
+ /* Expander header background color */
33
  .streamlit-expanderHeader {
34
  background-color: #E0FFFF;
35
  }
 
55
  }
56
  </style>
57
  """,
58
+ unsafe_allow_html=True
59
+ )
60
+
61
  # --- Page Configuration and UI Elements ---
62
  st.set_page_config(layout="wide", page_title="Named Entity Recognition App")
63
  st.subheader("StoryCraft", divider="blue")
 
65
  expander = st.expander("**Important notes**")
66
  expander.write("""**Named Entities:** This StoryCraft web app predicts eighteen (18) labels: "Person", "Organization", "Location", "Date", "Time", "Quantity", "Product", "Event", "Title", "Job_title", "Artwork", "Media", "URL", "Website", "Hashtag", "Email", "IP_address", "File_path"
67
 
68
+ Results are presented in easy-to-read tables, visualized in an interactive tree map, pie chart and bar chart, and are available for download along with a Glossary of tags.
69
 
70
+ **How to Use:** Type or paste your text into the text area below, then press Ctrl + Enter. Click the 'Results' button to extract and tag entities in your text data.
71
 
72
+ **Usage Limits:** You can request results unlimited times for one (1) month.
73
 
74
+ **Supported Languages:** English
75
 
76
+ **Technical issues:** If your connection times out, please refresh the page or reopen the app's URL. For any errors or inquiries, please contact us at info@nlpblogs.com""")
 
 
77
 
78
  with st.sidebar:
79
  st.write("Use the following code to embed the StoryCraft web app on your website. Feel free to adjust the width and height values to fit your page.")
 
92
  st.divider()
93
  st.subheader("🚀 Ready to build your own AI Web App?", divider="blue")
94
  st.link_button("AI Web App Builder", "https://nlpblogs.com/build-your-named-entity-recognition-app/", type="primary")
95
+
96
  # --- Comet ML Setup ---
97
  COMET_API_KEY = os.environ.get("COMET_API_KEY")
98
  COMET_WORKSPACE = os.environ.get("COMET_WORKSPACE")
 
100
  comet_initialized = bool(COMET_API_KEY and COMET_WORKSPACE and COMET_PROJECT_NAME)
101
  if not comet_initialized:
102
  st.warning("Comet ML not initialized. Check environment variables.")
103
+
104
  # --- Label Definitions ---
105
+ labels = ["Person", "Organization", "Location", "Date", "Time", "Quantity", "Product", "Event", "Title", "Job_title", "Artwork", "Media", "URL", "Website", "Hashtag", "Email", "IP_address", "File_path"]
106
+
107
  # Create a mapping dictionary for labels to categories
108
  category_mapping = {
109
  "Core Foundational Entities": ["Person", "Organization", "Location", "Date", "Time", "Quantity"],
110
  "Content Enrichment Entities": ["Product", "Event", "Title", "Job_title", "Artwork", "Media"],
111
  "Digital & Technical Entities": ["URL", "Website", "Hashtag", "Email", "IP_address", "File_path"],
112
  }
113
+
114
  # --- Model Loading ---
115
  @st.cache_resource
116
  def load_ner_model():
117
  """Loads the GLiNER model and caches it."""
118
  try:
119
+ return GLiNER.from_pretrained("gliner-community/gliner_large-v2.5", nested_ner=True, num_gen_sequences=2, gen_constraints=labels)
120
  except Exception as e:
121
  st.error(f"Failed to load NER model. Please check your internet connection or model availability: {e}")
122
  st.stop()
123
  model = load_ner_model()
124
+
125
  # Flatten the mapping to a single dictionary
126
  reverse_category_mapping = {label: category for category, label_list in category_mapping.items() for label in label_list}
127
+
128
+ # --- Session State Initialization ---
129
+ if 'show_results' not in st.session_state:
130
+ st.session_state.show_results = False
131
+ if 'last_text' not in st.session_state:
132
+ st.session_state.last_text = ""
133
+ if 'results_df' not in st.session_state:
134
+ st.session_state.results_df = pd.DataFrame()
135
+ if 'elapsed_time' not in st.session_state:
136
+ st.session_state.elapsed_time = 0.0
137
+
138
  # --- Text Input and Clear Button ---
139
  word_limit = 200
140
  text = st.text_area(f"Type or paste your text below (max {word_limit} words), and then press Ctrl + Enter", height=250, key='my_text_area')
141
  word_count = len(text.split())
142
  st.markdown(f"**Word count:** {word_count}/{word_limit}")
143
+
144
  def clear_text():
145
+ """Clears the text area and hides results."""
146
  st.session_state['my_text_area'] = ""
147
+ st.session_state.show_results = False
148
+ st.session_state.last_text = ""
149
+ st.session_state.results_df = pd.DataFrame()
150
+ st.session_state.elapsed_time = 0.0
151
  st.button("Clear text", on_click=clear_text)
152
+
153
  # --- Results Section ---
154
  if st.button("Results"):
 
155
  if not text.strip():
156
  st.warning("Please enter some text to extract entities.")
157
+ st.session_state.show_results = False
158
  elif word_count > word_limit:
159
  st.warning(f"Your text exceeds the {word_limit} word limit. Please shorten it to continue.")
160
+ st.session_state.show_results = False
161
  else:
162
+ # Check if the text is different from the last time
163
+ if text != st.session_state.last_text:
164
+ st.session_state.show_results = True
165
+ st.session_state.last_text = text
166
+ start_time = time.time()
167
+ with st.spinner("Extracting entities...", show_time=True):
168
+ entities = model.predict_entities(text, labels)
169
+ df = pd.DataFrame(entities)
170
+ st.session_state.results_df = df
171
+ if not df.empty:
172
+ df['category'] = df['label'].map(reverse_category_mapping)
173
+ if comet_initialized:
174
+ experiment = Experiment(
175
+ api_key=COMET_API_KEY,
176
+ workspace=COMET_WORKSPACE,
177
+ project_name=COMET_PROJECT_NAME,
178
+ )
179
+ experiment.log_parameter("input_text", text)
180
+ experiment.log_table("predicted_entities", df)
181
+ experiment.end()
182
+ end_time = time.time()
183
+ st.session_state.elapsed_time = end_time - start_time
184
+ else:
185
+ # If the text is the same, just show the cached results without re-running
186
+ st.session_state.show_results = True
187
+
188
+ # Display results if the state variable is True
189
+ if st.session_state.show_results:
190
+ df = st.session_state.results_df
191
+ if not df.empty:
192
+ df['category'] = df['label'].map(reverse_category_mapping)
193
+ st.subheader("Grouped Entities by Category", divider="blue")
194
+
195
+ # Create tabs for each category
196
+ category_names = sorted(list(category_mapping.keys()))
197
+ category_tabs = st.tabs(category_names)
198
+
199
+ for i, category_name in enumerate(category_names):
200
+ with category_tabs[i]:
201
+ df_category_filtered = df[df['category'] == category_name]
202
+ if not df_category_filtered.empty:
203
+ st.dataframe(df_category_filtered.drop(columns=['category']), use_container_width=True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
204
  else:
205
+ st.info(f"No entities found for the '{category_name}' category.")
206
+
207
+ with st.expander("See Glossary of tags"):
208
+ st.write('''
209
+ - **text**: ['entity extracted from your text data']
210
+ - **score**: ['accuracy score; how accurately a tag has been assigned to a given entity']
211
+ - **label**: ['label (tag) assigned to a given extracted entity']
212
+ - **start**: ['index of the start of the corresponding entity']
213
+ - **end**: ['index of the end of the corresponding entity']
214
+ ''')
215
+ st.divider()
216
+
217
+ # Tree map
218
+ st.subheader("Tree map", divider="blue")
219
+ fig_treemap = px.treemap(df, path=[px.Constant("all"), 'category', 'label', 'text'], values='score', color='category')
220
+ fig_treemap.update_layout(margin=dict(t=50, l=25, r=25, b=25), paper_bgcolor='#E0FFFF', plot_bgcolor='#E0FFFF')
221
+ st.plotly_chart(fig_treemap)
222
+
223
+ # Pie and Bar charts
224
+ grouped_counts = df['category'].value_counts().reset_index()
225
+ grouped_counts.columns = ['category', 'count']
226
+ col1, col2 = st.columns(2)
227
+
228
+ with col1:
229
+ st.subheader("Pie chart", divider="blue")
230
+ fig_pie = px.pie(grouped_counts, values='count', names='category', hover_data=['count'], labels={'count': 'count'}, title='Percentage of predicted categories')
231
+ fig_pie.update_traces(textposition='inside', textinfo='percent+label')
232
+ fig_pie.update_layout(
233
+ paper_bgcolor='#E0FFFF',
234
+ plot_bgcolor='#E0FFFF'
235
+ )
236
+ st.plotly_chart(fig_pie)
237
+
238
+ with col2:
239
+ st.subheader("Bar chart", divider="blue")
240
+ fig_bar = px.bar(grouped_counts, x="count", y="category", color="category", text_auto=True, title='Occurrences of predicted categories')
241
+ fig_bar.update_layout(
242
+ paper_bgcolor='#E0FFFF',
243
+ plot_bgcolor='#E0FFFF'
244
+ )
245
+ st.plotly_chart(fig_bar)
246
+
247
+ # Most Frequent Entities
248
+ st.subheader("Most Frequent Entities", divider="blue")
249
+ word_counts = df['text'].value_counts().reset_index()
250
+ word_counts.columns = ['Entity', 'Count']
251
+ repeating_entities = word_counts[word_counts['Count'] > 1]
252
+
253
+ if not repeating_entities.empty:
254
+ st.dataframe(repeating_entities, use_container_width=True)
255
+ fig_repeating_bar = px.bar(repeating_entities, x='Entity', y='Count', color='Entity')
256
+ fig_repeating_bar.update_layout(xaxis={'categoryorder': 'total descending'},
257
+ paper_bgcolor='#E0FFFF',
258
+ plot_bgcolor='#E0FFFF')
259
+ st.plotly_chart(fig_repeating_bar)
260
+ else:
261
+ st.warning("No entities were found that occur more than once.")
262
+
263
+ # Download Section
264
+ st.divider()
265
+ dfa = pd.DataFrame(
266
+ data={
267
+ 'Column Name': ['text', 'label', 'score', 'start', 'end'],
268
+ 'Description': [
269
+ 'entity extracted from your text data',
270
+ 'label (tag) assigned to a given extracted entity',
271
+ 'accuracy score; how accurately a tag has been assigned to a given entity',
272
+ 'index of the start of the corresponding entity',
273
+ 'index of the end of the corresponding entity',
274
+ ]
275
+ }
276
+ )
277
+ buf = io.BytesIO()
278
+ with zipfile.ZipFile(buf, "w") as myzip:
279
+ myzip.writestr("Summary of the results.csv", df.to_csv(index=False))
280
+ myzip.writestr("Glossary of tags.csv", dfa.to_csv(index=False))
281
+
282
+ with stylable_container(
283
+ key="download_button",
284
+ css_styles="""button { background-color: #B0E0E6; border: 1px solid black; padding: 5px; color: #000000; }""",
285
+ ):
286
+ st.download_button(
287
+ label="Download results and glossary (zip)",
288
+ data=buf.getvalue(),
289
+ file_name="nlpblogs_results.zip",
290
+ mime="application/zip",
291
+ )
292
+
293
+ if comet_initialized:
294
+ experiment.log_figure(figure=fig_treemap, figure_name="entity_treemap_categories")
295
+ experiment.end()
296
+
297
+ else: # If df is empty
298
+ st.warning("No entities were found in the provided text.")
299
+
300
  st.text("")
301
  st.text("")
302
+ st.info(f"Results processed in **{st.session_state.elapsed_time:.2f} seconds**.")