AIEcosystem committed on
Commit
8a77caa
·
verified ·
1 Parent(s): 5363948

Update src/streamlit_app.py

Browse files
Files changed (1) hide show
  1. src/streamlit_app.py +12 -60
src/streamlit_app.py CHANGED
@@ -12,8 +12,6 @@ from streamlit_extras.stylable_container import stylable_container
12
  from typing import Optional
13
  from gliner import GLiNER
14
  from comet_ml import Experiment
15
-
16
-
17
  st.markdown(
18
  """
19
  <style>
@@ -60,18 +58,11 @@ st.markdown(
60
  }
61
  </style>
62
  """,
63
- unsafe_allow_html=True
64
- )
65
-
66
-
67
-
68
-
69
-
70
  # --- Page Configuration and UI Elements ---
71
  st.set_page_config(layout="wide", page_title="Named Entity Recognition App")
72
  st.subheader("MediExtract", divider="gray")
73
  st.link_button("by nlpblogs", "https://nlpblogs.com", type="tertiary")
74
-
75
  expander = st.expander("**Important notes**")
76
  expander.write("""**Named Entities:** This MediExtract web app predicts sixteen (16) labels: "Disease", "Symptom", "Medication", "Dosage", "Frequency", "Procedure", "Diagnostic_test", "Lab_value", "Gene", "Protein", "Anatomy", "Cell_type", "Chemical", "Person", "Organization", "Date"
77
 
@@ -90,13 +81,8 @@ For any errors or inquiries, please contact us at info@nlpblogs.com""")
90
  with st.sidebar:
91
  st.write("Use the following code to embed the MediExtract web app on your website. Feel free to adjust the width and height values to fit your page.")
92
  code = '''
93
- <iframe
94
- src="https://aiecosystem-mediextract.hf.space"
95
- frameborder="0"
96
- width="850"
97
- height="450"
98
  ></iframe>
99
-
100
  '''
101
  st.code(code, language="html")
102
  st.text("")
@@ -104,16 +90,13 @@ with st.sidebar:
104
  st.divider()
105
  st.subheader("🚀 Ready to build your own AI Web App?", divider="gray")
106
  st.link_button("AI Web App Builder", "https://nlpblogs.com/build-your-named-entity-recognition-app/", type="primary")
107
-
108
  # --- Comet ML Setup ---
109
  COMET_API_KEY = os.environ.get("COMET_API_KEY")
110
  COMET_WORKSPACE = os.environ.get("COMET_WORKSPACE")
111
  COMET_PROJECT_NAME = os.environ.get("COMET_PROJECT_NAME")
112
  comet_initialized = bool(COMET_API_KEY and COMET_WORKSPACE and COMET_PROJECT_NAME)
113
-
114
  if not comet_initialized:
115
  st.warning("Comet ML not initialized. Check environment variables.")
116
-
117
  # --- Label Definitions ---
118
  labels = [
119
  "Disease",
@@ -131,11 +114,8 @@ labels = [
131
  "Chemical",
132
  "Person",
133
  "Organization",
134
- "Date"
135
- ]
136
-
137
  # Corrected mapping dictionary
138
-
139
  # Create a mapping dictionary for labels to categories
140
  category_mapping = {
141
  "Clinical & Procedural": [
@@ -165,15 +145,9 @@ category_mapping = {
165
  ],
166
  "Temporal": [
167
  "Date"
168
- ]
169
- }
170
-
171
-
172
-
173
-
174
  # --- Model Loading ---
175
- @st.cache_resource
176
- def load_ner_model():
177
  """Loads the GLiNER model and caches it."""
178
  try:
179
  return GLiNER.from_pretrained("Ihor/gliner-biomed-bi-large-v1.0", nested_ner=True, num_gen_sequences=2, gen_constraints= labels)
@@ -181,30 +155,28 @@ def load_ner_model():
181
  st.error(f"Failed to load NER model. Please check your internet connection or model availability: {e}")
182
  st.stop()
183
  model = load_ner_model()
184
-
185
  # Flatten the mapping to a single dictionary
186
  reverse_category_mapping = {label: category for category, label_list in category_mapping.items() for label in label_list}
187
-
188
  # --- Text Input and Clear Button ---
189
- text = st.text_area("Type or paste your text below, and then press Ctrl + Enter", height=250, key='my_text_area')
190
-
 
 
191
  def clear_text():
192
  """Clears the text area."""
193
  st.session_state['my_text_area'] = ""
194
-
195
  st.button("Clear text", on_click=clear_text)
196
-
197
-
198
  # --- Results Section ---
199
  if st.button("Results"):
200
  start_time = time.time()
201
  if not text.strip():
202
  st.warning("Please enter some text to extract entities.")
 
 
203
  else:
204
  with st.spinner("Extracting entities...", show_time=True):
205
  entities = model.predict_entities(text, labels)
206
  df = pd.DataFrame(entities)
207
-
208
  if not df.empty:
209
  df['category'] = df['label'].map(reverse_category_mapping)
210
  if comet_initialized:
@@ -215,13 +187,10 @@ if st.button("Results"):
215
  )
216
  experiment.log_parameter("input_text", text)
217
  experiment.log_table("predicted_entities", df)
218
-
219
  st.subheader("Grouped Entities by Category", divider = "gray")
220
-
221
  # Create tabs for each category
222
  category_names = sorted(list(category_mapping.keys()))
223
  category_tabs = st.tabs(category_names)
224
-
225
  for i, category_name in enumerate(category_names):
226
  with category_tabs[i]:
227
  df_category_filtered = df[df['category'] == category_name]
@@ -229,9 +198,6 @@ if st.button("Results"):
229
  st.dataframe(df_category_filtered.drop(columns=['category']), use_container_width=True)
230
  else:
231
  st.info(f"No entities found for the '{category_name}' category.")
232
-
233
-
234
-
235
  with st.expander("See Glossary of tags"):
236
  st.write('''
237
  - **text**: ['entity extracted from your text data']
@@ -241,18 +207,15 @@ if st.button("Results"):
241
  - **end**: ['index of the end of the corresponding entity']
242
  ''')
243
  st.divider()
244
-
245
  # Tree map
246
  st.subheader("Tree map", divider = "gray")
247
  fig_treemap = px.treemap(df, path=[px.Constant("all"), 'category', 'label', 'text'], values='score', color='category')
248
  fig_treemap.update_layout(margin=dict(t=50, l=25, r=25, b=25), paper_bgcolor='#F5F5F5', plot_bgcolor='#F5F5F5')
249
  st.plotly_chart(fig_treemap)
250
-
251
  # Pie and Bar charts
252
  grouped_counts = df['category'].value_counts().reset_index()
253
  grouped_counts.columns = ['category', 'count']
254
  col1, col2 = st.columns(2)
255
-
256
  with col1:
257
  st.subheader("Pie chart", divider = "gray")
258
  fig_pie = px.pie(grouped_counts, values='count', names='category', hover_data=['count'], labels={'count': 'count'}, title='Percentage of predicted categories')
@@ -262,10 +225,6 @@ if st.button("Results"):
262
  plot_bgcolor='#F5F5F5'
263
  )
264
  st.plotly_chart(fig_pie)
265
-
266
-
267
-
268
-
269
  with col2:
270
  st.subheader("Bar chart", divider = "gray")
271
  fig_bar = px.bar(grouped_counts, x="count", y="category", color="category", text_auto=True, title='Occurrences of predicted categories')
@@ -274,7 +233,6 @@ if st.button("Results"):
274
  plot_bgcolor='#F5F5F5'
275
  )
276
  st.plotly_chart(fig_bar)
277
-
278
  # Most Frequent Entities
279
  st.subheader("Most Frequent Entities", divider="gray")
280
  word_counts = df['text'].value_counts().reset_index()
@@ -289,10 +247,8 @@ if st.button("Results"):
289
  st.plotly_chart(fig_repeating_bar)
290
  else:
291
  st.warning("No entities were found that occur more than once.")
292
-
293
  # Download Section
294
  st.divider()
295
-
296
  dfa = pd.DataFrame(
297
  data={
298
  'Column Name': ['text', 'label', 'score', 'start', 'end'],
@@ -302,7 +258,6 @@ if st.button("Results"):
302
  'accuracy score; how accurately a tag has been assigned to a given entity',
303
  'index of the start of the corresponding entity',
304
  'index of the end of the corresponding entity',
305
-
306
  ]
307
  }
308
  )
@@ -310,7 +265,6 @@ if st.button("Results"):
310
  with zipfile.ZipFile(buf, "w") as myzip:
311
  myzip.writestr("Summary of the results.csv", df.to_csv(index=False))
312
  myzip.writestr("Glossary of tags.csv", dfa.to_csv(index=False))
313
-
314
  with stylable_container(
315
  key="download_button",
316
  css_styles="""button { background-color: red; border: 1px solid black; padding: 5px; color: white; }""",
@@ -321,14 +275,12 @@ if st.button("Results"):
321
  file_name="nlpblogs_results.zip",
322
  mime="application/zip",
323
  )
324
-
325
  if comet_initialized:
326
  experiment.log_figure(figure=fig_treemap, figure_name="entity_treemap_categories")
327
  experiment.end()
328
  else: # If df is empty
329
  st.warning("No entities were found in the provided text.")
330
-
331
- end_time = time.time()
332
  elapsed_time = end_time - start_time
333
  st.text("")
334
  st.text("")
 
12
  from typing import Optional
13
  from gliner import GLiNER
14
  from comet_ml import Experiment
 
 
15
  st.markdown(
16
  """
17
  <style>
 
58
  }
59
  </style>
60
  """,
61
+ unsafe_allow_html=True)
 
 
 
 
 
 
62
  # --- Page Configuration and UI Elements ---
63
  st.set_page_config(layout="wide", page_title="Named Entity Recognition App")
64
  st.subheader("MediExtract", divider="gray")
65
  st.link_button("by nlpblogs", "https://nlpblogs.com", type="tertiary")
 
66
  expander = st.expander("**Important notes**")
67
  expander.write("""**Named Entities:** This MediExtract web app predicts sixteen (16) labels: "Disease", "Symptom", "Medication", "Dosage", "Frequency", "Procedure", "Diagnostic_test", "Lab_value", "Gene", "Protein", "Anatomy", "Cell_type", "Chemical", "Person", "Organization", "Date"
68
 
 
81
  with st.sidebar:
82
  st.write("Use the following code to embed the MediExtract web app on your website. Feel free to adjust the width and height values to fit your page.")
83
  code = '''
84
+ <iframe src="https://aiecosystem-mediextract.hf.space" frameborder="0" width="850" height="450"
 
 
 
 
85
  ></iframe>
 
86
  '''
87
  st.code(code, language="html")
88
  st.text("")
 
90
  st.divider()
91
  st.subheader("🚀 Ready to build your own AI Web App?", divider="gray")
92
  st.link_button("AI Web App Builder", "https://nlpblogs.com/build-your-named-entity-recognition-app/", type="primary")
 
93
  # --- Comet ML Setup ---
94
  COMET_API_KEY = os.environ.get("COMET_API_KEY")
95
  COMET_WORKSPACE = os.environ.get("COMET_WORKSPACE")
96
  COMET_PROJECT_NAME = os.environ.get("COMET_PROJECT_NAME")
97
  comet_initialized = bool(COMET_API_KEY and COMET_WORKSPACE and COMET_PROJECT_NAME)
 
98
  if not comet_initialized:
99
  st.warning("Comet ML not initialized. Check environment variables.")
 
100
  # --- Label Definitions ---
101
  labels = [
102
  "Disease",
 
114
  "Chemical",
115
  "Person",
116
  "Organization",
117
+ "Date"]
 
 
118
  # Corrected mapping dictionary
 
119
  # Create a mapping dictionary for labels to categories
120
  category_mapping = {
121
  "Clinical & Procedural": [
 
145
  ],
146
  "Temporal": [
147
  "Date"
148
+ ]}
 
 
 
 
 
149
  # --- Model Loading ---
150
+ @st.cache_resource
+ def load_ner_model():
 
151
  """Loads the GLiNER model and caches it."""
152
  try:
153
  return GLiNER.from_pretrained("Ihor/gliner-biomed-bi-large-v1.0", nested_ner=True, num_gen_sequences=2, gen_constraints= labels)
 
155
  st.error(f"Failed to load NER model. Please check your internet connection or model availability: {e}")
156
  st.stop()
157
  model = load_ner_model()
 
158
  # Flatten the mapping to a single dictionary
159
  reverse_category_mapping = {label: category for category, label_list in category_mapping.items() for label in label_list}
 
160
  # --- Text Input and Clear Button ---
161
+ word_limit = 200
162
+ text = st.text_area(f"Type or paste your text below (max {word_limit} words), and then press Ctrl + Enter", height=250, key='my_text_area')
163
+ word_count = len(text.split())
164
+ st.markdown(f"**Word count:** {word_count}/{word_limit}")
165
  def clear_text():
166
  """Clears the text area."""
167
  st.session_state['my_text_area'] = ""
 
168
  st.button("Clear text", on_click=clear_text)
 
 
169
  # --- Results Section ---
170
  if st.button("Results"):
171
  start_time = time.time()
172
  if not text.strip():
173
  st.warning("Please enter some text to extract entities.")
174
+ elif word_count > word_limit:
175
+ st.warning(f"Your text exceeds the {word_limit} word limit. Please shorten it to continue.")
176
  else:
177
  with st.spinner("Extracting entities...", show_time=True):
178
  entities = model.predict_entities(text, labels)
179
  df = pd.DataFrame(entities)
 
180
  if not df.empty:
181
  df['category'] = df['label'].map(reverse_category_mapping)
182
  if comet_initialized:
 
187
  )
188
  experiment.log_parameter("input_text", text)
189
  experiment.log_table("predicted_entities", df)
 
190
  st.subheader("Grouped Entities by Category", divider = "gray")
 
191
  # Create tabs for each category
192
  category_names = sorted(list(category_mapping.keys()))
193
  category_tabs = st.tabs(category_names)
 
194
  for i, category_name in enumerate(category_names):
195
  with category_tabs[i]:
196
  df_category_filtered = df[df['category'] == category_name]
 
198
  st.dataframe(df_category_filtered.drop(columns=['category']), use_container_width=True)
199
  else:
200
  st.info(f"No entities found for the '{category_name}' category.")
 
 
 
201
  with st.expander("See Glossary of tags"):
202
  st.write('''
203
  - **text**: ['entity extracted from your text data']
 
207
  - **end**: ['index of the end of the corresponding entity']
208
  ''')
209
  st.divider()
 
210
  # Tree map
211
  st.subheader("Tree map", divider = "gray")
212
  fig_treemap = px.treemap(df, path=[px.Constant("all"), 'category', 'label', 'text'], values='score', color='category')
213
  fig_treemap.update_layout(margin=dict(t=50, l=25, r=25, b=25), paper_bgcolor='#F5F5F5', plot_bgcolor='#F5F5F5')
214
  st.plotly_chart(fig_treemap)
 
215
  # Pie and Bar charts
216
  grouped_counts = df['category'].value_counts().reset_index()
217
  grouped_counts.columns = ['category', 'count']
218
  col1, col2 = st.columns(2)
 
219
  with col1:
220
  st.subheader("Pie chart", divider = "gray")
221
  fig_pie = px.pie(grouped_counts, values='count', names='category', hover_data=['count'], labels={'count': 'count'}, title='Percentage of predicted categories')
 
225
  plot_bgcolor='#F5F5F5'
226
  )
227
  st.plotly_chart(fig_pie)
 
 
 
 
228
  with col2:
229
  st.subheader("Bar chart", divider = "gray")
230
  fig_bar = px.bar(grouped_counts, x="count", y="category", color="category", text_auto=True, title='Occurrences of predicted categories')
 
233
  plot_bgcolor='#F5F5F5'
234
  )
235
  st.plotly_chart(fig_bar)
 
236
  # Most Frequent Entities
237
  st.subheader("Most Frequent Entities", divider="gray")
238
  word_counts = df['text'].value_counts().reset_index()
 
247
  st.plotly_chart(fig_repeating_bar)
248
  else:
249
  st.warning("No entities were found that occur more than once.")
 
250
  # Download Section
251
  st.divider()
 
252
  dfa = pd.DataFrame(
253
  data={
254
  'Column Name': ['text', 'label', 'score', 'start', 'end'],
 
258
  'accuracy score; how accurately a tag has been assigned to a given entity',
259
  'index of the start of the corresponding entity',
260
  'index of the end of the corresponding entity',
 
261
  ]
262
  }
263
  )
 
265
  with zipfile.ZipFile(buf, "w") as myzip:
266
  myzip.writestr("Summary of the results.csv", df.to_csv(index=False))
267
  myzip.writestr("Glossary of tags.csv", dfa.to_csv(index=False))
 
268
  with stylable_container(
269
  key="download_button",
270
  css_styles="""button { background-color: red; border: 1px solid black; padding: 5px; color: white; }""",
 
275
  file_name="nlpblogs_results.zip",
276
  mime="application/zip",
277
  )
 
278
  if comet_initialized:
279
  experiment.log_figure(figure=fig_treemap, figure_name="entity_treemap_categories")
280
  experiment.end()
281
  else: # If df is empty
282
  st.warning("No entities were found in the provided text.")
283
+ end_time = time.time()
 
284
  elapsed_time = end_time - start_time
285
  st.text("")
286
  st.text("")