Prageeth-1 committed on
Commit
512eff9
·
verified ·
1 Parent(s): 0eaa186

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +19 -10
app.py CHANGED
@@ -219,6 +219,9 @@ with tab1:
219
  # Keep only necessary columns
220
  df = df[['content','Class']]
221
 
 
 
 
222
 
223
  # Word Cloud Visualization
224
  def create_wordcloud(text_data):
@@ -232,11 +235,7 @@ with tab1:
232
  st.subheader("Word Cloud of News Content")
233
  create_wordcloud(df['content'])
234
 
235
-
236
- #show Classification Results
237
- st.subheader("Classification Results")
238
- st.write(df)
239
-
240
  st.subheader("Class Distribution")
241
  class_dist = df['Class'].value_counts()
242
  st.bar_chart(class_dist)
@@ -256,19 +255,23 @@ with tab2:
256
  st.markdown('<div class="card">', unsafe_allow_html=True)
257
  st.header("Ask Questions Based on Uploaded News Content File")
258
  st.write("Ask questions about news content and get answers from our AI model.")
259
-
 
260
  if uploaded_file is not None:
261
  context = ' '.join(df['content'].tolist())
262
  st.write(f"Loaded {len(df)} news excerpts")
263
  else:
264
  st.warning("Please upload a CSV file.")
265
-
 
266
  question = st.text_input("Enter your question:")
267
  if st.button("Get Answer"):
 
268
  if uploaded_file is None:
269
  st.error("Please upload a CSV file before asking a question.")
270
  elif context and question:
271
  with st.spinner("Searching for answers..."):
 
272
  qa_pipeline = load_qa_model()
273
  result = qa_pipeline(question=question, context=context)
274
  st.subheader("Answer")
@@ -277,12 +280,15 @@ with tab2:
277
  st.write(f"Confidence: {result['score']:.2f}")
278
  else:
279
  st.error("Please enter a question.")
280
-
 
281
  st.markdown("---")
282
  st.header("Ask Questions Based on Your News Content")
283
- context_1 = st.text_area("Enter the news content (context):")
 
284
  question_1 = st.text_input("Enter your question:", key="question_input")
285
  if st.button("Get Answer", key="get_answer_1"):
 
286
  if context_1 and question_1:
287
  qa_pipeline = load_qa_model()
288
  answer_1 = qa_pipeline(question=question_1, context=context_1)
@@ -297,11 +303,12 @@ with tab3:
297
  st.write("Explore additional functionalities to enhance your news analysis.")
298
 
299
 
300
- # Named Entity Recognition
301
  st.subheader("Named Entity Recognition Of News Content")
302
  ner_text = st.text_area("Enter News Content for entity recognition:", height=100)
303
  if st.button("Extract Entities"):
304
  with st.spinner("Identifying entities..."):
 
305
  ner_pipeline = pipeline("ner", grouped_entities=True)
306
  results = ner_pipeline(ner_text)
307
  entities = []
@@ -318,6 +325,7 @@ with tab3:
318
  summary_text = st.text_area("Enter news content to summarize:", height=150)
319
  if st.button("Generate Summary"):
320
  with st.spinner("Generating summary..."):
 
321
  summarizer = pipeline("summarization")
322
  summary = summarizer(summary_text, max_length=130, min_length=30)
323
  st.write(summary[0]['summary_text'])
@@ -329,6 +337,7 @@ with tab3:
329
  sentiment_text = st.text_area("Enter text for news content analysis:", height=100)
330
  if st.button("Analyze Sentiment"):
331
  with st.spinner("Analyzing sentiment..."):
 
332
  sentiment_pipeline = pipeline("sentiment-analysis")
333
  result = sentiment_pipeline(sentiment_text)[0]
334
  st.write(f"Label: {result['label']}")
 
219
  # Keep only necessary columns
220
  df = df[['content','Class']]
221
 
222
+ #show Classification Results
223
+ st.subheader("Classification Results")
224
+ st.write(df)
225
 
226
  # Word Cloud Visualization
227
  def create_wordcloud(text_data):
 
235
  st.subheader("Word Cloud of News Content")
236
  create_wordcloud(df['content'])
237
 
238
+ #show class distribution
 
 
 
 
239
  st.subheader("Class Distribution")
240
  class_dist = df['Class'].value_counts()
241
  st.bar_chart(class_dist)
 
255
  st.markdown('<div class="card">', unsafe_allow_html=True)
256
  st.header("Ask Questions Based on Uploaded News Content File")
257
  st.write("Ask questions about news content and get answers from our AI model.")
258
+
259
+ #check file is uploaded
260
  if uploaded_file is not None:
261
  context = ' '.join(df['content'].tolist())
262
  st.write(f"Loaded {len(df)} news excerpts")
263
  else:
264
  st.warning("Please upload a CSV file.")
265
+
266
+ #generate the answer based on uploaded news content file using the given model
267
  question = st.text_input("Enter your question:")
268
  if st.button("Get Answer"):
269
+ #check whether a file is available
270
  if uploaded_file is None:
271
  st.error("Please upload a CSV file before asking a question.")
272
  elif context and question:
273
  with st.spinner("Searching for answers..."):
274
+ #load the model for the Q&A pipeline
275
  qa_pipeline = load_qa_model()
276
  result = qa_pipeline(question=question, context=context)
277
  st.subheader("Answer")
 
280
  st.write(f"Confidence: {result['score']:.2f}")
281
  else:
282
  st.error("Please enter a question.")
283
+
284
+ #generate the answer based on selected news content using the given model
285
  st.markdown("---")
286
  st.header("Ask Questions Based on Your News Content")
287
+ article = st.selectbox("Choose an article for the question:", df['content'].tolist())
288
+
289
  question_1 = st.text_input("Enter your question:", key="question_input")
290
  if st.button("Get Answer", key="get_answer_1"):
291
+ #check that the selected context and question are available
292
  if context_1 and question_1:
293
  qa_pipeline = load_qa_model()
294
  answer_1 = qa_pipeline(question=question_1, context=context_1)
 
303
  st.write("Explore additional functionalities to enhance your news analysis.")
304
 
305
 
306
+ # Named Entity Recognition of news content
307
  st.subheader("Named Entity Recognition Of News Content")
308
  ner_text = st.text_area("Enter News Content for entity recognition:", height=100)
309
  if st.button("Extract Entities"):
310
  with st.spinner("Identifying entities..."):
311
+ #load the model
312
  ner_pipeline = pipeline("ner", grouped_entities=True)
313
  results = ner_pipeline(ner_text)
314
  entities = []
 
325
  summary_text = st.text_area("Enter news content to summarize:", height=150)
326
  if st.button("Generate Summary"):
327
  with st.spinner("Generating summary..."):
328
+ #load the summarization model
329
  summarizer = pipeline("summarization")
330
  summary = summarizer(summary_text, max_length=130, min_length=30)
331
  st.write(summary[0]['summary_text'])
 
337
  sentiment_text = st.text_area("Enter text for news content analysis:", height=100)
338
  if st.button("Analyze Sentiment"):
339
  with st.spinner("Analyzing sentiment..."):
340
+ #load the model
341
  sentiment_pipeline = pipeline("sentiment-analysis")
342
  result = sentiment_pipeline(sentiment_text)[0]
343
  st.write(f"Label: {result['label']}")