NKessler committed on
Commit
e610257
·
verified ·
1 Parent(s): 53f1e4a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +31 -10
app.py CHANGED
@@ -43,9 +43,10 @@ def analyze_article(text: str) -> dict:
43
  Keys to return:
44
  "sentiment_score": A float between -1.0 (highly negative) and 1.0 (highly positive).
45
  "primary_tone": The single dominant emotion.
46
- "primary_theme": Choose ONE from: ["economic consequences", "moral and ethical fairness", "legal and bureaucratic", "public safety and health"].
 
47
  "tone_scores": A dictionary scoring THESE EXACT 6 EMOTIONS from 0.0 to 1.0: {{"anger": 0.0, "fear": 0.0, "joy": 0.0, "sadness": 0.0, "surprise": 0.0, "trust": 0.0}}.
48
- "framing_words": A list of the 5 to 8 most emotionally charged, biased, or subjective words used in the text (e.g., ["draconian", "slammed", "titans", "catastrophic"]).
49
  "subjectivity_score": A float between 0.0 (completely objective/factual) and 1.0 (highly opinionated/subjective).
50
 
51
  Text to analyze:
@@ -75,9 +76,10 @@ def analyze_article(text: str) -> dict:
75
  return {
76
  "sentiment_score": llm_data.get("sentiment_score", 0.0),
77
  "primary_tone": llm_data.get("primary_tone", "neutral"),
78
- "primary_theme": llm_data.get("primary_theme", "unclear"),
 
79
  "tone_scores": standard_tones,
80
- "framing_words": llm_data.get("framing_words", []),
81
  "subjectivity_score": llm_data.get("subjectivity_score", 0.0),
82
  "reading_ease": max(0.0, min(100.0, raw_reading_ease)),
83
  }
@@ -147,17 +149,24 @@ def _create_comparison_radar_chart(results_a: dict, results_b: dict) -> go.Figur
147
 
148
 
149
  def _highlight_framing_words(text: str, target_words: list) -> str:
150
- """Highlights LLM-identified framing words in the synced text snippet."""
151
  display_text = _truncate_to_words(text, MAX_WORDS)
152
  if not display_text:
153
  return ""
154
 
155
  highlighted_text = display_text + ("..." if len(text.split()) > MAX_WORDS else "")
156
 
157
- for word in target_words:
 
 
 
 
 
 
 
158
  if len(word) > 2:
159
  pattern = r'\b(' + re.escape(word) + r')\b'
160
- replacement = r"<span style='background-color: #fef08a; color: #854d0e; font-weight: 600; padding: 0.1rem 0.2rem; border-radius: 4px;'>\1</span>"
161
  highlighted_text = re.sub(pattern, replacement, highlighted_text, flags=re.IGNORECASE)
162
 
163
  return highlighted_text
@@ -365,11 +374,17 @@ if st.session_state.results_a and st.session_state.results_b:
365
  m3, m4 = st.columns(2)
366
  m1.metric("Subjectivity", f"{r_a['subjectivity_score']:.2f}", help="0 is objective, 1 is highly opinionated.")
367
  m2.metric("Primary Emotion", str(r_a['primary_tone']).title())
368
- m3.metric("Framing Lens", str(r_a['primary_theme']).title())
369
  m4.metric("Reading Ease", f"{r_a['reading_ease']:.1f}", help="0-30 is college graduate level, 60-70 is 8th grade.")
370
 
371
  st.plotly_chart(_create_sentiment_gauge(r_a["sentiment_score"], "Sentiment Bias"), use_container_width=True, key="gauge_a")
372
 
 
 
 
 
 
 
373
  st.markdown("**Key Framing Language:**")
374
  annotated_text = _highlight_framing_words(user_article_a, r_a['framing_words'])
375
  st.markdown(f"<div style='background-color: #f8fafc; padding: 1rem; border-radius: 8px; border: 1px solid #e2e8f0;'>{annotated_text}</div>", unsafe_allow_html=True)
@@ -382,10 +397,16 @@ if st.session_state.results_a and st.session_state.results_b:
382
  m3, m4 = st.columns(2)
383
  m1.metric("Subjectivity", f"{r_b['subjectivity_score']:.2f}", help="0 is objective, 1 is highly opinionated.")
384
  m2.metric("Primary Emotion", str(r_b['primary_tone']).title())
385
- m3.metric("Framing Lens", str(r_b['primary_theme']).title())
386
  m4.metric("Reading Ease", f"{r_b['reading_ease']:.1f}", help="0-30 is college graduate level, 60-70 is 8th grade.")
387
 
388
- st.plotly_chart(_create_sentiment_gauge(r_b["sentiment_score"], "Sentiment Bias"), use_container_width=True, key="gauge_b")
 
 
 
 
 
 
389
 
390
  st.markdown("**Key Framing Language:**")
391
  annotated_text = _highlight_framing_words(user_article_b, r_b['framing_words'])
 
43
  Keys to return:
44
  "sentiment_score": A float between -1.0 (highly negative) and 1.0 (highly positive).
45
  "primary_tone": The single dominant emotion.
46
+ "dynamic_topic": A 3-to-5 word summary of the article's specific angle or frame.
47
+ "key_entities": A dictionary of the top 3 entities (people, groups, concepts) mentioned, and a 1-to-2 word description of how they are portrayed (e.g., {{"Tech Leaders": "Victims", "Government": "Aggressor"}}).
48
  "tone_scores": A dictionary scoring THESE EXACT 6 EMOTIONS from 0.0 to 1.0: {{"anger": 0.0, "fear": 0.0, "joy": 0.0, "sadness": 0.0, "surprise": 0.0, "trust": 0.0}}.
49
+ "framing_words": A list of dictionaries containing the 5 to 8 most emotionally charged or biased words, and the specific emotion they evoke. Format: [{{"word": "draconian", "emotion": "fear"}}, {{"word": "titans", "emotion": "awe"}}].
50
  "subjectivity_score": A float between 0.0 (completely objective/factual) and 1.0 (highly opinionated/subjective).
51
 
52
  Text to analyze:
 
76
  return {
77
  "sentiment_score": llm_data.get("sentiment_score", 0.0),
78
  "primary_tone": llm_data.get("primary_tone", "neutral"),
79
+ "dynamic_topic": llm_data.get("dynamic_topic", "Unclear Topic"),
80
+ "key_entities": llm_data.get("key_entities", {}),
81
  "tone_scores": standard_tones,
82
+ "framing_words": llm_data.get("framing_words", []),
83
  "subjectivity_score": llm_data.get("subjectivity_score", 0.0),
84
  "reading_ease": max(0.0, min(100.0, raw_reading_ease)),
85
  }
 
149
 
150
 
151
  def _highlight_framing_words(text: str, target_words: list) -> str:
152
+ """Highlights LLM-identified framing words and tags their specific emotion."""
153
  display_text = _truncate_to_words(text, MAX_WORDS)
154
  if not display_text:
155
  return ""
156
 
157
  highlighted_text = display_text + ("..." if len(text.split()) > MAX_WORDS else "")
158
 
159
+ for item in target_words:
160
+ if isinstance(item, dict):
161
+ word = item.get("word", "")
162
+ emotion = str(item.get("emotion", "charged")).upper()
163
+ else:
164
+ word = str(item)
165
+ emotion = "CHARGED"
166
+
167
  if len(word) > 2:
168
  pattern = r'\b(' + re.escape(word) + r')\b'
169
+ replacement = rf"<span style='background-color: #fef08a; color: #854d0e; font-weight: 600; padding: 0.1rem 0.2rem; border-radius: 4px;'>\1 <span style='font-size: 0.65em; background: #854d0e; color: white; padding: 2px 4px; border-radius: 3px; margin-left: 2px; vertical-align: middle;'>{emotion}</span></span>"
170
  highlighted_text = re.sub(pattern, replacement, highlighted_text, flags=re.IGNORECASE)
171
 
172
  return highlighted_text
 
374
  m3, m4 = st.columns(2)
375
  m1.metric("Subjectivity", f"{r_a['subjectivity_score']:.2f}", help="0 is objective, 1 is highly opinionated.")
376
  m2.metric("Primary Emotion", str(r_a['primary_tone']).title())
377
+ m3.metric("Dynamic Topic", str(r_a['dynamic_topic']).title())
378
  m4.metric("Reading Ease", f"{r_a['reading_ease']:.1f}", help="0-30 is college graduate level, 60-70 is 8th grade.")
379
 
380
  st.plotly_chart(_create_sentiment_gauge(r_a["sentiment_score"], "Sentiment Bias"), use_container_width=True, key="gauge_a")
381
 
382
+ st.markdown("**Entities Portrayed:**")
383
+ entities = r_a.get('key_entities', {})
384
+ if entities:
385
+ entities_html = "".join([f"<span style='background:#f1f5f9; border: 1px solid #cbd5e1; padding:4px 8px; border-radius:6px; margin-right:8px; font-size:0.85em; display: inline-block; margin-bottom: 4px;'><b>{k}</b>: <i>{v}</i></span>" for k, v in entities.items()])
386
+ st.markdown(f"<div style='margin-bottom: 1rem;'>{entities_html}</div>", unsafe_allow_html=True)
387
+
388
  st.markdown("**Key Framing Language:**")
389
  annotated_text = _highlight_framing_words(user_article_a, r_a['framing_words'])
390
  st.markdown(f"<div style='background-color: #f8fafc; padding: 1rem; border-radius: 8px; border: 1px solid #e2e8f0;'>{annotated_text}</div>", unsafe_allow_html=True)
 
397
  m3, m4 = st.columns(2)
398
  m1.metric("Subjectivity", f"{r_b['subjectivity_score']:.2f}", help="0 is objective, 1 is highly opinionated.")
399
  m2.metric("Primary Emotion", str(r_b['primary_tone']).title())
400
+ m3.metric("Dynamic Topic", str(r_b['dynamic_topic']).title())
401
  m4.metric("Reading Ease", f"{r_b['reading_ease']:.1f}", help="0-30 is college graduate level, 60-70 is 8th grade.")
402
 
403
+ st.plotly_chart(_create_sentiment_gauge(r_b["sentiment_score"], "Sentiment Bias"), use_container_width=True, key="gauge_b")
404
+
405
+ st.markdown("**Entities Portrayed:**")
406
+ entities = r_b.get('key_entities', {})
407
+ if entities:
408
+ entities_html = "".join([f"<span style='background:#f1f5f9; border: 1px solid #cbd5e1; padding:4px 8px; border-radius:6px; margin-right:8px; font-size:0.85em; display: inline-block; margin-bottom: 4px;'><b>{k}</b>: <i>{v}</i></span>" for k, v in entities.items()])
409
+ st.markdown(f"<div style='margin-bottom: 1rem;'>{entities_html}</div>", unsafe_allow_html=True)
410
 
411
  st.markdown("**Key Framing Language:**")
412
  annotated_text = _highlight_framing_words(user_article_b, r_b['framing_words'])