NKessler committed on
Commit
6a5f3c3
·
verified ·
1 Parent(s): 3bbb414

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +18 -24
app.py CHANGED
@@ -57,16 +57,17 @@ def analyze_article(text: str) -> dict:
57
 
58
  Keys to return:
59
  "sentiment_score": A float between -1.0 (highly negative) and 1.0 (highly positive).
60
- "primary_tone": The single dominant emotion (e.g., anger, fear, joy, sadness, surprise, neutral).
61
  "primary_theme": Choose ONE from: ["economic consequences", "moral and ethical fairness", "legal and bureaucratic", "public safety and health"].
62
- "tone_scores": A dictionary scoring the top 3 emotions present from 0.0 to 1.0 (e.g., {{"fear": 0.8, "anger": 0.5}}).
 
63
 
64
  Text to analyze:
65
  "{safe_text}"
66
  """
67
 
68
  messages = [{"role": "user", "content": prompt}]
69
- response = client.chat_completion(messages=messages, max_tokens=250, temperature=0.1)
70
  response_text = response.choices[0].message.content
71
  llm_data = _extract_json_from_llm(response_text)
72
 
@@ -77,7 +78,8 @@ def analyze_article(text: str) -> dict:
77
  "sentiment_score": llm_data.get("sentiment_score", 0.0),
78
  "primary_tone": llm_data.get("primary_tone", "neutral"),
79
  "primary_theme": llm_data.get("primary_theme", "unclear"),
80
- "tone_scores": llm_data.get("tone_scores", {"neutral": 1.0}),
 
81
  "subjectivity_score": subjectivity_score,
82
  "reading_ease": max(0.0, min(100.0, raw_reading_ease)),
83
  }
@@ -146,29 +148,21 @@ def _create_comparison_radar_chart(results_a: dict, results_b: dict) -> go.Figur
146
  return fig
147
 
148
 
149
- def _highlight_framing_words(text: str) -> str:
150
- """Highlights subjective or emotional words in the text snippet."""
151
- raw_sentences = re.split(r'(?<=[.!?]) +', text)
152
- snippet = " ".join(raw_sentences[:3])
153
- if not snippet:
154
  return ""
155
 
156
- blob = TextBlob(snippet)
157
- target_words = set()
158
 
159
- for word in blob.words:
160
- w_sentiment = TextBlob(word).sentiment
161
- if w_sentiment.subjectivity > 0.5 or abs(w_sentiment.polarity) > 0.3:
162
- if len(word) > 2:
163
- target_words.add(str(word))
164
-
165
- highlighted_snippet = snippet
166
  for word in target_words:
167
- pattern = r'\b(' + re.escape(word) + r')\b'
168
- replacement = r"<span style='background-color: #fef08a; color: #854d0e; font-weight: 600; padding: 0.1rem 0.2rem; border-radius: 4px;'>\1</span>"
169
- highlighted_snippet = re.sub(pattern, replacement, highlighted_snippet, flags=re.IGNORECASE)
 
170
 
171
- return highlighted_snippet + ("..." if len(raw_sentences) > 3 else "")
172
 
173
  @st.cache_data(ttl=3600, show_spinner=False)
174
  def fetch_article_text(url: str) -> str:
@@ -351,7 +345,7 @@ if st.session_state.results_a and st.session_state.results_b:
351
  st.plotly_chart(_create_sentiment_gauge(r_a["sentiment_score"], "Sentiment Bias"), use_container_width=True, key="gauge_a")
352
 
353
  st.markdown("**Key Framing Language:**")
354
- annotated_text = _highlight_framing_words(user_article_a)
355
  st.markdown(f"<div style='background-color: #f8fafc; padding: 1rem; border-radius: 8px; border: 1px solid #e2e8f0;'>{annotated_text}</div>", unsafe_allow_html=True)
356
 
357
  # Render Column B
@@ -368,5 +362,5 @@ if st.session_state.results_a and st.session_state.results_b:
368
  st.plotly_chart(_create_sentiment_gauge(r_b["sentiment_score"], "Sentiment Bias"), use_container_width=True, key="gauge_b")
369
 
370
  st.markdown("**Key Framing Language:**")
371
- annotated_text = _highlight_framing_words(user_article_b)
372
  st.markdown(f"<div style='background-color: #f8fafc; padding: 1rem; border-radius: 8px; border: 1px solid #e2e8f0;'>{annotated_text}</div>", unsafe_allow_html=True)
 
57
 
58
  Keys to return:
59
  "sentiment_score": A float between -1.0 (highly negative) and 1.0 (highly positive).
60
+ "primary_tone": The single dominant emotion.
61
  "primary_theme": Choose ONE from: ["economic consequences", "moral and ethical fairness", "legal and bureaucratic", "public safety and health"].
62
+ "tone_scores": A dictionary scoring THESE EXACT 6 EMOTIONS from 0.0 to 1.0: {{"anger": 0.0, "fear": 0.0, "joy": 0.0, "sadness": 0.0, "surprise": 0.0, "trust": 0.0}}.
63
+ "framing_words": A list of the 5 to 8 most emotionally charged, biased, or subjective words used in the text (e.g., ["draconian", "slammed", "titans", "catastrophic"]).
64
 
65
  Text to analyze:
66
  "{safe_text}"
67
  """
68
 
69
  messages = [{"role": "user", "content": prompt}]
70
+ response = client.chat_completion(messages=messages, max_tokens=300, temperature=0.1)
71
  response_text = response.choices[0].message.content
72
  llm_data = _extract_json_from_llm(response_text)
73
 
 
78
  "sentiment_score": llm_data.get("sentiment_score", 0.0),
79
  "primary_tone": llm_data.get("primary_tone", "neutral"),
80
  "primary_theme": llm_data.get("primary_theme", "unclear"),
81
+ "tone_scores": llm_data.get("tone_scores", {"anger": 0, "fear": 0, "joy": 0, "sadness": 0, "surprise": 0, "trust": 0}),
82
+ "framing_words": llm_data.get("framing_words", []),
83
  "subjectivity_score": subjectivity_score,
84
  "reading_ease": max(0.0, min(100.0, raw_reading_ease)),
85
  }
 
148
  return fig
149
 
150
 
151
+ def _highlight_framing_words(text: str, target_words: list) -> str:
152
+ """Highlights LLM-identified framing words in the full text snippet."""
153
+ if not text:
 
 
154
  return ""
155
 
156
+ words = text.split()
157
+ highlighted_text = display_text
158
 
 
 
 
 
 
 
 
159
  for word in target_words:
160
+ if len(word) > 2:
161
+ pattern = r'\b(' + re.escape(word) + r')\b'
162
+ replacement = r"<span style='background-color: #fef08a; color: #854d0e; font-weight: 600; padding: 0.1rem 0.2rem; border-radius: 4px;'>\1</span>"
163
+ highlighted_text = re.sub(pattern, replacement, highlighted_text, flags=re.IGNORECASE)
164
 
165
+ return highlighted_text
166
 
167
  @st.cache_data(ttl=3600, show_spinner=False)
168
  def fetch_article_text(url: str) -> str:
 
345
  st.plotly_chart(_create_sentiment_gauge(r_a["sentiment_score"], "Sentiment Bias"), use_container_width=True, key="gauge_a")
346
 
347
  st.markdown("**Key Framing Language:**")
348
+ annotated_text = _highlight_framing_words(user_article_a, r_a['framing_words'])
349
  st.markdown(f"<div style='background-color: #f8fafc; padding: 1rem; border-radius: 8px; border: 1px solid #e2e8f0;'>{annotated_text}</div>", unsafe_allow_html=True)
350
 
351
  # Render Column B
 
362
  st.plotly_chart(_create_sentiment_gauge(r_b["sentiment_score"], "Sentiment Bias"), use_container_width=True, key="gauge_b")
363
 
364
  st.markdown("**Key Framing Language:**")
365
+ annotated_text = _highlight_framing_words(user_article_b, r_b['framing_words'])
366
  st.markdown(f"<div style='background-color: #f8fafc; padding: 1rem; border-radius: 8px; border: 1px solid #e2e8f0;'>{annotated_text}</div>", unsafe_allow_html=True)