NKessler committed on
Commit
9986f92
·
verified ·
1 Parent(s): 660fff4

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +34 -25
app.py CHANGED
@@ -5,9 +5,9 @@ import plotly.graph_objects as go
5
  import streamlit as st
6
  from textblob import TextBlob
7
  import json
 
8
  import os
9
  import concurrent.futures
10
- from groq import Groq
11
  import textstat
12
  import trafilatura
13
  import requests
@@ -22,9 +22,12 @@ ARTICLE_B = """Tech industry leaders and economists are sounding the alarm over
22
  URL_A = "https://www.foxnews.com/live-news/trump-iran-israel-war-updates-march-30"
23
  URL_B = "https://edition.cnn.com/2026/03/30/world/live-news/iran-war-us-israel-trump"
24
 
25
- # Initialize the Hugging Face Client
26
- GROQ_API_KEY = os.environ.get("GROQ_API_KEY")
27
- client = Groq(api_key=GROQ_API_KEY)
 
 
 
28
 
29
  def _truncate_to_words(text: str, limit: int) -> str:
30
  """Truncates text by word count."""
@@ -62,25 +65,33 @@ def analyze_article(text: str) -> dict:
62
  Text to analyze:
63
  "{safe_text}"
64
  """
65
-
66
- messages = [{"role": "user", "content": prompt}]
67
- response = client.chat.completions.create(
68
- model="llama-3.3-70b-versatile",
69
- messages=messages,
70
- max_tokens=300,
71
- temperature=0.1,
72
- response_format={"type": "json_object"}
73
  )
74
- llm_data = json.loads(response.choices[0].message.content)
75
 
76
  subjectivity_score = TextBlob(safe_text).sentiment.subjectivity
77
  raw_reading_ease = textstat.flesch_reading_ease(safe_text)
78
 
 
 
 
 
 
 
 
 
 
 
79
  return {
80
  "sentiment_score": llm_data.get("sentiment_score", 0.0),
81
  "primary_tone": llm_data.get("primary_tone", "neutral"),
82
  "primary_theme": llm_data.get("primary_theme", "unclear"),
83
- "tone_scores": llm_data.get("tone_scores", {"anger": 0, "fear": 0, "joy": 0, "sadness": 0, "surprise": 0, "trust": 0}),
84
  "framing_words": llm_data.get("framing_words", []),
85
  "subjectivity_score": subjectivity_score,
86
  "reading_ease": max(0.0, min(100.0, raw_reading_ease)),
@@ -214,24 +225,22 @@ def check_contradiction(text_a: str, text_b: str) -> dict:
214
  Text 1: "{safe_a}"
215
  Text 2: "{safe_b}"
216
  """
217
- messages = [{"role": "user", "content": prompt}]
218
- response = client.chat.completions.create(
219
- model="llama-3.3-70b-versatile",
220
- messages=messages,
221
- max_tokens=100,
222
- temperature=0.1,
223
- response_format={"type": "json_object"}
224
  )
225
-
226
- result = json.loads(response.choices[0].message.content)
227
  return {"relationship": result.get("relationship", "NEUTRAL"), "confidence": result.get("confidence", 0.0)}
228
 
229
 
230
  # USER INTERFACE
231
  st.set_page_config(page_title="FrameVis | Media Framing", layout="wide")
232
 
233
- if not GROQ_API_KEY:
234
- st.warning("Groq API Token Missing.")
235
  st.stop()
236
 
237
  st.markdown("""
 
5
  import streamlit as st
6
  from textblob import TextBlob
7
  import json
8
+ import google.generativeai as genai
9
  import os
10
  import concurrent.futures
 
11
  import textstat
12
  import trafilatura
13
  import requests
 
22
  URL_A = "https://www.foxnews.com/live-news/trump-iran-israel-war-updates-march-30"
23
  URL_B = "https://edition.cnn.com/2026/03/30/world/live-news/iran-war-us-israel-trump"
24
 
25
+ # Initialize the AI model
26
+ GEMINI_API_KEY = os.environ.get("GEMINI_API_KEY")
27
+ if GEMINI_API_KEY:
28
+ genai.configure(api_key=GEMINI_API_KEY)
29
+
30
+ ai_model = genai.GenerativeModel('gemini-2.5-pro')
31
 
32
  def _truncate_to_words(text: str, limit: int) -> str:
33
  """Truncates text by word count."""
 
65
  Text to analyze:
66
  "{safe_text}"
67
  """
68
+ response = ai_model.generate_content(
69
+ prompt,
70
+ generation_config={
71
+ "response_mime_type": "application/json",
72
+ "temperature": 0.1,
73
+ }
 
 
74
  )
75
+ llm_data = json.loads(response.text)
76
 
77
  subjectivity_score = TextBlob(safe_text).sentiment.subjectivity
78
  raw_reading_ease = textstat.flesch_reading_ease(safe_text)
79
 
80
+ tones = llm_data.get("tone_scores", {})
81
+ standard_tones = {
82
+ "anger": tones.get("anger", 0.0),
83
+ "fear": tones.get("fear", 0.0),
84
+ "joy": tones.get("joy", 0.0),
85
+ "sadness": tones.get("sadness", 0.0),
86
+ "surprise": tones.get("surprise", 0.0),
87
+ "trust": tones.get("trust", 0.0),
88
+ }
89
+
90
  return {
91
  "sentiment_score": llm_data.get("sentiment_score", 0.0),
92
  "primary_tone": llm_data.get("primary_tone", "neutral"),
93
  "primary_theme": llm_data.get("primary_theme", "unclear"),
94
+ "tone_scores": standard_tones,
95
  "framing_words": llm_data.get("framing_words", []),
96
  "subjectivity_score": subjectivity_score,
97
  "reading_ease": max(0.0, min(100.0, raw_reading_ease)),
 
225
  Text 1: "{safe_a}"
226
  Text 2: "{safe_b}"
227
  """
228
+ response = ai_model.generate_content(
229
+ prompt,
230
+ generation_config={
231
+ "response_mime_type": "application/json",
232
+ "temperature": 0.1,
233
+ }
 
234
  )
235
+ result = json.loads(response.text)
 
236
  return {"relationship": result.get("relationship", "NEUTRAL"), "confidence": result.get("confidence", 0.0)}
237
 
238
 
239
  # USER INTERFACE
240
  st.set_page_config(page_title="FrameVis | Media Framing", layout="wide")
241
 
242
+ if not GEMINI_API_KEY:
243
+ st.warning("Gemini API Token Missing.")
244
  st.stop()
245
 
246
  st.markdown("""