NKessler committed on
Commit
62090b1
·
verified ·
1 Parent(s): 31fb8ae

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +48 -25
app.py CHANGED
@@ -3,7 +3,7 @@ import re
3
  import plotly.graph_objects as go
4
  import streamlit as st
5
  import json
6
- import google.generativeai as genai
7
  import concurrent.futures
8
  import textstat
9
  import trafilatura
@@ -19,11 +19,16 @@ URL_A = "https://www.foxnews.com/live-news/trump-iran-israel-war-updates-march-3
19
  URL_B = "https://edition.cnn.com/2026/03/30/world/live-news/iran-war-us-israel-trump"
20
 
21
  # Initialize the AI model
22
- GEMINI_API_KEY = st.secrets.get("GEMINI_API_KEY")
23
- if GEMINI_API_KEY:
24
- genai.configure(api_key=GEMINI_API_KEY)
25
 
26
- ai_model = genai.GenerativeModel('gemini-3.1-flash-lite-preview')
 
 
 
 
 
 
 
27
 
28
  def _truncate_to_words(text: str, limit: int) -> str:
29
  """Truncates text by word count."""
@@ -51,14 +56,20 @@ def analyze_article(text: str) -> dict:
51
  Text to analyze:
52
  "{safe_text}"
53
  """
54
- response = ai_model.generate_content(
55
- prompt,
56
- generation_config={
57
- "response_mime_type": "application/json",
58
- "temperature": 0.1,
59
- }
60
- )
61
- llm_data = json.loads(response.text)
 
 
 
 
 
 
62
 
63
  raw_reading_ease = textstat.flesch_reading_ease(safe_text)
64
 
@@ -219,14 +230,20 @@ def check_contradiction(text_a: str, text_b: str) -> dict:
219
  Text 1: "{safe_a}"
220
  Text 2: "{safe_b}"
221
  """
222
- response = ai_model.generate_content(
223
- prompt,
224
- generation_config={
225
- "response_mime_type": "application/json",
226
- "temperature": 0.1,
227
- }
228
- )
229
- result = json.loads(response.text)
 
 
 
 
 
 
230
  return {"relationship": result.get("relationship", "NEUTRAL"), "confidence": result.get("confidence", 0.0)}
231
 
232
 
@@ -328,11 +345,17 @@ if execute_analysis:
328
  st.error("One of the URLs could not be scraped. Please copy and paste the text directly.")
329
 
330
  else:
331
- with st.spinner("Analyzing framing semantics for both sources (this takes a few seconds)..."):
 
332
  try:
333
- st.session_state.results_a = analyze_article(text_a_clean)
334
- st.session_state.results_b = analyze_article(text_b_clean)
335
- st.session_state.nli_result = check_contradiction(text_a_clean, text_b_clean)
 
 
 
 
 
336
 
337
  except Exception as e:
338
  error_msg = str(e)
 
3
  import plotly.graph_objects as go
4
  import streamlit as st
5
  import json
6
+ from openai import OpenAI
7
  import concurrent.futures
8
  import textstat
9
  import trafilatura
 
19
  URL_B = "https://edition.cnn.com/2026/03/30/world/live-news/iran-war-us-israel-trump"
20
 
21
  # Initialize the AI model
22
+ GROQ_API_KEY = st.secrets.get("GROQ_API_KEY")
 
 
23
 
24
+ if not GROQ_API_KEY:
25
+ st.warning("Groq API Token Missing.")
26
+ st.stop()
27
+
28
+ client = OpenAI(
29
+ api_key=GROQ_API_KEY,
30
+ base_url="https://api.groq.com/openai/v1",
31
+ )
32
 
33
  def _truncate_to_words(text: str, limit: int) -> str:
34
  """Truncates text by word count."""
 
56
  Text to analyze:
57
  "{safe_text}"
58
  """
59
+ try:
60
+ response = client.chat.completions.create(
61
+ model="llama-3.3-70b-versatile",
62
+ response_format={"type": "json_object"},
63
+ temperature=0.1,
64
+ messages=[
65
+ {"role": "system", "content": "You are a media analyst. You MUST respond with ONLY valid JSON matching the exact requested schema."},
66
+ {"role": "user", "content": prompt}
67
+ ]
68
+ )
69
+ llm_data = json.loads(response.choices[0].message.content)
70
+ except Exception as e:
71
+ print(f"Error parsing LLM response: {e}")
72
+ llm_data = {}
73
 
74
  raw_reading_ease = textstat.flesch_reading_ease(safe_text)
75
 
 
230
  Text 1: "{safe_a}"
231
  Text 2: "{safe_b}"
232
  """
233
+ try:
234
+ response = client.chat.completions.create(
235
+ model="llama-3.3-70b-versatile",
236
+ response_format={"type": "json_object"},
237
+ temperature=0.1,
238
+ messages=[
239
+ {"role": "system", "content": "You are a fact-checker. You MUST respond with ONLY valid JSON."},
240
+ {"role": "user", "content": prompt}
241
+ ]
242
+ )
243
+ result = json.loads(response.choices[0].message.content)
244
+ except Exception as e:
245
+ print(f"Error in contradiction check: {e}")
246
+ result = {}
247
  return {"relationship": result.get("relationship", "NEUTRAL"), "confidence": result.get("confidence", 0.0)}
248
 
249
 
 
345
  st.error("One of the URLs could not be scraped. Please copy and paste the text directly.")
346
 
347
  else:
348
+
349
+ with st.spinner("Analyzing both sources."):
350
  try:
351
+ with concurrent.futures.ThreadPoolExecutor() as executor:
352
+ future_a = executor.submit(analyze_article, text_a_clean)
353
+ future_b = executor.submit(analyze_article, text_b_clean)
354
+ future_nli = executor.submit(check_contradiction, text_a_clean, text_b_clean)
355
+
356
+ st.session_state.results_a = future_a.result()
357
+ st.session_state.results_b = future_b.result()
358
+ st.session_state.nli_result = future_nli.result()
359
 
360
  except Exception as e:
361
  error_msg = str(e)