Spaces:
Running
Running
Update app.py
Browse files
app.py
CHANGED
|
@@ -7,7 +7,7 @@ from textblob import TextBlob
|
|
| 7 |
import json
|
| 8 |
import os
|
| 9 |
import concurrent.futures
|
| 10 |
-
from
|
| 11 |
import textstat
|
| 12 |
import trafilatura
|
| 13 |
import requests
|
|
@@ -23,8 +23,8 @@ URL_A = "https://www.foxnews.com/live-news/trump-iran-israel-war-updates-march-3
|
|
| 23 |
URL_B = "https://edition.cnn.com/2026/03/30/world/live-news/iran-war-us-israel-trump"
|
| 24 |
|
| 25 |
# Initialize the Groq Client
|
| 26 |
-
|
| 27 |
-
client =
|
| 28 |
|
| 29 |
@st.cache_resource
|
| 30 |
def _initialize_app():
|
|
@@ -67,7 +67,13 @@ def analyze_article(text: str) -> dict:
|
|
| 67 |
"""
|
| 68 |
|
| 69 |
messages = [{"role": "user", "content": prompt}]
|
| 70 |
-
response = client.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 71 |
response_text = response.choices[0].message.content
|
| 72 |
llm_data = _extract_json_from_llm(response_text)
|
| 73 |
|
|
@@ -212,7 +218,13 @@ def check_contradiction(text_a: str, text_b: str) -> dict:
|
|
| 212 |
Text 2: "{text_b[:1000]}"
|
| 213 |
"""
|
| 214 |
messages = [{"role": "user", "content": prompt}]
|
| 215 |
-
response = client.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 216 |
response_text = response.choices[0].message.content
|
| 217 |
result = _extract_json_from_llm(response_text)
|
| 218 |
return {"relationship": result.get("relationship", "NEUTRAL"), "confidence": result.get("confidence", 0.0)}
|
|
@@ -221,8 +233,8 @@ def check_contradiction(text_a: str, text_b: str) -> dict:
|
|
| 221 |
# USER INTERFACE
|
| 222 |
st.set_page_config(page_title="FrameVis | Media Framing", layout="wide")
|
| 223 |
|
| 224 |
-
if not
|
| 225 |
-
st.warning("
|
| 226 |
st.stop()
|
| 227 |
|
| 228 |
st.markdown("""
|
|
|
|
| 7 |
import json
|
| 8 |
import os
|
| 9 |
import concurrent.futures
|
| 10 |
+
from groq import Groq
|
| 11 |
import textstat
|
| 12 |
import trafilatura
|
| 13 |
import requests
|
|
|
|
| 23 |
URL_B = "https://edition.cnn.com/2026/03/30/world/live-news/iran-war-us-israel-trump"
|
| 24 |
|
| 25 |
# Initialize the Groq Client
|
| 26 |
+
GROQ_API_KEY = os.environ.get("GROQ_API_KEY")
|
| 27 |
+
client = Groq(api_key=GROQ_API_KEY)
|
| 28 |
|
| 29 |
@st.cache_resource
|
| 30 |
def _initialize_app():
|
|
|
|
| 67 |
"""
|
| 68 |
|
| 69 |
messages = [{"role": "user", "content": prompt}]
|
| 70 |
+
response = client.chat.completions.create(
|
| 71 |
+
model="llama-3.3-70b-versatile",
|
| 72 |
+
messages=messages,
|
| 73 |
+
max_tokens=300,
|
| 74 |
+
temperature=0.1,
|
| 75 |
+
response_format={"type": "json_object"}
|
| 76 |
+
)
|
| 77 |
response_text = response.choices[0].message.content
|
| 78 |
llm_data = _extract_json_from_llm(response_text)
|
| 79 |
|
|
|
|
| 218 |
Text 2: "{text_b[:1000]}"
|
| 219 |
"""
|
| 220 |
messages = [{"role": "user", "content": prompt}]
|
| 221 |
+
response = client.chat.completions.create(
|
| 222 |
+
model="llama-3.3-70b-versatile",
|
| 223 |
+
messages=messages,
|
| 224 |
+
max_tokens=100,
|
| 225 |
+
temperature=0.1,
|
| 226 |
+
response_format={"type": "json_object"}
|
| 227 |
+
)
|
| 228 |
response_text = response.choices[0].message.content
|
| 229 |
result = _extract_json_from_llm(response_text)
|
| 230 |
return {"relationship": result.get("relationship", "NEUTRAL"), "confidence": result.get("confidence", 0.0)}
|
|
|
|
| 233 |
# USER INTERFACE
|
| 234 |
st.set_page_config(page_title="FrameVis | Media Framing", layout="wide")
|
| 235 |
|
| 236 |
+
if not GROQ_API_KEY:
|
| 237 |
+
st.warning("Groq API Token Missing.")
|
| 238 |
st.stop()
|
| 239 |
|
| 240 |
st.markdown("""
|