import streamlit as st
import requests
import pandas as pd
import plotly.graph_objects as go
import plotly.express as px
import json
from collections import Counter
# Base URL of the FastAPI inference backend queried by call_predict/call_batch.
API_URL = "http://localhost:8000"

# Accent color per sentiment class (badges and chart traces).
COLORS = {
    "positive": "#00d4aa",
    "neutral": "#6b7280",
    "negative": "#ff4757",
}

# Direction glyph per sentiment class.
# NOTE(review): the glyphs below look mojibake'd in this view — confirm their
# exact bytes against the original file before relying on them.
EMOJI = {
    "positive": "β²",
    "neutral": "β ",
    "negative": "βΌ",
}

# Badge color per risk bucket reported by the API.
RISK_COLOR = {
    "HIGH": "#ff4757",
    "MEDIUM": "#ffa502",
    "LOW": "#00d4aa",
}

# Canned English and Turkish sentences for the EXAMPLES tab.
EXAMPLE_SENTENCES = [
    "Operating profit rose to EUR 13.1 mn from EUR 8.7 mn in the year-ago period.",
    "Stock prices crashed after company reported massive losses and declared bankruptcy.",
    "Net sales remained stable compared to the previous fiscal year.",
    "Revenue surged 40% driven by record-breaking demand across all segments.",
    "The firm announced layoffs affecting 2,000 employees amid declining revenues.",
    "The board decided to maintain the current dividend policy unchanged.",
    "BIST 100 rekor kirdi, yatirimcilar buyuk kazanc elde etti.",
    "Sirket batti, hissedarlar tum parasini kaybetti, borclar odenemiyor.",
    "Merkez Bankasi faiz oranini degistirmedi.",
]
# Streamlit page chrome; must be the first st.* call in the script.
st.set_page_config(
    page_title="FinSentiment",
    page_icon="π",  # NOTE(review): icon glyph appears mojibake'd in this view
    layout="wide",
    initial_sidebar_state="expanded",
)
# ── Bloomberg-style dark theme CSS ──
# NOTE(review): the CSS payload of this markdown call appears to have been
# stripped from this view — confirm against the original file.
st.markdown("""
""", unsafe_allow_html=True)
# ── Header banner (raw HTML; markup appears stripped in this view) ──
st.markdown("""
β¬ FINSENTIMENT
Financial NLP Intelligence Platform Β· TR/EN
""", unsafe_allow_html=True)
# ββ Helpers βββββββββββββββββββββββββββββββββββββββββββββββββββ
def call_predict(text):
    """POST *text* to the API's /predict endpoint and return the parsed JSON.

    Shows an actionable "API offline" hint when the backend is unreachable,
    and the raw error message for any other failure. Returns None on error.
    """
    endpoint = f"{API_URL}/predict"
    try:
        resp = requests.post(endpoint, json={"text": text}, timeout=30)
        resp.raise_for_status()
        return resp.json()
    except requests.exceptions.ConnectionError:
        st.error("API offline β uvicorn src.api:app --port 8000")
    except Exception as exc:
        st.error(f"Error: {exc}")
    return None
def call_batch(texts):
    """POST *texts* (list of strings) to /predict/batch and return parsed JSON.

    Error handling mirrors call_predict: an actionable "API offline" hint for
    connection failures, a generic message otherwise. Returns None on error.
    """
    try:
        r = requests.post(f"{API_URL}/predict/batch", json={"texts": texts}, timeout=60)
        r.raise_for_status()
        return r.json()
    except requests.exceptions.ConnectionError:
        # FIX: previously fell through to the generic handler; now consistent
        # with call_predict so users get the same actionable hint.
        st.error("API offline β uvicorn src.api:app --port 8000")
        return None
    except Exception as e:
        st.error(f"Error: {e}")
        return None
def sentiment_badge(sentiment, confidence):
    """Return an HTML badge snippet for *sentiment* with *confidence* as a %.

    NOTE(review): the HTML markup inside the returned f-string appears to have
    been stripped from this view (the `color` local is not visibly used) —
    confirm against the original file.
    """
    color = COLORS[sentiment]  # badge accent color
    arrow = EMOJI[sentiment]   # direction glyph
    return f"""
    {arrow} {sentiment.upper()} {confidence:.1%}
    """
def risk_badge(risk_level):
    """Return an HTML badge for *risk_level*, or "" when no level is given.

    NOTE(review): the HTML markup inside the returned f-string appears to have
    been stripped from this view — confirm against the original file.
    """
    if not risk_level:
        return ""
    # Grey fallback for unknown risk levels.
    color = RISK_COLOR.get(risk_level, "#6b7280")
    return f"""
    RISK:{risk_level}
    """
def result_card(result):
    """Render one prediction *result* (dict from /predict) as an HTML card.

    NOTE(review): the HTML markup inside the string literals below appears to
    have been stripped from this view, leaving several literals visibly
    unterminated — reconcile against the original file before editing.
    """
    # Required prediction fields.
    sentiment = result["sentiment"]
    confidence = result["confidence"]
    # Optional fields with display defaults.
    language = result.get("language", "en")
    risk_level = result.get("risk_level", "")
    risk_score = result.get("risk_score", 0)
    keywords = result.get("keywords", [])
    translated = result.get("translated_text")  # set only for translated inputs
    latency = result.get("latency_ms", 0)
    color = COLORS[sentiment]
    # Per-class probabilities.
    neg = result["scores"]["negative"]
    neu = result["scores"]["neutral"]
    pos = result["scores"]["positive"]
    # Optional row showing the machine translation (TR -> EN).
    tr_row = ""
    if translated:
        tr_row = (
            ""
            "TRβEN"
            f" {translated}
            "
        )
    # Optional row of extracted keyword chips.
    kw_row = ""
    if keywords:
        kw_spans = ""
        for kw in keywords:
            kw_spans += (
                f"{kw}"
            )
        kw_row = f"{kw_spans}
        "
    # Sentiment + risk badges rendered by the helpers above.
    s_badge = sentiment_badge(sentiment, confidence)
    r_badge = risk_badge(risk_level)
    html = (
        f""
        f"
        "
        f"
        "
        f"{s_badge}{r_badge}"
        f""
        f"LANG:{language.upper()} {latency:.0f}ms
        "
        f"
        "
        f"NEG {neg:.3f}"
        f"NEU {neu:.3f}"
        f"POS {pos:.3f}"
        f"
        "
        f"{tr_row}{kw_row}
        "
    )
    st.markdown(html, unsafe_allow_html=True)
def mini_chart(scores, sentiment):
    """Build a small dark-themed bar chart of the three class probabilities.

    *scores* maps 'negative'/'neutral'/'positive' to floats; *sentiment* is
    kept for interface parity. Returns a plotly Figure.
    """
    color = COLORS[sentiment]
    ordering = ["negative", "neutral", "positive"]
    probs = [scores[name] for name in ordering]
    mono = "IBM Plex Mono"
    bars = go.Bar(
        x=ordering,
        y=probs,
        marker_color=[COLORS[name] for name in ordering],
        marker_line_width=0,
        text=[f"{p:.1%}" for p in probs],
        textposition="outside",
        textfont=dict(family=mono, size=11, color="#94a3b8"),
    )
    fig = go.Figure(bars)
    fig.update_layout(
        plot_bgcolor="#0d1117",
        paper_bgcolor="#0d1117",
        height=200,
        margin=dict(t=10, b=10, l=10, r=10),
        yaxis=dict(
            # Extra headroom so the "outside" text labels are not clipped.
            range=[0, 1.2],
            tickformat=".0%",
            gridcolor="#1e2a3a",
            tickfont=dict(family=mono, size=10, color="#4a6fa5"),
        ),
        xaxis=dict(tickfont=dict(family=mono, size=11, color="#94a3b8")),
        showlegend=False,
    )
    return fig
# ── Tabs: the five top-level views of the dashboard ──
tab1, tab2, tab3, tab4, tab5 = st.tabs([
    "ANALYZE",
    "BATCH",
    "EXAMPLES",
    "MONITOR",
    "LIVE FEED",
])
# ── TAB 1: ANALYZE — single-text prediction ──
with tab1:
    col1, col2 = st.columns([1.3, 1])
    with col1:
        # Section label (raw HTML; markup appears stripped in this view).
        st.markdown("""
INPUT TEXT
""", unsafe_allow_html=True)
        text_input = st.text_area(
            label="text",
            placeholder="Enter financial news or tweet...\nTΓΌrkΓ§e metin de girebilirsiniz.",
            height=160,
            label_visibility="collapsed",
        )
        analyze_btn = st.button("βΆ ANALYZE", use_container_width=True)
    with col2:
        if analyze_btn and text_input.strip():
            # Non-empty input: call the API, then render card + probability chart.
            with st.spinner("Processing..."):
                result = call_predict(text_input)
            if result:
                result_card(result)
                st.plotly_chart(mini_chart(result["scores"], result["sentiment"]),
                                use_container_width=True)
        elif analyze_btn:
            # Button pressed with an empty/whitespace-only input.
            st.warning("Input required.")
        if not analyze_btn:
            # Placeholder panel before the first run (HTML appears stripped).
            st.markdown("""
""", unsafe_allow_html=True)
# ── TAB 2: BATCH — newline-separated texts, max 32 per request ──
with tab2:
    # Section label (raw HTML; markup appears stripped in this view).
    st.markdown("""
BATCH INPUT β ONE TEXT PER LINE (MAX 32)
""", unsafe_allow_html=True)
    batch_input = st.text_area(
        label="batch",
        placeholder="Line 1...\nLine 2...\nLine 3...",
        height=160,
        label_visibility="collapsed",
    )
    batch_btn = st.button("βΆ RUN BATCH", use_container_width=True)
    if batch_btn and batch_input.strip():
        # One text per non-blank line.
        texts = [t.strip() for t in batch_input.strip().split("\n") if t.strip()]
        if len(texts) > 32:
            st.error("MAX 32 LINES")  # client-side guard on batch size
        else:
            with st.spinner(f"Processing {len(texts)} texts..."):
                result = call_batch(texts)
            if result:
                # Tabular summary: truncated text plus headline fields per item.
                rows = []
                for r in result["results"]:
                    rows.append({
                        "TEXT" : r["text"][:70] + ("..." if len(r["text"]) > 70 else ""),
                        "LANG" : r.get("language", "en").upper(),
                        "SENTIMENT" : f"{EMOJI[r['sentiment']]} {r['sentiment'].upper()}",
                        "CONF" : f"{r['confidence']:.1%}",
                        "RISK" : r.get("risk_level", ""),
                        "KEYWORDS" : ", ".join(r.get("keywords", [])[:3]),
                    })
                st.dataframe(
                    pd.DataFrame(rows),
                    use_container_width=True,
                    hide_index=True,
                )
                st.divider()
                # Aggregate counts per sentiment class.
                sentiments = [r["sentiment"] for r in result["results"]]
                counts = Counter(sentiments)
                c1, c2, c3, c4 = st.columns(4)
                c1.metric("TOTAL", len(texts))
                c2.metric("β² POSITIVE", counts.get("positive", 0))
                c3.metric("β NEUTRAL", counts.get("neutral", 0))
                c4.metric("βΌ NEGATIVE", counts.get("negative", 0))
# ── TAB 3: EXAMPLES — canned sentences with one-click prediction ──
with tab3:
    # Section label (raw HTML; markup appears stripped in this view).
    st.markdown("""
SAMPLE FINANCIAL TEXTS
""", unsafe_allow_html=True)
    for i, sentence in enumerate(EXAMPLE_SENTENCES):
        col1, col2 = st.columns([4, 1])
        with col1:
            st.markdown(f"""
{sentence}
""", unsafe_allow_html=True)
        with col2:
            # Unique key per row so Streamlit can track each button.
            if st.button("βΆ RUN", key=f"ex_{i}"):
                with st.spinner(""):
                    result = call_predict(sentence)
                if result:
                    s = result["sentiment"]
                    c = result["confidence"]
                    color = COLORS[s]
                    # NOTE(review): the HTML inside these f-strings appears
                    # stripped in this view — the literals look unterminated.
                    # Reconcile against the original file before editing.
                    st.markdown(
                        f''
                        f'{EMOJI[s]} {s.upper()}
                        '
                        f'{c:.1%}
                        ',
                        unsafe_allow_html=True
                    )
# ── TAB 4: MONITOR — aggregated API usage statistics ──
with tab4:
    col_refresh, _ = st.columns([1, 4])
    with col_refresh:
        # Manual refresh: rerun the script so stats are re-fetched below.
        if st.button("β» REFRESH", use_container_width=True):
            st.rerun()
    # Fetch aggregated stats; degrade to an empty dict so the "no data"
    # branch below renders instead of the whole tab crashing.
    try:
        stats = requests.get(f"{API_URL}/monitoring/stats", timeout=5).json()
    except Exception:
        # FIX: was a bare `except:`, which also swallows SystemExit and
        # KeyboardInterrupt; Exception still covers request + JSON errors.
        st.error("API offline")
        stats = {}
    if not stats or stats.get("total", 0) == 0:
        # Empty state (raw HTML; markup appears stripped in this view).
        st.markdown("""
No requests recorded yet
""", unsafe_allow_html=True)
    else:
        # Headline metrics; latency/distribution are optional in the payload.
        c1, c2, c3, c4 = st.columns(4)
        c1.metric("TOTAL REQUESTS", stats["total"])
        if stats.get("latency"):
            c2.metric("AVG LATENCY", f"{stats['latency'][0]['avg_ms']} ms")
        if stats.get("distribution"):
            dist = {d["sentiment"]: d["count"] for d in stats["distribution"]}
            c3.metric("β² POSITIVE", dist.get("positive", 0))
            c4.metric("βΌ NEGATIVE", dist.get("negative", 0))
        st.divider()
        col_a, col_b = st.columns(2)
        with col_a:
            # Donut chart: share of each sentiment class.
            if stats.get("distribution"):
                df_dist = pd.DataFrame(stats["distribution"])
                fig = go.Figure(go.Pie(
                    labels=df_dist["sentiment"],
                    values=df_dist["count"],
                    marker=dict(
                        colors=[COLORS.get(s, "#6b7280") for s in df_dist["sentiment"]],
                        line=dict(color="#0a0e1a", width=2),
                    ),
                    hole=0.6,
                    textfont=dict(family="IBM Plex Mono", size=11),
                ))
                fig.update_layout(
                    title=dict(text="SENTIMENT DISTRIBUTION",
                               font=dict(family="IBM Plex Mono", size=10,
                                         color="#4a6fa5"), x=0),
                    plot_bgcolor="#0d1117", paper_bgcolor="#0d1117",
                    height=280, margin=dict(t=40, b=10),
                    legend=dict(font=dict(family="IBM Plex Mono", size=10,
                                          color="#94a3b8")),
                )
                st.plotly_chart(fig, use_container_width=True)
        with col_b:
            # Bar chart: request volume bucketed by hour over the last day.
            if stats.get("hourly"):
                df_h = pd.DataFrame(stats["hourly"])
                fig2 = go.Figure(go.Bar(
                    x=df_h["hour"], y=df_h["count"],
                    marker_color="#00d4aa",
                    marker_line_width=0,
                ))
                fig2.update_layout(
                    title=dict(text="REQUEST VOLUME (24H)",
                               font=dict(family="IBM Plex Mono", size=10,
                                         color="#4a6fa5"), x=0),
                    plot_bgcolor="#0d1117", paper_bgcolor="#0d1117",
                    height=280, margin=dict(t=40, b=10),
                    xaxis=dict(tickangle=45, gridcolor="#1e2a3a",
                               tickfont=dict(family="IBM Plex Mono", size=9,
                                             color="#4a6fa5")),
                    yaxis=dict(gridcolor="#1e2a3a",
                               tickfont=dict(family="IBM Plex Mono", size=9,
                                             color="#4a6fa5")),
                )
                st.plotly_chart(fig2, use_container_width=True)
        # Table of the most recent individual requests, formatted for display.
        if stats.get("recent"):
            st.markdown("""
RECENT REQUESTS
""", unsafe_allow_html=True)
            df_r = pd.DataFrame(stats["recent"])
            df_r["text"] = df_r["text"].str[:60] + "..."
            df_r["confidence"] = df_r["confidence"].apply(lambda x: f"{x:.1%}")
            df_r["latency_ms"] = df_r["latency_ms"].apply(lambda x: f"{x:.0f}ms")
            st.dataframe(df_r, use_container_width=True, hide_index=True)
# ── TAB 5: LIVE FEED — RSS headlines scored by the model ──
with tab5:
    col1, col2 = st.columns([1, 2])
    with col1:
        if st.button("β» FETCH NEWS", use_container_width=True):
            with st.spinner("Fetching..."):
                try:
                    # Lazy import: the news collector is optional at app start.
                    from src.news_collector import fetch_all_feeds, init_news_db
                    init_news_db()
                    fetch_all_feeds()
                    st.success("Updated")
                except Exception as e:
                    st.error(f"{e}")
            # Rerun so the freshly-fetched items render below.
            st.rerun()
    with col2:
        risk_filter = st.selectbox(
            "FILTER",
            ["ALL", "HIGH", "MEDIUM", "LOW"],
            label_visibility="collapsed",
        )
    # Load the most recent stored items; fall back to an empty feed on error.
    try:
        from src.news_collector import get_recent_news, init_news_db
        init_news_db()
        news = get_recent_news(limit=50)
    except Exception as e:
        st.error(f"{e}")
        news = []
    if not news:
        # Empty state (raw HTML; markup appears stripped in this view).
        st.markdown("""
No news fetched yet β click FETCH NEWS
""", unsafe_allow_html=True)
    else:
        if risk_filter != "ALL":
            news = [n for n in news if n.get("risk_level") == risk_filter]
        # Headline metrics over the (filtered) feed.
        sentiments = [n["sentiment"] for n in news if n.get("sentiment")]
        counts = Counter(sentiments)
        c1, c2, c3, c4 = st.columns(4)
        c1.metric("TOTAL", len(news))
        c2.metric("β² POSITIVE", counts.get("positive", 0))
        c3.metric("β NEUTRAL", counts.get("neutral", 0))
        c4.metric("βΌ NEGATIVE", counts.get("negative", 0))
        st.divider()
        for item in news:
            sentiment = item.get("sentiment", "neutral")
            risk_level = item.get("risk_level", "")
            keywords = item.get("keywords", [])
            # Keywords may arrive as a JSON-encoded string from storage.
            if isinstance(keywords, str):
                try:
                    keywords = json.loads(keywords)
                except:  # NOTE(review): bare except — prefer json.JSONDecodeError
                    keywords = []
            color = COLORS.get(sentiment, "#6b7280")
            risk_col = RISK_COLOR.get(risk_level, "#6b7280")
            title = item.get("title", "")[:120]        # truncate long headlines
            source = item.get("source", "")
            pub = str(item.get("published", ""))[:16]  # truncate timestamp for display
            url = item.get("url", "#")
            conf = item.get("confidence", 0)
            emj = EMOJI.get(sentiment, "")
            # NOTE(review): the HTML markup in the literals below appears
            # stripped in this view — several literals look unterminated.
            # Reconcile against the original file before editing.
            kw_spans = ""
            for kw in keywords[:4]:
                kw_spans += (
                    f"{kw}"
                )
            kw_row = f"{kw_spans}
            " if kw_spans else ""
            risk_span = ""
            if risk_level:
                risk_span = (
                    f""
                    f"RISK:{risk_level}"
                )
            html = (
                f""
                f"
                "
                f""
                f"{source} Β· {pub}"
                f""
                f""
                f"{emj} {sentiment.upper()} {conf:.0%}"
                f"{risk_span}
                "
                f"
                "
                f"{kw_row}
                "
            )
            st.markdown(html, unsafe_allow_html=True)
# ── Sidebar: API health check + static model metadata ──
with st.sidebar:
    st.markdown("""
β¬ FINSENTIMENT
""", unsafe_allow_html=True)
    # Ping the API and render an ONLINE/OFFLINE status panel.
    # NOTE(review): the HTML in the literals below appears stripped in this
    # view (several literals look unterminated), and the bare `except` also
    # hides non-network errors — both worth reconciling with the original.
    try:
        r = requests.get(f"{API_URL}/health", timeout=3)
        data = r.json()
        st.markdown(
            ""
            "β SYSTEM ONLINE
            ",
            unsafe_allow_html=True
        )
        st.markdown(
            f""
            f"STATUS ok
            "
            f"MODEL finbert-finetuned
            "
            f"DEVICE {data.get('device', 'cpu').upper()}
            "
            f"
            ",
            unsafe_allow_html=True
        )
    except:
        st.markdown("""
β SYSTEM OFFLINE
""", unsafe_allow_html=True)
    st.divider()
    # Static model card, shown regardless of API status.
    st.markdown("""
MODEL ProsusAI/FinBERT
DATASET financial_phrasebank
F1 SCORE 0.963
ACCURACY 0.978
LATENCY ~300ms (EN)
~1500ms (TR)
""", unsafe_allow_html=True)
    st.divider()
    # Language routing summary.
    st.markdown("""
LANG TR β Helsinki + FinBERT
LANG EN β FinBERT direct
""", unsafe_allow_html=True)