# VBI Terminal: Strategic AI — Streamlit OSINT dashboard (Hugging Face Space)
from datetime import datetime, timedelta

import dateparser
import numpy as np
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
import streamlit as st
from GoogleNews import GoogleNews
from transformers import pipeline
# --- SYSTEM CONFIG ---
# Must be the first Streamlit call in the script; configures the browser tab
# and starts with the sidebar collapsed so the dashboard gets full width.
st.set_page_config(
    page_title="VBI Terminal: Strategic AI",
    page_icon="🛡️",
    layout="wide",
    initial_sidebar_state="collapsed"
)
# --- PROFESSIONAL UI (Cyberpunk/Terminal Style) ---
# Injects raw CSS (unsafe_allow_html=True) to override Streamlit's default
# theme. Selectors target Streamlit's data-testid attributes, which are not a
# stable public API — verify after Streamlit upgrades.
st.markdown("""
<style>
/* Dark Deep Background */
.stApp { background-color: #0f172a; color: #e2e8f0; }
/* Metrics Cards */
[data-testid="stMetric"] {
    background-color: #1e293b !important;
    border: 1px solid #334155 !important;
    border-left: 5px solid #0ea5e9 !important;
    border-radius: 8px;
    padding: 15px !important;
    box-shadow: 0 4px 6px -1px rgba(0, 0, 0, 0.5);
}
[data-testid="stMetricLabel"] p { color: #94a3b8 !important; font-size: 0.9rem !important; }
[data-testid="stMetricValue"] div { color: #f8fafc !important; font-size: 2rem !important; }
/* Advisor Box (green border = stable/ok advice) */
.advisor-box {
    background-color: #334155;
    border: 1px solid #10b981;
    border-radius: 10px;
    padding: 20px;
    margin-top: 20px;
    color: #f0fdf4;
}
/* Warning Box (red border = critical reputation status) */
.warning-box {
    background-color: #450a0a;
    border: 1px solid #ef4444;
    border-radius: 10px;
    padding: 20px;
    margin-top: 20px;
    color: #fef2f2;
}
/* Tables */
.stDataFrame { border: 1px solid #334155; border-radius: 5px; }
</style>
""", unsafe_allow_html=True)
# --- AI ENGINE ---
@st.cache_resource
def load_neural_engine():
    """Load the zero-shot classification model once per server process.

    facebook/bart-large-mnli is a ~1.6 GB model; without @st.cache_resource
    Streamlit would re-instantiate it on every script rerun (i.e. on every
    widget interaction), making the app unusably slow. cache_resource is the
    documented Streamlit mechanism for sharing unserializable resources such
    as ML models across reruns and sessions.

    Returns:
        A transformers zero-shot-classification pipeline.
    """
    return pipeline("zero-shot-classification", model="facebook/bart-large-mnli")


# Shared, cached classifier used by the processing loop below.
analyzer = load_neural_engine()
# --- INTELLIGENCE ENGINE ---
def calculate_source_weight(source_name):
    """Heuristic credibility weight for a news source.

    Tier-1 outlets count double, noisy social/blog sources count less,
    anything unrecognized is neutral. Matching is case-insensitive substring
    search, and the tier-1 check wins if a name matches both lists.
    """
    name = str(source_name).lower()
    tier1_tags = ('reuters', 'bloomberg', 'forbes', 'tengrinews', 'zakon', 'kapital')
    noise_tags = ('blog', 'reddit', 'twitter', 'post')
    if any(tag in name for tag in tier1_tags):
        return 2.0  # High Impact Tier 1
    if any(tag in name for tag in noise_tags):
        return 0.8  # Low Impact / High Noise
    return 1.0  # Standard
def fetch_intelligence(query, region, depth, mode):
    """Fetch real-time headlines for *query* via Google News (OSINT).

    Args:
        query:  Target entity name (e.g. a brand).
        region: Two-letter Google News region code ("KZ", "US", ...).
        depth:  Maximum number of headlines to consider.
        mode:   Signal mode — "Social Buzz (Risk)" and "Financial Intel"
                augment the query with mode-specific keywords; any other
                value searches the query as-is.

    Returns:
        list[dict] with keys Timestamp, Source, Headline, Link, Weight
        (Weight is the credibility heuristic from calculate_source_weight).
    """
    gn = GoogleNews(lang='en', region=region)
    gn.clear()

    # Advanced query logic: bias results toward the selected signal mode.
    final_query = query
    if mode == "Social Buzz (Risk)":
        final_query = f'{query} AND ("scandal" OR "fail" OR "crash" OR "complaint" OR "leak" OR "reddit" OR "opinion")'
    elif mode == "Financial Intel":
        final_query = f'{query} AND ("stock" OR "profit" OR "loss" OR "quarter" OR "ipo" OR "revenue")'

    gn.search(final_query)
    results = gn.result()
    if len(results) < depth:
        # BUGFIX: GoogleNews accumulates pages internally and result() returns
        # the *cumulative* list, so re-read it instead of appending. The old
        # `results += gn.result()` duplicated every page-1 item, and those
        # duplicates consumed slots in the results[:depth] slice below,
        # silently shrinking the number of unique headlines returned.
        gn.getpage(2)
        results = gn.result()

    clean_data = []
    seen_titles = set()
    for item in results[:depth]:
        title = item.get('title', '')
        # Skip duplicates and empty titles (an empty headline would be fed to
        # the zero-shot classifier downstream for no benefit).
        if not title or title in seen_titles:
            continue
        seen_titles.add(title)

        # Dates arrive as free text ("2 hours ago", "Yesterday"); fall back
        # to "now" when dateparser cannot make sense of them.
        parsed_date = dateparser.parse(item.get('date', '')) or datetime.now()

        source_label = item.get('media', 'Unknown Node')
        clean_data.append({
            "Timestamp": parsed_date,
            "Source": source_label,
            "Headline": title,
            "Link": item.get('link', '#'),
            "Weight": calculate_source_weight(source_label),
        })
    return clean_data
# --- STRATEGIC ADVISOR MODULE ---
def generate_ai_advice(rep_index, top_risk, volatility, sentiment_ratio):
    """Turn computed metrics into a strategy briefing.

    Returns a (status, advice_text, css_class) triple where status is one of
    "STABLE"/"VOLATILE"/"CRITICAL", advice_text is markdown paragraphs joined
    by blank lines, and css_class selects the green or red advisor box.
    The sentiment_ratio parameter is accepted for interface compatibility but
    currently unused.
    """
    notes = []

    # 1. Reputation assessment — also fixes the overall status and box style.
    pct = round(rep_index)
    if rep_index < 40:
        status, css_class = "CRITICAL", "warning-box"
        notes.append(f"🚨 **CRISIS MODE ACTIVATED:** Reputation Index ({pct}%) is critically low. Immediate PR intervention required.")
    elif rep_index < 60:
        status, css_class = "VOLATILE", "advisor-box"
        notes.append(f"⚠️ **Caution:** Brand sentiment is mixed ({pct}%). Monitor closely.")
    else:
        status, css_class = "STABLE", "advisor-box"
        notes.append(f"✅ **Healthy:** Strong market perception ({pct}%). Focus on maintaining momentum.")

    # 2. Risk-specific playbook (categories outside this table add no note).
    playbook = {
        "Legal/Compliance": "⚖️ **Legal Vector:** High volume of legal discussions detected. Prepare official statements regarding compliance/lawsuits immediately.",
        "Technical Failure": "🔧 **Ops Vector:** Technical complaints are trending. Issue a transparency report on uptime/fixes to reassure customers.",
        "Financial Risk": "📉 **Market Vector:** Financial anxiety detected. IR (Investor Relations) should release clarifying data.",
        "PR Crisis": "📣 **PR Vector:** Viral negativity detected. Do not ignore. Use 'Recall & Reframe' strategy.",
    }
    if top_risk in playbook:
        notes.append(playbook[top_risk])

    # 3. Volatility check (NaN volatility compares False, i.e. treated as calm).
    if volatility > 0.3:
        notes.append("🌊 **High Volatility:** Opinions are shifting rapidly. Avoid controversial statements for 48h.")

    return status, "\n\n".join(notes), css_class
# --- SIDEBAR ---
# Scan parameters; widget values feed the main execution block below.
with st.sidebar:
    st.header("🛰️ VBI: COMMAND")
    # Google News region code (default KZ — the app targets Kazakh brands).
    target_region = st.selectbox("Geo-Node", ["KZ", "US", "GB", "RU"], index=0)
    # Query-augmentation mode consumed by fetch_intelligence().
    source_mode = st.radio("Signal Mode:", ["Corporate News", "Social Buzz (Risk)", "Financial Intel"])
    # Maximum number of headlines to fetch and analyze.
    scan_depth = st.slider("Depth", 10, 60, 30)
    st.divider()
    st.markdown("Created by **VBI Intelligence**")
# --- MAIN LAYOUT ---
st.title("🛡️ VBI: Strategic Brand Intelligence")
st.markdown("`v2.0` | Neural OSINT & Automated Strategy Generation")
col_search, col_btn = st.columns([4, 1])
with col_search:
    target_query = st.text_input("TARGET ENTITY:", placeholder="e.g. FlyArystan, Kaspi, KazMunayGas")
with col_btn:
    # Two empty writes act as vertical spacers so the button lines up with
    # the text input in the neighboring (taller) column.
    st.write("")
    st.write("")
    start_btn = st.button("🚀 INITIATE SCAN", use_container_width=True)
# --- MAIN EXECUTION ---
# Runs only when the scan button was clicked AND a target entity was entered.
if start_btn and target_query:
    with st.spinner(f"🛰️ Intercepting signals for '{target_query}'..."):
        # 1. Fetch
        raw_data = fetch_intelligence(target_query, target_region, scan_depth, source_mode)
        if not raw_data:
            st.error("No signals detected. System Standby.")
        else:
            # 2. Process: classify every headline with the cached zero-shot model.
            processed_data = []
            risk_vectors = ["Legal/Compliance", "Financial Risk", "Technical Failure", "Market Expansion", "PR Crisis", "Customer Service"]
            sentiment_cats = ["Positive", "Negative", "Neutral"]
            prog = st.progress(0)
            for i, item in enumerate(raw_data):
                # Neural Analysis: two independent zero-shot passes per headline
                # (one for risk category, one for sentiment).
                risk_out = analyzer(item['Headline'], candidate_labels=risk_vectors)
                sent_out = analyzer(item['Headline'], candidate_labels=sentiment_cats)
                # Logic: If Risk Score is low, it might be just General News
                top_risk = risk_out['labels'][0]
                if risk_out['scores'][0] < 0.4:
                    top_risk = "General Noise"
                # Weighted Score Calculation: sentiment mapped to {-1, 0, +1}
                # then scaled by the source credibility weight (0.8/1.0/2.0).
                sent_score = 1 if sent_out['labels'][0] == 'Positive' else -1 if sent_out['labels'][0] == 'Negative' else 0
                weighted_impact = sent_score * item['Weight']
                processed_data.append({
                    "Time": item['Timestamp'],
                    "Source": item['Source'],
                    "Headline": item['Headline'],
                    "Risk Category": top_risk,
                    "Risk Conf": risk_out['scores'][0],
                    "Sentiment": sent_out['labels'][0],
                    "Impact Score": weighted_impact
                })
                prog.progress((i + 1) / len(raw_data))
            prog.empty()
            df = pd.DataFrame(processed_data).sort_values(by='Time')

            # --- ANALYTICS ENGINE ---
            total_impact = df['Impact Score'].sum()
            # NOTE(review): total_weight is computed but never used below —
            # rep_index normalizes by len(df) instead. Dead code or latent bug;
            # confirm which normalization was intended.
            total_weight = df['Impact Score'].abs().sum() if df['Impact Score'].abs().sum() != 0 else 1
            # Reputation Index (0-100): 50 = neutral, shifted by mean impact.
            rep_index = 50 + ((total_impact / len(df)) * 50)
            rep_index = max(0, min(100, rep_index))  # Clamp 0-100
            # Volatility (Std Dev of Impact). NOTE(review): std() of a single
            # row is NaN; NaN > 0.3 is False, so the advisor treats it as calm
            # but the metric card will display "nan".
            volatility = df['Impact Score'].std()
            # Top Risk: most frequent category across all signals.
            top_risk_cat = df['Risk Category'].value_counts().idxmax()
            # Advice Generation — sentiment_ratio is hard-wired to 0; the
            # advisor currently ignores that argument.
            status, strategy_text, css_style = generate_ai_advice(rep_index, top_risk_cat, volatility, 0)

            # --- DASHBOARD ---
            # 1. STRATEGIC ADVISOR BLOCK (raw HTML; css_style picks green/red box)
            st.markdown(f"""
            <div class="{css_style}">
                <h3>🧠 AI Strategic Advisor: {status}</h3>
                <div style='white-space: pre-wrap; font-size: 1.1rem;'>{strategy_text}</div>
            </div>
            """, unsafe_allow_html=True)
            st.divider()

            # 2. KEY METRICS
            m1, m2, m3, m4 = st.columns(4)
            m1.metric("Reputation Index", f"{round(rep_index, 1)}%", delta="Weighted Impact")
            m2.metric("Signal Volume", len(df))
            m3.metric("Primary Threat", top_risk_cat, delta_color="off")
            m4.metric("Volatility", f"{round(volatility, 2)}", help="Higher means less stable opinions")

            # 3. VISUALIZATIONS
            st.subheader("📊 Visual Intelligence")
            tab1, tab2 = st.tabs(["📈 Trend Dynamics", "🛡️ Risk Matrix"])
            with tab1:
                # Rolling Average Trend: 3-point moving average smooths the
                # raw per-headline impact signal.
                df['MA'] = df['Impact Score'].rolling(window=3).mean()
                fig_trend = go.Figure()
                fig_trend.add_trace(go.Scatter(x=df['Time'], y=df['Impact Score'], mode='markers', name='Raw Signal', marker=dict(color='#94a3b8')))
                fig_trend.add_trace(go.Scatter(x=df['Time'], y=df['MA'], mode='lines', name='Reputation Trend', line=dict(color='#38bdf8', width=3)))
                fig_trend.update_layout(title="Reputation Stability Over Time", template="plotly_dark", paper_bgcolor='rgba(0,0,0,0)', plot_bgcolor='rgba(0,0,0,0)')
                st.plotly_chart(fig_trend, use_container_width=True)
            with tab2:
                c1, c2 = st.columns(2)
                with c1:
                    # Sentiment split, color-matched to the app's CSS palette.
                    fig_pie = px.pie(df, names='Sentiment', title="Sentiment Distribution", color='Sentiment',
                                     color_discrete_map={"Positive": "#10b981", "Negative": "#ef4444", "Neutral": "#64748b"})
                    fig_pie.update_layout(template="plotly_dark", paper_bgcolor='rgba(0,0,0,0)')
                    st.plotly_chart(fig_pie, use_container_width=True)
                with c2:
                    # Horizontal bar of risk-category frequencies.
                    risk_counts = df['Risk Category'].value_counts().reset_index()
                    risk_counts.columns = ['Risk', 'Count']
                    fig_bar = px.bar(risk_counts, x='Count', y='Risk', orientation='h', title="Risk Vectors",
                                     color='Count', color_continuous_scale='Redor')
                    fig_bar.update_layout(template="plotly_dark", paper_bgcolor='rgba(0,0,0,0)')
                    st.plotly_chart(fig_bar, use_container_width=True)

            # 4. INTELLIGENCE LOG
            st.subheader("📡 Decrypted Signal Log")

            def highlight_risk(val):
                # Red for the three "hard" risk categories, light grey otherwise.
                color = '#ef4444' if val in ['Legal/Compliance', 'Technical Failure', 'PR Crisis'] else '#cbd5e1'
                return f'color: {color}'

            display_df = df[['Time', 'Source', 'Headline', 'Risk Category', 'Sentiment', 'Impact Score']].copy()
            st.dataframe(
                display_df.style.map(highlight_risk, subset=['Risk Category']),
                use_container_width=True,
                height=400
            )