| | """ |
| | Reply Required Page |
| | Displays comments that require replies with filtering and prioritisation. |
| | |
| | Data is fetched on-demand: user sets filters then clicks "Fetch Data". |
| | Platform, brand and date are pre-populated from global sidebar filters. |
| | """ |
| | import streamlit as st |
| | import pandas as pd |
| | import sys |
| | from pathlib import Path |
| |
|
# Ensure the dashboard root (two levels up from this page) is importable
# so the sibling `utils` and `visualizations` packages resolve when
# Streamlit runs this page directly.
parent_dir = Path(__file__).resolve().parents[1]
sys.path.append(str(parent_dir))
| |
|
| | from utils.metrics import SentimentMetrics |
| | from visualizations.sentiment_charts import SentimentCharts |
| | from visualizations.distribution_charts import DistributionCharts |
| | from visualizations.content_cards import ContentCards |
| |
|
| |
|
def _default_date_range(global_date, dashboard_df):
    """
    Pick the default value for the date-range filter.

    Prefers the global sidebar date range when it is a complete
    (start, end) pair, then falls back to the min/max comment timestamp
    in the dashboard data. Returns None when neither is available
    (the date filter is then not rendered).
    """
    # isinstance guard: a bare date (not a 2-tuple) in global filters
    # would otherwise raise TypeError on len().
    if isinstance(global_date, (list, tuple)) and len(global_date) == 2:
        return (global_date[0], global_date[1])
    if 'comment_timestamp' in dashboard_df.columns and not dashboard_df.empty:
        timestamps = dashboard_df['comment_timestamp']
        return (timestamps.min().date(), timestamps.max().date())
    return None


def _apply_view_filters(df, priority, platform, brand, intent):
    """
    Apply the in-page "Refine View" filters to the fetched comments.

    Args:
        df: DataFrame of reply-required comments.
        priority/platform/brand/intent: selectbox values; 'All' disables
            the corresponding filter.

    Returns:
        Filtered DataFrame (possibly empty).
    """
    if priority == '🔴 Urgent':
        df = df[df['sentiment_polarity'].isin(['negative', 'very_negative'])]
    elif priority == '🟠 High':
        df = df[
            (df['sentiment_polarity'] == 'neutral')
            & (df['intent'].str.contains('feedback_negative|request', na=False))
        ]
    elif priority == '🟡 Medium':
        df = df[df['sentiment_polarity'] == 'positive']
    elif priority == '🟢 Low':
        df = df[df['sentiment_polarity'] == 'very_positive']

    if platform != 'All':
        df = df[df['platform'] == platform]
    if brand != 'All':
        df = df[df['brand'] == brand]
    if intent != 'All':
        # NOTE(review): contains() treats the intent label as a regex;
        # labels appear to be plain identifiers, so this matches the
        # original behaviour — confirm no labels contain regex metachars.
        df = df[df['intent'].str.contains(intent, na=False)]
    return df


def _priority_emoji(comment):
    """Return the urgency emoji for a single comment row (pandas Series)."""
    polarity = comment.get('sentiment_polarity')
    intent = comment.get('intent')
    if polarity in ('negative', 'very_negative'):
        return "🔴"
    # Guard against NaN/non-string intent values: `tag in intent` raises
    # TypeError on a float NaN (the .str.contains filters elsewhere use
    # na=False for the same reason).
    if polarity == 'neutral' and isinstance(intent, str) and any(
        tag in intent for tag in ('feedback_negative', 'request')
    ):
        return "🟠"
    if polarity == 'positive':
        return "🟡"
    return "🟢"


def _render_pagination(total_pages, key_prefix):
    """
    Render previous/next pagination controls.

    Mutates st.session_state.reply_page and reruns the app when the page
    changes. key_prefix ('top' / 'bottom') keeps widget keys unique and
    identical to the original ones.
    """
    col_prev, col_label, col_next = st.columns([1, 2, 1])
    with col_prev:
        if st.button("⬅️ Previous", key=f"prev_{key_prefix}",
                     disabled=st.session_state.reply_page <= 1):
            st.session_state.reply_page -= 1
            st.rerun()
    with col_label:
        st.markdown(
            f"<center>Page {st.session_state.reply_page} of {total_pages}</center>",
            unsafe_allow_html=True
        )
    with col_next:
        if st.button("Next ➡️", key=f"next_{key_prefix}",
                     disabled=st.session_state.reply_page >= total_pages):
            st.session_state.reply_page += 1
            st.rerun()


def render_reply_required(data_loader):
    """
    Render the Reply Required page.

    Data is fetched on demand: the user sets query filters and clicks
    "Fetch Data"; the result is cached in session state under 'rr_df',
    keyed by 'rr_fetch_key' so stale results are not shown after the
    filters change. The page then offers an urgency breakdown, in-page
    refinement filters, paginated comment cards, CSV export and a
    per-content reply summary.

    Args:
        data_loader: SentimentDataLoader instance; its
            load_reply_required_data() runs the targeted Snowflake query.
    """
    st.title("⚠️ Comments Requiring Reply")
    st.markdown("Manage and prioritise comments that need responses")
    st.markdown("---")

    metrics = SentimentMetrics()

    # The dashboard dataframe (loaded by the main app) supplies the
    # platform/brand option lists; bail out until the app has initialised.
    dashboard_df = st.session_state.get('dashboard_df')
    if dashboard_df is None or dashboard_df.empty:
        st.warning("Dashboard data not loaded yet. Please wait for the app to initialise.")
        return

    available_platforms = sorted(dashboard_df['platform'].dropna().unique().tolist())
    available_brands = sorted(dashboard_df['brand'].dropna().unique().tolist())

    # Global sidebar filters pre-populate the query filters below.
    global_filters = st.session_state.get('global_filters', {})
    global_platforms = global_filters.get('platforms', [])
    global_brands = global_filters.get('brands', [])
    global_date = global_filters.get('date_range')

    st.markdown("### 🔍 Query Filters")
    st.info(
        "⚡ **Performance**: Set your filters then click **Fetch Data** to run a targeted Snowflake query. "
        "Global sidebar filters are pre-populated below."
    )

    filter_col1, filter_col2, filter_col3 = st.columns(3)

    with filter_col1:
        selected_platforms = st.multiselect(
            "Platforms",
            options=available_platforms,
            default=[p for p in global_platforms if p in available_platforms],
            help="Leave empty to include all platforms"
        )

    with filter_col2:
        selected_brands = st.multiselect(
            "Brands",
            options=available_brands,
            default=[b for b in global_brands if b in available_brands],
            help="Leave empty to include all brands"
        )

    with filter_col3:
        default_date = _default_date_range(global_date, dashboard_df)
        if default_date:
            date_range = st.date_input(
                "Date Range",
                value=default_date,
                help="Filter by comment timestamp"
            )
        else:
            date_range = None

    st.markdown("---")

    # Key identifying the current filter combination; used to detect
    # whether the cached result still matches what the user asked for.
    # (st.date_input can return a 1-tuple mid-selection, hence the len check.)
    fetch_key = (
        tuple(sorted(selected_platforms)),
        tuple(sorted(selected_brands)),
        str(date_range) if date_range and len(date_range) == 2 else ''
    )

    # has_fetched: a query for exactly these filters has completed.
    # has_data:    ...and it returned at least one row.
    # FIX: previously an empty result was treated as "not fetched", which
    # made the "No comments currently require replies" branch below
    # unreachable and re-prompted the user to fetch in a loop.
    has_fetched = (
        'rr_df' in st.session_state
        and st.session_state.get('rr_fetch_key') == fetch_key
    )
    has_data = has_fetched and not st.session_state['rr_df'].empty

    fetch_col, status_col = st.columns([1, 3])
    with fetch_col:
        fetch_clicked = st.button("🔄 Fetch Data", use_container_width=True, type="primary")
    with status_col:
        if has_data:
            st.success(f"✅ Showing **{len(st.session_state['rr_df']):,}** comments requiring reply")
        elif not fetch_clicked and not has_fetched:
            st.info("👆 Click **Fetch Data** to load reply-required comments from Snowflake.")

    if fetch_clicked:
        with st.spinner("Fetching reply-required comments from Snowflake…"):
            df = data_loader.load_reply_required_data(
                platforms=selected_platforms or None,
                brands=selected_brands or None,
                date_range=date_range if date_range and len(date_range) == 2 else None,
            )
        st.session_state['rr_df'] = df
        st.session_state['rr_fetch_key'] = fetch_key
        st.session_state['reply_page'] = 1
        st.rerun()

    if not has_fetched:
        return

    reply_comments = st.session_state.get('rr_df', pd.DataFrame())

    if reply_comments.empty:
        st.success("🎉 No comments currently require replies with these filters.")
        return

    st.markdown("---")

    # ── Summary metrics ─────────────────────────────────────────────
    st.markdown("### 📊 Summary")
    # Computed once and reused for the urgency breakdown below (was
    # previously calculated twice on the same dataframe).
    urgency_metrics = metrics.calculate_response_urgency(reply_comments)

    col1, col2, col3, col4 = st.columns(4)
    with col1:
        st.metric("Total Replies Needed", len(reply_comments))
    with col2:
        st.metric("🔴 Urgent", urgency_metrics['urgent_count'], help="Negative sentiment")
    with col3:
        unique_contents = reply_comments['content_sk'].nunique() if 'content_sk' in reply_comments.columns else 0
        st.metric("Affected Contents", unique_contents)
    with col4:
        neg_cnt = reply_comments['sentiment_polarity'].isin(['negative', 'very_negative']).sum()
        neg_pct = neg_cnt / len(reply_comments) * 100 if len(reply_comments) > 0 else 0
        st.metric("Negative %", f"{neg_pct:.1f}%")

    st.markdown("---")

    # ── Urgency breakdown ───────────────────────────────────────────
    st.markdown("### 🚨 Response Urgency Breakdown")
    uc1, uc2, uc3, uc4 = st.columns(4)
    uc1.metric("🔴 Urgent", urgency_metrics['urgent_count'], help="Negative → immediate action")
    uc2.metric("🟠 High Priority", urgency_metrics['high_priority_count'], help="Neutral + feedback/request → 24h")
    uc3.metric("🟡 Medium", urgency_metrics['medium_priority_count'], help="Positive → 48h")
    uc4.metric("🟢 Low", urgency_metrics['low_priority_count'], help="Very positive → when convenient")

    st.markdown("---")

    # ── In-page refinement filters ──────────────────────────────────
    st.markdown("### 🔍 Refine View")
    rf1, rf2, rf3, rf4 = st.columns(4)

    with rf1:
        priority_options = ['All', '🔴 Urgent', '🟠 High', '🟡 Medium', '🟢 Low']
        selected_priority = st.selectbox("Priority", priority_options, index=0)

    with rf2:
        platform_options = ['All'] + sorted(reply_comments['platform'].unique().tolist())
        view_platform = st.selectbox("Platform", platform_options, index=0)

    with rf3:
        brand_options = ['All'] + sorted(reply_comments['brand'].unique().tolist())
        view_brand = st.selectbox("Brand", brand_options, index=0)

    with rf4:
        # Intents are stored comma-separated per comment; explode to a
        # flat list of distinct labels for the dropdown.
        intent_list = (
            reply_comments['intent'].str.split(',').explode().str.strip()
            .dropna().unique().tolist()
        )
        intent_options = ['All'] + sorted(intent_list)
        selected_intent = st.selectbox("Intent", intent_options, index=0)

    filtered_comments = _apply_view_filters(
        reply_comments, selected_priority, view_platform, view_brand, selected_intent
    )

    st.markdown(f"**Showing {len(filtered_comments):,} comments after filtering**")
    st.markdown("---")

    # ── Analysis charts ─────────────────────────────────────────────
    if not filtered_comments.empty:
        st.markdown("### 📊 Analysis")
        viz_col1, viz_col2 = st.columns(2)
        with viz_col1:
            sentiment_charts = SentimentCharts()
            st.plotly_chart(
                sentiment_charts.create_sentiment_pie_chart(filtered_comments, title="Sentiment Distribution"),
                use_container_width=True
            )
        with viz_col2:
            distribution_charts = DistributionCharts()
            st.plotly_chart(
                distribution_charts.create_intent_bar_chart(
                    filtered_comments, title="Intent Distribution", orientation='h'
                ),
                use_container_width=True
            )
        st.markdown("---")

    # ── Paginated comment cards ─────────────────────────────────────
    st.markdown("### 💬 Comments Requiring Reply")

    items_per_page = 10
    total_pages = max(1, (len(filtered_comments) - 1) // items_per_page + 1)

    if 'reply_page' not in st.session_state:
        st.session_state.reply_page = 1

    # Clamp in case the refinement filters shrank the result set.
    st.session_state.reply_page = min(st.session_state.reply_page, total_pages)

    if total_pages > 1:
        _render_pagination(total_pages, "top")
        st.markdown("---")

    start_idx = (st.session_state.reply_page - 1) * items_per_page
    paginated = filtered_comments.iloc[start_idx: start_idx + items_per_page]

    if paginated.empty:
        st.info("No comments match the selected filters.")
    else:
        for idx, (_, comment) in enumerate(paginated.iterrows(), start=start_idx + 1):
            st.markdown(f"#### {_priority_emoji(comment)} Comment #{idx}")
            ContentCards.display_comment_card(comment, show_original=True)

        if total_pages > 1:
            st.markdown("---")
            _render_pagination(total_pages, "bottom")

    st.markdown("---")

    # ── CSV export ──────────────────────────────────────────────────
    st.markdown("### 💾 Export Data")
    col1, col2 = st.columns([1, 3])
    with col1:
        export_columns = [
            'comment_id', 'author_name', 'platform', 'brand', 'comment_timestamp',
            'display_text', 'original_text', 'detected_language', 'sentiment_polarity',
            'intent', 'sentiment_confidence', 'content_description', 'permalink_url'
        ]
        # Export only the columns actually present in the result set.
        available_cols = [c for c in export_columns if c in filtered_comments.columns]
        csv = filtered_comments[available_cols].to_csv(index=False)
        st.download_button(
            label="📥 Download as CSV",
            data=csv,
            file_name="comments_requiring_reply.csv",
            mime="text/csv"
        )
    with col2:
        st.info("Download the filtered comments for team collaboration or CRM import.")

    st.markdown("---")

    # ── Per-content reply summary ───────────────────────────────────
    st.markdown("### 📋 Reply Requirements by Content")

    if 'content_sk' in filtered_comments.columns:
        # Count rows per content; prefer the comment surrogate key when
        # present, otherwise fall back to an always-present column.
        count_col = 'comment_sk' if 'comment_sk' in filtered_comments.columns else 'sentiment_polarity'
        content_reply_summary = (
            filtered_comments
            .groupby('content_sk', as_index=False)
            .agg(
                replies_needed=(count_col, 'count'),
                content_description=('content_description', 'first'),
                permalink_url=('permalink_url', 'first')
            )
            .sort_values('replies_needed', ascending=False)
            .head(10)
        )

        for i, (_, content) in enumerate(content_reply_summary.iterrows(), 1):
            with st.expander(f"📄 Content #{i} — {content['replies_needed']} replies needed"):
                st.markdown(f"**Description:** {content['content_description']}")
                if pd.notna(content.get('permalink_url')):
                    st.markdown(f"**Link:** [View Content]({content['permalink_url']})")

                top_comments = filtered_comments[
                    filtered_comments['content_sk'] == content['content_sk']
                ].head(3)
                st.markdown(f"**Top {len(top_comments)} comments:**")
                for _, c in top_comments.iterrows():
                    ContentCards.display_comment_card(c, show_original=True)