SHELLAPANDIANGANHUNGING committed on
Commit
cb328df
·
verified ·
1 Parent(s): e1436f1

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +193 -182
app.py CHANGED
@@ -1976,202 +1976,213 @@ else:
1976
 
1977
  st.markdown("<h3 class='section-title'>OBJECTIVE 7 - Insight and Recommendation</h3>", unsafe_allow_html=True)
1978
 
1979
def compute_ai_insights(df: pd.DataFrame) -> List[dict]:
    """
    Generate rule-based insights and recommendations from the filtered findings data.

    Six independent analyses are attempted (company ratios, category mix,
    location activity, resolution speed, monthly trend, reporter dominance);
    any analysis whose required columns are absent is silently skipped.

    Parameters
    ----------
    df : pd.DataFrame
        Findings data. Optional columns used: 'nama_perusahaan', 'creator_nid',
        'created_at' (assumed datetime64 — confirm upstream parsing),
        'temuan_kategori', 'nama_lokasi_full', 'days_to_close'.

    Returns
    -------
    List[dict]
        Dicts with 'insight' and 'recommendation' keys; empty list for empty input.
    """
    insight_recommendations = []

    if df.empty:
        return insight_recommendations

    total_findings = len(df)
    total_locations = df['nama_lokasi_full'].nunique() if 'nama_lokasi_full' in df.columns else 0

    # --- 1. Average monthly finding-to-person ratio per company ---
    # FIX: the original guard omitted 'created_at' and would raise KeyError on
    # the .dt access below whenever that column was missing.
    if {'nama_perusahaan', 'creator_nid', 'created_at'}.issubset(df.columns):
        df_with_month = df.copy()
        df_with_month['created_month'] = df_with_month['created_at'].dt.to_period('M')

        # Findings per month per company
        findings_by_company_month = df_with_month.groupby(['created_month', 'nama_perusahaan']).size().reset_index(name='findings_count')
        # Unique reporters per month per company
        creators_by_company_month = df_with_month.groupby(['created_month', 'nama_perusahaan'])['creator_nid'].nunique().reset_index(name='unique_creators')
        merged_ratio = findings_by_company_month.merge(creators_by_company_month, on=['created_month', 'nama_perusahaan'], how='outer')
        # Avoid division by zero, then drop residual inf/NaN ratios
        merged_ratio = merged_ratio[merged_ratio['unique_creators'] > 0]
        merged_ratio['ratio'] = merged_ratio['findings_count'] / merged_ratio['unique_creators']
        merged_ratio['ratio'] = merged_ratio['ratio'].replace([np.inf, -np.inf], np.nan)

        # Monthly average per company, NaN rows removed
        avg_ratio_per_company = merged_ratio.groupby('nama_perusahaan')['ratio'].mean().reset_index(name='avg_monthly_ratio')
        avg_ratio_per_company = avg_ratio_per_company.dropna(subset=['avg_monthly_ratio'])

        if not avg_ratio_per_company.empty:
            top_company_ratio = avg_ratio_per_company.loc[avg_ratio_per_company['avg_monthly_ratio'].idxmax()]
            low_company_ratio = avg_ratio_per_company.loc[avg_ratio_per_company['avg_monthly_ratio'].idxmin()]

            insight_text = (
                f"Based on the average monthly finding-to-person ratio, "
                f"Company '{top_company_ratio['nama_perusahaan']}' has the highest activity level ({top_company_ratio['avg_monthly_ratio']:.2f} findings/person/month), "
                f"while '{low_company_ratio['nama_perusahaan']}' has the lowest ({low_company_ratio['avg_monthly_ratio']:.2f} findings/person/month)."
            )
            recommendation_text = (
                f"For '{top_company_ratio['nama_perusahaan']}': Investigate the underlying reasons for the high ratio. Is it due to active reporting, higher risk, or more personnel? "
                f"For '{low_company_ratio['nama_perusahaan']}': Verify if the low ratio reflects effective risk management or potential under-reporting."
            )
            insight_recommendations.append({"insight": insight_text, "recommendation": recommendation_text})

    # --- 2. Finding category distribution ---
    if 'temuan_kategori' in df.columns:
        cat_counts = df['temuan_kategori'].value_counts()
        top_cat = cat_counts.index[0] if not cat_counts.empty else "N/A"
        top_cat_count = cat_counts.iloc[0] if not cat_counts.empty else 0
        if top_cat != "N/A":
            perc = (top_cat_count / total_findings) * 100
            if top_cat == "Positive":
                insight_text = (
                    f"The majority of findings ({top_cat_count} or {perc:.1f}%) are categorized as 'Positive'. "
                    f"This indicates a strong culture of recognizing and reporting good practices and safety compliance."
                )
                recommendation_text = (
                    f"Maintain and reinforce the positive reporting culture. "
                    f"Consider using these 'Positive' examples as best practice case studies for training and awareness programs."
                )
            else:
                insight_text = (
                    f"The most frequent finding category is '{top_cat}' ({top_cat_count} instances, {perc:.1f}% of total). "
                    f"This highlights a specific area requiring focused attention."
                )
                recommendation_text = (
                    f"Conduct a root-cause analysis for the '{top_cat}' category. "
                    f"Develop targeted corrective actions and preventive measures to address the underlying issues."
                )
            insight_recommendations.append({"insight": insight_text, "recommendation": recommendation_text})

    # --- 3. Location activity ---
    if 'nama_lokasi_full' in df.columns and total_locations > 0:
        loc_counts = df['nama_lokasi_full'].value_counts()
        top_loc = loc_counts.index[0] if not loc_counts.empty else "N/A"
        top_loc_count = loc_counts.iloc[0] if not loc_counts.empty else 0
        if top_loc != "N/A":
            insight_text = (
                f"Location '{top_loc}' has the highest number of findings ({top_loc_count}). "
                f"This could indicate higher activity, more scrutiny, or potentially higher risk in this area."
            )
            recommendation_text = (
                f"Perform a detailed review of activities in '{top_loc}'. "
                f"Determine if the high volume is due to increased activity or specific risk factors. "
                f"Ensure adequate resources and controls are in place."
            )
            insight_recommendations.append({"insight": insight_text, "recommendation": recommendation_text})

    # --- 4. Resolution performance ---
    if 'days_to_close' in df.columns:
        closed_df = df.dropna(subset=['days_to_close'])
        if not closed_df.empty:
            avg_close_time = closed_df['days_to_close'].mean()
            median_close_time = closed_df['days_to_close'].median()
            # SLA threshold in days (e.g. 7)
            sla_threshold = 7
            slow_findings = closed_df[closed_df['days_to_close'] > sla_threshold]
            slow_count = len(slow_findings)
            slow_percentage = (slow_count / len(closed_df)) * 100 if len(closed_df) > 0 else 0

            insight_text = (
                f"The average time to close findings is {avg_close_time:.1f} days (median: {median_close_time:.1f} days). "
                f"{slow_count} findings ({slow_percentage:.1f}%) exceeded the {sla_threshold}-day SLA."
            )
            if slow_percentage > 20:
                recommendation_text = (
                    f"The resolution performance is below target. Investigate bottlenecks in the closure process. "
                    f"Prioritize findings that are taking longer than {sla_threshold} days. Consider implementing an escalation matrix."
                )
            else:
                recommendation_text = (
                    f"The resolution performance is generally good, but there's room for improvement. "
                    f"Focus on reducing the backlog of findings that exceed the {sla_threshold}-day SLA."
                )
            insight_recommendations.append({"insight": insight_text, "recommendation": recommendation_text})

    # --- 5. Monthly trend ---
    if 'created_at' in df.columns:
        # NOTE: 'M' is the legacy month-end alias; pandas >= 2.2 prefers 'ME'.
        # Kept as 'M' for compatibility with the pandas version this file targets.
        monthly_trend = df.set_index('created_at').resample('M').size()
        if len(monthly_trend) >= 2:
            last_month_count = monthly_trend.iloc[-1]
            prev_month_count = monthly_trend.iloc[-2]
            if prev_month_count > 0:
                change_pct = (last_month_count - prev_month_count) / prev_month_count * 100
                trend_word = "increase" if change_pct > 0 else "decrease"
                insight_text = (
                    f"There was a {change_pct:+.1f}% {trend_word} in finding volume between the last two months "
                    f"({monthly_trend.index[-2].strftime('%b %Y')} and {monthly_trend.index[-1].strftime('%b %Y')})."
                )
                if abs(change_pct) > 20:  # large swing warrants investigation
                    recommendation_text = (
                        f"Investigate the cause of this significant {trend_word} in findings. "
                        f"Review operational changes, contractor activities, or audit focus shifts that occurred recently."
                    )
                else:
                    recommendation_text = (
                        f"Monitor the trend over the next few weeks to see if this change represents a new pattern or a temporary fluctuation."
                    )
                insight_recommendations.append({"insight": insight_text, "recommendation": recommendation_text})

    # --- 6. Reporter activity (dominant reporters) ---
    if 'creator_nid' in df.columns:
        total_reports = len(df)
        top_reporter_counts = df['creator_nid'].value_counts()
        if not top_reporter_counts.empty:
            top_reporter_id = top_reporter_counts.index[0]
            top_reporter_count = top_reporter_counts.iloc[0]
            if top_reporter_count / total_reports > 0.15:  # one person files > 15% of reports
                insight_text = (
                    f"Reporter with ID '{top_reporter_id}' has submitted a disproportionately high number of findings ({top_reporter_count}). "
                    f"They account for {top_reporter_count/total_reports*100:.1f}% of the total volume."
                )
                recommendation_text = (
                    f"Recognize the active reporter. Also, ensure reporting is distributed across the team "
                    f"to provide a more comprehensive view of risks across all areas and activities."
                )
                insight_recommendations.append({"insight": insight_text, "recommendation": recommendation_text})

    return insight_recommendations
2150
 
2151
# Build the rule-based insights for the current filter selection, then render them.
ai_insights_and_recs = compute_ai_insights(df_filtered)

if ai_insights_and_recs:
    for idx, entry in enumerate(ai_insights_and_recs, start=1):
        # Insight card
        st.markdown(
            f'<div class="ai-insight"><strong>Insight {idx}:</strong> {entry["insight"]}</div>',
            unsafe_allow_html=True,
        )
        # Matching recommendation card
        st.markdown(
            f'<div class="ai-recommendation"><strong>Recommendation {idx}:</strong> {entry["recommendation"]}</div>',
            unsafe_allow_html=True,
        )
else:
    # Nothing was produced — typically an empty frame or missing columns after filtering.
    st.markdown('<div class="ai-insight">No significant AI insights could be generated. This might be due to insufficient data or missing required columns after filtering.</div>', unsafe_allow_html=True)

# =================== FOOTER ===================
st.markdown("---")
st.markdown(
    """
    <div style="text-align:center; color:#757575; font-size:0.9em;">
    <strong> Special Design for PLN </strong> • © 2025 PT Bukit Technology
    </div>
    """,
    unsafe_allow_html=True
)
 
1976
 
1977
  st.markdown("<h3 class='section-title'>OBJECTIVE 7 - Insight and Recommendation</h3>", unsafe_allow_html=True)
1978
 
1979
+
1980
def compute_risk_mitigation_insights(df: pd.DataFrame) -> List[dict]:
    """
    Generate *risk-mitigation-focused* insights and recommendations across:
      - Locations & coverage equity
      - Divisional reporting load (people & frequency)
      - Agentic safety behaviors (proactive vs reactive)
      - Wordcloud-based emerging risk detection
      - Actionable coverage-balancing strategies

    Parameters
    ----------
    df : pd.DataFrame
        Findings data. Optional columns used: 'nama_lokasi_full', 'creator_nid',
        'nama' (division), 'created_at' (assumed datetime64 — confirm upstream),
        'temuan_kategori', and a free-text column such as 'uraian_temuan'.
        Sections whose required columns are absent are skipped.

    Returns
    -------
    List[dict]
        Dicts with 'insight' and 'recommendation' keys; empty list for empty input.
    """
    insights = []

    if df.empty:
        return insights

    # --- Helper: detect risk terms from free text (simulated wordcloud via keyword frequency) ---
    def detect_emerging_risks(df):
        """Scan the first available free-text column for known risk keywords."""
        text_col = None
        for col in ['uraian_temuan', 'temuan_uraian', 'keterangan', 'catatan']:
            if col in df.columns and df[col].notna().any():
                text_col = col
                break
        if text_col is None:
            return [], []

        # Combine all non-null text, lower-cased for case-insensitive matching
        all_text = ' '.join(df[text_col].dropna().astype(str).str.lower())
        # Risk lexicon (adjust based on domain)
        risk_keywords = [
            'terbuka', 'tidak terkunci', 'tanpa izin', 'tanpa alat', 'tanpa pelindung',
            'overload', 'short circuit', 'grounding', 'exposed', 'fall', 'slip',
            'fire hazard', 'unauthorized', 'no ppe', 'unsecured', 'untrained'
        ]
        found_risks = [kw for kw in risk_keywords if kw in all_text]
        return risk_keywords, found_risks

    # --- 1. Coverage equity by location (spatial risk mapping) ---
    if 'nama_lokasi_full' in df.columns and 'creator_nid' in df.columns:
        # FIX: count rows with 'size' instead of counting 'temuan_id' — the
        # original raised KeyError whenever 'temuan_id' was absent.
        loc_activity = df.groupby('nama_lokasi_full').agg(
            findings_count=('creator_nid', 'size'),
            unique_reporters=('creator_nid', 'nunique')
        ).reset_index()
        total_locations = loc_activity.shape[0]
        low_coverage_locs = loc_activity[loc_activity['unique_reporters'] <= 1]

        # Risk signal: high finding volume served by very few reporters
        risky_high_low = loc_activity[
            (loc_activity['findings_count'] > loc_activity['findings_count'].median()) &
            (loc_activity['unique_reporters'] <= 2)
        ]

        if not risky_high_low.empty:
            loc_names = ', '.join(risky_high_low['nama_lokasi_full'].tolist()[:3])
            insight = (
                f"Locations {loc_names} show high finding volume but rely on ≤2 reporters, indicating potential blind spots "
                f"and over-dependence on few individuals — a coverage equity risk."
            )
            recommendation = (
                f"Redistribute inspection assignments using rotation schedules. Deploy 'buddy auditing' for high-risk locations. "
                f"Introduce anonymous near-miss reporting channels to supplement formal findings."
            )
            insights.append({"insight": insight, "recommendation": recommendation})

        if not low_coverage_locs.empty and len(low_coverage_locs) > total_locations * 0.3:
            insight = (
                f"Over 30% of locations ({len(low_coverage_locs)}/{total_locations}) are covered by only 1 reporter, "
                f"increasing the risk of unreported hazards due to observer fatigue or familiarity bias."
            )
            recommendation = (
                f"Implement mandatory location rotation for auditors every 2 months. "
                f"Use geotagged photo evidence to validate field presence and ensure physical coverage."
            )
            insights.append({"insight": insight, "recommendation": recommendation})

    # --- 2. Divisional load & frequency risk (over/under-reporting) ---
    # FIX: added 'creator_nid' to the guard — the original aggregated it
    # without checking, risking a KeyError. 'nama' = division (per user request).
    if {'nama', 'created_at', 'creator_nid'}.issubset(df.columns):
        div_summary = df.groupby('nama').agg(
            total_findings=('creator_nid', 'size'),  # FIX: row count; no 'temuan_id' dependency
            unique_people=('creator_nid', 'nunique'),
            first_report=('created_at', 'min'),
            last_report=('created_at', 'max')
        )
        # FIX: drop divisions with zero identified reporters (all-NaN creator_nid)
        # to avoid division by zero below.
        div_summary = div_summary[div_summary['unique_people'] > 0]
        div_summary['reporting_span_days'] = (div_summary['last_report'] - div_summary['first_report']).dt.days + 1
        div_summary['avg_freq_per_person'] = div_summary['total_findings'] / div_summary['unique_people']
        div_summary['findings_per_day'] = div_summary['total_findings'] / div_summary['reporting_span_days']

        # Thresholds (adjust as needed)
        HIGH_LOAD_THRESHOLD = 8      # avg > 8 findings/person
        LOW_ACTIVITY_THRESHOLD = 0.2  # < 0.2 findings/day

        high_load_div = div_summary[div_summary['avg_freq_per_person'] >= HIGH_LOAD_THRESHOLD]
        low_activity_div = div_summary[div_summary['findings_per_day'] <= LOW_ACTIVITY_THRESHOLD]

        if not high_load_div.empty:
            top_div = high_load_div['avg_freq_per_person'].idxmax()
            insight = (
                f"Division '{top_div}' has an elevated reporting load ({high_load_div.loc[top_div, 'avg_freq_per_person']:.1f} findings/person), "
                f"which may lead to fatigue, rushed inspections, or selective reporting."
            )
            recommendation = (
                f"Augment the division’s safety team with cross-trained support staff. "
                f"Introduce AI-assisted checklist validation to reduce cognitive load. "
                f"Monitor for declining finding quality (e.g., vague descriptions)."
            )
            insights.append({"insight": insight, "recommendation": recommendation})

        if not low_activity_div.empty:
            div_names = ', '.join(low_activity_div.index.tolist()[:3])
            insight = (
                f"Divisions {div_names} show persistently low reporting frequency (<0.2 findings/day), "
                f"suggesting either excellent safety performance or significant under-reporting."
            )
            recommendation = (
                f"Conduct a *silent audit* (observation-only, no prior notice) in these divisions to validate safety status. "
                f"Review training records and psychological safety survey scores — fear of blame suppresses reporting."
            )
            insights.append({"insight": insight, "recommendation": recommendation})

    # --- 3. Agentic safety mitigation (proactive vs reactive behavior) ---
    if 'temuan_kategori' in df.columns:
        # 'Positive' = proactive (e.g., good housekeeping, initiative);
        # everything else (e.g., 'Unsafe Condition', 'Unsafe Act') = reactive.
        total = len(df)
        proactive = (df['temuan_kategori'] == 'Positive').sum()
        proactive_rate = proactive / total if total > 0 else 0

        insight = (
            f"Only {proactive_rate:.1%} of findings reflect *proactive* safety behaviors (e.g., positive interventions, improvements). "
            f"The remaining {100 - proactive_rate*100:.1f}% are *reactive* (hazards already present)."
        )
        recommendation = (
            f"Shift incentives from 'finding count' to 'prevention impact'. "
            f"Launch an *Agentic Safety Program*: reward near-miss reports, safety suggestions, and coaching moments. "
            f"Track % of proactive findings monthly as a leading KPI."
        )
        insights.append({"insight": insight, "recommendation": recommendation})

    # --- 4. Emerging risk detection via wordcloud (cloud = risk signal) ---
    all_risk_terms, detected_terms = detect_emerging_risks(df)
    if detected_terms:
        missing_terms = set(all_risk_terms) - set(detected_terms)
        # Some high-severity terms present, but not all → partial risk cloud
        high_sev_terms = ['exposed', 'fire hazard', 'fall', 'short circuit', 'unauthorized']
        detected_high = [t for t in detected_terms if t in high_sev_terms]
        if detected_high:
            terms_str = ', '.join(detected_high)
            insight = (
                f"Wordcloud analysis indicates emerging high-severity risks: *{terms_str}*. "
                f"These signal active hazards (e.g., exposed conductors, fall risks) not yet fully mitigated."
            )
            recommendation = (
                f"Launch a 14-day *Targeted Risk Blitz* on locations reporting these terms. "
                f"Require immediate photo evidence of corrective actions. "
                f"Update inspection checklists to prioritize these items."
            )
            insights.append({"insight": insight, "recommendation": recommendation})

        # Why does the risk 'cloud' persist despite mitigation?
        if missing_terms and len(detected_terms) > 3:
            insight = (
                f"Despite mitigation efforts, the risk 'cloud' persists — likely due to: "
                f"(1) Recurring root causes (e.g., contractor turnover), "
                f"(2) Incomplete closure verification, or "
                f"(3) Findings reappearing in new locations after fixes in old ones."
            )
            # FIX: restored the missing punctuation between the two clauses
            # ("contractor IDs hold vendors" read as one run-on instruction).
            recommendation = (
                f"Adopt *closed-loop verification*: require geo-tagged before/after photos + supervisor sign-off. "
                f"Map recurring findings to contractor IDs; hold vendors accountable via SLA penalties. "
                f"Use AI to cluster similar findings across time/location to detect systemic failures."
            )
            insights.append({"insight": insight, "recommendation": recommendation})

    # --- 5. Coverage balancing strategy (how to achieve equitable coverage) ---
    # Based on location & reporter distribution
    if 'nama_lokasi_full' in df.columns and 'creator_nid' in df.columns:
        reporters_per_location = df.groupby('nama_lokasi_full')['creator_nid'].nunique()
        # FIX: std/mean is the coefficient of variation, not a Gini coefficient —
        # renamed the variable and the user-facing label accordingly.
        coverage_cv = (reporters_per_location.std() / reporters_per_location.mean()) if reporters_per_location.mean() > 0 else 0

        if coverage_cv > 0.6:  # high dispersion of reporter coverage
            insight = (
                f"Coverage inequality (coefficient of variation ≈ {coverage_cv:.2f}) is high — a few locations dominate reporting effort. "
                f"This creates surveillance deserts in low-coverage zones."
            )
            recommendation = (
                f"1. Assign *minimum 2 unique reporters per high-risk location* monthly. "
                f"2. Use route optimization (e.g., VRP algorithm) to balance travel + inspection load. "
                f"3. Deploy mobile micro-checklists for non-auditors (e.g., operators) to increase eyes-on-ground."
            )
            insights.append({"insight": insight, "recommendation": recommendation})

    return insights
 
2175
 
2176
# Compute risk-mitigation insights for the current filter selection and render them.
risk_insights = compute_risk_mitigation_insights(df_filtered)

if not risk_insights:
    # Guard clause: explain which columns the analysis needs when nothing was produced.
    st.markdown(
        "<div class='ai-insight'>No risk-mitigation insights generated. Ensure key columns are present: "
        "<code>nama_lokasi_full</code>, <code>nama</code> (division), <code>creator_nid</code>, <code>temuan_kategori</code>, and free-text field (e.g., <code>uraian_temuan</code>).</div>",
        unsafe_allow_html=True
    )
else:
    for number, item in enumerate(risk_insights, start=1):
        st.markdown(f"<div class='ai-insight'><strong>Insight {number}:</strong> {item['insight']}</div>", unsafe_allow_html=True)
        st.markdown(f"<div class='ai-recommendation'><strong>Action {number}:</strong> {item['recommendation']}</div>", unsafe_allow_html=True)