|
|
""" |
|
|
Optimization Results Display Functions for Streamlit |
|
|
Handles visualization of optimization results with charts and tables |
|
|
""" |
|
|
|
|
|
import streamlit as st |
|
|
import pandas as pd |
|
|
import plotly.express as px |
|
|
import plotly.graph_objects as go |
|
|
import sys |
|
|
import json |
|
|
|
|
|
|
|
|
def load_kit_hierarchy():
    """Load kit hierarchy data from JSON file.

    Returns:
        dict: Parsed hierarchy mapping from
        ``data/hierarchy_exports/kit_hierarchy.json``, or an empty dict
        when the file is missing or contains invalid JSON.
    """
    try:
        # Explicit encoding avoids platform-dependent defaults when decoding JSON.
        with open('data/hierarchy_exports/kit_hierarchy.json', 'r', encoding='utf-8') as f:
            return json.load(f)
    except (FileNotFoundError, json.JSONDecodeError):
        # Missing or corrupt hierarchy data is non-fatal for the UI.
        return {}
|
|
|
|
|
def get_kit_hierarchy_info(product):
    """Look up a product's hierarchy level name and its dependency list.

    Falls back to ('unknown', []) when the product is not registered or
    when the optimization configuration cannot be loaded.
    """
    try:
        from src.config.optimization_config import KIT_LEVELS, KIT_DEPENDENCIES
        from src.config.constants import KitLevel

        if product not in KIT_LEVELS:
            return 'unknown', []

        level_name = KitLevel.get_name(KIT_LEVELS[product])
        deps = KIT_DEPENDENCIES.get(product, [])
        return level_name, deps

    except Exception as e:
        print(f"Error getting hierarchy info for {product}: {e}")
        return 'unknown', []
|
|
|
|
|
|
|
|
def display_optimization_results(results):
    """Display comprehensive optimization results with visualizations.

    Entry point for the results page: creates one tab per analysis view
    and delegates rendering to the dedicated display_* helpers below.

    Args:
        results: Optimization result dict (expects at least 'objective',
            'weekly_production' and 'run_schedule' keys downstream).
    """
    st.header("π Optimization Results")

    # One tab per analysis view; the order here must match the
    # with-blocks that follow.
    tab1, tab2, tab3, tab4, tab5, tab6, tab7 = st.tabs([
        "π Weekly Summary",
        "π Daily Deep Dive",
        "π Line Schedules",
        "π¦ Kit Production",
        "π° Cost Analysis",
        "π Input Data",
        "π Demand Validation"
    ])

    with tab1:
        display_weekly_summary(results)

    with tab2:
        display_daily_deep_dive(results)

    with tab3:
        display_line_schedules(results)

    with tab4:
        display_kit_production(results)

    with tab5:
        display_cost_analysis(results)

    # The last two views read configuration/data files directly and do
    # not need the results object.
    with tab6:
        display_input_data_inspection()

    with tab7:
        display_demand_validation_tab()
|
|
|
|
|
def display_weekly_summary(results):
    """Display weekly summary with key metrics and charts.

    Shows total cost, production, fulfillment rate and cost per unit,
    followed by a grouped production-vs-demand bar chart.

    Args:
        results: Optimization result dict with 'objective' (total cost)
            and 'weekly_production' (product -> units produced).
    """
    st.subheader("π Weekly Performance Summary")

    # Demand is needed by both the fulfillment metric and the chart below;
    # fetch it once instead of re-importing/recomputing per section.
    from src.config.optimization_config import get_demand_dictionary
    DEMAND_DICTIONARY = get_demand_dictionary()

    col1, col2, col3, col4 = st.columns(4)

    with col1:
        total_cost = results['objective']
        st.metric("Total Cost", f"β¬{total_cost:,.2f}")

    with col2:
        total_production = sum(results['weekly_production'].values())
        st.metric("Total Production", f"{total_production:,.0f} units")

    with col3:
        total_demand = sum(DEMAND_DICTIONARY.values())
        # Guard against zero total demand to avoid division by zero.
        fulfillment_rate = (total_production / total_demand * 100) if total_demand > 0 else 0
        st.metric("Fulfillment Rate", f"{fulfillment_rate:.1f}%")

    with col4:
        # Guard against zero production to avoid division by zero.
        cost_per_unit = total_cost / total_production if total_production > 0 else 0
        st.metric("Cost per Unit", f"β¬{cost_per_unit:.2f}")

    st.subheader("π― Production vs Demand")

    prod_demand_data = []
    for product, production in results['weekly_production'].items():
        demand = DEMAND_DICTIONARY.get(product, 0)
        prod_demand_data.append({
            'Product': product,
            'Production': production,
            'Demand': demand,
            'Gap': production - demand
        })

    df_prod = pd.DataFrame(prod_demand_data)

    if not df_prod.empty:
        # Grouped bars: production next to demand for each product.
        fig = go.Figure()
        fig.add_trace(go.Bar(name='Production', x=df_prod['Product'], y=df_prod['Production'],
                             marker_color='lightblue'))
        fig.add_trace(go.Bar(name='Demand', x=df_prod['Product'], y=df_prod['Demand'],
                             marker_color='orange'))

        fig.update_layout(
            title='Weekly Production vs Demand by Product',
            xaxis_title='Product',
            yaxis_title='Units',
            barmode='group',
            height=400
        )
        st.plotly_chart(fig, use_container_width=True)
|
|
|
|
|
def display_daily_deep_dive(results):
    """Display daily breakdown with employee counts by type and shift.

    Builds a per-run staffing table from the schedule, then renders a
    faceted bar chart, a day/shift pivot with capacity columns, and a
    detailed assignment table.

    Args:
        results: Optimization result dict; uses 'run_schedule', a list of
            row dicts with 'day', 'shift', 'product', 'run_hours', 'units'.
    """
    st.subheader("π Daily Employee Count by Type and Shift")

    # Team requirements are constant across the schedule; fetch them once
    # instead of re-importing and recomputing inside the loop (as before).
    from src.config.optimization_config import get_team_requirements
    TEAM_REQ_PER_PRODUCT = get_team_requirements()

    employee_counts = []

    for row in results['run_schedule']:
        # Skip rows with no production activity at all.
        if row['run_hours'] <= 0 and row['units'] <= 0:
            continue

        day = f"Day {row['day']}"
        shift_name = {1: 'Regular', 2: 'Evening', 3: 'Overtime'}.get(row['shift'], f"Shift {row['shift']}")

        for emp_type in ['UNICEF Fixed term', 'Humanizer']:
            if row['product'] in TEAM_REQ_PER_PRODUCT.get(emp_type, {}):
                employee_count = TEAM_REQ_PER_PRODUCT[emp_type][row['product']]

                # Activity was already checked above; this extra guard keeps
                # zero-staff requirements out of the table.
                if employee_count > 0 and (row['run_hours'] > 0 or row['units'] > 0):
                    employee_counts.append({
                        'Day': day,
                        'Employee Type': emp_type,
                        'Shift': shift_name,
                        'Product': row['product'],
                        'Employee Count': employee_count,
                        'Hours Worked': row['run_hours'],
                        'Total Person-Hours': employee_count * row['run_hours']
                    })

    if employee_counts:
        df_employees = pd.DataFrame(employee_counts)

        # Aggregate to one row per day / employee type / shift.
        df_summary = df_employees.groupby(['Day', 'Employee Type', 'Shift']).agg({
            'Employee Count': 'sum',
            'Total Person-Hours': 'sum'
        }).reset_index()

        fig = px.bar(df_summary,
                     x='Day',
                     y='Employee Count',
                     color='Shift',
                     facet_col='Employee Type',
                     title='Daily Employee Count by Type and Shift',
                     color_discrete_map={
                         'Regular': '#32CD32',
                         'Overtime': '#FF8C00',
                         'Evening': '#4169E1'
                     },
                     height=500)

        fig.update_layout(
            yaxis_title='Number of Employees',
            showlegend=True
        )
        st.plotly_chart(fig, use_container_width=True)

        st.subheader("π Employee Allocation Details")

        st.markdown("**Summary by Day and Shift:**")
        summary_pivot = df_summary.pivot_table(
            values='Employee Count',
            index=['Day', 'Shift'],
            columns='Employee Type',
            aggfunc='sum',
            fill_value=0
        ).reset_index()

        # Best effort: add a daily capacity column per employee type next
        # to the scheduled counts. Failure here must not break the page.
        try:
            from src.config.optimization_config import get_max_employee_per_type_on_day
            MAX_EMPLOYEE_PER_TYPE_ON_DAY = get_max_employee_per_type_on_day()

            for emp_type in ['UNICEF Fixed term', 'Humanizer']:
                if emp_type in summary_pivot.columns:
                    capacity_col = f'{emp_type} Capacity'

                    # Recover the numeric day index from the 'Day N' label.
                    summary_pivot['Day_Num'] = summary_pivot['Day'].str.extract(r'(\d+)').astype(int)

                    summary_pivot[capacity_col] = summary_pivot['Day_Num'].apply(
                        lambda day: MAX_EMPLOYEE_PER_TYPE_ON_DAY.get(emp_type, {}).get(day, 0)
                    )

                    summary_pivot = summary_pivot.drop('Day_Num', axis=1)

        except Exception as e:
            print(f"Could not add capacity information: {e}")

        st.dataframe(summary_pivot, use_container_width=True)

        st.markdown("**Detailed Production Assignments:**")
        df_detailed = df_employees[['Day', 'Employee Type', 'Shift', 'Product', 'Employee Count', 'Hours Worked']].copy()
        df_detailed = df_detailed.sort_values(['Day', 'Shift', 'Employee Type'])
        st.dataframe(df_detailed, use_container_width=True)

    else:
        st.info("π No employees scheduled - All production runs have zero hours and zero units")

        # Explain why the table is empty when schedule rows do exist.
        total_schedule_rows = len(results.get('run_schedule', []))
        if total_schedule_rows > 0:
            st.markdown(f"*Note: {total_schedule_rows} schedule entries exist but all have zero production activity*")
|
|
|
|
|
def display_line_schedules(results):
    """Display line schedules showing what runs when and with how many workers.

    Builds one record per scheduled run (staffing, demand, hours, units),
    renders a hierarchy-colored timeline, a color legend, and a detail
    table filtered to rows with real activity.

    Args:
        results: Optimization result dict; uses 'run_schedule'.
    """
    st.subheader("π Production Line Schedules")

    schedule_data = []
    from src.config.optimization_config import get_team_requirements, get_demand_dictionary, shift_code_to_name, line_code_to_name
    TEAM_REQ_PER_PRODUCT = get_team_requirements()
    DEMAND_DICTIONARY = get_demand_dictionary()

    # Code -> human-readable label lookups for shifts and line types.
    shift_names = shift_code_to_name()
    line_names = line_code_to_name()

    for row in results['run_schedule']:
        # Staffing requirement for this product, per employee type.
        unicef_workers = TEAM_REQ_PER_PRODUCT.get('UNICEF Fixed term', {}).get(row['product'], 0)
        humanizer_workers = TEAM_REQ_PER_PRODUCT.get('Humanizer', {}).get(row['product'], 0)
        total_workers = unicef_workers + humanizer_workers

        kit_total_demand = DEMAND_DICTIONARY.get(row['product'], 0)

        # Fall back to raw IDs when no friendly name is configured.
        line_name = line_names.get(row['line_type_id'], f"Line {row['line_type_id']}")
        shift_name = shift_names.get(row['shift'], f"Shift {row['shift']}")

        schedule_data.append({
            'Day': f"Day {row['day']}",
            'Line': f"{line_name} {row['line_idx']}",
            'Shift': shift_name,
            'Product': row['product'],
            'Kit Total Demand': kit_total_demand,
            'Hours': round(row['run_hours'], 2),
            'Units': round(row['units'], 0),
            'UNICEF Workers': unicef_workers,
            'Humanizer Workers': humanizer_workers,
            'Total Workers': total_workers
        })

    df_schedule = pd.DataFrame(schedule_data)

    if not df_schedule.empty:
        st.subheader("β° Production Line by Line and Day")

        # Annotate each run with its kit hierarchy level and dependencies
        # so the timeline can color bars and check dependency timing.
        for row in schedule_data:
            hierarchy_level, dependencies = get_kit_hierarchy_info(row['Product'])
            row['Hierarchy_Level'] = hierarchy_level
            row['Dependencies'] = dependencies

        # Rebuild the frame to pick up the columns added above.
        df_schedule = pd.DataFrame(schedule_data)

        fig = create_enhanced_timeline_with_relationships(df_schedule)

        if fig:
            st.plotly_chart(fig, use_container_width=True)
        else:
            st.warning("Could not create enhanced timeline chart")

        # Legend explaining the hierarchy color scheme used by the chart.
        st.markdown("""
        **π¨ Hierarchy Level Colors:**
        - π’ **Prepack**: Level 0 - Dependencies produced first (Lime Green)
        - π΅ **Subkit**: Level 1 - Intermediate assemblies (Royal Blue)
        - π **Master**: Level 2 - Final products (Dark Orange)
        """)

        st.subheader("π Detailed Production Schedule")

        # Hide zero-activity rows so the table only shows real production.
        df_schedule_filtered = df_schedule[
            (df_schedule['Hours'] > 0) | (df_schedule['Units'] > 0)
        ].copy()

        if df_schedule_filtered.empty:
            st.warning("No production activity scheduled (all hours and units are zero)")
        else:
            filtered_count = len(df_schedule_filtered)
            total_count = len(df_schedule)
            if filtered_count < total_count:
                st.info(f"Showing {filtered_count} active production entries (filtered out {total_count - filtered_count} zero-activity rows)")

            st.dataframe(df_schedule_filtered, use_container_width=True)
|
|
|
|
|
def create_enhanced_timeline_with_relationships(df_schedule):
    """Create enhanced timeline chart with hierarchy colors and relationship lines.

    Args:
        df_schedule: DataFrame with 'Hours', 'Line', 'Day', 'Product',
            'Units', 'Total Workers' and 'Hierarchy_Level' columns
            (plus optional 'Dependencies' used for annotations).

    Returns:
        A plotly Figure, or None when the schedule is empty.
    """
    if df_schedule.empty:
        return None

    # Bar color per kit hierarchy level (matches the legend shown by the caller).
    hierarchy_colors = {
        'prepack': '#32CD32',
        'subkit': '#4169E1',
        'master': '#FF8C00',
        'unknown': '#8B0000'
    }

    # Horizontal bars: one facet per day, lines on the y-axis, hours as length.
    fig = px.bar(df_schedule,
                 x='Hours',
                 y='Line',
                 color='Hierarchy_Level',
                 facet_col='Day',
                 orientation='h',
                 title='Production Schedule by Line and Day (Colored by Hierarchy Level)',
                 height=500,
                 color_discrete_map=hierarchy_colors,
                 hover_data=['Product', 'Units', 'Total Workers'])

    # Outline the bars so adjacent runs stay distinguishable.
    fig.update_traces(
        marker_line_color='black',
        marker_line_width=1.5,
        opacity=0.8
    )

    # White theme with a bordered legend for readability.
    fig.update_layout(
        showlegend=True,
        plot_bgcolor='white',
        paper_bgcolor='white',
        font=dict(size=12, color='#000000', family='Arial, sans-serif'),
        title_font=dict(color='#000000', size=14, family='Arial Bold'),
        legend_title_text='Hierarchy Level',
        legend=dict(
            font=dict(size=11, color='#000000'),
            bgcolor='rgba(255,255,255,0.8)',
            bordercolor='#000000',
            borderwidth=1
        )
    )

    fig.update_xaxes(
        showgrid=True,
        gridwidth=0.5,
        gridcolor='lightgray',
        title_font=dict(size=12, color='#000000'),
        tickfont=dict(color='#000000', size=10)
    )
    fig.update_yaxes(
        showgrid=True,
        gridwidth=0.5,
        gridcolor='lightgray',
        title_font=dict(size=12, color='#000000'),
        tickfont=dict(color='#000000', size=10)
    )

    # Dependency annotations are best-effort; the chart still renders
    # without them if anything goes wrong.
    try:
        fig = add_dependency_connections(fig, df_schedule)
    except Exception as e:
        print(f"Could not add dependency connections: {e}")

    return fig
|
|
|
|
|
def add_dependency_connections(fig, df_schedule): |
|
|
"""Add arrows or lines showing dependencies between kits""" |
|
|
|
|
|
product_positions = {} |
|
|
|
|
|
for _, row in df_schedule.iterrows(): |
|
|
product = row['Product'] |
|
|
day = row['Day'] |
|
|
line = row['Line'] |
|
|
|
|
|
|
|
|
product_positions[product] = { |
|
|
'day': day, |
|
|
'line': line, |
|
|
'dependencies': row.get('Dependencies', []) |
|
|
} |
|
|
|
|
|
|
|
|
relationship_count = 0 |
|
|
dependency_details = [] |
|
|
|
|
|
for product, pos_info in product_positions.items(): |
|
|
dependencies = pos_info['dependencies'] |
|
|
|
|
|
for dep in dependencies: |
|
|
if dep in product_positions: |
|
|
|
|
|
dep_pos = product_positions[dep] |
|
|
relationship_count += 1 |
|
|
|
|
|
dependency_details.append({ |
|
|
'product': product, |
|
|
'dependency': dep, |
|
|
'product_day': pos_info['day'], |
|
|
'dependency_day': dep_pos['day'], |
|
|
'timing': 'correct' if dep_pos['day'] <= pos_info['day'] else 'violation' |
|
|
}) |
|
|
|
|
|
|
|
|
if relationship_count > 0: |
|
|
violations = len([d for d in dependency_details if d['timing'] == 'violation']) |
|
|
|
|
|
fig.add_annotation( |
|
|
text=f"π {relationship_count} dependencies | {'β οΈ ' + str(violations) + ' violations' if violations > 0 else 'β
All correct'}", |
|
|
xref="paper", yref="paper", |
|
|
x=0.02, y=0.98, |
|
|
showarrow=False, |
|
|
font=dict(size=10, color="purple"), |
|
|
bgcolor="rgba(255,255,255,0.8)", |
|
|
bordercolor="purple", |
|
|
borderwidth=1 |
|
|
) |
|
|
|
|
|
|
|
|
if dependency_details: |
|
|
dependency_text = "\\n".join([ |
|
|
f"β’ {d['dependency']} β {d['product']} ({'β
' if d['timing'] == 'correct' else 'β οΈ'})" |
|
|
for d in dependency_details[:5] |
|
|
]) |
|
|
|
|
|
if len(dependency_details) > 5: |
|
|
dependency_text += f"\\n... and {len(dependency_details) - 5} more" |
|
|
|
|
|
fig.add_annotation( |
|
|
text=dependency_text, |
|
|
xref="paper", yref="paper", |
|
|
x=0.02, y=0.02, |
|
|
showarrow=False, |
|
|
font=dict(size=8, color="navy"), |
|
|
bgcolor="rgba(240,248,255,0.9)", |
|
|
bordercolor="navy", |
|
|
borderwidth=1, |
|
|
align="left" |
|
|
) |
|
|
|
|
|
return fig |
|
|
|
|
|
def display_kit_production(results):
    """Show per-kit production vs demand with a fulfillment chart and table.

    Args:
        results: Optimization result dict; uses 'weekly_production'.
    """
    st.subheader("π¦ Kit Production Analysis")

    from src.config.optimization_config import get_demand_dictionary
    demand_by_product = get_demand_dictionary()

    rows = []
    for product, produced in results['weekly_production'].items():
        required = demand_by_product.get(product, 0)
        rows.append({
            'Product': product,
            'Production': produced,
            'Demand': required,
            'Fulfillment %': (produced / required * 100) if required > 0 else 0,
            'Over/Under': produced - required
        })

    df_production = pd.DataFrame(rows)

    if df_production.empty:
        return

    # Fulfillment bar chart with a dashed 100%-target reference line.
    fig = px.bar(df_production, x='Product', y='Fulfillment %',
                 title='Kit Fulfillment Rate by Product',
                 color='Fulfillment %',
                 color_continuous_scale=['red', 'yellow', 'green'],
                 height=400)
    fig.add_hline(y=100, line_dash="dash", line_color="black",
                  annotation_text="100% Target")
    st.plotly_chart(fig, use_container_width=True)

    st.subheader("π Kit Production Summary")
    st.dataframe(df_production, use_container_width=True)
|
|
|
|
|
def display_cost_analysis(results):
    """Display cost breakdown and analysis.

    Computes per-run labor costs from staffing requirements, hourly rates
    and each shift's payment mode, then shows summary metrics, a cost pie
    chart, and a detailed breakdown table with a grand-total row.

    Args:
        results: Optimization result dict; uses 'objective' and 'run_schedule'.
    """
    st.subheader("π° Cost Breakdown Analysis")

    from src.config.optimization_config import get_cost_list_per_emp_shift, get_team_requirements, shift_code_to_name, line_code_to_name
    COST_LIST_PER_EMP_SHIFT = get_cost_list_per_emp_shift()
    TEAM_REQ_PER_PRODUCT = get_team_requirements()

    # Code -> human-readable label lookups for shifts and line types.
    shift_names = shift_code_to_name()
    line_names = line_code_to_name()

    # Payment configuration is schedule-wide; fetch it once instead of
    # re-importing and recomputing on every schedule row (as before).
    from src.config.optimization_config import get_payment_mode_config, get_max_hour_per_shift_per_person
    PAYMENT_MODE_CONFIG = get_payment_mode_config()
    MAX_HOUR_PER_SHIFT_PER_PERSON = get_max_hour_per_shift_per_person()

    cost_data = []
    total_cost_by_type = {}

    for row in results['run_schedule']:
        product = row['product']
        hours = row['run_hours']
        shift = row['shift']
        shift_name = shift_names.get(shift, f"Shift {shift}")
        line_name = line_names.get(row['line_type_id'], f"Line {row['line_type_id']}")

        for emp_type in ['UNICEF Fixed term', 'Humanizer']:
            workers_needed = TEAM_REQ_PER_PRODUCT.get(emp_type, {}).get(product, 0)
            hourly_rate = COST_LIST_PER_EMP_SHIFT.get(emp_type, {}).get(shift, 0)

            payment_mode = PAYMENT_MODE_CONFIG.get(shift, "partial")

            if payment_mode == "bulk" and hours > 0:
                # Bulk shifts pay the full shift length even for partial runs.
                shift_hours = MAX_HOUR_PER_SHIFT_PER_PERSON.get(shift, hours)
                cost = workers_needed * shift_hours * hourly_rate
                display_hours = shift_hours
            else:
                # Partial shifts pay only the hours actually worked.
                cost = workers_needed * hours * hourly_rate
                display_hours = hours

            if emp_type not in total_cost_by_type:
                total_cost_by_type[emp_type] = 0
            total_cost_by_type[emp_type] += cost

            if cost > 0:
                # Flag bulk-paid entries so the table explains the paid hours.
                payment_indicator = f" ({payment_mode})" if payment_mode == "bulk" else ""
                cost_data.append({
                    'Employee Type': emp_type,
                    'Day': f"Day {row['day']}",
                    'Shift': f"{shift_name}{payment_indicator}",
                    'Line': f"{line_name} {row['line_idx']}",
                    'Product': product,
                    'Actual Hours': round(hours, 2),
                    'Paid Hours': round(display_hours, 2),
                    'Workers': workers_needed,
                    'Hourly Rate': f"β¬{hourly_rate:.2f}",
                    'Cost': round(cost, 2)
                })

    total_cost = results['objective']
    col1, col2, col3, col4 = st.columns(4)

    with col1:
        st.metric("Total Cost", f"β¬{total_cost:,.2f}")

    with col2:
        unicef_cost = total_cost_by_type.get('UNICEF Fixed term', 0)
        st.metric("UNICEF Cost", f"β¬{unicef_cost:,.2f}")

    with col3:
        humanizer_cost = total_cost_by_type.get('Humanizer', 0)
        st.metric("Humanizer Cost", f"β¬{humanizer_cost:,.2f}")

    with col4:
        # Average over distinct scheduled days; guard the empty schedule.
        avg_daily_cost = total_cost / len(set(row['day'] for row in results['run_schedule'])) if results['run_schedule'] else 0
        st.metric("Avg Daily Cost", f"β¬{avg_daily_cost:,.2f}")

    if total_cost_by_type:
        fig = px.pie(values=list(total_cost_by_type.values()),
                     names=list(total_cost_by_type.keys()),
                     title='Cost Distribution by Employee Type')
        st.plotly_chart(fig, use_container_width=True)

    if cost_data:
        df_costs = pd.DataFrame(cost_data)

        # Append a grand-total row to the breakdown table.
        total_cost = df_costs['Cost'].sum()
        total_paid_hours = df_costs['Paid Hours'].sum() if 'Paid Hours' in df_costs.columns else df_costs['Actual Hours'].sum()
        total_row = pd.DataFrame([{
            'Employee Type': '**TOTAL**',
            'Day': '-',
            'Shift': '-',
            'Line': '-',
            'Product': '-',
            'Actual Hours': df_costs['Actual Hours'].sum(),
            'Paid Hours': total_paid_hours,
            'Workers': df_costs['Workers'].sum(),
            'Hourly Rate': '-',
            'Cost': total_cost
        }])

        df_costs_with_total = pd.concat([df_costs, total_row], ignore_index=True)

        st.subheader("π Detailed Cost Breakdown")
        st.dataframe(df_costs_with_total, use_container_width=True)
|
|
|
|
|
|
|
|
def display_input_data_inspection():
    """
    Display comprehensive input data inspection showing what was fed into the optimizer.

    Renders one expander per configuration area (schedule, workforce,
    lines, demand, hierarchy, costs, misc). Any failure while reading the
    configuration is surfaced as a single error message rather than a crash.
    """
    st.subheader("π Input Data Inspection")
    st.markdown("This section shows all the input data and parameters that were fed into the optimization model.")

    try:
        from src.config import optimization_config
        from src.config.constants import ShiftType, LineType, KitLevel

        # --- Schedule & time parameters ---------------------------------
        with st.expander("π **Schedule & Time Parameters**", expanded=True):
            col1, col2 = st.columns(2)

            with col1:
                st.write("**Date Range:**")
                date_span = optimization_config.get_date_span()
                st.write(f"β’ Planning Period: {len(date_span)} days")
                st.write(f"β’ Date Span: {list(date_span)}")

                st.write("**Shift Configuration:**")
                shift_list = optimization_config.get_shift_list()
                for shift in shift_list:
                    shift_name = ShiftType.get_name(shift)
                    st.write(f"β’ {shift_name} (ID: {shift})")

            with col2:
                st.write("**Work Hours Configuration:**")
                max_hours_shift = optimization_config.get_max_hour_per_shift_per_person()
                for shift_id, hours in max_hours_shift.items():
                    shift_name = ShiftType.get_name(shift_id)
                    st.write(f"β’ {shift_name}: {hours} hours/shift")

                from src.config.optimization_config import MAX_HOUR_PER_PERSON_PER_DAY
                max_daily_hours = MAX_HOUR_PER_PERSON_PER_DAY
                st.write(f"β’ Maximum daily hours per person: {max_daily_hours}")

        # --- Workforce parameters ----------------------------------------
        with st.expander("π₯ **Workforce Parameters**", expanded=False):
            col1, col2 = st.columns(2)

            with col1:
                st.write("**Employee Types:**")
                emp_types = optimization_config.get_employee_type_list()
                for emp_type in emp_types:
                    st.write(f"β’ {emp_type}")

                st.write("**Daily Workforce Capacity:**")
                max_emp_per_day = optimization_config.get_max_employee_per_type_on_day()
                for emp_type, daily_caps in max_emp_per_day.items():
                    st.write(f"**{emp_type}:**")
                    for day, count in daily_caps.items():
                        st.write(f" - Day {day}: {count} employees")

            with col2:
                st.write("**Team Requirements per Product:**")
                team_req = optimization_config.get_team_requirements()
                st.write("*Sample products:*")

                # Show the first few products only; full list can be large.
                sample_products = list(team_req.get('UNICEF Fixed term', {}).keys())[:5]
                for product in sample_products:
                    st.write(f"**{product}:**")
                    for emp_type in emp_types:
                        req = team_req.get(emp_type, {}).get(product, 0)
                        if req > 0:
                            st.write(f" - {emp_type}: {req}")

                if len(team_req.get('UNICEF Fixed term', {})) > 5:
                    remaining = len(team_req.get('UNICEF Fixed term', {})) - 5
                    st.write(f"... and {remaining} more products")

        # --- Production line parameters ----------------------------------
        with st.expander("π **Production & Line Parameters**", expanded=False):
            col1, col2 = st.columns(2)

            with col1:
                st.write("**Line Configuration:**")
                line_list = optimization_config.get_line_list()
                line_cnt = optimization_config.get_line_cnt_per_type()

                for line_type in line_list:
                    line_name = LineType.get_name(line_type)
                    count = line_cnt.get(line_type, 0)
                    st.write(f"β’ {line_name} (ID: {line_type}): {count} lines")

                st.write("**Maximum Workers per Line:**")
                max_workers = optimization_config.get_max_parallel_workers()
                for line_type, max_count in max_workers.items():
                    line_name = LineType.get_name(line_type)
                    st.write(f"β’ {line_name}: {max_count} workers max")

            with col2:
                st.write("**Product-Line Matching:**")
                from src.config.optimization_config import KIT_LINE_MATCH_DICT
                kit_line_match = KIT_LINE_MATCH_DICT
                st.write("*Sample mappings:*")
                sample_items = list(kit_line_match.items())[:10]
                for product, line_type in sample_items:
                    line_name = LineType.get_name(line_type)
                    st.write(f"β’ {product}: {line_name}")

                if len(kit_line_match) > 10:
                    remaining = len(kit_line_match) - 10
                    st.write(f"... and {remaining} more product mappings")

        # --- Products and demand -----------------------------------------
        with st.expander("π¦ **Product & Demand Data**", expanded=False):
            col1, col2 = st.columns(2)

            with col1:
                st.write("**Product List:**")
                product_list = optimization_config.get_product_list()
                st.write(f"β’ Total products: {len(product_list)}")
                st.write("*Sample products:*")
                for product in product_list[:10]:
                    st.write(f" - {product}")
                if len(product_list) > 10:
                    st.write(f" ... and {len(product_list) - 10} more")

                st.write("**Production Speed (units/hour):**")
                from src.preprocess import extract
                speed_data = extract.read_package_speed_data()
                st.write("*Sample speeds:*")
                sample_speeds = list(speed_data.items())[:5]
                for product, speed in sample_speeds:
                    st.write(f"β’ {product}: {speed:.1f} units/hour")
                if len(speed_data) > 5:
                    remaining = len(speed_data) - 5
                    st.write(f"... and {remaining} more products")

            with col2:
                st.write("**Weekly Demand:**")
                demand_dict = optimization_config.get_demand_dictionary()
                st.write(f"β’ Total products with demand: {len(demand_dict)}")

                total_demand = sum(demand_dict.values())
                st.write(f"β’ Total weekly demand: {total_demand:,.0f} units")

                st.write("*Sample demands:*")

                # Highest-demand products first.
                sorted_demands = sorted(demand_dict.items(), key=lambda x: x[1], reverse=True)[:10]
                for product, demand in sorted_demands:
                    st.write(f"β’ {product}: {demand:,.0f} units")

                if len(demand_dict) > 10:
                    remaining = len(demand_dict) - 10
                    st.write(f"... and {remaining} more products")

        # --- Kit hierarchy and dependencies ------------------------------
        with st.expander("ποΈ **Kit Hierarchy & Dependencies**", expanded=False):
            col1, col2 = st.columns(2)

            with col1:
                st.write("**Kit Levels:**")
                kit_levels = optimization_config.get_kit_levels()

                # Count kits per hierarchy level name.
                level_counts = {}
                for kit, level in kit_levels.items():
                    level_name = KitLevel.get_name(level)
                    if level_name not in level_counts:
                        level_counts[level_name] = 0
                    level_counts[level_name] += 1

                for level_name, count in level_counts.items():
                    st.write(f"β’ {level_name}: {count} kits")

                st.write("*Sample kit levels:*")
                sample_levels = list(kit_levels.items())[:10]
                for kit, level in sample_levels:
                    level_name = KitLevel.get_name(level)
                    st.write(f" - {kit}: {level_name}")

                if len(kit_levels) > 10:
                    remaining = len(kit_levels) - 10
                    st.write(f" ... and {remaining} more kits")

            with col2:
                st.write("**Dependencies:**")
                kit_deps = optimization_config.get_kit_dependencies()

                total_deps = sum(len(deps) for deps in kit_deps.values())
                kits_with_deps = len([k for k, deps in kit_deps.items() if deps])

                st.write(f"β’ Total dependency relationships: {total_deps}")
                st.write(f"β’ Kits with dependencies: {kits_with_deps}")

                st.write("*Sample dependencies:*")
                sample_deps = [(k, deps) for k, deps in kit_deps.items() if deps][:5]
                for kit, deps in sample_deps:
                    st.write(f"β’ {kit}:")
                    for dep in deps[:3]:
                        st.write(f" - depends on: {dep}")
                    if len(deps) > 3:
                        st.write(f" - ... and {len(deps) - 3} more")

                if len(sample_deps) > 5:
                    remaining = len([k for k, deps in kit_deps.items() if deps]) - 5
                    st.write(f"... and {remaining} more kits with dependencies")

        # --- Costs and payment modes -------------------------------------
        with st.expander("π° **Cost & Payment Configuration**", expanded=False):
            col1, col2 = st.columns(2)

            with col1:
                st.write("**Hourly Cost Rates:**")
                cost_rates = optimization_config.get_cost_list_per_emp_shift()

                for emp_type, shift_costs in cost_rates.items():
                    st.write(f"**{emp_type}:**")
                    for shift_id, cost in shift_costs.items():
                        shift_name = ShiftType.get_name(shift_id)
                        st.write(f" - {shift_name}: β¬{cost:.2f}/hour")

            with col2:
                st.write("**Payment Mode Configuration:**")
                payment_config = optimization_config.get_payment_mode_config()

                payment_descriptions = {
                    'bulk': 'Full shift payment (even for partial hours)',
                    'partial': 'Pay only for actual hours worked'
                }

                for shift_id, mode in payment_config.items():
                    shift_name = ShiftType.get_name(shift_id)
                    description = payment_descriptions.get(mode, mode)
                    st.write(f"β’ **{shift_name}:** {mode.title()}")
                    st.caption(f" {description}")

        # --- Remaining configuration -------------------------------------
        with st.expander("βοΈ **Additional Configuration**", expanded=False):
            col1, col2 = st.columns(2)

            with col1:
                st.write("**Schedule Mode:**")
                schedule_mode = "weekly"
                st.write(f"β’ Planning mode: {schedule_mode}")

                st.write("**Evening Shift Mode:**")
                from src.config.optimization_config import EVENING_SHIFT_MODE
                evening_mode = EVENING_SHIFT_MODE
                evening_threshold = optimization_config.get_evening_shift_demand_threshold()
                st.write(f"β’ Mode: {evening_mode}")
                st.write(f"β’ Activation threshold: {evening_threshold:.1%}")

            with col2:
                st.write("**Fixed Staffing:**")
                fixed_min_unicef = optimization_config.get_fixed_min_unicef_per_day()
                st.write(f"β’ Minimum UNICEF staff per day: {fixed_min_unicef}")

                st.write("**Data Sources:**")
                st.write("β’ Kit hierarchy: kit_hierarchy.json")
                st.write("β’ Production orders: CSV files")
                st.write("β’ Personnel data: WH_Workforce CSV")
                st.write("β’ Speed data: Kits_Calculation CSV")

    except Exception as e:
        st.error(f"β Error loading input data inspection: {str(e)}")
        st.info("π‘ This may happen if the optimization configuration is not properly loaded. Please check the Settings page first.")

    # Refresh control is rendered even when loading failed above.
    st.markdown("---")
    if st.button("π Refresh Input Data", help="Reload the current configuration data"):
        st.rerun()
|
|
|
|
|
|
|
|
def display_demand_validation_tab():
    """Render the demand-validation view inside the results tab.

    Delegates to ``src.demand_validation_viz`` and converts both import
    problems and runtime failures into user-facing error messages instead
    of crashing the page.
    """
    try:
        from src.demand_validation_viz import display_demand_validation
        display_demand_validation()
    except ImportError as err:
        st.error(f"β Error loading demand validation module: {str(err)}")
        st.info("π‘ Please ensure the demand validation module is properly installed.")
    except Exception as err:
        st.error(f"β Error in demand validation: {str(err)}")
        st.info("π‘ Please check the data files and configuration.")