HaLim
commited on
Commit
Β·
868114c
1
Parent(s):
cc411d5
Touchpoint - complete idle analysis data
Browse files- config_page.py +15 -1
- optimization_results.py +409 -36
- src/config/constants.py +2 -9
- src/config/optimization_config.py +19 -0
- src/models/optimizer_real.py +210 -33
- src/visualization/hierarchy_dashboard.py +555 -0
- src/visualization/kit_relationships.py +630 -0
- test_hierarchy_viz.py +167 -0
- test_kit_relationships.py +162 -0
config_page.py
CHANGED
|
@@ -81,7 +81,7 @@ def initialize_session_state():
|
|
| 81 |
MAX_PARALLEL_WORKERS, COST_LIST_PER_EMP_SHIFT,
|
| 82 |
PAYMENT_MODE_CONFIG, LINE_CNT_PER_TYPE,
|
| 83 |
MAX_EMPLOYEE_PER_TYPE_ON_DAY, start_date, end_date,
|
| 84 |
-
shift_code_to_name
|
| 85 |
)
|
| 86 |
|
| 87 |
# Get the actual computed default values from optimization_config.py
|
|
@@ -97,6 +97,7 @@ def initialize_session_state():
|
|
| 97 |
|
| 98 |
# Fixed staff configuration - from optimization_config.py
|
| 99 |
'fixed_staff_mode': FIXED_STAFF_CONSTRAINT_MODE,
|
|
|
|
| 100 |
|
| 101 |
# Payment configuration - from optimization_config.py
|
| 102 |
'payment_mode_shift_1': PAYMENT_MODE_CONFIG.get(ShiftType.REGULAR),
|
|
@@ -236,6 +237,17 @@ def render_workforce_config():
|
|
| 236 |
help="Maximum number of Humanizer employees per day"
|
| 237 |
)
|
| 238 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 239 |
# Working hours configuration
|
| 240 |
st.subheader("β° Working Hours Configuration")
|
| 241 |
|
|
@@ -520,6 +532,7 @@ def save_configuration():
|
|
| 520 |
'workforce_limits': {
|
| 521 |
'max_unicef_per_day': st.session_state.max_unicef_per_day,
|
| 522 |
'max_humanizer_per_day': st.session_state.max_humanizer_per_day,
|
|
|
|
| 523 |
},
|
| 524 |
'working_hours': {
|
| 525 |
'max_hour_per_person_per_day': st.session_state.max_hour_per_person_per_day,
|
|
@@ -598,6 +611,7 @@ def display_user_friendly_summary(config):
|
|
| 598 |
with col1:
|
| 599 |
st.write(f"**Max UNICEF Staff per Day:** {config['workforce_limits']['max_unicef_per_day']} people")
|
| 600 |
st.write(f"**Max Humanizer Staff per Day:** {config['workforce_limits']['max_humanizer_per_day']} people")
|
|
|
|
| 601 |
with col2:
|
| 602 |
st.write(f"**Staff Management Mode:** {config['fixed_staff_mode'].replace('_', ' ').title()}")
|
| 603 |
st.write(f"**Max Hours per Person per Day:** {config['working_hours']['max_hour_per_person_per_day']} hours")
|
|
|
|
| 81 |
MAX_PARALLEL_WORKERS, COST_LIST_PER_EMP_SHIFT,
|
| 82 |
PAYMENT_MODE_CONFIG, LINE_CNT_PER_TYPE,
|
| 83 |
MAX_EMPLOYEE_PER_TYPE_ON_DAY, start_date, end_date,
|
| 84 |
+
shift_code_to_name, FIXED_MIN_UNICEF_PER_DAY
|
| 85 |
)
|
| 86 |
|
| 87 |
# Get the actual computed default values from optimization_config.py
|
|
|
|
| 97 |
|
| 98 |
# Fixed staff configuration - from optimization_config.py
|
| 99 |
'fixed_staff_mode': FIXED_STAFF_CONSTRAINT_MODE,
|
| 100 |
+
'fixed_min_unicef_per_day': FIXED_MIN_UNICEF_PER_DAY,
|
| 101 |
|
| 102 |
# Payment configuration - from optimization_config.py
|
| 103 |
'payment_mode_shift_1': PAYMENT_MODE_CONFIG.get(ShiftType.REGULAR),
|
|
|
|
| 237 |
help="Maximum number of Humanizer employees per day"
|
| 238 |
)
|
| 239 |
|
| 240 |
+
# Fixed minimum UNICEF requirement
|
| 241 |
+
st.subheader("π Fixed Minimum Requirements")
|
| 242 |
+
|
| 243 |
+
st.session_state.fixed_min_unicef_per_day = st.number_input(
|
| 244 |
+
"Fixed Minimum UNICEF per Day",
|
| 245 |
+
min_value=0,
|
| 246 |
+
max_value=20,
|
| 247 |
+
value=st.session_state.fixed_min_unicef_per_day,
|
| 248 |
+
help="Minimum number of UNICEF Fixed term employees required every working day (constraint)"
|
| 249 |
+
)
|
| 250 |
+
|
| 251 |
# Working hours configuration
|
| 252 |
st.subheader("β° Working Hours Configuration")
|
| 253 |
|
|
|
|
| 532 |
'workforce_limits': {
|
| 533 |
'max_unicef_per_day': st.session_state.max_unicef_per_day,
|
| 534 |
'max_humanizer_per_day': st.session_state.max_humanizer_per_day,
|
| 535 |
+
'fixed_min_unicef_per_day': st.session_state.fixed_min_unicef_per_day,
|
| 536 |
},
|
| 537 |
'working_hours': {
|
| 538 |
'max_hour_per_person_per_day': st.session_state.max_hour_per_person_per_day,
|
|
|
|
| 611 |
with col1:
|
| 612 |
st.write(f"**Max UNICEF Staff per Day:** {config['workforce_limits']['max_unicef_per_day']} people")
|
| 613 |
st.write(f"**Max Humanizer Staff per Day:** {config['workforce_limits']['max_humanizer_per_day']} people")
|
| 614 |
+
st.write(f"**Fixed Minimum UNICEF per Day:** {config['workforce_limits']['fixed_min_unicef_per_day']} people")
|
| 615 |
with col2:
|
| 616 |
st.write(f"**Staff Management Mode:** {config['fixed_staff_mode'].replace('_', ' ').title()}")
|
| 617 |
st.write(f"**Max Hours per Person per Day:** {config['working_hours']['max_hour_per_person_per_day']} hours")
|
optimization_results.py
CHANGED
|
@@ -8,6 +8,37 @@ import pandas as pd
|
|
| 8 |
import plotly.express as px
|
| 9 |
import plotly.graph_objects as go
|
| 10 |
import sys
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 11 |
|
| 12 |
def display_optimization_results(results):
|
| 13 |
"""Display comprehensive optimization results with visualizations"""
|
|
@@ -64,6 +95,8 @@ def display_weekly_summary(results):
|
|
| 64 |
# Calculate cost per unit
|
| 65 |
cost_per_unit = total_cost / total_production if total_production > 0 else 0
|
| 66 |
st.metric("Cost per Unit", f"β¬{cost_per_unit:.2f}")
|
|
|
|
|
|
|
| 67 |
|
| 68 |
# Production vs Demand Chart
|
| 69 |
st.subheader("π― Production vs Demand")
|
|
@@ -99,33 +132,131 @@ def display_weekly_summary(results):
|
|
| 99 |
st.plotly_chart(fig, use_container_width=True)
|
| 100 |
|
| 101 |
def display_daily_deep_dive(results):
|
| 102 |
-
"""Display daily breakdown with
|
| 103 |
-
st.subheader("π
Daily
|
| 104 |
|
| 105 |
-
#
|
| 106 |
-
|
| 107 |
-
for row in results['person_hours_by_day']:
|
| 108 |
-
workforce_data.append({
|
| 109 |
-
'Day': f"Day {row['day']}",
|
| 110 |
-
'Employee Type': row['emp_type'],
|
| 111 |
-
'Used Hours': row['used_person_hours'],
|
| 112 |
-
'Available Hours': row['cap_person_hours'],
|
| 113 |
-
'Utilization %': (row['used_person_hours'] / row['cap_person_hours'] * 100) if row['cap_person_hours'] > 0 else 0
|
| 114 |
-
})
|
| 115 |
|
| 116 |
-
|
| 117 |
-
|
| 118 |
-
|
| 119 |
-
#
|
| 120 |
-
|
| 121 |
-
|
| 122 |
-
|
| 123 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 124 |
st.plotly_chart(fig, use_container_width=True)
|
| 125 |
|
| 126 |
-
# Detailed table
|
| 127 |
-
st.subheader("π
|
| 128 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 129 |
|
| 130 |
def display_line_schedules(results):
|
| 131 |
"""Display line schedules showing what runs when and with how many workers"""
|
|
@@ -134,7 +265,7 @@ def display_line_schedules(results):
|
|
| 134 |
# Process schedule data
|
| 135 |
schedule_data = []
|
| 136 |
sys.path.append('src')
|
| 137 |
-
from config.optimization_config import TEAM_REQ_PER_PRODUCT, shift_code_to_name, line_code_to_name
|
| 138 |
|
| 139 |
# Get the mapping dictionaries
|
| 140 |
shift_names = shift_code_to_name()
|
|
@@ -146,6 +277,9 @@ def display_line_schedules(results):
|
|
| 146 |
humanizer_workers = TEAM_REQ_PER_PRODUCT.get('Humanizer', {}).get(row['product'], 0)
|
| 147 |
total_workers = unicef_workers + humanizer_workers
|
| 148 |
|
|
|
|
|
|
|
|
|
|
| 149 |
# Convert codes to readable names
|
| 150 |
line_name = line_names.get(row['line_type_id'], f"Line {row['line_type_id']}")
|
| 151 |
shift_name = shift_names.get(row['shift'], f"Shift {row['shift']}")
|
|
@@ -155,6 +289,7 @@ def display_line_schedules(results):
|
|
| 155 |
'Line': f"{line_name} {row['line_idx']}",
|
| 156 |
'Shift': shift_name,
|
| 157 |
'Product': row['product'],
|
|
|
|
| 158 |
'Hours': round(row['run_hours'], 2),
|
| 159 |
'Units': round(row['units'], 0),
|
| 160 |
'UNICEF Workers': unicef_workers,
|
|
@@ -165,20 +300,201 @@ def display_line_schedules(results):
|
|
| 165 |
df_schedule = pd.DataFrame(schedule_data)
|
| 166 |
|
| 167 |
if not df_schedule.empty:
|
| 168 |
-
# Timeline view
|
| 169 |
-
st.subheader("β° Production
|
| 170 |
|
| 171 |
-
#
|
| 172 |
-
|
| 173 |
-
|
| 174 |
-
|
| 175 |
-
|
| 176 |
-
|
| 177 |
-
|
|
|
|
| 178 |
|
| 179 |
-
#
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 180 |
st.subheader("π Detailed Production Schedule")
|
| 181 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 182 |
|
| 183 |
def display_kit_production(results):
|
| 184 |
"""Display kit production details"""
|
|
@@ -278,6 +594,43 @@ def display_cost_analysis(results):
|
|
| 278 |
'Cost': round(cost, 2)
|
| 279 |
})
|
| 280 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 281 |
# Total cost metrics
|
| 282 |
total_cost = results['objective']
|
| 283 |
col1, col2, col3, col4 = st.columns(4)
|
|
@@ -307,5 +660,25 @@ def display_cost_analysis(results):
|
|
| 307 |
# Detailed cost table
|
| 308 |
if cost_data:
|
| 309 |
df_costs = pd.DataFrame(cost_data)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 310 |
st.subheader("π Detailed Cost Breakdown")
|
| 311 |
-
st.dataframe(
|
|
|
|
| 8 |
import plotly.express as px
|
| 9 |
import plotly.graph_objects as go
|
| 10 |
import sys
|
| 11 |
+
import json
|
| 12 |
+
|
| 13 |
+
# Load hierarchy data for enhanced visualization
|
| 14 |
+
def load_kit_hierarchy():
|
| 15 |
+
"""Load kit hierarchy data from JSON file"""
|
| 16 |
+
try:
|
| 17 |
+
with open('data/hierarchy_exports/kit_hierarchy.json', 'r') as f:
|
| 18 |
+
return json.load(f)
|
| 19 |
+
except (FileNotFoundError, json.JSONDecodeError):
|
| 20 |
+
return {}
|
| 21 |
+
|
| 22 |
+
def get_kit_hierarchy_info(product):
|
| 23 |
+
"""Get hierarchy level and dependencies for a product using main optimization system"""
|
| 24 |
+
try:
|
| 25 |
+
# Import from the main optimization system
|
| 26 |
+
from src.config.optimization_config import KIT_LEVELS, KIT_DEPENDENCIES
|
| 27 |
+
from src.config.constants import KitLevel
|
| 28 |
+
|
| 29 |
+
# Use the same hierarchy system as the optimizer
|
| 30 |
+
if product in KIT_LEVELS:
|
| 31 |
+
level = KIT_LEVELS[product]
|
| 32 |
+
level_name = KitLevel.get_name(level)
|
| 33 |
+
dependencies = KIT_DEPENDENCIES.get(product, [])
|
| 34 |
+
return level_name, dependencies
|
| 35 |
+
else:
|
| 36 |
+
return 'unknown', []
|
| 37 |
+
|
| 38 |
+
except Exception as e:
|
| 39 |
+
print(f"Error getting hierarchy info for {product}: {e}")
|
| 40 |
+
return 'unknown', []
|
| 41 |
+
|
| 42 |
|
| 43 |
def display_optimization_results(results):
|
| 44 |
"""Display comprehensive optimization results with visualizations"""
|
|
|
|
| 95 |
# Calculate cost per unit
|
| 96 |
cost_per_unit = total_cost / total_production if total_production > 0 else 0
|
| 97 |
st.metric("Cost per Unit", f"β¬{cost_per_unit:.2f}")
|
| 98 |
+
|
| 99 |
+
# Remove col5 - no idle employees metrics needed
|
| 100 |
|
| 101 |
# Production vs Demand Chart
|
| 102 |
st.subheader("π― Production vs Demand")
|
|
|
|
| 132 |
st.plotly_chart(fig, use_container_width=True)
|
| 133 |
|
| 134 |
def display_daily_deep_dive(results):
|
| 135 |
+
"""Display daily breakdown with employee counts by type and shift"""
|
| 136 |
+
st.subheader("π
Daily Employee Count by Type and Shift")
|
| 137 |
|
| 138 |
+
# Transform schedule data to show employee counts by shift
|
| 139 |
+
employee_counts = []
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 140 |
|
| 141 |
+
# Process the production schedule to extract employee usage by shift
|
| 142 |
+
# Only count employees when there's ACTUAL production work
|
| 143 |
+
for row in results['run_schedule']:
|
| 144 |
+
# Skip rows with no actual production activity
|
| 145 |
+
if row['run_hours'] <= 0 and row['units'] <= 0:
|
| 146 |
+
continue
|
| 147 |
+
|
| 148 |
+
day = f"Day {row['day']}"
|
| 149 |
+
shift_name = {1: 'Regular', 2: 'Evening', 3: 'Overtime'}.get(row['shift'], f"Shift {row['shift']}")
|
| 150 |
+
|
| 151 |
+
# Get team requirements for this production run
|
| 152 |
+
from src.config.optimization_config import TEAM_REQ_PER_PRODUCT
|
| 153 |
+
|
| 154 |
+
for emp_type in ['UNICEF Fixed term', 'Humanizer']:
|
| 155 |
+
if row['product'] in TEAM_REQ_PER_PRODUCT.get(emp_type, {}):
|
| 156 |
+
employee_count = TEAM_REQ_PER_PRODUCT[emp_type][row['product']]
|
| 157 |
+
|
| 158 |
+
# Only add if there are employees needed AND actual production occurs
|
| 159 |
+
if employee_count > 0 and (row['run_hours'] > 0 or row['units'] > 0):
|
| 160 |
+
employee_counts.append({
|
| 161 |
+
'Day': day,
|
| 162 |
+
'Employee Type': emp_type,
|
| 163 |
+
'Shift': shift_name,
|
| 164 |
+
'Product': row['product'],
|
| 165 |
+
'Employee Count': employee_count,
|
| 166 |
+
'Hours Worked': row['run_hours'],
|
| 167 |
+
'Total Person-Hours': employee_count * row['run_hours']
|
| 168 |
+
})
|
| 169 |
+
|
| 170 |
+
if employee_counts:
|
| 171 |
+
df_employees = pd.DataFrame(employee_counts)
|
| 172 |
+
|
| 173 |
+
# Aggregate by day, employee type, and shift
|
| 174 |
+
df_summary = df_employees.groupby(['Day', 'Employee Type', 'Shift']).agg({
|
| 175 |
+
'Employee Count': 'sum',
|
| 176 |
+
'Total Person-Hours': 'sum'
|
| 177 |
+
}).reset_index()
|
| 178 |
+
|
| 179 |
+
# Create stacked bar chart showing employee counts by shift
|
| 180 |
+
fig = px.bar(df_summary,
|
| 181 |
+
x='Day',
|
| 182 |
+
y='Employee Count',
|
| 183 |
+
color='Shift',
|
| 184 |
+
facet_col='Employee Type',
|
| 185 |
+
title='Daily Employee Count by Type and Shift',
|
| 186 |
+
color_discrete_map={
|
| 187 |
+
'Regular': '#32CD32', # Green
|
| 188 |
+
'Overtime': '#FF8C00', # Orange
|
| 189 |
+
'Evening': '#4169E1' # Blue
|
| 190 |
+
},
|
| 191 |
+
height=500)
|
| 192 |
+
|
| 193 |
+
fig.update_layout(
|
| 194 |
+
yaxis_title='Number of Employees',
|
| 195 |
+
showlegend=True
|
| 196 |
+
)
|
| 197 |
st.plotly_chart(fig, use_container_width=True)
|
| 198 |
|
| 199 |
+
# Detailed breakdown table
|
| 200 |
+
st.subheader("π Employee Allocation Details")
|
| 201 |
+
|
| 202 |
+
# Show summary by day and shift with capacity context
|
| 203 |
+
st.markdown("**Summary by Day and Shift:**")
|
| 204 |
+
summary_pivot = df_summary.pivot_table(
|
| 205 |
+
values='Employee Count',
|
| 206 |
+
index=['Day', 'Shift'],
|
| 207 |
+
columns='Employee Type',
|
| 208 |
+
aggfunc='sum',
|
| 209 |
+
fill_value=0
|
| 210 |
+
).reset_index()
|
| 211 |
+
|
| 212 |
+
# Add capacity information
|
| 213 |
+
try:
|
| 214 |
+
from src.config.optimization_config import MAX_EMPLOYEE_PER_TYPE_ON_DAY
|
| 215 |
+
|
| 216 |
+
# Add capacity columns
|
| 217 |
+
for emp_type in ['UNICEF Fixed term', 'Humanizer']:
|
| 218 |
+
if emp_type in summary_pivot.columns:
|
| 219 |
+
capacity_col = f'{emp_type} Capacity'
|
| 220 |
+
utilization_col = f'{emp_type} Utilization %'
|
| 221 |
+
|
| 222 |
+
# Extract day number from 'Day X' format
|
| 223 |
+
summary_pivot['Day_Num'] = summary_pivot['Day'].str.extract(r'(\d+)').astype(int)
|
| 224 |
+
|
| 225 |
+
# Get capacity for each day
|
| 226 |
+
summary_pivot[capacity_col] = summary_pivot['Day_Num'].apply(
|
| 227 |
+
lambda day: MAX_EMPLOYEE_PER_TYPE_ON_DAY.get(emp_type, {}).get(day, 0)
|
| 228 |
+
)
|
| 229 |
+
|
| 230 |
+
# Calculate utilization percentage
|
| 231 |
+
summary_pivot[utilization_col] = (
|
| 232 |
+
summary_pivot[emp_type] / summary_pivot[capacity_col] * 100
|
| 233 |
+
).round(1)
|
| 234 |
+
|
| 235 |
+
# Replace inf and NaN with 0
|
| 236 |
+
summary_pivot[utilization_col] = summary_pivot[utilization_col].fillna(0)
|
| 237 |
+
summary_pivot.loc[summary_pivot[capacity_col] == 0, utilization_col] = 0
|
| 238 |
+
|
| 239 |
+
# Drop temporary column
|
| 240 |
+
summary_pivot = summary_pivot.drop('Day_Num', axis=1)
|
| 241 |
+
|
| 242 |
+
except Exception as e:
|
| 243 |
+
print(f"Could not add capacity information: {e}")
|
| 244 |
+
|
| 245 |
+
st.dataframe(summary_pivot, use_container_width=True)
|
| 246 |
+
|
| 247 |
+
# Show detailed breakdown
|
| 248 |
+
st.markdown("**Detailed Production Assignments:**")
|
| 249 |
+
df_detailed = df_employees[['Day', 'Employee Type', 'Shift', 'Product', 'Employee Count', 'Hours Worked']].copy()
|
| 250 |
+
df_detailed = df_detailed.sort_values(['Day', 'Shift', 'Employee Type'])
|
| 251 |
+
st.dataframe(df_detailed, use_container_width=True)
|
| 252 |
+
|
| 253 |
+
else:
|
| 254 |
+
st.info("π No employees scheduled - All production runs have zero hours and zero units")
|
| 255 |
+
|
| 256 |
+
# Show debug info about filtered rows
|
| 257 |
+
total_schedule_rows = len(results.get('run_schedule', []))
|
| 258 |
+
if total_schedule_rows > 0:
|
| 259 |
+
st.markdown(f"*Note: {total_schedule_rows} schedule entries exist but all have zero production activity*")
|
| 260 |
|
| 261 |
def display_line_schedules(results):
|
| 262 |
"""Display line schedules showing what runs when and with how many workers"""
|
|
|
|
| 265 |
# Process schedule data
|
| 266 |
schedule_data = []
|
| 267 |
sys.path.append('src')
|
| 268 |
+
from config.optimization_config import TEAM_REQ_PER_PRODUCT, shift_code_to_name, line_code_to_name, DEMAND_DICTIONARY
|
| 269 |
|
| 270 |
# Get the mapping dictionaries
|
| 271 |
shift_names = shift_code_to_name()
|
|
|
|
| 277 |
humanizer_workers = TEAM_REQ_PER_PRODUCT.get('Humanizer', {}).get(row['product'], 0)
|
| 278 |
total_workers = unicef_workers + humanizer_workers
|
| 279 |
|
| 280 |
+
# Get demand for this product
|
| 281 |
+
kit_total_demand = DEMAND_DICTIONARY.get(row['product'], 0)
|
| 282 |
+
|
| 283 |
# Convert codes to readable names
|
| 284 |
line_name = line_names.get(row['line_type_id'], f"Line {row['line_type_id']}")
|
| 285 |
shift_name = shift_names.get(row['shift'], f"Shift {row['shift']}")
|
|
|
|
| 289 |
'Line': f"{line_name} {row['line_idx']}",
|
| 290 |
'Shift': shift_name,
|
| 291 |
'Product': row['product'],
|
| 292 |
+
'Kit Total Demand': kit_total_demand,
|
| 293 |
'Hours': round(row['run_hours'], 2),
|
| 294 |
'Units': round(row['units'], 0),
|
| 295 |
'UNICEF Workers': unicef_workers,
|
|
|
|
| 300 |
df_schedule = pd.DataFrame(schedule_data)
|
| 301 |
|
| 302 |
if not df_schedule.empty:
|
| 303 |
+
# Timeline view with hierarchy levels
|
| 304 |
+
st.subheader("β° Production Line by Line and Day")
|
| 305 |
|
| 306 |
+
# Add hierarchy information to the schedule data
|
| 307 |
+
for row in schedule_data:
|
| 308 |
+
hierarchy_level, dependencies = get_kit_hierarchy_info(row['Product'])
|
| 309 |
+
row['Hierarchy_Level'] = hierarchy_level
|
| 310 |
+
row['Dependencies'] = dependencies
|
| 311 |
+
|
| 312 |
+
# Recreate dataframe with hierarchy info
|
| 313 |
+
df_schedule = pd.DataFrame(schedule_data)
|
| 314 |
|
| 315 |
+
# Create enhanced timeline chart with hierarchy colors
|
| 316 |
+
fig = create_enhanced_timeline_with_relationships(df_schedule)
|
| 317 |
+
|
| 318 |
+
if fig:
|
| 319 |
+
st.plotly_chart(fig, use_container_width=True)
|
| 320 |
+
else:
|
| 321 |
+
st.warning("Could not create enhanced timeline chart")
|
| 322 |
+
|
| 323 |
+
# Add hierarchy legend (updated to match fixed system)
|
| 324 |
+
st.markdown("""
|
| 325 |
+
**π¨ Hierarchy Level Colors:**
|
| 326 |
+
- π’ **Prepack**: Level 0 - Dependencies produced first (Lime Green)
|
| 327 |
+
- π΅ **Subkit**: Level 1 - Intermediate assemblies (Royal Blue)
|
| 328 |
+
- π **Master**: Level 2 - Final products (Dark Orange)
|
| 329 |
+
""")
|
| 330 |
+
|
| 331 |
+
# Detailed schedule table (filtered to show only meaningful rows)
|
| 332 |
st.subheader("π Detailed Production Schedule")
|
| 333 |
+
|
| 334 |
+
# Filter out rows with zero hours AND zero units (not useful)
|
| 335 |
+
df_schedule_filtered = df_schedule[
|
| 336 |
+
(df_schedule['Hours'] > 0) | (df_schedule['Units'] > 0)
|
| 337 |
+
].copy()
|
| 338 |
+
|
| 339 |
+
if df_schedule_filtered.empty:
|
| 340 |
+
st.warning("No production activity scheduled (all hours and units are zero)")
|
| 341 |
+
else:
|
| 342 |
+
# Show count of filtered vs total rows
|
| 343 |
+
filtered_count = len(df_schedule_filtered)
|
| 344 |
+
total_count = len(df_schedule)
|
| 345 |
+
if filtered_count < total_count:
|
| 346 |
+
st.info(f"Showing {filtered_count} active production entries (filtered out {total_count - filtered_count} zero-activity rows)")
|
| 347 |
+
|
| 348 |
+
st.dataframe(df_schedule_filtered, use_container_width=True)
|
| 349 |
+
|
| 350 |
+
def create_enhanced_timeline_with_relationships(df_schedule):
|
| 351 |
+
"""Create enhanced timeline chart with hierarchy colors and relationship lines"""
|
| 352 |
+
if df_schedule.empty:
|
| 353 |
+
return None
|
| 354 |
+
|
| 355 |
+
# Define hierarchy colors (using proper hierarchy levels with visible colors)
|
| 356 |
+
hierarchy_colors = {
|
| 357 |
+
'prepack': '#32CD32', # Lime Green - Level 0 (dependencies)
|
| 358 |
+
'subkit': '#4169E1', # Royal Blue - Level 1 (intermediate)
|
| 359 |
+
'master': '#FF8C00', # Dark Orange - Level 2 (final products)
|
| 360 |
+
'unknown': '#8B0000' # Dark Red - fallback (should not appear now)
|
| 361 |
+
}
|
| 362 |
+
|
| 363 |
+
# Create the base chart using hierarchy levels for colors
|
| 364 |
+
fig = px.bar(df_schedule,
|
| 365 |
+
x='Hours',
|
| 366 |
+
y='Line',
|
| 367 |
+
color='Hierarchy_Level',
|
| 368 |
+
facet_col='Day',
|
| 369 |
+
orientation='h',
|
| 370 |
+
title='Production Schedule by Line and Day (Colored by Hierarchy Level)',
|
| 371 |
+
height=500,
|
| 372 |
+
color_discrete_map=hierarchy_colors,
|
| 373 |
+
hover_data=['Product', 'Units', 'Total Workers'])
|
| 374 |
+
|
| 375 |
+
# Improve visibility with stronger borders and opacity
|
| 376 |
+
fig.update_traces(
|
| 377 |
+
marker_line_color='black', # Add black borders
|
| 378 |
+
marker_line_width=1.5, # Make borders visible
|
| 379 |
+
opacity=0.8 # Slightly transparent but not too much
|
| 380 |
+
)
|
| 381 |
+
|
| 382 |
+
# Improve layout with better text visibility
|
| 383 |
+
fig.update_layout(
|
| 384 |
+
showlegend=True,
|
| 385 |
+
plot_bgcolor='white', # White background
|
| 386 |
+
paper_bgcolor='white',
|
| 387 |
+
font=dict(size=12, color='#000000', family='Arial, sans-serif'), # Black text, clear font
|
| 388 |
+
title_font=dict(color='#000000', size=14, family='Arial Bold'), # Bold black title
|
| 389 |
+
legend_title_text='Hierarchy Level',
|
| 390 |
+
legend=dict(
|
| 391 |
+
font=dict(size=11, color='#000000'), # Black legend text
|
| 392 |
+
bgcolor='rgba(255,255,255,0.8)', # Semi-transparent white background
|
| 393 |
+
bordercolor='#000000', # Black border around legend
|
| 394 |
+
borderwidth=1
|
| 395 |
+
)
|
| 396 |
+
)
|
| 397 |
+
|
| 398 |
+
# Improve axes with dark, bold text
|
| 399 |
+
fig.update_xaxes(
|
| 400 |
+
showgrid=True,
|
| 401 |
+
gridwidth=0.5,
|
| 402 |
+
gridcolor='lightgray',
|
| 403 |
+
title_font=dict(size=12, color='#000000'),
|
| 404 |
+
tickfont=dict(color='#000000', size=10)
|
| 405 |
+
)
|
| 406 |
+
fig.update_yaxes(
|
| 407 |
+
showgrid=True,
|
| 408 |
+
gridwidth=0.5,
|
| 409 |
+
gridcolor='lightgray',
|
| 410 |
+
title_font=dict(size=12, color='#000000'),
|
| 411 |
+
tickfont=dict(color='#000000', size=10)
|
| 412 |
+
)
|
| 413 |
+
|
| 414 |
+
# Add dependency arrows/lines between related kits
|
| 415 |
+
try:
|
| 416 |
+
fig = add_dependency_connections(fig, df_schedule)
|
| 417 |
+
except Exception as e:
|
| 418 |
+
print(f"Could not add dependency connections: {e}")
|
| 419 |
+
|
| 420 |
+
return fig
|
| 421 |
+
|
| 422 |
+
def add_dependency_connections(fig, df_schedule):
|
| 423 |
+
"""Add arrows or lines showing dependencies between kits"""
|
| 424 |
+
# Create a mapping of product to its position in the chart
|
| 425 |
+
product_positions = {}
|
| 426 |
+
|
| 427 |
+
for _, row in df_schedule.iterrows():
|
| 428 |
+
product = row['Product']
|
| 429 |
+
day = row['Day']
|
| 430 |
+
line = row['Line']
|
| 431 |
+
|
| 432 |
+
# Store position info
|
| 433 |
+
product_positions[product] = {
|
| 434 |
+
'day': day,
|
| 435 |
+
'line': line,
|
| 436 |
+
'dependencies': row.get('Dependencies', [])
|
| 437 |
+
}
|
| 438 |
+
|
| 439 |
+
# Count relationships for display
|
| 440 |
+
relationship_count = 0
|
| 441 |
+
dependency_details = []
|
| 442 |
+
|
| 443 |
+
for product, pos_info in product_positions.items():
|
| 444 |
+
dependencies = pos_info['dependencies']
|
| 445 |
+
|
| 446 |
+
for dep in dependencies:
|
| 447 |
+
if dep in product_positions:
|
| 448 |
+
# Both product and dependency are in production
|
| 449 |
+
dep_pos = product_positions[dep]
|
| 450 |
+
relationship_count += 1
|
| 451 |
+
|
| 452 |
+
dependency_details.append({
|
| 453 |
+
'product': product,
|
| 454 |
+
'dependency': dep,
|
| 455 |
+
'product_day': pos_info['day'],
|
| 456 |
+
'dependency_day': dep_pos['day'],
|
| 457 |
+
'timing': 'correct' if dep_pos['day'] <= pos_info['day'] else 'violation'
|
| 458 |
+
})
|
| 459 |
+
|
| 460 |
+
# Add annotations about relationships
|
| 461 |
+
if relationship_count > 0:
|
| 462 |
+
violations = len([d for d in dependency_details if d['timing'] == 'violation'])
|
| 463 |
+
|
| 464 |
+
fig.add_annotation(
|
| 465 |
+
text=f"π {relationship_count} dependencies | {'β οΈ ' + str(violations) + ' violations' if violations > 0 else 'β
All correct'}",
|
| 466 |
+
xref="paper", yref="paper",
|
| 467 |
+
x=0.02, y=0.98,
|
| 468 |
+
showarrow=False,
|
| 469 |
+
font=dict(size=10, color="purple"),
|
| 470 |
+
bgcolor="rgba(255,255,255,0.8)",
|
| 471 |
+
bordercolor="purple",
|
| 472 |
+
borderwidth=1
|
| 473 |
+
)
|
| 474 |
+
|
| 475 |
+
# Add dependency info box
|
| 476 |
+
if dependency_details:
|
| 477 |
+
dependency_text = "\\n".join([
|
| 478 |
+
f"β’ {d['dependency']} β {d['product']} ({'β
' if d['timing'] == 'correct' else 'β οΈ'})"
|
| 479 |
+
for d in dependency_details[:5] # Show first 5
|
| 480 |
+
])
|
| 481 |
+
|
| 482 |
+
if len(dependency_details) > 5:
|
| 483 |
+
dependency_text += f"\\n... and {len(dependency_details) - 5} more"
|
| 484 |
+
|
| 485 |
+
fig.add_annotation(
|
| 486 |
+
text=dependency_text,
|
| 487 |
+
xref="paper", yref="paper",
|
| 488 |
+
x=0.02, y=0.02,
|
| 489 |
+
showarrow=False,
|
| 490 |
+
font=dict(size=8, color="navy"),
|
| 491 |
+
bgcolor="rgba(240,248,255,0.9)",
|
| 492 |
+
bordercolor="navy",
|
| 493 |
+
borderwidth=1,
|
| 494 |
+
align="left"
|
| 495 |
+
)
|
| 496 |
+
|
| 497 |
+
return fig
|
| 498 |
|
| 499 |
def display_kit_production(results):
|
| 500 |
"""Display kit production details"""
|
|
|
|
| 594 |
'Cost': round(cost, 2)
|
| 595 |
})
|
| 596 |
|
| 597 |
+
# Add idle employee costs to the breakdown
|
| 598 |
+
if 'idle_employees' in results:
|
| 599 |
+
from config.optimization_config import COST_LIST_PER_EMP_SHIFT
|
| 600 |
+
|
| 601 |
+
for idle in results['idle_employees']:
|
| 602 |
+
if idle['idle_count'] > 0:
|
| 603 |
+
emp_type = idle['emp_type']
|
| 604 |
+
shift = idle['shift']
|
| 605 |
+
day = idle['day']
|
| 606 |
+
idle_count = idle['idle_count']
|
| 607 |
+
|
| 608 |
+
# Get hourly rate and shift name
|
| 609 |
+
hourly_rate = COST_LIST_PER_EMP_SHIFT.get(emp_type, {}).get(shift, 0)
|
| 610 |
+
shift_name = shift_names.get(shift, f"Shift {shift}")
|
| 611 |
+
|
| 612 |
+
# Idle employees work 0 hours but get paid for full shift
|
| 613 |
+
actual_hours = 0
|
| 614 |
+
paid_hours = 7.5 # Assuming standard shift length
|
| 615 |
+
idle_cost = idle_count * paid_hours * hourly_rate
|
| 616 |
+
|
| 617 |
+
if emp_type not in total_cost_by_type:
|
| 618 |
+
total_cost_by_type[emp_type] = 0
|
| 619 |
+
total_cost_by_type[emp_type] += idle_cost
|
| 620 |
+
|
| 621 |
+
cost_data.append({
|
| 622 |
+
'Employee Type': emp_type,
|
| 623 |
+
'Day': f"Day {day}",
|
| 624 |
+
'Shift': f"{shift_name} (Idle)",
|
| 625 |
+
'Line': '-', # No line assignment for idle
|
| 626 |
+
'Product': '-', # No product for idle
|
| 627 |
+
'Actual Hours': actual_hours,
|
| 628 |
+
'Paid Hours': round(paid_hours, 2),
|
| 629 |
+
'Workers': int(idle_count),
|
| 630 |
+
'Hourly Rate': f"β¬{hourly_rate:.2f}",
|
| 631 |
+
'Cost': round(idle_cost, 2)
|
| 632 |
+
})
|
| 633 |
+
|
| 634 |
# Total cost metrics
|
| 635 |
total_cost = results['objective']
|
| 636 |
col1, col2, col3, col4 = st.columns(4)
|
|
|
|
| 660 |
# Detailed cost table
|
| 661 |
if cost_data:
|
| 662 |
df_costs = pd.DataFrame(cost_data)
|
| 663 |
+
|
| 664 |
+
# Add total row
|
| 665 |
+
total_cost = df_costs['Cost'].sum()
|
| 666 |
+
total_paid_hours = df_costs['Paid Hours'].sum() if 'Paid Hours' in df_costs.columns else df_costs['Actual Hours'].sum()
|
| 667 |
+
total_row = pd.DataFrame([{
|
| 668 |
+
'Employee Type': '**TOTAL**',
|
| 669 |
+
'Day': '-',
|
| 670 |
+
'Shift': '-',
|
| 671 |
+
'Line': '-',
|
| 672 |
+
'Product': '-',
|
| 673 |
+
'Actual Hours': df_costs['Actual Hours'].sum(),
|
| 674 |
+
'Paid Hours': total_paid_hours,
|
| 675 |
+
'Workers': df_costs['Workers'].sum(),
|
| 676 |
+
'Hourly Rate': '-',
|
| 677 |
+
'Cost': total_cost
|
| 678 |
+
}])
|
| 679 |
+
|
| 680 |
+
# Combine original data with total row
|
| 681 |
+
df_costs_with_total = pd.concat([df_costs, total_row], ignore_index=True)
|
| 682 |
+
|
| 683 |
st.subheader("π Detailed Cost Breakdown")
|
| 684 |
+
st.dataframe(df_costs_with_total, use_container_width=True)
|
src/config/constants.py
CHANGED
|
@@ -96,15 +96,8 @@ class KitLevel:
|
|
| 96 |
cls.MASTER: "master"
|
| 97 |
}
|
| 98 |
|
| 99 |
-
|
| 100 |
-
|
| 101 |
-
"""Get timing weight for hierarchy optimization"""
|
| 102 |
-
weights = {
|
| 103 |
-
cls.PREPACK: 0.1,
|
| 104 |
-
cls.SUBKIT: 0.5,
|
| 105 |
-
cls.MASTER: 1.0
|
| 106 |
-
}
|
| 107 |
-
return weights.get(level_id, 1.0)
|
| 108 |
|
| 109 |
class PaymentMode:
|
| 110 |
"""
|
|
|
|
| 96 |
cls.MASTER: "master"
|
| 97 |
}
|
| 98 |
|
| 99 |
+
# Removed get_timing_weight method - no longer needed
|
| 100 |
+
# Dependency ordering is now handled by topological sorting
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 101 |
|
| 102 |
class PaymentMode:
|
| 103 |
"""
|
src/config/optimization_config.py
CHANGED
|
@@ -382,6 +382,25 @@ DAILY_WEEKLY_SCHEDULE = "daily" # daily or weekly ,this needs to be implemented
|
|
| 382 |
# "none" - Purely demand-driven scheduling (cost-efficient)
|
| 383 |
FIXED_STAFF_CONSTRAINT_MODE = "priority" # Recommended: "priority" for realistic business model
|
| 384 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 385 |
|
| 386 |
def get_payment_mode_config():
|
| 387 |
"""
|
|
|
|
| 382 |
# "none" - Purely demand-driven scheduling (cost-efficient)
|
| 383 |
FIXED_STAFF_CONSTRAINT_MODE = "priority" # Recommended: "priority" for realistic business model
|
| 384 |
|
| 385 |
+
def get_fixed_min_unicef_per_day():
|
| 386 |
+
"""
|
| 387 |
+
Get fixed minimum UNICEF employees per day - try from streamlit session state first, then default
|
| 388 |
+
This ensures a minimum number of UNICEF fixed-term staff are present every working day
|
| 389 |
+
"""
|
| 390 |
+
try:
|
| 391 |
+
import streamlit as st
|
| 392 |
+
if hasattr(st, 'session_state') and 'fixed_min_unicef_per_day' in st.session_state:
|
| 393 |
+
print(f"Using fixed minimum UNICEF per day from config page: {st.session_state.fixed_min_unicef_per_day}")
|
| 394 |
+
return st.session_state.fixed_min_unicef_per_day
|
| 395 |
+
except ImportError:
|
| 396 |
+
pass # Streamlit not available in CLI mode
|
| 397 |
+
|
| 398 |
+
# Default value - minimum UNICEF Fixed term employees required per day
|
| 399 |
+
return 2
|
| 400 |
+
|
| 401 |
+
# Set the constant for backward compatibility
|
| 402 |
+
FIXED_MIN_UNICEF_PER_DAY = get_fixed_min_unicef_per_day()
|
| 403 |
+
|
| 404 |
|
| 405 |
def get_payment_mode_config():
|
| 406 |
"""
|
src/models/optimizer_real.py
CHANGED
|
@@ -26,7 +26,7 @@ from src.config.optimization_config import (
|
|
| 26 |
MAX_HOUR_PER_SHIFT_PER_PERSON, # {1: hours, 2: hours, 3: hours}
|
| 27 |
PER_PRODUCT_SPEED, # {6: cap_units_per_hour, 7: cap_units_per_hour}
|
| 28 |
MAX_PARALLEL_WORKERS, # {6: max_workers, 7: max_workers}
|
| 29 |
-
DAILY_WEEKLY_SCHEDULE, # 'daily' or 'weekly'
|
| 30 |
FIXED_STAFF_CONSTRAINT_MODE, # not used in fixed-team model (λμ ν¬μ
μ΄λΌ 무μλ―Έ)
|
| 31 |
TEAM_REQ_PER_PRODUCT, # {emp_type: {product: team_size}} from Kits_Calculation.csv
|
| 32 |
PAYMENT_MODE_CONFIG, # {shift: 'bulk'/'partial'} payment mode configuration
|
|
@@ -37,6 +37,8 @@ from src.config.optimization_config import (
|
|
| 37 |
KIT_LEVELS, # {kit_id: level} where 0=prepack, 1=subkit, 2=master
|
| 38 |
KIT_DEPENDENCIES, # {kit_id: [dependency_list]}
|
| 39 |
PRODUCTION_PRIORITY_ORDER, # [kit_ids] sorted by production priority
|
|
|
|
|
|
|
| 40 |
)
|
| 41 |
|
| 42 |
# -----------------------------------------
|
|
@@ -70,9 +72,12 @@ print("PER_PRODUCT_SPEED",PER_PRODUCT_SPEED)
|
|
| 70 |
|
| 71 |
def sort_products_by_hierarchy(product_list):
|
| 72 |
"""
|
| 73 |
-
Sort products by hierarchy levels and dependencies.
|
| 74 |
Returns products in optimal production order: prepacks β subkits β masters
|
|
|
|
| 75 |
"""
|
|
|
|
|
|
|
| 76 |
# Filter products that are in our production list and have hierarchy data
|
| 77 |
products_with_hierarchy = [p for p in product_list if p in KIT_LEVELS]
|
| 78 |
products_without_hierarchy = [p for p in product_list if p not in KIT_LEVELS]
|
|
@@ -80,32 +85,78 @@ def sort_products_by_hierarchy(product_list):
|
|
| 80 |
if products_without_hierarchy:
|
| 81 |
print(f"[HIERARCHY] Products without hierarchy data: {products_without_hierarchy}")
|
| 82 |
|
| 83 |
-
#
|
| 84 |
-
|
| 85 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 86 |
|
| 87 |
# Add products without hierarchy at the end
|
| 88 |
sorted_products.extend(sorted(products_without_hierarchy))
|
| 89 |
|
| 90 |
-
print(f"[HIERARCHY]
|
| 91 |
for i, p in enumerate(sorted_products[:10]): # Show first 10
|
| 92 |
level = KIT_LEVELS.get(p, "unknown")
|
| 93 |
level_name = KitLevel.get_name(level)
|
| 94 |
deps = KIT_DEPENDENCIES.get(p, [])
|
| 95 |
-
|
|
|
|
|
|
|
|
|
|
| 96 |
|
| 97 |
if len(sorted_products) > 10:
|
| 98 |
print(f" ... and {len(sorted_products) - 10} more products")
|
| 99 |
|
| 100 |
return sorted_products
|
| 101 |
|
| 102 |
-
|
| 103 |
-
|
| 104 |
-
Calculate timing weight based on hierarchy level.
|
| 105 |
-
Lower levels (prepacks) should be produced earlier.
|
| 106 |
-
"""
|
| 107 |
-
level = KIT_LEVELS.get(product, KitLevel.MASTER) # Default to master level
|
| 108 |
-
return KitLevel.get_timing_weight(level)
|
| 109 |
|
| 110 |
def solve_fixed_team_weekly():
|
| 111 |
# --- Sets ---
|
|
@@ -196,6 +247,14 @@ def solve_fixed_team_weekly():
|
|
| 196 |
Z[p, ell, s, t] = solver.BoolVar(f"Z_{p}_{ell[0]}_{ell[1]}_s{s}_d{t}")
|
| 197 |
T[p, ell, s, t] = solver.NumVar(0, Hmax_s[s], f"T_{p}_{ell[0]}_{ell[1]}_s{s}_d{t}")
|
| 198 |
U[p, ell, s, t] = solver.NumVar(0, INF, f"U_{p}_{ell[0]}_{ell[1]}_s{s}_d{t}")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 199 |
|
| 200 |
# Note: Binary variables for bulk payment are now created inline in the cost calculation
|
| 201 |
|
|
@@ -237,25 +296,30 @@ def solve_fixed_team_weekly():
|
|
| 237 |
# Cost: pay the specific working employees for full shift hours
|
| 238 |
cost_terms.append(cost[e][s] * Hmax_s[s] * TEAM_REQ_PER_PRODUCT[e][p] * work_binary)
|
| 239 |
|
| 240 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 241 |
|
| 242 |
-
|
| 243 |
-
# Small weight (0.01) to prioritize hierarchy without overwhelming cost optimization
|
| 244 |
-
hierarchy_penalty = solver.Sum(
|
| 245 |
-
0.01 * get_dependency_timing_weight(p) * t * T[p, ell, s, t]
|
| 246 |
-
for p in P for ell in L for s in S for t in D
|
| 247 |
-
)
|
| 248 |
|
| 249 |
-
|
| 250 |
-
|
|
|
|
| 251 |
|
| 252 |
# --- Constraints ---
|
| 253 |
|
| 254 |
-
# 1) Weekly demand
|
| 255 |
for p in P:
|
| 256 |
-
solver.
|
| 257 |
-
|
| 258 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 259 |
|
| 260 |
# 2) One product per (line,shift,day) + time gating
|
| 261 |
for ell in L:
|
|
@@ -287,21 +351,36 @@ def solve_fixed_team_weekly():
|
|
| 287 |
if p in PER_PRODUCT_SPEED:
|
| 288 |
# Convert kit per day to kit per hour (assuming 7.5 hour workday)
|
| 289 |
speed = PER_PRODUCT_SPEED[p]
|
|
|
|
| 290 |
solver.Add(
|
| 291 |
U[p, ell, s, t] <= speed * T[p, ell, s, t]
|
| 292 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
| 293 |
else:
|
| 294 |
# Default speed if not found
|
| 295 |
default_speed = 800 / 7.5 # units per hour
|
| 296 |
print(f"Warning: No speed data for product {p}, using default {default_speed:.1f} per hour")
|
|
|
|
| 297 |
solver.Add(
|
| 298 |
U[p, ell, s, t] <= default_speed * T[p, ell, s, t]
|
| 299 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
| 300 |
|
| 301 |
-
# 5) Per-shift staffing capacity by type:
|
| 302 |
for e in E:
|
| 303 |
for s in S:
|
| 304 |
for t in D:
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 305 |
solver.Add(
|
| 306 |
solver.Sum(TEAM_REQ_PER_PRODUCT[e][p] * T[p, ell, s, t] for p in P for ell in L)
|
| 307 |
<= Hmax_s[s] * N_day[e][t]
|
|
@@ -326,18 +405,87 @@ def solve_fixed_team_weekly():
|
|
| 326 |
solver.Sum(TEAM_REQ_PER_PRODUCT[e][p] * T[p, ell, ShiftType.REGULAR, t] for p in P for ell in L)
|
| 327 |
)
|
| 328 |
|
| 329 |
-
# Overtime
|
| 330 |
if ShiftType.OVERTIME in S and ShiftType.REGULAR in S: # Only if both shifts are available
|
|
|
|
|
|
|
| 331 |
for e in E:
|
| 332 |
for t in D:
|
| 333 |
-
|
| 334 |
-
|
| 335 |
-
|
| 336 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 337 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 338 |
|
| 339 |
# 7.5) Bulk payment linking constraints are now handled inline in the cost calculation
|
| 340 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 341 |
# 8) *** HIERARCHY DEPENDENCY CONSTRAINTS ***
|
| 342 |
# For subkits with prepack dependencies: dependencies should be produced before or same time
|
| 343 |
print("\n[HIERARCHY] Adding dependency constraints...")
|
|
@@ -425,6 +573,21 @@ def solve_fixed_team_weekly():
|
|
| 425 |
'cap_person_hours': Hmax_daily * N_day[e][t]})
|
| 426 |
result['person_hours_by_day'] = ph_by_day
|
| 427 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 428 |
# Pretty print
|
| 429 |
print("Objective (min cost):", result['objective'])
|
| 430 |
print("\n--- Weekly production by product ---")
|
|
@@ -449,6 +612,20 @@ def solve_fixed_team_weekly():
|
|
| 449 |
print(f"{row['emp_type']}, D{row['day']}: used={row['used_person_hours']:.1f} "
|
| 450 |
f"(cap {row['cap_person_hours']})")
|
| 451 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 452 |
return result
|
| 453 |
|
| 454 |
|
|
|
|
| 26 |
MAX_HOUR_PER_SHIFT_PER_PERSON, # {1: hours, 2: hours, 3: hours}
|
| 27 |
PER_PRODUCT_SPEED, # {6: cap_units_per_hour, 7: cap_units_per_hour}
|
| 28 |
MAX_PARALLEL_WORKERS, # {6: max_workers, 7: max_workers}
|
| 29 |
+
DAILY_WEEKLY_SCHEDULE, # 'daily' or 'weekly'
|
| 30 |
FIXED_STAFF_CONSTRAINT_MODE, # not used in fixed-team model (λμ ν¬μ
μ΄λΌ 무μλ―Έ)
|
| 31 |
TEAM_REQ_PER_PRODUCT, # {emp_type: {product: team_size}} from Kits_Calculation.csv
|
| 32 |
PAYMENT_MODE_CONFIG, # {shift: 'bulk'/'partial'} payment mode configuration
|
|
|
|
| 37 |
KIT_LEVELS, # {kit_id: level} where 0=prepack, 1=subkit, 2=master
|
| 38 |
KIT_DEPENDENCIES, # {kit_id: [dependency_list]}
|
| 39 |
PRODUCTION_PRIORITY_ORDER, # [kit_ids] sorted by production priority
|
| 40 |
+
# Fixed staffing requirements
|
| 41 |
+
FIXED_MIN_UNICEF_PER_DAY, # Minimum UNICEF employees required per day
|
| 42 |
)
|
| 43 |
|
| 44 |
# -----------------------------------------
|
|
|
|
| 72 |
|
| 73 |
def sort_products_by_hierarchy(product_list):
|
| 74 |
"""
|
| 75 |
+
Sort products by hierarchy levels and dependencies using topological sorting.
|
| 76 |
Returns products in optimal production order: prepacks β subkits β masters
|
| 77 |
+
Dependencies within the same level are properly ordered.
|
| 78 |
"""
|
| 79 |
+
from collections import defaultdict, deque
|
| 80 |
+
|
| 81 |
# Filter products that are in our production list and have hierarchy data
|
| 82 |
products_with_hierarchy = [p for p in product_list if p in KIT_LEVELS]
|
| 83 |
products_without_hierarchy = [p for p in product_list if p not in KIT_LEVELS]
|
|
|
|
| 85 |
if products_without_hierarchy:
|
| 86 |
print(f"[HIERARCHY] Products without hierarchy data: {products_without_hierarchy}")
|
| 87 |
|
| 88 |
+
# Build dependency graph for products in our list
|
| 89 |
+
graph = defaultdict(list) # product -> [dependents]
|
| 90 |
+
in_degree = defaultdict(int) # product -> number of dependencies
|
| 91 |
+
|
| 92 |
+
# Initialize all products
|
| 93 |
+
for product in products_with_hierarchy:
|
| 94 |
+
in_degree[product] = 0
|
| 95 |
+
|
| 96 |
+
# Build edges based on actual dependencies
|
| 97 |
+
dependency_count = 0
|
| 98 |
+
for product in products_with_hierarchy:
|
| 99 |
+
deps = KIT_DEPENDENCIES.get(product, [])
|
| 100 |
+
for dep in deps:
|
| 101 |
+
if dep in products_with_hierarchy: # Only if dependency is in our production list
|
| 102 |
+
graph[dep].append(product) # dep -> product
|
| 103 |
+
in_degree[product] += 1
|
| 104 |
+
dependency_count += 1
|
| 105 |
+
|
| 106 |
+
print(f"[HIERARCHY] Found {dependency_count} dependency relationships in production list")
|
| 107 |
+
|
| 108 |
+
# Topological sort with hierarchy level priority
|
| 109 |
+
sorted_products = []
|
| 110 |
+
queue = deque()
|
| 111 |
+
|
| 112 |
+
# Start with products that have no dependencies, prioritized by hierarchy level
|
| 113 |
+
no_deps = [(KIT_LEVELS.get(p, 999), p) for p in products_with_hierarchy if in_degree[p] == 0]
|
| 114 |
+
no_deps.sort() # Sort by (level, product_id)
|
| 115 |
+
|
| 116 |
+
for _, product in no_deps:
|
| 117 |
+
queue.append(product)
|
| 118 |
+
|
| 119 |
+
while queue:
|
| 120 |
+
current = queue.popleft()
|
| 121 |
+
sorted_products.append(current)
|
| 122 |
+
|
| 123 |
+
# Process dependents
|
| 124 |
+
dependents = [(KIT_LEVELS.get(dep, 999), dep) for dep in graph[current]]
|
| 125 |
+
dependents.sort() # Sort by hierarchy level first
|
| 126 |
+
|
| 127 |
+
for _, dependent in dependents:
|
| 128 |
+
in_degree[dependent] -= 1
|
| 129 |
+
if in_degree[dependent] == 0:
|
| 130 |
+
queue.append(dependent)
|
| 131 |
+
|
| 132 |
+
# Check for cycles (shouldn't happen with proper hierarchy)
|
| 133 |
+
if len(sorted_products) != len(products_with_hierarchy):
|
| 134 |
+
remaining = [p for p in products_with_hierarchy if p not in sorted_products]
|
| 135 |
+
print(f"[HIERARCHY] WARNING: Potential circular dependencies detected in: {remaining}")
|
| 136 |
+
# Add remaining products sorted by level as fallback
|
| 137 |
+
remaining_sorted = sorted(remaining, key=lambda p: (KIT_LEVELS.get(p, 999), p))
|
| 138 |
+
sorted_products.extend(remaining_sorted)
|
| 139 |
|
| 140 |
# Add products without hierarchy at the end
|
| 141 |
sorted_products.extend(sorted(products_without_hierarchy))
|
| 142 |
|
| 143 |
+
print(f"[HIERARCHY] Dependency-aware production order: {len(sorted_products)} products")
|
| 144 |
for i, p in enumerate(sorted_products[:10]): # Show first 10
|
| 145 |
level = KIT_LEVELS.get(p, "unknown")
|
| 146 |
level_name = KitLevel.get_name(level)
|
| 147 |
deps = KIT_DEPENDENCIES.get(p, [])
|
| 148 |
+
deps_in_list = [d for d in deps if d in products_with_hierarchy]
|
| 149 |
+
print(f" {i+1}. {p} (level {level}={level_name}, deps: {len(deps_in_list)})")
|
| 150 |
+
if deps_in_list:
|
| 151 |
+
print(f" Dependencies: {deps_in_list}")
|
| 152 |
|
| 153 |
if len(sorted_products) > 10:
|
| 154 |
print(f" ... and {len(sorted_products) - 10} more products")
|
| 155 |
|
| 156 |
return sorted_products
|
| 157 |
|
| 158 |
+
# Removed get_dependency_timing_weight function - no longer needed
|
| 159 |
+
# Dependency ordering is now handled by topological sorting in sort_products_by_hierarchy()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 160 |
|
| 161 |
def solve_fixed_team_weekly():
|
| 162 |
# --- Sets ---
|
|
|
|
| 247 |
Z[p, ell, s, t] = solver.BoolVar(f"Z_{p}_{ell[0]}_{ell[1]}_s{s}_d{t}")
|
| 248 |
T[p, ell, s, t] = solver.NumVar(0, Hmax_s[s], f"T_{p}_{ell[0]}_{ell[1]}_s{s}_d{t}")
|
| 249 |
U[p, ell, s, t] = solver.NumVar(0, INF, f"U_{p}_{ell[0]}_{ell[1]}_s{s}_d{t}")
|
| 250 |
+
|
| 251 |
+
# Idle employee variables: IDLE[e,s,t] = number of idle employees of type e in shift s on day t
|
| 252 |
+
IDLE = {}
|
| 253 |
+
for e in E:
|
| 254 |
+
for s in S:
|
| 255 |
+
for t in D:
|
| 256 |
+
max_idle = N_day[e][t] # Can't have more idle employees than available
|
| 257 |
+
IDLE[e, s, t] = solver.IntVar(0, max_idle, f"IDLE_{e}_s{s}_d{t}")
|
| 258 |
|
| 259 |
# Note: Binary variables for bulk payment are now created inline in the cost calculation
|
| 260 |
|
|
|
|
| 296 |
# Cost: pay the specific working employees for full shift hours
|
| 297 |
cost_terms.append(cost[e][s] * Hmax_s[s] * TEAM_REQ_PER_PRODUCT[e][p] * work_binary)
|
| 298 |
|
| 299 |
+
# Add idle employee costs (idle employees are paid for full shift hours)
|
| 300 |
+
for e in E:
|
| 301 |
+
for s in S:
|
| 302 |
+
for t in D:
|
| 303 |
+
cost_terms.append(cost[e][s] * Hmax_s[s] * IDLE[e, s, t])
|
| 304 |
|
| 305 |
+
total_cost = solver.Sum(cost_terms)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 306 |
|
| 307 |
+
# Objective: minimize total cost only
|
| 308 |
+
# Dependency ordering is handled by topological sorting and hard constraints
|
| 309 |
+
solver.Minimize(total_cost)
|
| 310 |
|
| 311 |
# --- Constraints ---
|
| 312 |
|
| 313 |
+
# 1) Weekly demand - must meet exactly (no over/under production)
|
| 314 |
for p in P:
|
| 315 |
+
total_production = solver.Sum(U[p, ell, s, t] for ell in L for s in S for t in D)
|
| 316 |
+
demand = d_week.get(p, 0)
|
| 317 |
+
|
| 318 |
+
# Must produce at least the demand
|
| 319 |
+
solver.Add(total_production >= demand)
|
| 320 |
+
|
| 321 |
+
# Must not produce more than the demand (prevent overproduction)
|
| 322 |
+
solver.Add(total_production <= demand)
|
| 323 |
|
| 324 |
# 2) One product per (line,shift,day) + time gating
|
| 325 |
for ell in L:
|
|
|
|
| 351 |
if p in PER_PRODUCT_SPEED:
|
| 352 |
# Convert kit per day to kit per hour (assuming 7.5 hour workday)
|
| 353 |
speed = PER_PRODUCT_SPEED[p]
|
| 354 |
+
# Upper bound: units cannot exceed capacity
|
| 355 |
solver.Add(
|
| 356 |
U[p, ell, s, t] <= speed * T[p, ell, s, t]
|
| 357 |
)
|
| 358 |
+
# Lower bound: if working, must produce (prevent phantom work)
|
| 359 |
+
solver.Add(
|
| 360 |
+
U[p, ell, s, t] >= speed * T[p, ell, s, t]
|
| 361 |
+
)
|
| 362 |
else:
|
| 363 |
# Default speed if not found
|
| 364 |
default_speed = 800 / 7.5 # units per hour
|
| 365 |
print(f"Warning: No speed data for product {p}, using default {default_speed:.1f} per hour")
|
| 366 |
+
# Upper bound: units cannot exceed capacity
|
| 367 |
solver.Add(
|
| 368 |
U[p, ell, s, t] <= default_speed * T[p, ell, s, t]
|
| 369 |
)
|
| 370 |
+
# Lower bound: if working, must produce (prevent phantom work)
|
| 371 |
+
solver.Add(
|
| 372 |
+
U[p, ell, s, t] >= default_speed * T[p, ell, s, t]
|
| 373 |
+
)
|
| 374 |
|
| 375 |
+
# 5) Per-shift staffing capacity by type: idle employees β€ available headcount
|
| 376 |
for e in E:
|
| 377 |
for s in S:
|
| 378 |
for t in D:
|
| 379 |
+
# Idle employees cannot exceed available headcount
|
| 380 |
+
# (Active employees are constrained by the working hours constraint below)
|
| 381 |
+
solver.Add(IDLE[e, s, t] <= N_day[e][t])
|
| 382 |
+
|
| 383 |
+
# Working hours constraint: active employees cannot exceed shift hour capacity
|
| 384 |
solver.Add(
|
| 385 |
solver.Sum(TEAM_REQ_PER_PRODUCT[e][p] * T[p, ell, s, t] for p in P for ell in L)
|
| 386 |
<= Hmax_s[s] * N_day[e][t]
|
|
|
|
| 405 |
solver.Sum(TEAM_REQ_PER_PRODUCT[e][p] * T[p, ell, ShiftType.REGULAR, t] for p in P for ell in L)
|
| 406 |
)
|
| 407 |
|
| 408 |
+
# Overtime should only be used when regular shift is at capacity
|
| 409 |
if ShiftType.OVERTIME in S and ShiftType.REGULAR in S: # Only if both shifts are available
|
| 410 |
+
print("\n[OVERTIME] Adding constraints to ensure overtime only when regular shift is insufficient...")
|
| 411 |
+
|
| 412 |
for e in E:
|
| 413 |
for t in D:
|
| 414 |
+
# Get available regular capacity for this employee type and day
|
| 415 |
+
regular_capacity = N_day[e][t]
|
| 416 |
+
|
| 417 |
+
# Total regular shift usage for this employee type and day
|
| 418 |
+
regular_usage = solver.Sum(
|
| 419 |
+
TEAM_REQ_PER_PRODUCT[e][p] * T[p, ell, ShiftType.REGULAR, t]
|
| 420 |
+
for p in P for ell in L
|
| 421 |
+
)
|
| 422 |
+
|
| 423 |
+
# Total overtime usage for this employee type and day
|
| 424 |
+
overtime_usage = solver.Sum(
|
| 425 |
+
TEAM_REQ_PER_PRODUCT[e][p] * T[p, ell, ShiftType.OVERTIME, t]
|
| 426 |
+
for p in P for ell in L
|
| 427 |
)
|
| 428 |
+
|
| 429 |
+
# Create binary variable: 1 if using overtime, 0 otherwise
|
| 430 |
+
using_overtime = solver.IntVar(0, 1, f'using_overtime_{e}_{t}')
|
| 431 |
+
|
| 432 |
+
# If using overtime, regular capacity must be utilized significantly
|
| 433 |
+
# Regular usage must be at least 90% of capacity to allow overtime
|
| 434 |
+
min_regular_for_overtime = int(0.9 * regular_capacity)
|
| 435 |
+
|
| 436 |
+
# Constraint 1: Can only use overtime if regular usage is high
|
| 437 |
+
solver.Add(regular_usage >= min_regular_for_overtime * using_overtime)
|
| 438 |
+
|
| 439 |
+
# Constraint 2: If any overtime is used, set the binary variable
|
| 440 |
+
solver.Add(overtime_usage <= regular_capacity * using_overtime)
|
| 441 |
+
|
| 442 |
+
overtime_constraints_added = len(E) * len(D) * 2 # 2 constraints per employee type per day
|
| 443 |
+
print(f"[OVERTIME] Added {overtime_constraints_added} constraints ensuring overtime only when regular shifts are at 90%+ capacity")
|
| 444 |
|
| 445 |
# 7.5) Bulk payment linking constraints are now handled inline in the cost calculation
|
| 446 |
|
| 447 |
+
# 7.6) *** FIXED MINIMUM UNICEF EMPLOYEES CONSTRAINT ***
|
| 448 |
+
# Ensure minimum UNICEF fixed-term staff are present every working day
|
| 449 |
+
if 'UNICEF Fixed term' in E and FIXED_MIN_UNICEF_PER_DAY > 0:
|
| 450 |
+
print(f"\n[FIXED STAFFING] Adding constraint for minimum {FIXED_MIN_UNICEF_PER_DAY} UNICEF employees per day...")
|
| 451 |
+
|
| 452 |
+
unicef_constraints_added = 0
|
| 453 |
+
for t in D:
|
| 454 |
+
# Method 1: Simple approach - ensure minimum UNICEF employees are scheduled
|
| 455 |
+
# regardless of whether they're working or idle
|
| 456 |
+
# Sum up all possible UNICEF work assignments + idle UNICEF employees
|
| 457 |
+
|
| 458 |
+
# Count all UNICEF work hours across all products, lines, and shifts
|
| 459 |
+
all_unicef_hours = solver.Sum(
|
| 460 |
+
TEAM_REQ_PER_PRODUCT.get('UNICEF Fixed term', {}).get(p, 0) * T[p, ell, s, t]
|
| 461 |
+
for p in P
|
| 462 |
+
for ell in L
|
| 463 |
+
for s in S
|
| 464 |
+
)
|
| 465 |
+
|
| 466 |
+
# Count idle UNICEF employees across all shifts
|
| 467 |
+
idle_unicef_employees = solver.Sum(
|
| 468 |
+
IDLE['UNICEF Fixed term', s, t] for s in S
|
| 469 |
+
)
|
| 470 |
+
|
| 471 |
+
# Constraint: total hours (work + idle*14) must meet minimum staffing
|
| 472 |
+
# This ensures at least FIXED_MIN_UNICEF_PER_DAY employees are present
|
| 473 |
+
solver.Add(all_unicef_hours + idle_unicef_employees * MAX_HOUR_PER_PERSON_PER_DAY >= FIXED_MIN_UNICEF_PER_DAY * MAX_HOUR_PER_PERSON_PER_DAY)
|
| 474 |
+
|
| 475 |
+
# Additional constraint: ensure idle employees are properly linked to total headcount
|
| 476 |
+
# This prevents the solver from avoiding the minimum by setting everyone to zero
|
| 477 |
+
total_unicef_hours_needed_for_production = solver.Sum(
|
| 478 |
+
TEAM_REQ_PER_PRODUCT.get('UNICEF Fixed term', {}).get(p, 0) * T[p, ell, s, t]
|
| 479 |
+
for p in P for ell in L for s in S
|
| 480 |
+
)
|
| 481 |
+
|
| 482 |
+
# Simpler approach: just ensure the basic constraint is strong enough
|
| 483 |
+
# The main constraint above should be sufficient: all_unicef_hours + idle*14 >= min*14
|
| 484 |
+
# This already forces idle employees when production is insufficient
|
| 485 |
+
unicef_constraints_added += 1
|
| 486 |
+
|
| 487 |
+
print(f"[FIXED STAFFING] Added {unicef_constraints_added} constraints ensuring >= {FIXED_MIN_UNICEF_PER_DAY} UNICEF employees per day")
|
| 488 |
+
|
| 489 |
# 8) *** HIERARCHY DEPENDENCY CONSTRAINTS ***
|
| 490 |
# For subkits with prepack dependencies: dependencies should be produced before or same time
|
| 491 |
print("\n[HIERARCHY] Adding dependency constraints...")
|
|
|
|
| 573 |
'cap_person_hours': Hmax_daily * N_day[e][t]})
|
| 574 |
result['person_hours_by_day'] = ph_by_day
|
| 575 |
|
| 576 |
+
# Idle employee data for visualization
|
| 577 |
+
idle_employees = []
|
| 578 |
+
for e in E:
|
| 579 |
+
for s in S:
|
| 580 |
+
for t in D:
|
| 581 |
+
idle_count = IDLE[e, s, t].solution_value()
|
| 582 |
+
if idle_count > 0: # Only include non-zero idle counts
|
| 583 |
+
idle_employees.append({
|
| 584 |
+
'emp_type': e,
|
| 585 |
+
'shift': s,
|
| 586 |
+
'day': t,
|
| 587 |
+
'idle_count': idle_count
|
| 588 |
+
})
|
| 589 |
+
result['idle_employees'] = idle_employees
|
| 590 |
+
|
| 591 |
# Pretty print
|
| 592 |
print("Objective (min cost):", result['objective'])
|
| 593 |
print("\n--- Weekly production by product ---")
|
|
|
|
| 612 |
print(f"{row['emp_type']}, D{row['day']}: used={row['used_person_hours']:.1f} "
|
| 613 |
f"(cap {row['cap_person_hours']})")
|
| 614 |
|
| 615 |
+
# Report idle employees
|
| 616 |
+
print("\n--- Idle employees (per type/shift/day) ---")
|
| 617 |
+
idle_found = False
|
| 618 |
+
for e in E:
|
| 619 |
+
for s in S:
|
| 620 |
+
for t in D:
|
| 621 |
+
idle_count = IDLE[e, s, t].solution_value()
|
| 622 |
+
if idle_count > 0:
|
| 623 |
+
shift_name = ShiftType.get_name(s)
|
| 624 |
+
print(f"{e}, {shift_name}, D{t}: idle={idle_count}")
|
| 625 |
+
idle_found = True
|
| 626 |
+
if not idle_found:
|
| 627 |
+
print("No idle employees scheduled")
|
| 628 |
+
|
| 629 |
return result
|
| 630 |
|
| 631 |
|
src/visualization/hierarchy_dashboard.py
ADDED
|
@@ -0,0 +1,555 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Hierarchy-Based Production Flow Visualization
|
| 3 |
+
Shows how kits flow through production based on dependency hierarchy
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import streamlit as st
|
| 7 |
+
import pandas as pd
|
| 8 |
+
import plotly.express as px
|
| 9 |
+
import plotly.graph_objects as go
|
| 10 |
+
from plotly.subplots import make_subplots
|
| 11 |
+
try:
|
| 12 |
+
import networkx as nx
|
| 13 |
+
NETWORKX_AVAILABLE = True
|
| 14 |
+
except ImportError:
|
| 15 |
+
NETWORKX_AVAILABLE = False
|
| 16 |
+
nx = None
|
| 17 |
+
|
| 18 |
+
import numpy as np
|
| 19 |
+
import sys
|
| 20 |
+
sys.path.append('src')
|
| 21 |
+
|
| 22 |
+
from config.optimization_config import (
|
| 23 |
+
KIT_LEVELS, KIT_DEPENDENCIES, TEAM_REQ_PER_PRODUCT,
|
| 24 |
+
shift_code_to_name, line_code_to_name
|
| 25 |
+
)
|
| 26 |
+
from config.constants import ShiftType, LineType, KitLevel
|
| 27 |
+
|
| 28 |
+
# Import kit relationships dashboard
|
| 29 |
+
try:
|
| 30 |
+
from src.visualization.kit_relationships import display_kit_relationships_dashboard
|
| 31 |
+
except ImportError:
|
| 32 |
+
display_kit_relationships_dashboard = None
|
| 33 |
+
|
| 34 |
+
def display_hierarchy_operations_dashboard(results):
|
| 35 |
+
"""Enhanced operations dashboard showing hierarchy-based production flow"""
|
| 36 |
+
st.header("π Hierarchy-Based Operations Dashboard")
|
| 37 |
+
st.markdown("---")
|
| 38 |
+
|
| 39 |
+
# Create main dashboard tabs
|
| 40 |
+
tab1, tab2, tab3 = st.tabs([
|
| 41 |
+
"π Production Flow",
|
| 42 |
+
"π Hierarchy Analytics",
|
| 43 |
+
"π Kit Relationships"
|
| 44 |
+
])
|
| 45 |
+
|
| 46 |
+
with tab1:
|
| 47 |
+
display_production_flow_visualization(results)
|
| 48 |
+
|
| 49 |
+
with tab2:
|
| 50 |
+
display_hierarchy_analytics(results)
|
| 51 |
+
|
| 52 |
+
with tab3:
|
| 53 |
+
# Kit relationships from actual hierarchy data
|
| 54 |
+
if display_kit_relationships_dashboard:
|
| 55 |
+
display_kit_relationships_dashboard(results)
|
| 56 |
+
else:
|
| 57 |
+
st.error("Kit relationships dashboard not available. Please check installation.")
|
| 58 |
+
|
| 59 |
+
def display_production_flow_visualization(results):
|
| 60 |
+
"""Show how products flow through production lines by hierarchy"""
|
| 61 |
+
st.subheader("π Kit Production Flow by Hierarchy")
|
| 62 |
+
|
| 63 |
+
# Get production sequence data
|
| 64 |
+
flow_data = prepare_hierarchy_flow_data(results)
|
| 65 |
+
|
| 66 |
+
if not flow_data:
|
| 67 |
+
st.warning("No production data available for flow visualization")
|
| 68 |
+
return
|
| 69 |
+
|
| 70 |
+
# Create flow diagram
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
# Hierarchy level summary - horizontal layout
|
| 75 |
+
st.subheader("π¦ Production by Level")
|
| 76 |
+
level_summary = get_hierarchy_level_summary(flow_data)
|
| 77 |
+
|
| 78 |
+
# Create horizontal columns for each level
|
| 79 |
+
level_names = ['prepack', 'subkit', 'master']
|
| 80 |
+
available_levels = [level for level in level_names if level in level_summary]
|
| 81 |
+
|
| 82 |
+
if available_levels:
|
| 83 |
+
cols = st.columns(len(available_levels))
|
| 84 |
+
|
| 85 |
+
for i, level_name in enumerate(available_levels):
|
| 86 |
+
data = level_summary[level_name]
|
| 87 |
+
with cols[i]:
|
| 88 |
+
# Use custom styling instead of st.metric to avoid delta arrows
|
| 89 |
+
st.markdown(f"""
|
| 90 |
+
<div style="
|
| 91 |
+
background: linear-gradient(135deg, #f0f8ff, #e6f3ff);
|
| 92 |
+
padding: 1rem;
|
| 93 |
+
border-radius: 0.5rem;
|
| 94 |
+
text-align: center;
|
| 95 |
+
border-left: 4px solid {'#90EE90' if level_name == 'prepack' else '#FFD700' if level_name == 'subkit' else '#FF6347'};
|
| 96 |
+
box-shadow: 0 2px 4px rgba(0,0,0,0.1);
|
| 97 |
+
">
|
| 98 |
+
<div style="font-size: 0.8rem; color: #666; text-transform: uppercase; letter-spacing: 1px;">
|
| 99 |
+
{level_name.title()} Kits
|
| 100 |
+
</div>
|
| 101 |
+
<div style="font-size: 1.5rem; font-weight: bold; color: #333; margin: 0.2rem 0;">
|
| 102 |
+
{data['count']} products
|
| 103 |
+
</div>
|
| 104 |
+
<div style="font-size: 1rem; color: #555;">
|
| 105 |
+
{data['total_units']:,.0f} units
|
| 106 |
+
</div>
|
| 107 |
+
</div>
|
| 108 |
+
""", unsafe_allow_html=True)
|
| 109 |
+
|
| 110 |
+
# Timeline view of hierarchy production
|
| 111 |
+
st.subheader("π
Hierarchy Production Timeline")
|
| 112 |
+
try:
|
| 113 |
+
fig_timeline = create_hierarchy_timeline(flow_data)
|
| 114 |
+
st.plotly_chart(fig_timeline, use_container_width=True)
|
| 115 |
+
except Exception as e:
|
| 116 |
+
st.warning(f"Timeline chart temporarily unavailable. Showing alternative visualization.")
|
| 117 |
+
# Fallback: Simple bar chart by day
|
| 118 |
+
if flow_data:
|
| 119 |
+
df_simple = pd.DataFrame([{
|
| 120 |
+
'Day': f"Day {row['day']}",
|
| 121 |
+
'Level': row['level_name'].title(),
|
| 122 |
+
'Units': row['units'],
|
| 123 |
+
'Product': row['product']
|
| 124 |
+
} for row in flow_data])
|
| 125 |
+
|
| 126 |
+
fig_simple = px.bar(df_simple, x='Day', y='Units', color='Level',
|
| 127 |
+
title='Production Volume by Day and Hierarchy Level',
|
| 128 |
+
color_discrete_map={
|
| 129 |
+
'Prepack': '#90EE90',
|
| 130 |
+
'Subkit': '#FFD700',
|
| 131 |
+
'Master': '#FF6347'
|
| 132 |
+
})
|
| 133 |
+
st.plotly_chart(fig_simple, use_container_width=True)
|
| 134 |
+
|
| 135 |
+
def display_hierarchy_analytics(results):
    """Deep dive analytics on hierarchy production performance.

    Renders four headline metrics (prepack efficiency, dependency
    violations, average lead time, cost per unit), a dependency network
    chart and a production heatmap, all derived from the optimizer
    ``results`` dict via prepare_hierarchy_analytics_data().

    Args:
        results: optimizer output dict; prepare_hierarchy_analytics_data
            reads 'run_schedule', 'objective' and 'weekly_production'.
    """
    st.subheader("π Hierarchy Performance Analytics")

    # Prepare analytics data
    analytics_data = prepare_hierarchy_analytics_data(results)

    if not analytics_data:
        st.warning("No hierarchy data available for analytics")
        return

    # Key metrics
    col1, col2, col3, col4 = st.columns(4)

    with col1:
        prepack_efficiency = analytics_data.get('prepack_efficiency', 0)
        # Delta is shown relative to a 95% target; suppressed when exactly on target.
        st.metric("Prepack Efficiency", f"{prepack_efficiency:.1f}%",
                 delta=f"{prepack_efficiency-95:.1f}%" if prepack_efficiency != 95 else None)

    with col2:
        dependency_violations = analytics_data.get('dependency_violations', 0)
        # Negative delta renders red in Streamlit, flagging violations visually.
        st.metric("Dependency Violations", f"{dependency_violations}",
                 delta=f"-{dependency_violations}" if dependency_violations > 0 else None)

    with col3:
        # NOTE(review): prepare_hierarchy_analytics_data currently always
        # returns 0 for avg_lead_time / prepack_efficiency — placeholders.
        avg_lead_time = analytics_data.get('avg_lead_time', 0)
        st.metric("Avg Lead Time", f"{avg_lead_time:.1f} days")

    with col4:
        hierarchy_cost_efficiency = analytics_data.get('cost_efficiency', 0)
        st.metric("Cost Efficiency", f"β¬{hierarchy_cost_efficiency:.2f}/unit")

    # Dependency flow chart
    st.subheader("π Dependency Network Analysis")
    fig_network = create_dependency_network_chart(analytics_data)
    st.plotly_chart(fig_network, use_container_width=True)

    # Production heatmap
    st.subheader("π₯ Hierarchy Production Heatmap")
    heatmap_fig = create_hierarchy_heatmap(results)
    st.plotly_chart(heatmap_fig, use_container_width=True)
# Removed display_enhanced_line_utilization function - utilization concept removed
|
| 180 |
+
|
| 181 |
+
def display_production_sequence_analysis(results):
    """Analyze production sequence and timing.

    Renders sequence-adherence metrics, a violation scatter plot and
    optimization suggestions derived from the optimizer ``results``.

    Bug fix: the original body referenced ``sequence_data`` without ever
    assigning it, so every call raised NameError.  The data is now built
    from the run schedule by the private helper _build_sequence_data().

    Args:
        results: optimizer output dict containing a 'run_schedule' list
            of rows with 'product' and 'day' keys.
    """
    st.subheader("π― Production Sequence Analysis")

    # BUGFIX: compute the sequence data (was referenced but never assigned).
    sequence_data = _build_sequence_data(results)

    if not sequence_data:
        st.warning("No sequence data available")
        return

    # Sequence adherence metrics
    col1, col2, col3 = st.columns(3)

    with col1:
        sequence_score = sequence_data.get('sequence_adherence_score', 0)
        st.metric("Sequence Adherence", f"{sequence_score:.1f}%",
                 help="How well production follows optimal hierarchy sequence")

    with col2:
        early_productions = sequence_data.get('early_productions', 0)
        st.metric("Early Productions", f"{early_productions}",
                 help="Products produced before their dependencies")

    with col3:
        optimal_sequences = sequence_data.get('optimal_sequences', 0)
        st.metric("Optimal Sequences", f"{optimal_sequences}%",
                 help="Percentage of products following optimal sequence")

    # Sequence violation chart
    if sequence_data.get('violations'):
        st.subheader("β οΈ Sequence Violations")
        violations_df = pd.DataFrame(sequence_data['violations'])

        fig = px.scatter(violations_df,
                        x='production_day', y='dependency_day',
                        color='severity', size='impact',
                        hover_data=['product', 'dependency'],
                        title='Production vs Dependency Timing (Violations in Red)',
                        labels={'production_day': 'When Product Was Made',
                               'dependency_day': 'When Dependency Was Made'})

        # Diagonal marks the ideal timing; points above it are violations.
        max_day = max(violations_df['production_day'].max(), violations_df['dependency_day'].max())
        fig.add_shape(type="line", x0=0, y0=0, x1=max_day, y1=max_day,
                     line=dict(dash="dash", color="gray"),
                     name="Ideal Sequence Line")

        st.plotly_chart(fig, use_container_width=True)

    # Sequence optimization suggestions
    st.subheader("π‘ Optimization Suggestions")
    suggestions = generate_sequence_suggestions(sequence_data)
    for suggestion in suggestions:
        st.info(f"π‘ {suggestion}")


def _build_sequence_data(results):
    """Build sequence-adherence metrics from the run schedule.

    Returns an empty dict when no schedule is present; otherwise a dict
    with 'sequence_adherence_score', 'early_productions',
    'optimal_sequences' and 'violations', matching the fields consumed
    by display_production_sequence_analysis() and
    generate_sequence_suggestions().
    """
    schedule = results.get('run_schedule') or []
    if not schedule:
        return {}

    # Earliest production day per product (a product may run several times).
    first_day = {}
    for row in schedule:
        known = first_day.get(row['product'])
        if known is None or row['day'] < known:
            first_day[row['product']] = row['day']

    violations = []
    checked = 0
    for product, prod_day in first_day.items():
        for dep in KIT_DEPENDENCIES.get(product, []):
            if dep not in first_day:
                continue
            checked += 1
            dep_day = first_day[dep]
            if dep_day > prod_day:  # dependency produced after the product
                violations.append({
                    'product': product,
                    'dependency': dep,
                    'production_day': prod_day,
                    'dependency_day': dep_day,
                    'severity': 'high' if dep_day - prod_day > 1 else 'medium',
                    'impact': dep_day - prod_day,
                })

    # Adherence = fraction of checked dependency pairs satisfied.
    adherence = 100.0 if checked == 0 else 100.0 * (checked - len(violations)) / checked
    bad_products = {v['product'] for v in violations}
    optimal_pct = 100.0 * (len(first_day) - len(bad_products)) / len(first_day)

    return {
        'sequence_adherence_score': adherence,
        'early_productions': len(violations),
        'optimal_sequences': round(optimal_pct, 1),
        'violations': violations,
    }
|
| 235 |
+
# Helper Functions
|
| 236 |
+
|
| 237 |
+
def prepare_hierarchy_flow_data(results):
    """Prepare data for hierarchy flow visualization.

    Flattens each run-schedule row into a dict enriched with the kit's
    hierarchy level (looked up in KIT_LEVELS, defaulting to MASTER) and
    its direct dependencies from KIT_DEPENDENCIES.
    """
    def _to_flow_row(entry):
        # One visualization record per scheduled production run.
        kit = entry['product']
        tier = KIT_LEVELS.get(kit, KitLevel.MASTER)
        return {
            'product': kit,
            'level': tier,
            'level_name': KitLevel.get_name(tier),
            'day': entry['day'],
            'shift': entry['shift'],
            'line_type': entry['line_type_id'],
            'line_idx': entry['line_idx'],
            'hours': entry['run_hours'],
            'units': entry['units'],
            'dependencies': KIT_DEPENDENCIES.get(kit, []),
        }

    return [_to_flow_row(entry) for entry in results['run_schedule']]
+
|
| 261 |
+
def create_hierarchy_timeline(flow_data):
    """Create timeline showing hierarchy production sequence.

    Builds a Plotly Gantt-style timeline (px.timeline) of production
    runs, one bar per run, colored by hierarchy level.  Days are mapped
    onto real datetimes starting at an arbitrary base date because
    px.timeline requires datetime-typed Start/End columns.

    Args:
        flow_data: list of dicts from prepare_hierarchy_flow_data().

    Returns:
        plotly.graph_objects.Figure (empty Figure when flow_data is empty).
    """
    if not flow_data:
        return go.Figure()

    # Prepare timeline data with proper datetime conversion
    timeline_data = []

    from datetime import datetime, timedelta
    base_date = datetime(2025, 1, 1)  # Base date for timeline (arbitrary anchor; only relative days matter)

    for row in flow_data:
        shift_name = ShiftType.get_name(row['shift'])
        line_name = LineType.get_name(row['line_type'])

        # Create start and end times for the production run.
        # NOTE(review): all runs on a day start at midnight regardless of
        # shift, so same-line runs may visually overlap — confirm intended.
        start_date = base_date + timedelta(days=row['day']-1)
        end_date = start_date + timedelta(hours=row['hours'])

        timeline_data.append({
            'Product': row['product'],
            'Level': row['level_name'].title(),
            'Start': start_date,
            'End': end_date,
            'Day': f"Day {row['day']}",
            'Shift': shift_name,
            'Line': f"{line_name} {row['line_idx']}",
            'Units': row['units'],
            'Hours': row['hours'],
            'Priority': row['level']  # For sorting
        })

    df = pd.DataFrame(timeline_data)

    if df.empty:
        return go.Figure()

    # Create timeline chart with proper datetime columns
    fig = px.timeline(df,
                     x_start='Start', x_end='End',
                     y='Line',
                     color='Level',
                     hover_data=['Product', 'Units', 'Hours', 'Shift', 'Day'],
                     title='Production Timeline by Hierarchy Level',
                     color_discrete_map={
                         'Prepack': '#90EE90',
                         'Subkit': '#FFD700',
                         'Master': '#FF6347'
                     })

    fig.update_layout(
        height=500,
        xaxis_title='Production Timeline',
        yaxis_title='Production Line'
    )

    return fig
+
|
| 319 |
+
def prepare_hierarchy_analytics_data(results):
    """Prepare analytics data for hierarchy performance.

    Computes cost-per-unit and scans the run schedule for dependency
    violations (a kit produced before one of its KIT_DEPENDENCIES).
    'prepack_efficiency' and 'avg_lead_time' are placeholders (always 0).
    """
    total_cost = results.get('objective', 0)
    total_units = sum(results.get('weekly_production', {}).values())

    # Earliest production day per product.
    first_day = {}
    for run in results['run_schedule']:
        name = run['product']
        if name not in first_day or run['day'] < first_day[name]:
            first_day[name] = run['day']

    # Flag every dependency that was produced AFTER the kit needing it.
    details = []
    for name, made_on in first_day.items():
        for needed in KIT_DEPENDENCIES.get(name, []):
            needed_on = first_day.get(needed)
            if needed_on is not None and needed_on > made_on:
                gap = needed_on - made_on  # always positive in this branch
                details.append({
                    'product': name,
                    'dependency': needed,
                    'production_day': made_on,
                    'dependency_day': needed_on,
                    'severity': 'high' if gap > 1 else 'medium',
                    'impact': gap,
                })

    return {
        'prepack_efficiency': 0,
        'dependency_violations': len(details),
        'avg_lead_time': 0,
        'cost_efficiency': (total_cost / total_units) if total_units > 0 else 0,
        'violations': details,
        'dependencies': KIT_DEPENDENCIES,
    }
+
|
| 369 |
+
# Removed calculate_hierarchy_line_utilization and create_utilization_gauge functions
|
| 370 |
+
# - utilization concept removed from dashboard
|
| 371 |
+
|
| 372 |
+
def create_hierarchy_heatmap(results):
    """Create heatmap showing hierarchy production by line and day.

    Aggregates units per (production line, day) from the run schedule
    and renders them as a px.imshow heatmap.

    Args:
        results: optimizer output dict with a 'run_schedule' list.

    Returns:
        plotly.graph_objects.Figure (empty Figure when there is no data).
    """
    # Prepare heatmap data
    heatmap_data = []

    for row in results['run_schedule']:
        product = row['product']
        level_name = KitLevel.get_name(KIT_LEVELS.get(product, KitLevel.MASTER))
        line_name = f"{LineType.get_name(row['line_type_id'])} {row['line_idx']}"

        heatmap_data.append({
            'Line': line_name,
            'Day': f"Day {row['day']}",
            'Level': level_name,
            'Units': row['units'],
            'Hours': row['run_hours']
        })

    if not heatmap_data:
        return go.Figure()

    df = pd.DataFrame(heatmap_data)

    # Pivot for heatmap: rows = lines, columns = days, values = summed units.
    # NOTE(review): 'Day N' string columns sort lexicographically, so
    # 'Day 10' precedes 'Day 2' for horizons > 9 days — confirm acceptable.
    pivot_df = df.pivot_table(
        values='Units',
        index='Line',
        columns='Day',
        aggfunc='sum',
        fill_value=0
    )

    fig = px.imshow(pivot_df.values,
                   x=pivot_df.columns,
                   y=pivot_df.index,
                   color_continuous_scale='Blues',
                   title='Production Volume Heatmap (Units per Day)',
                   labels=dict(x="Day", y="Production Line", color="Units"))

    return fig
+
|
| 413 |
+
def create_dependency_network_chart(analytics_data):
    """Create network chart showing dependency relationships.

    Draws a directed dependency graph (dependency -> product) with a
    networkx spring layout, nodes colored by hierarchy level.

    Bug fix: ``go.Layout(titlefont_size=16)`` uses the legacy
    ``titlefont`` attribute removed in Plotly 5; the title font size is
    now set with the nested ``title=dict(font=dict(size=...))`` form.

    Args:
        analytics_data: dict from prepare_hierarchy_analytics_data();
            only its 'dependencies' mapping is used.

    Returns:
        plotly.graph_objects.Figure (annotation-only figure when
        networkx is unavailable or there is nothing to draw).
    """
    dependencies = analytics_data.get('dependencies', {})

    if not dependencies or not NETWORKX_AVAILABLE:
        return go.Figure().add_annotation(
            text="Dependency network visualization requires 'networkx' package. Install with: pip install networkx" if not NETWORKX_AVAILABLE else "No dependency relationships to display",
            xref="paper", yref="paper",
            x=0.5, y=0.5, showarrow=False
        )

    # Create network graph
    G = nx.DiGraph()

    # Add nodes and edges (only kits that actually have dependencies)
    for product, deps in dependencies.items():
        if product and deps:
            G.add_node(product)
            for dep in deps:
                if dep:
                    G.add_node(dep)
                    G.add_edge(dep, product)  # Dependency -> Product

    if len(G.nodes()) == 0:
        return go.Figure().add_annotation(
            text="No dependency relationships to display",
            xref="paper", yref="paper",
            x=0.5, y=0.5, showarrow=False
        )

    # Calculate layout
    pos = nx.spring_layout(G, k=3, iterations=50)

    # Create edge traces (None separates individual line segments)
    edge_x = []
    edge_y = []
    for edge in G.edges():
        x0, y0 = pos[edge[0]]
        x1, y1 = pos[edge[1]]
        edge_x.extend([x0, x1, None])
        edge_y.extend([y0, y1, None])

    edge_trace = go.Scatter(x=edge_x, y=edge_y,
                           line=dict(width=0.5, color='#888'),
                           hoverinfo='none',
                           mode='lines')

    # Create node traces
    node_x = []
    node_y = []
    node_text = []
    node_color = []

    for node in G.nodes():
        x, y = pos[node]
        node_x.append(x)
        node_y.append(y)
        node_text.append(node)

        # Color by hierarchy level
        level = KIT_LEVELS.get(node, KitLevel.MASTER)
        if level == KitLevel.PREPACK:
            node_color.append('#90EE90')
        elif level == KitLevel.SUBKIT:
            node_color.append('#FFD700')
        else:
            node_color.append('#FF6347')

    node_trace = go.Scatter(x=node_x, y=node_y,
                           mode='markers+text',
                           text=node_text,
                           textposition='middle center',
                           marker=dict(size=20, color=node_color, line=dict(width=2, color='black')),
                           hoverinfo='text',
                           hovertext=node_text)

    fig = go.Figure(data=[edge_trace, node_trace],
                   layout=go.Layout(
                       # 'titlefont_size' was removed in Plotly 5.x; use the
                       # nested title.font form instead.
                       title=dict(text='Kit Dependency Network', font=dict(size=16)),
                       showlegend=False,
                       hovermode='closest',
                       margin=dict(b=20,l=5,r=5,t=40),
                       annotations=[ dict(
                           text="Green=Prepack, Gold=Subkit, Red=Master",
                           showarrow=False,
                           xref="paper", yref="paper",
                           x=0.005, y=-0.002,
                           xanchor='left', yanchor='bottom',
                           font=dict(size=12)
                       )],
                       xaxis=dict(showgrid=False, zeroline=False, showticklabels=False),
                       yaxis=dict(showgrid=False, zeroline=False, showticklabels=False)))

    return fig
+
|
| 509 |
+
|
| 510 |
+
|
| 511 |
+
|
| 512 |
+
|
| 513 |
+
def generate_sequence_suggestions(sequence_data):
    """Generate optimization suggestions based on sequence analysis.

    Args:
        sequence_data: dict with optional 'sequence_adherence_score'
            and 'early_productions' keys (both default to 0).

    Returns:
        Non-empty list of human-readable suggestion strings.
    """
    adherence = sequence_data.get('sequence_adherence_score', 0)
    violations = sequence_data.get('early_productions', 0)

    tips = []
    if adherence < 80:
        tips.append(
            "Consider adjusting production sequence to better follow hierarchy dependencies. "
            "Current adherence is below optimal (80%)."
        )
    if violations > 0:
        tips.append(
            f"Found {violations} dependency violations. Review production scheduling to ensure "
            "prepacks are produced before subkits, and subkits before masters."
        )
    if adherence >= 95:
        tips.append(
            "Excellent sequence adherence! Production is following optimal hierarchy flow."
        )

    # Always return at least one message so the UI has something to show.
    return tips or ["Production sequence analysis complete. No major issues detected."]
+
|
| 542 |
+
def get_hierarchy_level_summary(flow_data):
    """Get summary statistics for each hierarchy level.

    Args:
        flow_data: list of dicts from prepare_hierarchy_flow_data()
            (each with 'level_name', 'product', 'units', 'hours').

    Returns:
        Dict keyed by 'prepack'/'subkit'/'master', each mapping to
        distinct-product count and unit/hour totals for that level.
    """
    summary = {}
    for tier in ('prepack', 'subkit', 'master'):
        rows = [entry for entry in flow_data if entry['level_name'] == tier]
        summary[tier] = {
            # Distinct products, not scheduled runs.
            'count': len({entry['product'] for entry in rows}),
            'total_units': sum(entry['units'] for entry in rows),
            'total_hours': sum(entry['hours'] for entry in rows),
        }
    return summary
src/visualization/kit_relationships.py
ADDED
|
@@ -0,0 +1,630 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Kit Relationship Visualization
|
| 3 |
+
Shows the actual dependency relationships between kits in production
|
| 4 |
+
based on kit_hierarchy.json data
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import streamlit as st
|
| 8 |
+
import pandas as pd
|
| 9 |
+
import plotly.express as px
|
| 10 |
+
import plotly.graph_objects as go
|
| 11 |
+
from plotly.subplots import make_subplots
|
| 12 |
+
import json
|
| 13 |
+
import sys
|
| 14 |
+
sys.path.append('src')
|
| 15 |
+
|
| 16 |
+
from config.constants import ShiftType, LineType, KitLevel
|
| 17 |
+
|
| 18 |
+
# Optional networkx for advanced network layouts
|
| 19 |
+
try:
|
| 20 |
+
import networkx as nx
|
| 21 |
+
NETWORKX_AVAILABLE = True
|
| 22 |
+
except ImportError:
|
| 23 |
+
NETWORKX_AVAILABLE = False
|
| 24 |
+
nx = None
|
| 25 |
+
|
| 26 |
+
def load_kit_hierarchy():
    """Load kit hierarchy data from JSON file.

    Reads data/hierarchy_exports/kit_hierarchy.json relative to the
    working directory.  On a missing or malformed file, an error is
    shown in the Streamlit UI and an empty dict is returned so callers
    can degrade gracefully.
    """
    path = 'data/hierarchy_exports/kit_hierarchy.json'
    try:
        with open(path, 'r') as handle:
            return json.load(handle)
    except FileNotFoundError:
        st.error("Kit hierarchy file not found. Please ensure kit_hierarchy.json exists in data/hierarchy_exports/")
    except json.JSONDecodeError:
        st.error("Invalid kit hierarchy JSON format")
    return {}
+
|
| 38 |
+
def display_kit_relationships_dashboard(results):
    """Main dashboard showing kit relationships in production.

    Loads kit_hierarchy.json, determines the set of kits actually
    produced (preferring 'weekly_production' keys, falling back to
    'run_schedule' products), then renders four analysis tabs.

    Args:
        results: optimizer output dict.
    """
    st.header("π Kit Relationship Dashboard")
    st.markdown("Visualizing dependencies between kits being produced")
    st.markdown("---")

    # Load hierarchy data
    hierarchy_data = load_kit_hierarchy()

    if not hierarchy_data:
        st.warning("No kit hierarchy data available")
        return

    # Get produced kits from results
    produced_kits = set()
    if 'weekly_production' in results:
        produced_kits = set(results['weekly_production'].keys())
    elif 'run_schedule' in results:
        produced_kits = set(row['product'] for row in results['run_schedule'])

    if not produced_kits:
        st.warning("No production data available")
        return

    # Create tabs for different relationship views
    tab1, tab2, tab3, tab4 = st.tabs([
        "π Dependency Network",
        "π Relationship Matrix",
        "π― Production Flow",
        "β οΈ Dependency Analysis"
    ])

    with tab1:
        display_dependency_network(hierarchy_data, produced_kits, results)

    with tab2:
        display_relationship_matrix(hierarchy_data, produced_kits, results)

    with tab3:
        display_production_flow_relationships(hierarchy_data, produced_kits, results)

    with tab4:
        display_dependency_analysis(hierarchy_data, produced_kits, results)
+
|
| 82 |
+
def display_dependency_network(hierarchy_data, produced_kits, results):
    """Show interactive network graph of kit dependencies.

    Left column: the network figure (networkx layout when available,
    simple chart otherwise).  Right column: summary statistics.

    Args:
        hierarchy_data: mapping kit_id -> info dict (with 'dependencies').
        produced_kits: set of kit ids actually produced.
        results: optimizer output dict (for production timing).
    """
    st.subheader("π Kit Dependency Network")
    st.markdown("Interactive graph showing which kits depend on other kits")

    # Build relationship data for produced kits only
    relationships = build_relationship_data(hierarchy_data, produced_kits)

    if not relationships:
        st.info("No dependency relationships found between produced kits")
        return

    # Get production timing data
    production_timing = get_production_timing(results)

    # Create network visualization
    col1, col2 = st.columns([3, 1])

    with col1:
        if NETWORKX_AVAILABLE:
            fig = create_interactive_network_graph(relationships, production_timing)
            st.plotly_chart(fig, use_container_width=True)
        else:
            # Fallback rendering when networkx is not installed.
            fig = create_simple_dependency_chart(relationships, production_timing)
            st.plotly_chart(fig, use_container_width=True)
            st.info("π‘ Install networkx for advanced network layouts: `pip install networkx`")

    with col2:
        # Network statistics
        st.subheader("π Network Stats")

        all_kits = set()
        for rel in relationships:
            all_kits.add(rel['source'])
            all_kits.add(rel['target'])

        st.metric("Total Kits", len(all_kits))
        st.metric("Dependencies", len(relationships))

        # Dependency depth analysis
        # NOTE(review): calculate_dependency_depth / get_most_dependent_kits
        # are defined elsewhere in this module (outside this excerpt).
        max_depth = calculate_dependency_depth(relationships)
        st.metric("Max Dependency Depth", max_depth)

        # Most dependent kits
        dependent_kits = get_most_dependent_kits(relationships)
        st.subheader("π Most Dependencies")
        for kit, count in dependent_kits[:5]:
            st.write(f"**{kit}**: {count} dependencies")
+
|
| 131 |
+
def display_relationship_matrix(hierarchy_data, produced_kits, results):
    """Show dependency matrix heatmap.

    Renders a binary kit-by-kit matrix (1 = row kit depends on column
    kit) as a heatmap plus an expandable raw table.

    Args:
        hierarchy_data: mapping kit_id -> info dict.
        produced_kits: set of produced kit ids.
        results: unused here; kept for a uniform tab-renderer signature.
    """
    st.subheader("π Kit Dependency Matrix")
    st.markdown("Heatmap showing which kits (rows) depend on which other kits (columns)")

    # Build dependency matrix
    matrix_data = build_dependency_matrix(hierarchy_data, produced_kits)

    if matrix_data.empty:
        st.info("No dependency relationships to visualize in matrix form")
        return

    # Create heatmap
    fig = px.imshow(matrix_data.values,
                   x=matrix_data.columns,
                   y=matrix_data.index,
                   color_continuous_scale='Blues',
                   title='Kit Dependency Matrix (1 = depends on, 0 = no dependency)',
                   labels=dict(x="Dependency (what is needed)",
                              y="Kit (what depends on others)",
                              color="Dependency"))

    fig.update_layout(height=600)
    st.plotly_chart(fig, use_container_width=True)

    # Show matrix as table
    with st.expander("π View Dependency Matrix as Table"):
        st.dataframe(matrix_data, use_container_width=True)
+
|
| 160 |
+
def display_production_flow_relationships(hierarchy_data, produced_kits, results):
    """Show how relationships affect production timing.

    Renders a timeline with dependency arrows and a timing-analysis
    table.  Requires both production timing (from the run schedule) and
    at least one relationship between produced kits.

    Args:
        hierarchy_data: mapping kit_id -> info dict.
        produced_kits: set of produced kit ids.
        results: optimizer output dict with 'run_schedule'.
    """
    st.subheader("π― Production Flow with Relationships")
    st.markdown("Timeline showing when dependent kits are produced")

    # Get production timing and relationships
    production_timing = get_production_timing(results)
    relationships = build_relationship_data(hierarchy_data, produced_kits)

    if not production_timing or not relationships:
        st.info("Insufficient data for production flow analysis")
        return

    # Create timeline with dependency arrows
    # NOTE(review): create_production_timeline_with_dependencies and
    # analyze_dependency_timing are defined elsewhere in this module.
    fig = create_production_timeline_with_dependencies(production_timing, relationships)
    st.plotly_chart(fig, use_container_width=True)

    # Timing analysis table
    st.subheader("β° Dependency Timing Analysis")
    timing_analysis = analyze_dependency_timing(production_timing, relationships)

    if timing_analysis:
        df = pd.DataFrame(timing_analysis)
        st.dataframe(df, use_container_width=True)
+
|
| 185 |
+
def display_dependency_analysis(hierarchy_data, produced_kits, results):
    """Analyze dependency fulfillment and violations.

    Shows summary metrics, a violation scatter plot/table when any
    dependency was produced after the kit needing it, and generated
    recommendations.

    Args:
        hierarchy_data: mapping kit_id -> info dict.
        produced_kits: set of produced kit ids.
        results: optimizer output dict with 'run_schedule'.
    """
    st.subheader("β οΈ Dependency Analysis & Violations")

    production_timing = get_production_timing(results)
    relationships = build_relationship_data(hierarchy_data, produced_kits)

    # Analyze violations
    # NOTE(review): find_dependency_violations is defined elsewhere in this
    # module; its rows are assumed to carry 'kit', 'dependency', 'kit_day',
    # 'dependency_day', 'days_early' and 'severity' keys — used below.
    violations = find_dependency_violations(production_timing, relationships)

    # Summary metrics
    col1, col2, col3, col4 = st.columns(4)

    with col1:
        total_deps = len(relationships)
        st.metric("Total Dependencies", total_deps)

    with col2:
        violated_deps = len(violations)
        # Negative delta renders red in Streamlit, flagging violations.
        st.metric("Violations", violated_deps,
                 delta=f"-{violated_deps}" if violated_deps > 0 else None)

    with col3:
        if total_deps > 0:
            success_rate = ((total_deps - violated_deps) / total_deps) * 100
            st.metric("Success Rate", f"{success_rate:.1f}%")
        else:
            st.metric("Success Rate", "N/A")

    with col4:
        if violations:
            avg_violation = sum(v['days_early'] for v in violations) / len(violations)
            st.metric("Avg Days Early", f"{avg_violation:.1f}")
        else:
            st.metric("Avg Days Early", "0")

    # Violation details
    if violations:
        st.subheader("π¨ Dependency Violations")
        st.markdown("Cases where kits were produced before their dependencies")

        violation_df = pd.DataFrame(violations)

        # Violation severity chart
        fig = px.scatter(violation_df,
                        x='dependency_day', y='kit_day',
                        size='days_early', color='severity',
                        hover_data=['kit', 'dependency'],
                        title='Dependency Violations (Below diagonal = violation)',
                        labels={'dependency_day': 'When Dependency Was Made',
                               'kit_day': 'When Kit Was Made'})

        # Add diagonal line showing ideal timing
        max_day = max(violation_df['dependency_day'].max(), violation_df['kit_day'].max())
        fig.add_shape(type="line", x0=0, y0=0, x1=max_day, y1=max_day,
                     line=dict(dash="dash", color="green"),
                     name="Ideal Timeline")

        st.plotly_chart(fig, use_container_width=True)

        # Detailed violation table
        st.dataframe(violation_df[['kit', 'dependency', 'kit_day', 'dependency_day',
                                  'days_early', 'severity']], use_container_width=True)
    else:
        st.success("π No dependency violations found! All kits produced in correct order.")

    # Recommendations
    st.subheader("π‘ Recommendations")
    recommendations = generate_dependency_recommendations(violations, relationships, production_timing)
    for rec in recommendations:
        st.info(f"π‘ {rec}")
+
|
| 257 |
+
# Helper Functions
|
| 258 |
+
|
| 259 |
+
def build_relationship_data(hierarchy_data, produced_kits):
    """Build relationship data for visualization.

    Emits one edge dict per direct dependency where BOTH endpoints were
    produced: 'source' is the dependency (what is needed) and 'target'
    is the kit that needs it.

    Args:
        hierarchy_data: mapping kit_id -> info dict with optional
            'dependencies' list and 'type' string.
        produced_kits: set of produced kit ids.

    Returns:
        List of edge dicts with source/target ids and their kit types.
    """
    links = []
    for kit_id, info in hierarchy_data.items():
        if kit_id not in produced_kits:
            continue
        for needed in info.get('dependencies', []):
            # Only show relationships between produced kits.
            if needed not in produced_kits:
                continue
            links.append({
                'source': needed,       # Dependency (what's needed)
                'target': kit_id,       # Kit that depends on it
                'type': 'direct',
                'source_type': hierarchy_data.get(needed, {}).get('type', 'unknown'),
                'target_type': info.get('type', 'unknown'),
            })
    return links
+
|
| 281 |
+
def build_dependency_matrix(hierarchy_data, produced_kits):
    """Return a square 0/1 DataFrame where cell [kit, dep] == 1 iff kit needs dep.

    Rows and columns are the sorted produced kits; an empty DataFrame is
    returned when nothing was produced.
    """
    kits = sorted(produced_kits)
    if not kits:
        # Nothing produced -> nothing to plot on the heatmap.
        return pd.DataFrame()

    matrix = pd.DataFrame(0, index=kits, columns=kits)
    produced = set(kits)

    # Mark each produced dependency of each produced kit.
    for kit in kits:
        for dep in hierarchy_data.get(kit, {}).get('dependencies', []):
            if dep in produced:
                matrix.loc[kit, dep] = 1

    return matrix
|
| 301 |
+
|
| 302 |
+
def get_production_timing(results):
    """Map each kit to the earliest day it appears in the run schedule.

    A kit produced in several runs keeps only its minimum day; an empty
    dict is returned when the results carry no ``run_schedule``.
    """
    earliest = {}
    for run in results.get('run_schedule', []):
        kit, day = run['product'], run['day']
        # Keep the minimum production day seen so far for this kit.
        earliest[kit] = min(day, earliest.get(kit, day))
    return earliest
|
| 316 |
+
|
| 317 |
+
def create_interactive_network_graph(relationships, production_timing):
    """Create an interactive dependency network using a NetworkX spring layout.

    Nodes are kits (size grows with connection count; color encodes the
    earliest production day) and directed edges run from a dependency to the
    kit that consumes it. Falls back to the simple chart when NetworkX is
    unavailable, and returns an annotated empty figure when there are no
    relationships to draw.
    """
    if not NETWORKX_AVAILABLE:
        return create_simple_dependency_chart(relationships, production_timing)

    # Create NetworkX graph
    G = nx.DiGraph()

    # Add edges (relationships); duplicate nodes are deduplicated by the graph.
    for rel in relationships:
        G.add_edge(rel['source'], rel['target'], type=rel['type'])

    if len(G.nodes()) == 0:
        return go.Figure().add_annotation(
            text="No relationships to display",
            xref="paper", yref="paper", x=0.5, y=0.5, showarrow=False
        )

    # Calculate layout (spring layout; k/iterations chosen for readable spacing)
    pos = nx.spring_layout(G, k=3, iterations=50)

    # Create edge traces
    edge_x, edge_y = [], []
    edge_info = []

    for edge in G.edges():
        source, target = edge
        x0, y0 = pos[source]
        x1, y1 = pos[target]

        # None entries break the line between separate edges in one trace.
        edge_x.extend([x0, x1, None])
        edge_y.extend([y0, y1, None])

        # Add arrow annotation metadata at the edge midpoint.
        # NOTE(review): arrow glyph reconstructed from mojibake in the source
        # capture — confirm the original character.
        edge_info.append({
            'x': (x0 + x1) / 2,
            'y': (y0 + y1) / 2,
            'text': '→',
            'source': source,
            'target': target
        })

    edge_trace = go.Scatter(x=edge_x, y=edge_y,
                            line=dict(width=2, color='#888'),
                            hoverinfo='none',
                            mode='lines')

    # Create node traces
    node_x, node_y, node_text, node_color, node_size = [], [], [], [], []
    node_info = []

    for node in G.nodes():
        x, y = pos[node]
        node_x.append(x)
        node_y.append(y)

        # Node size based on number of connections (in + out degree).
        in_degree = G.in_degree(node)
        out_degree = G.out_degree(node)
        total_degree = in_degree + out_degree
        node_size.append(20 + total_degree * 5)

        # Color by production timing: day 1 / days 2-3 / later.
        # Kits missing from production_timing default to day 0 and fall into
        # the "early" bucket only if that day equals 1 — they render gold.
        prod_day = production_timing.get(node, 0)
        if prod_day == 1:
            node_color.append('#90EE90')  # Light green for early
        elif prod_day <= 3:
            node_color.append('#FFD700')  # Gold for middle
        else:
            node_color.append('#FF6347')  # Tomato for late

        # Node text (truncated label) and full hover info.
        short_name = node[:12] + "..." if len(node) > 12 else node
        node_text.append(short_name)

        node_info.append(f"{node}<br>Day: {prod_day}<br>In: {in_degree}, Out: {out_degree}")

    node_trace = go.Scatter(x=node_x, y=node_y,
                            mode='markers+text',
                            text=node_text,
                            textposition='middle center',
                            hovertext=node_info,
                            hoverinfo='text',
                            marker=dict(size=node_size,
                                        color=node_color,
                                        line=dict(width=2, color='black')))

    # Create figure with a legend-style annotation explaining the colors.
    fig = go.Figure(data=[edge_trace, node_trace],
                    layout=go.Layout(
                        title='Kit Dependency Network (Size=Connections, Color=Production Day)',
                        showlegend=False,
                        hovermode='closest',
                        margin=dict(b=20, l=5, r=5, t=40),
                        annotations=[
                            dict(text="Green=Early, Gold=Middle, Red=Late production",
                                 showarrow=False,
                                 xref="paper", yref="paper",
                                 x=0.005, y=-0.002,
                                 xanchor='left', yanchor='bottom',
                                 font=dict(size=12))
                        ],
                        xaxis=dict(showgrid=False, zeroline=False, showticklabels=False),
                        yaxis=dict(showgrid=False, zeroline=False, showticklabels=False)))

    return fig
|
| 423 |
+
|
| 424 |
+
def create_simple_dependency_chart(relationships, production_timing):
    """Create a simple dependency chart without NetworkX.

    Fallback visualization: kits are spaced along the x-axis by enumeration
    order and placed on the y-axis at their production day, with plain lines
    (no arrows) connecting each dependency to its consumer. Returns an
    annotated empty figure when there are no relationships.
    """
    if not relationships:
        return go.Figure().add_annotation(
            text="No dependencies to display",
            xref="paper", yref="paper", x=0.5, y=0.5, showarrow=False
        )

    # Create a simple directed graph visualization
    # Group kits by their role (sources, targets)
    sources = set(rel['source'] for rel in relationships)
    targets = set(rel['target'] for rel in relationships)

    # Create positions: x = arbitrary enumeration index, y = production day
    # (kits missing from production_timing sit at y=0).
    all_kits = list(sources | targets)
    positions = {kit: (i, production_timing.get(kit, 0)) for i, kit in enumerate(all_kits)}

    # Create traces; None entries separate individual line segments.
    edge_x, edge_y = [], []
    for rel in relationships:
        source_pos = positions[rel['source']]
        target_pos = positions[rel['target']]

        edge_x.extend([source_pos[0], target_pos[0], None])
        edge_y.extend([source_pos[1], target_pos[1], None])

    # Edge trace
    edge_trace = go.Scatter(x=edge_x, y=edge_y,
                            line=dict(width=2, color='#888'),
                            hoverinfo='none',
                            mode='lines')

    # Node trace: truncated labels on the marker, full names on hover.
    node_x = [positions[kit][0] for kit in all_kits]
    node_y = [positions[kit][1] for kit in all_kits]
    node_text = [kit[:10] + "..." if len(kit) > 10 else kit for kit in all_kits]

    node_trace = go.Scatter(x=node_x, y=node_y,
                            mode='markers+text',
                            text=node_text,
                            textposition='top center',
                            marker=dict(size=15, color='lightblue',
                                        line=dict(width=2, color='black')),
                            hovertext=all_kits,
                            hoverinfo='text')

    fig = go.Figure(data=[edge_trace, node_trace],
                    layout=go.Layout(
                        title='Kit Dependencies (Y-axis = Production Day)',
                        showlegend=False,
                        xaxis=dict(title='Kits'),
                        yaxis=dict(title='Production Day')))

    return fig
|
| 478 |
+
|
| 479 |
+
def create_production_timeline_with_dependencies(production_timing, relationships):
    """Create a timeline scatter of kits vs. day with dependency arrows.

    Each arrow runs from the dependency's day to the dependent kit's day;
    red arrows mark ordering violations (dependency produced later), green
    arrows mark correct ordering. Returns an empty figure when there is no
    timing data.
    """
    if not production_timing:
        return go.Figure()

    # Prepare data: one row per kit with its (earliest) production day.
    timeline_data = []
    for kit, day in production_timing.items():
        timeline_data.append({
            'Kit': kit,
            'Day': day,
            'Short_Name': kit[:15] + "..." if len(kit) > 15 else kit
        })

    df = pd.DataFrame(timeline_data)

    # Create scatter plot
    fig = px.scatter(df, x='Day', y='Kit',
                     hover_data=['Kit'],
                     title='Production Timeline with Dependencies')

    # Add dependency arrows
    for rel in relationships:
        source_day = production_timing.get(rel['source'], 0)
        target_day = production_timing.get(rel['target'], 0)

        # Add arrow only if both kits are in timeline (day 0 means missing).
        if source_day > 0 and target_day > 0:
            fig.add_annotation(
                x=target_day, y=rel['target'],
                ax=source_day, ay=rel['source'],
                arrowhead=2, arrowsize=1, arrowwidth=2,
                # Red when the dependency comes AFTER the kit (violation).
                arrowcolor="red" if source_day > target_day else "green"
            )

    # Scale the figure height with the number of kits so labels stay legible.
    fig.update_layout(height=max(400, len(df) * 20))
    return fig
|
| 516 |
+
|
| 517 |
+
def calculate_dependency_depth(relationships):
    """Return the length of the longest dependency chain.

    Builds a directed graph from the relationship edges and measures its
    longest path. Returns 0 when NetworkX is unavailable, when there are no
    relationships, or when the graph contains a cycle (no DAG longest path
    exists).
    """
    if not NETWORKX_AVAILABLE or not relationships:
        return 0

    G = nx.DiGraph()
    for rel in relationships:
        G.add_edge(rel['source'], rel['target'])

    try:
        return nx.dag_longest_path_length(G)
    except nx.NetworkXException:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt and
        # SystemExit. dag_longest_path_length raises NetworkXUnfeasible (a
        # NetworkXException) on cyclic graphs — that is the case we expect.
        return 0
|
| 530 |
+
|
| 531 |
+
def get_most_dependent_kits(relationships):
    """Rank kits by their number of incoming dependencies, most first."""
    counts = {}
    for rel in relationships:
        kit = rel['target']
        if kit in counts:
            counts[kit] += 1
        else:
            counts[kit] = 1
    # Descending by count; ties keep first-seen order (stable sort).
    return sorted(counts.items(), key=lambda kv: kv[1], reverse=True)
|
| 540 |
+
|
| 541 |
+
def find_dependency_violations(production_timing, relationships):
    """List kits that were scheduled before their dependencies were produced.

    A violation occurs when the dependency's production day is strictly
    later than the dependent kit's day; severity scales with the gap
    (>2 days = high, >1 = medium, else low). Pairs where either kit is
    missing from the schedule (day 0) are skipped.
    """
    violations = []
    for rel in relationships:
        dep, kit = rel['source'], rel['target']
        dep_day = production_timing.get(dep, 0)
        kit_day = production_timing.get(kit, 0)

        # Both kits must be scheduled, and the dependency must come later.
        if not (dep_day > 0 and kit_day > 0 and dep_day > kit_day):
            continue

        gap = dep_day - kit_day
        if gap > 2:
            severity = 'high'
        elif gap > 1:
            severity = 'medium'
        else:
            severity = 'low'

        violations.append({
            'kit': kit,
            'dependency': dep,
            'kit_day': kit_day,
            'dependency_day': dep_day,
            'days_early': gap,
            'severity': severity,
        })

    return violations
|
| 566 |
+
|
| 567 |
+
def analyze_dependency_timing(production_timing, relationships):
    """Tabulate the day gap between each kit and its dependency, worst first.

    Gap = kit day - dependency day; negative gaps are ordering violations.
    Pairs where either side is missing from the schedule are skipped, and
    the result is sorted ascending by gap so violations appear first.
    """
    rows = []
    for rel in relationships:
        dep, kit = rel['source'], rel['target']
        dep_day = production_timing.get(dep, 0)
        kit_day = production_timing.get(kit, 0)

        if dep_day <= 0 or kit_day <= 0:
            # Skip pairs that never made it onto the schedule.
            continue

        gap = kit_day - dep_day
        rows.append({
            'Kit': kit if len(kit) <= 20 else kit[:20] + "...",
            'Dependency': dep if len(dep) <= 20 else dep[:20] + "...",
            'Kit Day': kit_day,
            'Dep Day': dep_day,
            'Gap (Days)': gap,
            'Status': "✅ Correct" if gap >= 0 else "❌ Violation",
        })

    return sorted(rows, key=lambda row: row['Gap (Days)'])
|
| 592 |
+
|
| 593 |
+
def generate_dependency_recommendations(violations, relationships, production_timing):
    """Turn the dependency-violation list into human-readable scheduling advice.

    Returns a single congratulatory message when there are no violations;
    otherwise up to three messages: a high-severity summary, a
    medium-severity summary, and a call-out of the worst-offending kit.
    """
    if not violations:
        return ["Excellent! All dependencies are being fulfilled in the correct order."]

    recommendations = []

    # Group violations by severity.
    high = [v for v in violations if v['severity'] == 'high']
    medium = [v for v in violations if v['severity'] == 'medium']

    if high:
        recommendations.append(
            f"🚨 High Priority: {len(high)} critical dependency violations found. "
            "Consider rescheduling production to ensure dependencies are produced first."
        )

    if medium:
        recommendations.append(
            f"⚠️ Medium Priority: {len(medium)} moderate dependency timing issues. "
            "Review production sequence for optimization opportunities."
        )

    # Call out the single kit involved in the most violations.
    per_kit = {}
    for v in violations:
        per_kit[v['kit']] = per_kit.get(v['kit'], 0) + 1

    if per_kit:
        worst_kit, worst_count = max(per_kit.items(), key=lambda kv: kv[1])
        recommendations.append(
            f"🎯 Focus Area: Kit {worst_kit} has {worst_count} dependency issues. "
            "Consider moving its production later in the schedule."
        )

    return recommendations
|
test_hierarchy_viz.py
ADDED
|
@@ -0,0 +1,167 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
"""
|
| 3 |
+
Test script for hierarchy visualization
|
| 4 |
+
Run this to see a demo of the hierarchy dashboard
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import sys
|
| 8 |
+
import os
|
| 9 |
+
sys.path.append('src')
|
| 10 |
+
|
| 11 |
+
# Sample data for testing
|
| 12 |
+
def create_sample_results():
    """Build a small, hand-crafted optimization result for demo purposes.

    The schedule is laid out so prepacks (day 1) feed subkits (day 2) which
    feed master kits (day 3), demonstrating a clean hierarchy flow.
    """
    # One production run per (day, line, shift, product) combination.
    schedule = [
        # Day 1: prepacks first — they have no dependencies.
        {'day': 1, 'line_type_id': 6, 'line_idx': 1, 'shift': 1, 'product': 'PREPACK_A', 'run_hours': 4.0, 'units': 200},
        {'day': 1, 'line_type_id': 7, 'line_idx': 1, 'shift': 1, 'product': 'PREPACK_B', 'run_hours': 3.5, 'units': 150},
        # Day 2: subkits consume the prepacks.
        {'day': 2, 'line_type_id': 6, 'line_idx': 1, 'shift': 1, 'product': 'SUBKIT_X', 'run_hours': 5.0, 'units': 100},
        {'day': 2, 'line_type_id': 7, 'line_idx': 2, 'shift': 2, 'product': 'SUBKIT_Y', 'run_hours': 4.5, 'units': 80},
        # Day 3: master kits consume the subkits.
        {'day': 3, 'line_type_id': 6, 'line_idx': 2, 'shift': 1, 'product': 'MASTER_FINAL', 'run_hours': 6.0, 'units': 50},
        {'day': 3, 'line_type_id': 7, 'line_idx': 1, 'shift': 3, 'product': 'MASTER_DELUXE', 'run_hours': 5.5, 'units': 40},
    ]

    # Hierarchy level per kit: 0 = prepack, 1 = subkit, 2 = master.
    levels = {
        'PREPACK_A': 0,
        'PREPACK_B': 0,
        'SUBKIT_X': 1,
        'SUBKIT_Y': 1,
        'MASTER_FINAL': 2,
        'MASTER_DELUXE': 2,
    }

    # Which kits each product needs before it can be assembled.
    dependencies = {
        'SUBKIT_X': ['PREPACK_A'],
        'SUBKIT_Y': ['PREPACK_B'],
        'MASTER_FINAL': ['SUBKIT_X', 'PREPACK_A'],
        'MASTER_DELUXE': ['SUBKIT_Y', 'PREPACK_B'],
    }

    # Total units produced over the sample week.
    weekly_totals = {
        'PREPACK_A': 200,
        'PREPACK_B': 150,
        'SUBKIT_X': 100,
        'SUBKIT_Y': 80,
        'MASTER_FINAL': 50,
        'MASTER_DELUXE': 40,
    }

    # Daily workforce usage vs. capacity, split by employee type.
    workforce = [
        {'day': 1, 'emp_type': 'UNICEF Fixed term', 'used_person_hours': 16, 'cap_person_hours': 64},
        {'day': 1, 'emp_type': 'Humanizer', 'used_person_hours': 40, 'cap_person_hours': 80},
        {'day': 2, 'emp_type': 'UNICEF Fixed term', 'used_person_hours': 20, 'cap_person_hours': 64},
        {'day': 2, 'emp_type': 'Humanizer', 'used_person_hours': 45, 'cap_person_hours': 80},
        {'day': 3, 'emp_type': 'UNICEF Fixed term', 'used_person_hours': 18, 'cap_person_hours': 64},
        {'day': 3, 'emp_type': 'Humanizer', 'used_person_hours': 42, 'cap_person_hours': 80},
    ]

    return {
        'objective': 12500.75,  # total cost of the sample plan
        'run_schedule': schedule,
        'weekly_production': weekly_totals,
        'person_hours_by_day': workforce,
        'kit_levels': levels,
        'kit_dependencies': dependencies,
    }
|
| 76 |
+
|
| 77 |
+
def test_hierarchy_flow():
    """Smoke-test the hierarchy dashboard helpers against the sample data.

    Imports the dashboard functions, runs each preparation/calculation step
    on the output of ``create_sample_results``, prints progress, and returns
    True on success or False (with a traceback) on any exception.

    NOTE(review): emoji glyphs below are reconstructed from mojibake in the
    source capture — confirm against the original file.
    """
    print("🧪 Testing Hierarchy Visualization Components")
    print("=" * 50)

    # Create sample data (pure in-memory fixture, no I/O).
    results = create_sample_results()
    print(f"✅ Created sample results with {len(results['run_schedule'])} production runs")

    try:
        # Test imports — fails fast if the dashboard module is broken.
        from src.visualization.hierarchy_dashboard import (
            prepare_hierarchy_flow_data,
            prepare_hierarchy_analytics_data,
            calculate_hierarchy_line_utilization,
            get_hierarchy_level_summary
        )
        print("✅ Successfully imported hierarchy dashboard functions")

        # Test flow data preparation
        flow_data = prepare_hierarchy_flow_data(results)
        print(f"✅ Prepared flow data: {len(flow_data)} flow records")

        # Test analytics data
        analytics = prepare_hierarchy_analytics_data(results)
        print(f"✅ Prepared analytics data: {analytics['dependency_violations']} violations detected")

        # Test line utilization calculation
        line_util = calculate_hierarchy_line_utilization(results)
        print(f"✅ Calculated line utilization for {len(line_util)} lines")

        # Test hierarchy summary
        summary = get_hierarchy_level_summary(flow_data)
        print("✅ Generated hierarchy level summary:")
        for level, data in summary.items():
            print(f"  - {level.title()}: {data['count']} products, {data['total_units']} units")

        print("\n🎉 All hierarchy visualization components working correctly!")
        print("\nTo see the full visualization:")
        print("1. Run your Streamlit app: streamlit run app.py")
        print("2. Go to Settings page and run optimization")
        print("3. Check the '🏭 Hierarchy Flow' tab in results")

        return True

    except Exception as e:
        # Broad catch is intentional here: this is a manual smoke test and
        # any failure should be reported with a traceback, not crash.
        print(f"❌ Error testing hierarchy visualization: {e}")
        import traceback
        traceback.print_exc()
        return False
|
| 127 |
+
|
| 128 |
+
def display_sample_hierarchy_info():
    """Print a human-readable description of the sample kit hierarchy.

    Output-only helper: describes the three hierarchy levels and the
    day-by-day production flow used by ``create_sample_results``.

    NOTE(review): emoji glyphs reconstructed from mojibake in the source
    capture — confirm against the original file.
    """
    print("\n📋 Sample Hierarchy Structure:")
    print("=" * 30)

    print("🟢 PREPACKS (Level 0):")
    print("  - PREPACK_A: Basic components")
    print("  - PREPACK_B: Basic components")

    print("\n🟡 SUBKITS (Level 1):")
    print("  - SUBKIT_X: Uses PREPACK_A")
    print("  - SUBKIT_Y: Uses PREPACK_B")

    print("\n🔴 MASTERS (Level 2):")
    print("  - MASTER_FINAL: Uses SUBKIT_X + PREPACK_A")
    print("  - MASTER_DELUXE: Uses SUBKIT_Y + PREPACK_B")

    print("\n📅 Production Flow:")
    print("  Day 1: Produce prepacks first (dependencies)")
    print("  Day 2: Produce subkits (using prepacks)")
    print("  Day 3: Produce masters (using subkits)")

    print("\nThis demonstrates the optimal hierarchy flow!")
|
| 151 |
+
|
| 152 |
+
# Script entry point: print the sample hierarchy, then run the smoke test
# and report overall success/failure.
if __name__ == "__main__":
    print("🚀 Hierarchy Visualization Test")
    print("=" * 40)

    # Display sample info
    display_sample_hierarchy_info()

    # Test the components
    success = test_hierarchy_flow()

    if success:
        print(f"\n✅ Test completed successfully!")
    else:
        print(f"\n❌ Test failed - check error messages above")

    print("\n" + "=" * 40)
|
test_kit_relationships.py
ADDED
|
@@ -0,0 +1,162 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
"""
|
| 3 |
+
Test script for kit relationships visualization
|
| 4 |
+
Tests the actual kit dependency relationships from kit_hierarchy.json
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import sys
|
| 8 |
+
import os
|
| 9 |
+
sys.path.append('src')
|
| 10 |
+
|
| 11 |
+
def test_kit_relationships():
    """Smoke-test the kit-relationships module against the real hierarchy file.

    Loads ``kit_hierarchy.json`` via the module under test, prints a few
    sample relationships, builds relationship data for the first ten kits,
    and runs the violation analysis on synthetic timing. Returns True on
    success, False when the hierarchy file is missing/empty or any step
    raises.

    NOTE(review): emoji glyphs reconstructed from mojibake in the source
    capture — confirm against the original file.
    """
    print("🔗 Testing Kit Relationships Visualization")
    print("=" * 50)

    try:
        # Test importing the kit relationships module
        from src.visualization.kit_relationships import (
            load_kit_hierarchy,
            build_relationship_data,
            get_production_timing,
            find_dependency_violations
        )
        print("✅ Successfully imported kit relationships module")

        # Test loading hierarchy data
        hierarchy_data = load_kit_hierarchy()
        if hierarchy_data:
            print(f"✅ Loaded kit hierarchy: {len(hierarchy_data)} master kits")

            # Show some example relationships (first five kits with deps).
            print("\n📋 Sample Kit Relationships:")
            count = 0
            for kit_id, kit_info in hierarchy_data.items():
                if kit_info.get('dependencies') and count < 5:
                    deps = kit_info['dependencies']
                    kit_name = kit_info.get('name', kit_id)[:50] + "..." if len(kit_info.get('name', '')) > 50 else kit_info.get('name', kit_id)
                    print(f"  • {kit_id} ({kit_name})")
                    print(f"    Depends on: {deps}")
                    count += 1

            # Test with sample production data
            sample_produced_kits = set(list(hierarchy_data.keys())[:10])  # First 10 kits
            print(f"\n🧪 Testing with {len(sample_produced_kits)} sample produced kits")

            relationships = build_relationship_data(hierarchy_data, sample_produced_kits)
            print(f"✅ Found {len(relationships)} dependency relationships")

            if relationships:
                print("\n🔗 Sample Relationships:")
                for i, rel in enumerate(relationships[:5]):
                    print(f"  {i+1}. {rel['source']} → {rel['target']} ({rel['source_type']} → {rel['target_type']})")

            # Test production timing analysis with synthetic days 1-5.
            sample_timing = {kit: i % 5 + 1 for i, kit in enumerate(sample_produced_kits)}  # Random days 1-5
            violations = find_dependency_violations(sample_timing, relationships)
            print(f"✅ Dependency analysis: {len(violations)} violations found")

            print("\n🎉 Kit relationships visualization components working!")
            return True

        else:
            print("⚠️ No kit hierarchy data found - please check kit_hierarchy.json")
            return False

    except FileNotFoundError:
        # Specific, expected failure: the exported hierarchy file is absent.
        print("❌ Kit hierarchy file not found at data/hierarchy_exports/kit_hierarchy.json")
        return False
    except Exception as e:
        # Broad catch is intentional for a manual smoke test: report and
        # return False rather than crash.
        print(f"❌ Error testing kit relationships: {e}")
        import traceback
        traceback.print_exc()
        return False
|
| 74 |
+
|
| 75 |
+
def display_hierarchy_structure():
    """Print summary statistics about the kit hierarchy file.

    Counts kits by type (master/subkit/prepack), totals the dependency
    edges, reports the kit with the most dependencies, and prints up to
    three sample dependency chains. Output-only; any error is caught and
    printed.

    NOTE(review): emoji glyphs reconstructed from mojibake in the source
    capture — confirm against the original file.
    """
    print("\n📊 Kit Hierarchy Structure Analysis")
    print("=" * 40)

    try:
        from src.visualization.kit_relationships import load_kit_hierarchy
        hierarchy_data = load_kit_hierarchy()

        if not hierarchy_data:
            print("No hierarchy data available")
            return

        # Analyze hierarchy structure: bucket kit ids by declared type.
        masters = []
        subkits = []
        prepacks = []

        total_dependencies = 0

        for kit_id, kit_info in hierarchy_data.items():
            kit_type = kit_info.get('type', 'unknown')
            dependencies = kit_info.get('dependencies', [])
            total_dependencies += len(dependencies)

            if kit_type == 'master':
                masters.append(kit_id)
            elif kit_type == 'subkit':
                subkits.append(kit_id)
            elif kit_type == 'prepack':
                prepacks.append(kit_id)

        print(f"📦 Total Kits: {len(hierarchy_data)}")
        print(f"  • Masters: {len(masters)}")
        print(f"  • Subkits: {len(subkits)}")
        print(f"  • Prepacks: {len(prepacks)}")
        print(f"🔗 Total Dependencies: {total_dependencies}")

        # Find most complex kit (most dependencies)
        max_deps = 0
        most_complex = None

        for kit_id, kit_info in hierarchy_data.items():
            deps = len(kit_info.get('dependencies', []))
            if deps > max_deps:
                max_deps = deps
                most_complex = kit_id

        if most_complex:
            print(f"🏆 Most Complex Kit: {most_complex} ({max_deps} dependencies)")

        # Show dependency chains (first three kits that have dependencies).
        print(f"\n🔄 Sample Dependency Chains:")
        chains_shown = 0
        for kit_id, kit_info in hierarchy_data.items():
            if kit_info.get('dependencies') and chains_shown < 3:
                deps = kit_info['dependencies']
                kit_name = kit_info.get('name', kit_id)[:40] + "..." if len(kit_info.get('name', '')) > 40 else kit_info.get('name', kit_id)
                print(f"  Chain {chains_shown + 1}: {kit_name}")
                for dep in deps[:3]:  # Show first 3 dependencies
                    dep_info = hierarchy_data.get(dep, {})
                    dep_name = dep_info.get('name', dep)[:30] + "..." if len(dep_info.get('name', '')) > 30 else dep_info.get('name', dep)
                    print(f"    ↳ Needs: {dep_name}")
                chains_shown += 1

        print(f"\nThis data will be visualized in the dashboard! 🎨")

    except Exception as e:
        # Best-effort analysis: report the error instead of crashing the demo.
        print(f"Error analyzing hierarchy: {e}")
|
| 144 |
+
|
| 145 |
+
# Script entry point: show hierarchy statistics first, then run the smoke
# test and print follow-up instructions for viewing the visualization.
if __name__ == "__main__":
    # Display hierarchy structure
    display_hierarchy_structure()

    # Test kit relationships
    success = test_kit_relationships()

    if success:
        print(f"\n✅ Kit relationships test completed successfully!")
        print(f"\nTo see the visualization:")
        print(f"1. Run: streamlit run app.py")
        print(f"2. Go to Settings → Run Optimization")
        print(f"3. Check 'Hierarchy Flow' → 'Kit Relationships' tab")
        print(f"4. See the interactive network graph! 🕸️")
    else:
        print(f"\n❌ Test failed - check error messages above")

    print(f"\n" + "=" * 50)
|