HaLim commited on
Commit
709359a
Β·
1 Parent(s): e542954

demand filtering / visualization done.

Browse files
app.py CHANGED
@@ -45,12 +45,15 @@ elif page == "πŸ“Š Optimization Results":
45
 
46
  # Check if we have results in session state
47
  if 'optimization_results' in st.session_state and st.session_state.optimization_results:
 
48
  display_optimization_results(st.session_state.optimization_results)
 
49
  else:
50
  st.title("πŸ“Š Optimization Results")
51
  st.info("πŸ”„ No optimization results available yet.")
52
  st.markdown("Please run an optimization from the **βš™οΈ Settings** page first to see results here.")
53
 
 
54
  # Add helpful instructions
55
  st.markdown("### πŸ“‹ How to Get Results:")
56
  st.markdown("1. Go to **βš™οΈ Settings** page")
 
45
 
46
  # Check if we have results in session state
47
  if 'optimization_results' in st.session_state and st.session_state.optimization_results:
48
+
49
  display_optimization_results(st.session_state.optimization_results)
50
+
51
  else:
52
  st.title("πŸ“Š Optimization Results")
53
  st.info("πŸ”„ No optimization results available yet.")
54
  st.markdown("Please run an optimization from the **βš™οΈ Settings** page first to see results here.")
55
 
56
+
57
  # Add helpful instructions
58
  st.markdown("### πŸ“‹ How to Get Results:")
59
  st.markdown("1. Go to **βš™οΈ Settings** page")
config_page.py CHANGED
@@ -18,6 +18,8 @@ def render_config_page():
18
 
19
  st.title("βš™οΈ Settings")
20
  st.markdown("---")
 
 
21
  st.markdown("Adjust the settings for your workforce optimization. These settings control how the system schedules employees and calculates costs.")
22
 
23
  # Initialize session state for all configuration values
@@ -48,8 +50,28 @@ def render_config_page():
48
  if st.button("πŸ’Ύ Save Settings", type="primary", use_container_width=True):
49
  config = save_configuration()
50
  st.success("βœ… Settings saved successfully!")
51
- # Trigger demand validation after saving settings
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
52
  st.session_state.show_validation_after_save = True
 
 
 
 
53
 
54
  # Display settings summary at full width (outside columns)
55
  st.markdown("---")
@@ -58,27 +80,133 @@ def render_config_page():
58
  display_user_friendly_summary(st.session_state.optimization_config)
59
 
60
  # Show demand validation after saving settings
61
- if st.session_state.get('show_validation_after_save', False):
62
  st.markdown("---")
63
  st.header("πŸ“‹ Data Validation Results")
64
  st.markdown("Analyzing your demand data to identify potential optimization issues...")
65
 
66
- try:
67
- from src.demand_validation_viz import display_demand_validation
68
- display_demand_validation()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
69
 
70
  # Show validation reminder before optimization
71
  st.info("πŸ’‘ **Review validation results above before running optimization.** " +
72
  "Fix any critical issues (especially missing line assignments) to improve optimization success.")
73
-
74
- except Exception as e:
75
- st.error(f"❌ Error in demand validation: {str(e)}")
76
  st.info("πŸ’‘ You can still proceed with optimization, but data issues may cause problems.")
77
 
78
- # Reset the flag so validation doesn't show every time
79
- if st.button("βœ… Validation Reviewed - Continue to Optimization"):
80
- st.session_state.show_validation_after_save = False
81
- st.rerun()
 
 
 
 
 
 
82
 
83
  # Optimization section
84
  st.markdown("---")
@@ -109,6 +237,8 @@ def render_config_page():
109
  # Show status of current results
110
  if 'optimization_results' in st.session_state and st.session_state.optimization_results is not None:
111
  st.success("βœ… Results Available")
 
 
112
  else:
113
  st.info("⏳ No Results Yet")
114
 
@@ -293,7 +423,7 @@ def render_workforce_config():
293
  max_value=20,
294
  value=st.session_state.fixed_min_unicef_per_day,
295
  help="Minimum number of UNICEF Fixed term employees required every working day (constraint)"
296
- )
297
 
298
  # Working hours configuration
299
  st.subheader("⏰ Working Hours Configuration")
@@ -582,17 +712,21 @@ def save_configuration():
582
  calculated_days = (calculated_end_date - st.session_state.start_date).days + 1
583
  st.info(f"πŸ“Š Found demand data starting on {st.session_state.start_date} ending on {calculated_end_date} ({calculated_days} days, {len(demand_data)} orders)")
584
  else:
585
- calculated_end_date = st.session_state.start_date
586
- calculated_days = 1
 
587
  else:
588
- calculated_end_date = st.session_state.start_date
589
- calculated_days = 1
590
  st.warning(f"⚠️ No demand data found for start date {st.session_state.start_date}")
 
591
  except Exception as e:
592
  st.warning(f"Could not determine date range from data: {e}. Using default 5-day period.")
593
- from datetime import timedelta
594
- calculated_end_date = st.session_state.start_date + timedelta(days=4)
595
- calculated_days = 5
 
 
596
 
597
  # Store calculated values in session state for compatibility
598
  st.session_state.end_date = calculated_end_date
@@ -602,8 +736,8 @@ def save_configuration():
602
  config = {
603
  'date_range': {
604
  'start_date': st.session_state.start_date,
605
- 'end_date': calculated_end_date,
606
- 'planning_days': calculated_days,
607
  },
608
  'schedule_type': st.session_state.schedule_type,
609
  'evening_shift_mode': st.session_state.evening_shift_mode,
@@ -657,7 +791,7 @@ def save_configuration():
657
  }
658
 
659
  # Calculate date span for proper employee limits (use calculated planning_days)
660
- date_span_length = calculated_days
661
  date_span = list(range(1, date_span_length + 1))
662
 
663
  # Store individual items in session state for optimization_config.py to access
@@ -931,6 +1065,7 @@ def run_optimization():
931
  """Run the optimization model and store results"""
932
  try:
933
  st.info("πŸ”„ Running optimization... This may take a few moments.")
 
934
 
935
  # Always clear everything first for clean slate
936
  clear_all_cache_and_results()
@@ -1018,9 +1153,9 @@ def check_critical_data_issues():
1018
  warnings.append(f"{summary_stats['no_speed']} products missing production speed data")
1019
 
1020
  # Calculate failure risk
1021
- invalid_ratio = summary_stats['invalid_products'] / summary_stats['total_products']
1022
- if invalid_ratio > 0.5:
1023
- warnings.append(f"High failure risk: {invalid_ratio:.0%} of products have data issues")
1024
 
1025
  return warnings
1026
 
 
18
 
19
  st.title("βš™οΈ Settings")
20
  st.markdown("---")
21
+
22
+
23
  st.markdown("Adjust the settings for your workforce optimization. These settings control how the system schedules employees and calculates costs.")
24
 
25
  # Initialize session state for all configuration values
 
50
  if st.button("πŸ’Ύ Save Settings", type="primary", use_container_width=True):
51
  config = save_configuration()
52
  st.success("βœ… Settings saved successfully!")
53
+
54
+ # Clear any previous optimization results since settings changed
55
+ if 'optimization_results' in st.session_state:
56
+ del st.session_state.optimization_results
57
+
58
+ # Clear any previous validation state
59
+ if 'show_validation_after_save' in st.session_state:
60
+ del st.session_state.show_validation_after_save
61
+ if 'settings_just_saved' in st.session_state:
62
+ del st.session_state.settings_just_saved
63
+
64
+ # Clear any cached validation results since settings changed
65
+ validation_cache_key = f"validation_results_{st.session_state.get('start_date', 'default')}"
66
+ if validation_cache_key in st.session_state:
67
+ del st.session_state[validation_cache_key]
68
+
69
+ # Trigger fresh demand validation after saving settings
70
  st.session_state.show_validation_after_save = True
71
+ st.session_state.settings_just_saved = True
72
+
73
+ # Force a page refresh to show the updated validation
74
+ st.rerun()
75
 
76
  # Display settings summary at full width (outside columns)
77
  st.markdown("---")
 
80
  display_user_friendly_summary(st.session_state.optimization_config)
81
 
82
  # Show demand validation after saving settings
83
+ if st.session_state.get('show_validation_after_save', False) or st.session_state.get('settings_just_saved', False):
84
  st.markdown("---")
85
  st.header("πŸ“‹ Data Validation Results")
86
  st.markdown("Analyzing your demand data to identify potential optimization issues...")
87
 
88
+ # Check if we have cached validation results
89
+ validation_cache_key = f"validation_results_{st.session_state.get('start_date', 'default')}"
90
+
91
+ if validation_cache_key not in st.session_state:
92
+ # Run validation and cache results
93
+ with st.spinner("πŸ”„ Running data validation..."):
94
+ try:
95
+ from src.demand_validation_viz import DemandValidationViz
96
+
97
+ # Initialize validator and run validation
98
+ validator = DemandValidationViz()
99
+ print("validator",validator)
100
+ if validator.load_data():
101
+
102
+ validation_df = validator.validate_all_products()
103
+ print("validation_df", validation_df)
104
+ summary_stats = validator.get_summary_statistics(validation_df)
105
+ print("summary_stats", summary_stats)
106
+ # Cache the results
107
+ st.session_state[validation_cache_key] = {
108
+ 'validation_df': validation_df,
109
+ 'summary_stats': summary_stats,
110
+ 'validator': validator
111
+ }
112
+ else:
113
+ st.error("❌ Failed to load validation data")
114
+ st.session_state[validation_cache_key] = None
115
+
116
+ except Exception as e:
117
+ st.error(f"❌ Error in demand validation: {str(e)}")
118
+ st.session_state[validation_cache_key] = None
119
+
120
+ # Display cached validation results
121
+ if st.session_state.get(validation_cache_key):
122
+ # print("cached_results",st.session_state[validation_cache_key])
123
+ cached_results = st.session_state[validation_cache_key]
124
+
125
+ validation_df = cached_results['validation_df']
126
+ # print("validation_df",validation_df)
127
+ summary_stats = cached_results['summary_stats']
128
+ # print("summary_stats",summary_stats)
129
+ # Display summary statistics
130
+ st.subheader("πŸ“Š Summary Statistics")
131
+
132
+ col1, col2, col3, col4 = st.columns(4)
133
+
134
+ with col1:
135
+ st.metric("Total Products", summary_stats['total_products'])
136
+ st.metric("Included in Optimization", summary_stats['included_products'], delta="Ready for optimization")
137
+
138
+ with col2:
139
+ st.metric("Total Demand", f"{summary_stats['total_demand']:,}")
140
+ st.metric("Excluded from Optimization", summary_stats['excluded_products'], delta="Omitted")
141
+
142
+ with col3:
143
+ st.metric("Included Demand", f"{summary_stats['included_demand']:,}", delta="Will be optimized")
144
+ st.metric("UNICEF Staff Needed", summary_stats['total_unicef_needed'])
145
+
146
+ with col4:
147
+ st.metric("Excluded Demand", f"{summary_stats['excluded_demand']:,}", delta="Omitted")
148
+ st.metric("Humanizer Staff Needed", summary_stats['total_humanizer_needed'])
149
+
150
+ # Separate the results into included and excluded
151
+ included_df = validation_df[validation_df['Excluded from Optimization'] == False].copy()
152
+ excluded_df = validation_df[validation_df['Excluded from Optimization'] == True].copy()
153
+
154
+ # Products Included in Optimization
155
+ st.subheader("βœ… Products Included in Optimization")
156
+ st.write(f"**{len(included_df)} products** will be included in the optimization with total demand of **{included_df['Demand'].sum():,} units**")
157
+
158
+ if len(included_df) > 0:
159
+ # Configure column display for included
160
+ included_columns = ['Product ID', 'Demand', 'Product Type', 'Line Type', 'UNICEF Staff', 'Humanizer Staff', 'Production Speed (units/hour)', 'Validation Status']
161
+
162
+ st.dataframe(
163
+ included_df[included_columns],
164
+ use_container_width=True,
165
+ height=300
166
+ )
167
+ else:
168
+ st.warning("No products are included in optimization!")
169
+
170
+ # Products Excluded from Optimization
171
+ st.subheader("🚫 Products Excluded from Optimization")
172
+ st.write(f"**{len(excluded_df)} products** are excluded from optimization with total demand of **{excluded_df['Demand'].sum():,} units**")
173
+ st.info("These products are omitted from optimization due to missing line assignments or zero staffing requirements.")
174
+
175
+ if len(excluded_df) > 0:
176
+ # Show exclusion breakdown
177
+ exclusion_reasons = excluded_df['Exclusion Reasons'].value_counts()
178
+ st.write("**Exclusion reasons:**")
179
+ for reason, count in exclusion_reasons.items():
180
+ st.write(f"β€’ {reason}: {count} products")
181
+
182
+ # Configure column display for excluded
183
+ excluded_columns = ['Product ID', 'Demand', 'Product Type', 'Exclusion Reasons', 'UNICEF Staff', 'Humanizer Staff', 'Line Type']
184
+
185
+ st.dataframe(
186
+ excluded_df[excluded_columns],
187
+ use_container_width=True,
188
+ height=200
189
+ )
190
+ else:
191
+ st.info("No products are excluded from optimization.")
192
 
193
  # Show validation reminder before optimization
194
  st.info("πŸ’‘ **Review validation results above before running optimization.** " +
195
  "Fix any critical issues (especially missing line assignments) to improve optimization success.")
196
+ else:
197
+ st.error("❌ Validation failed to run properly")
 
198
  st.info("πŸ’‘ You can still proceed with optimization, but data issues may cause problems.")
199
 
200
+ # Reset the flags so validation doesn't show every time
201
+ col1, col2, col3 = st.columns([1, 1, 1])
202
+ with col2:
203
+ if st.button("βœ… Validation Reviewed - Continue to Optimization", use_container_width=True):
204
+ st.session_state.show_validation_after_save = False
205
+ st.session_state.settings_just_saved = False
206
+ # Clear validation cache to force fresh validation next time
207
+ if validation_cache_key in st.session_state:
208
+ del st.session_state[validation_cache_key]
209
+ st.rerun() # Refresh to hide validation section
210
 
211
  # Optimization section
212
  st.markdown("---")
 
237
  # Show status of current results
238
  if 'optimization_results' in st.session_state and st.session_state.optimization_results is not None:
239
  st.success("βœ… Results Available")
240
+ elif 'optimization_config' in st.session_state:
241
+ st.info("πŸ”„ Settings Saved - Ready to Optimize")
242
  else:
243
  st.info("⏳ No Results Yet")
244
 
 
423
  max_value=20,
424
  value=st.session_state.fixed_min_unicef_per_day,
425
  help="Minimum number of UNICEF Fixed term employees required every working day (constraint)"
426
+ )
427
 
428
  # Working hours configuration
429
  st.subheader("⏰ Working Hours Configuration")
 
712
  calculated_days = (calculated_end_date - st.session_state.start_date).days + 1
713
  st.info(f"πŸ“Š Found demand data starting on {st.session_state.start_date} ending on {calculated_end_date} ({calculated_days} days, {len(demand_data)} orders)")
714
  else:
715
+ raise Exception("No finish date found for start date")
716
+ # calculated_end_date = st.session_state.start_date
717
+ # calculated_days = 1
718
  else:
719
+ # calculated_end_date = st.session_state.start_date
720
+ # calculated_days = 1
721
  st.warning(f"⚠️ No demand data found for start date {st.session_state.start_date}")
722
+ raise Exception("No demand data found for start date")
723
  except Exception as e:
724
  st.warning(f"Could not determine date range from data: {e}. Using default 5-day period.")
725
+ raise Exception("Could not determine date range from data")
726
+
727
+ # from datetime import timedelta
728
+ # calculated_end_date = st.session_state.start_date + timedelta(days=4)
729
+ # calculated_days = 5
730
 
731
  # Store calculated values in session state for compatibility
732
  st.session_state.end_date = calculated_end_date
 
736
  config = {
737
  'date_range': {
738
  'start_date': st.session_state.start_date,
739
+ 'end_date': st.session_state.end_date,
740
+ 'planning_days': st.session_state.planning_days,
741
  },
742
  'schedule_type': st.session_state.schedule_type,
743
  'evening_shift_mode': st.session_state.evening_shift_mode,
 
791
  }
792
 
793
  # Calculate date span for proper employee limits (use calculated planning_days)
794
+ date_span_length = st.session_state.planning_days
795
  date_span = list(range(1, date_span_length + 1))
796
 
797
  # Store individual items in session state for optimization_config.py to access
 
1065
  """Run the optimization model and store results"""
1066
  try:
1067
  st.info("πŸ”„ Running optimization... This may take a few moments.")
1068
+
1069
 
1070
  # Always clear everything first for clean slate
1071
  clear_all_cache_and_results()
 
1153
  warnings.append(f"{summary_stats['no_speed']} products missing production speed data")
1154
 
1155
  # Calculate failure risk
1156
+ # invalid_ratio = summary_stats['invalid_products'] / summary_stats['total_products']
1157
+ # if invalid_ratio > 0.5:
1158
+ # warnings.append(f"High failure risk: {invalid_ratio:.0%} of products have data issues")
1159
 
1160
  return warnings
1161
 
optimization_results.py CHANGED
@@ -94,7 +94,8 @@ def display_weekly_summary(results):
94
  with col3:
95
  # Calculate fulfillment rate
96
  sys.path.append('src')
97
- from config.optimization_config import DEMAND_DICTIONARY
 
98
  total_demand = sum(DEMAND_DICTIONARY.values())
99
  fulfillment_rate = (total_production / total_demand * 100) if total_demand > 0 else 0
100
  st.metric("Fulfillment Rate", f"{fulfillment_rate:.1f}%")
@@ -109,7 +110,8 @@ def display_weekly_summary(results):
109
  # Production vs Demand Chart
110
  st.subheader("🎯 Production vs Demand")
111
 
112
- from config.optimization_config import DEMAND_DICTIONARY
 
113
  prod_demand_data = []
114
  for product, production in results['weekly_production'].items():
115
  demand = DEMAND_DICTIONARY.get(product, 0)
@@ -157,7 +159,8 @@ def display_daily_deep_dive(results):
157
  shift_name = {1: 'Regular', 2: 'Evening', 3: 'Overtime'}.get(row['shift'], f"Shift {row['shift']}")
158
 
159
  # Get team requirements for this production run
160
- from src.config.optimization_config import TEAM_REQ_PER_PRODUCT
 
161
 
162
  for emp_type in ['UNICEF Fixed term', 'Humanizer']:
163
  if row['product'] in TEAM_REQ_PER_PRODUCT.get(emp_type, {}):
@@ -263,7 +266,9 @@ def display_line_schedules(results):
263
  # Process schedule data
264
  schedule_data = []
265
  sys.path.append('src')
266
- from config.optimization_config import TEAM_REQ_PER_PRODUCT, shift_code_to_name, line_code_to_name, DEMAND_DICTIONARY
 
 
267
 
268
  # Get the mapping dictionaries
269
  shift_names = shift_code_to_name()
@@ -501,7 +506,8 @@ def display_kit_production(results):
501
  # Weekly production summary
502
  production_data = []
503
  sys.path.append('src')
504
- from config.optimization_config import DEMAND_DICTIONARY
 
505
 
506
  for product, production in results['weekly_production'].items():
507
  demand = DEMAND_DICTIONARY.get(product, 0)
@@ -536,7 +542,8 @@ def display_cost_analysis(results):
536
 
537
  # Calculate cost breakdown
538
  sys.path.append('src')
539
- from config.optimization_config import COST_LIST_PER_EMP_SHIFT, TEAM_REQ_PER_PRODUCT, shift_code_to_name, line_code_to_name
 
540
 
541
  # Get the mapping dictionaries
542
  shift_names = shift_code_to_name()
@@ -717,7 +724,8 @@ def display_input_data_inspection():
717
  shift_name = ShiftType.get_name(shift_id)
718
  st.write(f"β€’ {shift_name}: {hours} hours/shift")
719
 
720
- max_daily_hours = optimization_config.get_max_hour_per_person_per_day()
 
721
  st.write(f"β€’ Maximum daily hours per person: {max_daily_hours}")
722
 
723
  with st.expander("πŸ‘₯ **Workforce Parameters**", expanded=False):
@@ -774,7 +782,8 @@ def display_input_data_inspection():
774
 
775
  with col2:
776
  st.write("**Product-Line Matching:**")
777
- kit_line_match = optimization_config.get_kit_line_match_dict()
 
778
  st.write("*Sample mappings:*")
779
  sample_items = list(kit_line_match.items())[:10]
780
  for product, line_type in sample_items:
@@ -912,11 +921,13 @@ def display_input_data_inspection():
912
 
913
  with col1:
914
  st.write("**Schedule Mode:**")
915
- schedule_mode = optimization_config.get_daily_weekly_schedule()
 
916
  st.write(f"β€’ Planning mode: {schedule_mode}")
917
 
918
  st.write("**Evening Shift Mode:**")
919
- evening_mode = optimization_config.get_evening_shift_mode()
 
920
  evening_threshold = optimization_config.get_evening_shift_demand_threshold()
921
  st.write(f"β€’ Mode: {evening_mode}")
922
  st.write(f"β€’ Activation threshold: {evening_threshold:.1%}")
 
94
  with col3:
95
  # Calculate fulfillment rate
96
  sys.path.append('src')
97
+ from config.optimization_config import get_demand_dictionary
98
+ DEMAND_DICTIONARY = get_demand_dictionary()
99
  total_demand = sum(DEMAND_DICTIONARY.values())
100
  fulfillment_rate = (total_production / total_demand * 100) if total_demand > 0 else 0
101
  st.metric("Fulfillment Rate", f"{fulfillment_rate:.1f}%")
 
110
  # Production vs Demand Chart
111
  st.subheader("🎯 Production vs Demand")
112
 
113
+ from config.optimization_config import get_demand_dictionary
114
+ DEMAND_DICTIONARY = get_demand_dictionary()
115
  prod_demand_data = []
116
  for product, production in results['weekly_production'].items():
117
  demand = DEMAND_DICTIONARY.get(product, 0)
 
159
  shift_name = {1: 'Regular', 2: 'Evening', 3: 'Overtime'}.get(row['shift'], f"Shift {row['shift']}")
160
 
161
  # Get team requirements for this production run
162
+ from src.config.optimization_config import get_team_requirements
163
+ TEAM_REQ_PER_PRODUCT = get_team_requirements()
164
 
165
  for emp_type in ['UNICEF Fixed term', 'Humanizer']:
166
  if row['product'] in TEAM_REQ_PER_PRODUCT.get(emp_type, {}):
 
266
  # Process schedule data
267
  schedule_data = []
268
  sys.path.append('src')
269
+ from config.optimization_config import get_team_requirements, get_demand_dictionary, shift_code_to_name, line_code_to_name
270
+ TEAM_REQ_PER_PRODUCT = get_team_requirements()
271
+ DEMAND_DICTIONARY = get_demand_dictionary()
272
 
273
  # Get the mapping dictionaries
274
  shift_names = shift_code_to_name()
 
506
  # Weekly production summary
507
  production_data = []
508
  sys.path.append('src')
509
+ from config.optimization_config import get_demand_dictionary
510
+ DEMAND_DICTIONARY = get_demand_dictionary()
511
 
512
  for product, production in results['weekly_production'].items():
513
  demand = DEMAND_DICTIONARY.get(product, 0)
 
542
 
543
  # Calculate cost breakdown
544
  sys.path.append('src')
545
+ from config.optimization_config import COST_LIST_PER_EMP_SHIFT, get_team_requirements, shift_code_to_name, line_code_to_name
546
+ TEAM_REQ_PER_PRODUCT = get_team_requirements()
547
 
548
  # Get the mapping dictionaries
549
  shift_names = shift_code_to_name()
 
724
  shift_name = ShiftType.get_name(shift_id)
725
  st.write(f"β€’ {shift_name}: {hours} hours/shift")
726
 
727
+ from src.config.optimization_config import MAX_HOUR_PER_PERSON_PER_DAY
728
+ max_daily_hours = MAX_HOUR_PER_PERSON_PER_DAY
729
  st.write(f"β€’ Maximum daily hours per person: {max_daily_hours}")
730
 
731
  with st.expander("πŸ‘₯ **Workforce Parameters**", expanded=False):
 
782
 
783
  with col2:
784
  st.write("**Product-Line Matching:**")
785
+ from src.config.optimization_config import KIT_LINE_MATCH_DICT
786
+ kit_line_match = KIT_LINE_MATCH_DICT
787
  st.write("*Sample mappings:*")
788
  sample_items = list(kit_line_match.items())[:10]
789
  for product, line_type in sample_items:
 
921
 
922
  with col1:
923
  st.write("**Schedule Mode:**")
924
+ from src.config.optimization_config import DAILY_WEEKLY_SCHEDULE
925
+ schedule_mode = DAILY_WEEKLY_SCHEDULE
926
  st.write(f"β€’ Planning mode: {schedule_mode}")
927
 
928
  st.write("**Evening Shift Mode:**")
929
+ from src.config.optimization_config import EVENING_SHIFT_MODE
930
+ evening_mode = EVENING_SHIFT_MODE
931
  evening_threshold = optimization_config.get_evening_shift_demand_threshold()
932
  st.write(f"β€’ Mode: {evening_mode}")
933
  st.write(f"β€’ Activation threshold: {evening_threshold:.1%}")
src/config/optimization_config.py CHANGED
@@ -70,16 +70,32 @@ extract.set_global_dates(start_date, end_date)
70
  print(f"\nπŸ“… DATE RANGE: {start_date} to {end_date}")
71
  print(f"πŸ“ PRODUCT SOURCE: COOIS_Released_Prod_Orders.csv")
72
 
73
- # Get filtered products ready for optimization
74
- try:
75
- from src.demand_filtering import get_filtered_product_list
76
- PRODUCT_LIST = get_filtered_product_list()
77
- print(f"πŸ“¦ FILTERED PRODUCTS: {len(PRODUCT_LIST)} products ready for optimization")
78
- print(f"🎯 Products: {PRODUCT_LIST}")
79
- except ImportError:
80
- # Fallback to unfiltered list if demand_filtering not available
81
- PRODUCT_LIST = transformed_data.get_released_product_list(start_date)
82
- print(f"πŸ“¦ UNFILTERED PRODUCTS: {len(PRODUCT_LIST)} products -> {PRODUCT_LIST}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
83
 
84
 
85
  def get_employee_type_list():
@@ -239,33 +255,29 @@ def get_line_cnt_per_type():
239
  LINE_CNT_PER_TYPE = get_line_cnt_per_type()
240
  print("line cnt per type",LINE_CNT_PER_TYPE)
241
 
242
- def get_demand_dictionary():
243
- try:
244
- # Try to get from streamlit session state (from config page)
245
- import streamlit as st
246
- if hasattr(st, 'session_state') and 'demand_dictionary' in st.session_state:
247
- print(f"Using demand dictionary from config page: {len(st.session_state.demand_dictionary)} products")
248
- return st.session_state.demand_dictionary
249
- except Exception as e:
250
- print(f"Could not get demand dictionary from streamlit session: {e}")
251
-
252
- print(f"Loading demand values from data files")
253
-
254
- # Try to get filtered demand (only for products ready for optimization)
255
  try:
256
- from src.demand_filtering import get_filtered_demand_dictionary
257
- demand_dictionary = get_filtered_demand_dictionary()
258
- print(f"πŸ“ˆ FILTERED DEMAND: {len(demand_dictionary)} products with total demand {sum(demand_dictionary.values())}")
259
- return demand_dictionary
260
- except ImportError:
261
- # Fallback to unfiltered demand if demand_filtering not available
262
- demand_df = extract.read_orders_data(start_date=start_date)
263
- demand_dictionary = demand_df.groupby('Material Number')["Order quantity (GMEIN)"].sum().to_dict()
264
- print(f"πŸ“ˆ UNFILTERED DEMAND: {len(demand_dictionary)} products with total demand {sum(demand_dictionary.values())}")
 
265
  return demand_dictionary
 
 
 
266
 
267
- DEMAND_DICTIONARY = get_demand_dictionary()
268
- print(f"🎯 FINAL DEMAND: {DEMAND_DICTIONARY}")
269
 
270
  def get_cost_list_per_emp_shift():
271
  try:
@@ -304,11 +316,14 @@ COST_LIST_PER_EMP_SHIFT = get_cost_list_per_emp_shift()
304
 
305
 
306
 
307
- def get_team_requirements(PRODUCT_LIST):
308
  """
309
  Extract team requirements from Kits Calculation CSV.
310
  Returns dictionary with employee type as key and product requirements as nested dict.
311
  """
 
 
 
312
  try:
313
  # Check if streamlit has this data (for future extension)
314
  # streamlit_team_req = dashboard.team_requirements
@@ -330,7 +345,7 @@ def get_team_requirements(PRODUCT_LIST):
330
  }
331
 
332
  # Process each product in the product list
333
- for product in PRODUCT_LIST:
334
  print("product",product)
335
  print(f"Processing team requirements for product: {product}")
336
  product_data = kits_df[kits_df['Kit'] == product]
@@ -349,8 +364,8 @@ def get_team_requirements(PRODUCT_LIST):
349
 
350
  return team_req_dict
351
 
352
- TEAM_REQ_PER_PRODUCT = get_team_requirements(PRODUCT_LIST)
353
- print("team requirements per product:", TEAM_REQ_PER_PRODUCT)
354
 
355
 
356
  def get_max_employee_per_type_on_day():
@@ -380,7 +395,52 @@ print("max employee per type on day",MAX_EMPLOYEE_PER_TYPE_ON_DAY)
380
  # available employee but for fixed in shift 1, it is mandatory employment
381
 
382
  MAX_HOUR_PER_PERSON_PER_DAY = 14 # legal standard
383
- MAX_HOUR_PER_SHIFT_PER_PERSON = DefaultConfig.MAX_HOUR_PER_SHIFT_PER_PERSON
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
384
  def get_per_product_speed():
385
  try:
386
  # Try to get from streamlit session state (from config page)
@@ -419,7 +479,19 @@ def get_kit_hierarchy_data():
419
  KIT_LEVELS, KIT_DEPENDENCIES, PRODUCTION_PRIORITY_ORDER = get_kit_hierarchy_data()
420
  print(f"Kit Hierarchy loaded: {len(KIT_LEVELS)} kits, Priority order: {len(PRODUCTION_PRIORITY_ORDER)} items")
421
 
422
- MAX_PARALLEL_WORKERS = DefaultConfig.MAX_PARALLEL_WORKERS
 
 
 
 
 
 
 
 
 
 
 
 
423
  # maximum number of workers that can work on a line at the same time
424
 
425
  DAILY_WEEKLY_SCHEDULE = "daily" # daily or weekly; this needs to be implemented in "if F_x1_day is not None... F_x1_week is not None..."; also need to change x1 to Fixedstaff_first_shift
 
70
  print(f"\nπŸ“… DATE RANGE: {start_date} to {end_date}")
71
  print(f"πŸ“ PRODUCT SOURCE: COOIS_Released_Prod_Orders.csv")
72
 
73
+ def get_product_list():
74
+ """
75
+ Get filtered product list.
76
+ IMPORTANT: This dynamically loads data to reflect current Streamlit configs/dates.
77
+ """
78
+ try:
79
+ # Always get fresh filtered products to reflect current configs
80
+ from src.demand_filtering import get_shared_filter_instance
81
+ filter_instance = get_shared_filter_instance()
82
+
83
+ # Force reload data to pick up new dates/configs
84
+ filter_instance.load_data(force_reload=True)
85
+
86
+ product_list = filter_instance.get_filtered_product_list()
87
+ print(f"πŸ“¦ FRESH FILTERED PRODUCTS: {len(product_list)} products ready for optimization")
88
+ print(f"🎯 Products: {product_list}")
89
+ return product_list
90
+ except Exception as e:
91
+ print(f"Error loading dynamic product list: {e}")
92
+ # Fallback to unfiltered list
93
+ product_list = transformed_data.get_released_product_list(start_date)
94
+ print(f"πŸ“¦ FALLBACK UNFILTERED PRODUCTS: {len(product_list)} products -> {product_list}")
95
+ return product_list
96
+
97
+ # DO NOT load at import time - always call get_product_list() dynamically
98
+ # PRODUCT_LIST = get_product_list() # REMOVED - was causing stale data!
99
 
100
 
101
  def get_employee_type_list():
 
255
  LINE_CNT_PER_TYPE = get_line_cnt_per_type()
256
  print("line cnt per type",LINE_CNT_PER_TYPE)
257
 
258
+ def get_demand_dictionary(force_reload=False):
259
+ """
260
+ Get filtered demand dictionary.
261
+ IMPORTANT: This dynamically loads data to reflect current Streamlit configs/dates.
262
+ """
 
 
 
 
 
 
 
 
263
  try:
264
+ # Always get fresh filtered demand to reflect current configs
265
+ from src.demand_filtering import get_shared_filter_instance
266
+ filter_instance = get_shared_filter_instance()
267
+
268
+ # Force reload data to pick up new dates/configs
269
+ filter_instance.load_data(force_reload=True)
270
+
271
+ demand_dictionary = filter_instance.get_filtered_demand_dictionary()
272
+ print(f"πŸ“ˆ FRESH FILTERED DEMAND: {len(demand_dictionary)} products with total demand {sum(demand_dictionary.values())}")
273
+ print(f"πŸ”„ LOADED DYNAMICALLY: Reflects current Streamlit configs")
274
  return demand_dictionary
275
+ except Exception as e:
276
+ print(f"Error loading dynamic demand dictionary: {e}")
277
+ raise Exception("Demand dictionary not found with error:"+str(e))
278
 
279
+ # DO NOT load at import time - always call get_demand_dictionary() dynamically
280
+ # DEMAND_DICTIONARY = get_demand_dictionary() # REMOVED - was causing stale data!
281
 
282
  def get_cost_list_per_emp_shift():
283
  try:
 
316
 
317
 
318
 
319
+ def get_team_requirements(product_list=None):
320
  """
321
  Extract team requirements from Kits Calculation CSV.
322
  Returns dictionary with employee type as key and product requirements as nested dict.
323
  """
324
+ if product_list is None:
325
+ product_list = get_product_list() # Get fresh product list
326
+
327
  try:
328
  # Check if streamlit has this data (for future extension)
329
  # streamlit_team_req = dashboard.team_requirements
 
345
  }
346
 
347
  # Process each product in the product list
348
+ for product in product_list:
349
  print("product",product)
350
  print(f"Processing team requirements for product: {product}")
351
  product_data = kits_df[kits_df['Kit'] == product]
 
364
 
365
  return team_req_dict
366
 
367
+ # DO NOT load at import time - always call get_team_requirements() dynamically
368
+ # TEAM_REQ_PER_PRODUCT = get_team_requirements(PRODUCT_LIST) # REMOVED - was causing stale data!
369
 
370
 
371
  def get_max_employee_per_type_on_day():
 
395
  # available employee but for fixed in shift 1, it is mandatory employment
396
 
397
  MAX_HOUR_PER_PERSON_PER_DAY = 14 # legal standard
398
+ def get_max_hour_per_shift_per_person():
399
+ """Get max hours per shift per person - checks Streamlit session state first"""
400
+ try:
401
+ import streamlit as st
402
+ if hasattr(st, 'session_state') and 'max_hour_per_shift_per_person' in st.session_state:
403
+ return st.session_state.max_hour_per_shift_per_person
404
+ except Exception as e:
405
+ print(f"Could not get max hours per shift from session: {e}")
406
+
407
+ # Fallback to default only if not configured by user
408
+ return DefaultConfig.MAX_HOUR_PER_SHIFT_PER_PERSON
409
+
410
+ MAX_HOUR_PER_SHIFT_PER_PERSON = get_max_hour_per_shift_per_person()
411
+
412
+ # Removed unnecessary getter functions - use direct imports instead:
413
+ # - MAX_HOUR_PER_PERSON_PER_DAY
414
+ # - MAX_HOUR_PER_SHIFT_PER_PERSON
415
+ # - KIT_LINE_MATCH_DICT
416
+ # - MAX_PARALLEL_WORKERS
417
+ # - DAILY_WEEKLY_SCHEDULE
418
+ # - EVENING_SHIFT_MODE
419
+
420
+ # Keep these complex getters that access DefaultConfig or have complex logic:
421
+ def get_evening_shift_demand_threshold():
422
+ """Get evening shift demand threshold - checks Streamlit session state first"""
423
+ try:
424
+ import streamlit as st
425
+ if hasattr(st, 'session_state') and 'evening_shift_demand_threshold' in st.session_state:
426
+ return st.session_state.evening_shift_demand_threshold
427
+ except Exception as e:
428
+ print(f"Could not get evening shift threshold from session: {e}")
429
+
430
+ # Fallback to default only if not configured by user
431
+ return getattr(DefaultConfig, 'EVENING_SHIFT_DEMAND_THRESHOLD', 10000)
432
+
433
+ def get_fixed_min_unicef_per_day():
434
+ """Get fixed minimum UNICEF staff per day - checks Streamlit session state first"""
435
+ try:
436
+ import streamlit as st
437
+ if hasattr(st, 'session_state') and 'fixed_min_unicef_per_day' in st.session_state:
438
+ return st.session_state.fixed_min_unicef_per_day
439
+ except Exception as e:
440
+ print(f"Could not get fixed min UNICEF from session: {e}")
441
+
442
+ # Fallback to default only if not configured by user
443
+ return getattr(DefaultConfig, 'FIXED_MIN_UNICEF_PER_DAY', {1: 1, 2: 1, 3: 1, 4: 1, 5: 1})
444
  def get_per_product_speed():
445
  try:
446
  # Try to get from streamlit session state (from config page)
 
479
  KIT_LEVELS, KIT_DEPENDENCIES, PRODUCTION_PRIORITY_ORDER = get_kit_hierarchy_data()
480
  print(f"Kit Hierarchy loaded: {len(KIT_LEVELS)} kits, Priority order: {len(PRODUCTION_PRIORITY_ORDER)} items")
481
 
482
+ def get_max_parallel_workers():
483
+ """Get max parallel workers - checks Streamlit session state first"""
484
+ try:
485
+ import streamlit as st
486
+ if hasattr(st, 'session_state') and 'max_parallel_workers' in st.session_state:
487
+ return st.session_state.max_parallel_workers
488
+ except Exception as e:
489
+ print(f"Could not get max parallel workers from session: {e}")
490
+
491
+ # Fallback to default only if not configured by user
492
+ return DefaultConfig.MAX_PARALLEL_WORKERS
493
+
494
+ MAX_PARALLEL_WORKERS = get_max_parallel_workers()
495
  # maximum number of workers that can work on a line at the same time
496
 
497
  DAILY_WEEKLY_SCHEDULE = "daily" # daily or weekly; this needs to be implemented in "if F_x1_day is not None... F_x1_week is not None..."; also need to change x1 to Fixedstaff_first_shift
src/demand_filtering.py CHANGED
@@ -26,12 +26,19 @@ class DemandFilter:
26
  self.line_assignments = None
27
  self.team_requirements = None
28
 
29
- def load_data(self):
30
  """Load all necessary data for filtering"""
31
  try:
32
- # Get start date for demand data
33
- from datetime import datetime
34
- start_date = datetime(2025, 7, 7) # Default date, can be made configurable
 
 
 
 
 
 
 
35
 
36
  # Load demand data directly from extract
37
  demand_df = extract.read_orders_data(start_date=start_date)
@@ -204,6 +211,24 @@ class DemandFilter:
204
  for reason, count in reason_counts.items():
205
  print(f" β€’ {reason}: {count} products")
206
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
207
  return included_products, included_demand, excluded_products, excluded_demand
208
 
209
  def get_filtered_product_list(self) -> List[str]:
@@ -223,6 +248,14 @@ class DemandFilter:
223
  all_products = {**included_demand, **excluded_demand}
224
  product_details = {}
225
 
 
 
 
 
 
 
 
 
226
  for product_id, demand in all_products.items():
227
  product_type, is_standalone_master = self.classify_product_type(product_id)
228
  is_ready, exclusion_reasons = self.is_product_ready_for_optimization(product_id)
@@ -234,6 +267,9 @@ class DemandFilter:
234
  # Get line assignment
235
  line_assignment = self.line_assignments.get(product_id)
236
 
 
 
 
237
  product_details[product_id] = {
238
  'demand': demand,
239
  'product_type': product_type,
@@ -246,9 +282,14 @@ class DemandFilter:
246
  'line_assignment': line_assignment,
247
  'has_line_assignment': line_assignment is not None,
248
  'has_staffing': (unicef_staff + humanizer_staff) > 0,
249
- 'has_hierarchy': product_type != "unclassified"
 
250
  }
251
 
 
 
 
 
252
  return {
253
  'included_count': len(included_products),
254
  'included_demand': sum(included_demand.values()),
@@ -259,7 +300,10 @@ class DemandFilter:
259
  'product_details': product_details,
260
  'standalone_masters_count': sum(1 for p in product_details.values() if p['is_standalone_master']),
261
  'included_products': included_products,
262
- 'excluded_products': excluded_products
 
 
 
263
  }
264
 
265
  def get_exclusion_summary(self) -> Dict:
@@ -302,6 +346,22 @@ def get_exclusion_summary() -> Dict:
302
  return filter_instance.get_exclusion_summary()
303
 
304
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
305
  if __name__ == "__main__":
306
  # Test the filtering
307
  filter_instance = DemandFilter()
 
26
  self.line_assignments = None
27
  self.team_requirements = None
28
 
29
+ def load_data(self, force_reload=False):
30
  """Load all necessary data for filtering"""
31
  try:
32
+ # Skip loading if data already exists and not forcing reload
33
+ if not force_reload and self.demand_data is not None:
34
+ print("πŸ“Š Using cached filter data (set force_reload=True to refresh)")
35
+ return True
36
+
37
+ print("πŸ”„ Loading fresh filtering data...")
38
+ # Get start date for demand data from optimization config
39
+ from src.config.optimization_config import get_date_span
40
+ date_span, start_date, end_date = get_date_span()
41
+ print(f"πŸ—“οΈ DEMAND FILTERING DATE: Using {start_date.date() if start_date else 'None'} (same as optimization)")
42
 
43
  # Load demand data directly from extract
44
  demand_df = extract.read_orders_data(start_date=start_date)
 
211
  for reason, count in reason_counts.items():
212
  print(f" β€’ {reason}: {count} products")
213
 
214
+ # Print data quality warnings for included products (without recursion)
215
+ # Load speed data for validation
216
+ speed_data = None
217
+ try:
218
+ from src.config import optimization_config
219
+ speed_data = optimization_config.PER_PRODUCT_SPEED
220
+ except Exception as e:
221
+ print(f"Warning: Could not load speed data for validation: {e}")
222
+
223
+ if speed_data:
224
+ included_without_speed = sum(1 for pid in included_products if pid not in speed_data)
225
+ if included_without_speed > 0:
226
+ print(f"\n⚠️ DATA QUALITY WARNING: {included_without_speed} included products missing speed data (will use default 106.7 units/hour)")
227
+
228
+ included_without_hierarchy = sum(1 for pid in included_products if self.classify_product_type(pid)[0] == "unclassified")
229
+ if included_without_hierarchy > 0:
230
+ print(f"⚠️ DATA QUALITY WARNING: {included_without_hierarchy} included products missing hierarchy data")
231
+
232
  return included_products, included_demand, excluded_products, excluded_demand
233
 
234
  def get_filtered_product_list(self) -> List[str]:
 
248
  all_products = {**included_demand, **excluded_demand}
249
  product_details = {}
250
 
251
+ # Load speed data for additional validation
252
+ speed_data = None
253
+ try:
254
+ from src.config import optimization_config
255
+ speed_data = optimization_config.PER_PRODUCT_SPEED
256
+ except Exception as e:
257
+ print(f"Warning: Could not load speed data for analysis: {e}")
258
+
259
  for product_id, demand in all_products.items():
260
  product_type, is_standalone_master = self.classify_product_type(product_id)
261
  is_ready, exclusion_reasons = self.is_product_ready_for_optimization(product_id)
 
267
  # Get line assignment
268
  line_assignment = self.line_assignments.get(product_id)
269
 
270
+ # Get production speed info
271
+ has_speed_data = speed_data is not None and product_id in speed_data
272
+
273
  product_details[product_id] = {
274
  'demand': demand,
275
  'product_type': product_type,
 
282
  'line_assignment': line_assignment,
283
  'has_line_assignment': line_assignment is not None,
284
  'has_staffing': (unicef_staff + humanizer_staff) > 0,
285
+ 'has_hierarchy': product_type != "unclassified",
286
+ 'has_speed_data': has_speed_data
287
  }
288
 
289
+ # Calculate data quality statistics for included products
290
+ included_without_speed = sum(1 for pid in included_products if not product_details[pid]['has_speed_data'])
291
+ included_without_hierarchy = sum(1 for pid in included_products if not product_details[pid]['has_hierarchy'])
292
+
293
  return {
294
  'included_count': len(included_products),
295
  'included_demand': sum(included_demand.values()),
 
300
  'product_details': product_details,
301
  'standalone_masters_count': sum(1 for p in product_details.values() if p['is_standalone_master']),
302
  'included_products': included_products,
303
+ 'excluded_products': excluded_products,
304
+ # Data quality metrics for included products
305
+ 'included_missing_speed_count': included_without_speed,
306
+ 'included_missing_hierarchy_count': included_without_hierarchy
307
  }
308
 
309
  def get_exclusion_summary(self) -> Dict:
 
346
  return filter_instance.get_exclusion_summary()
347
 
348
 
349
+ def get_complete_analysis() -> Dict:
350
+ """Get complete product analysis including data quality metrics"""
351
+ filter_instance = DemandFilter()
352
+ return filter_instance.get_complete_product_analysis()
353
+
354
+
355
+ # Singleton instance for consistency across modules
356
+ _SHARED_FILTER_INSTANCE = None
357
+
358
+ def get_shared_filter_instance():
359
+ """Returns a shared singleton instance of DemandFilter."""
360
+ global _SHARED_FILTER_INSTANCE
361
+ if _SHARED_FILTER_INSTANCE is None:
362
+ _SHARED_FILTER_INSTANCE = DemandFilter()
363
+ return _SHARED_FILTER_INSTANCE
364
+
365
  if __name__ == "__main__":
366
  # Test the filtering
367
  filter_instance = DemandFilter()
src/demand_validation.py DELETED
@@ -1,435 +0,0 @@
1
- #!/usr/bin/env python3
2
- """
3
- Demand Data Validation Module
4
-
5
- This module provides comprehensive validation and analysis of demand data,
6
- including product classification, hierarchy analysis, line assignments,
7
- and staffing requirements.
8
- """
9
-
10
- import pandas as pd
11
- import streamlit as st
12
- from typing import Dict, List, Tuple, Optional
13
- import json
14
- from src.config.constants import ShiftType, LineType, KitLevel
15
-
16
-
17
- class DemandValidator:
18
- """
19
- Validates and analyzes demand data for optimization readiness
20
- """
21
-
22
- def __init__(self):
23
- self.demand_data = None
24
- self.hierarchy_data = None
25
- self.team_requirements = None
26
- self.line_assignments = None
27
- self.speed_data = None
28
- self.validation_results = {}
29
-
30
- def load_data(self):
31
- """Load all necessary data for validation"""
32
- try:
33
- # Load demand data
34
- from src.config import optimization_config
35
- self.demand_data = optimization_config.get_demand_dictionary()
36
-
37
- # Load hierarchy data
38
- try:
39
- with open('data/hierarchy_exports/kit_hierarchy.json', 'r') as f:
40
- self.hierarchy_data = json.load(f)
41
- except FileNotFoundError:
42
- print("Warning: Kit hierarchy file not found. Some analysis will be limited.")
43
- self.hierarchy_data = {}
44
-
45
- # Load team requirements
46
- self.team_requirements = optimization_config.TEAM_REQ_PER_PRODUCT
47
-
48
- # Load line assignments
49
- self.line_assignments = optimization_config.KIT_LINE_MATCH_DICT
50
-
51
- # Load speed data
52
- self.speed_data = optimization_config.PER_PRODUCT_SPEED
53
-
54
- # Load kit levels and dependencies
55
- self.kit_levels = optimization_config.KIT_LEVELS
56
- self.kit_dependencies = optimization_config.KIT_DEPENDENCIES
57
-
58
- return True
59
-
60
- except Exception as e:
61
- error_msg = f"Error loading data: {str(e)}"
62
- print(error_msg)
63
- # Only use st.error if we're in a Streamlit context
64
- try:
65
- st.error(error_msg)
66
- except:
67
- pass
68
- return False
69
-
70
- def classify_product_type(self, product_id: str) -> Tuple[str, str]:
71
- """
72
- Classify product as master/subkit/prepack
73
- Returns: (type, level_name)
74
- """
75
- if product_id in self.kit_levels:
76
- level = self.kit_levels[product_id]
77
- level_name = KitLevel.get_name(level)
78
-
79
- if level == 0:
80
- return "prepack", level_name
81
- elif level == 1:
82
- return "subkit", level_name
83
- elif level == 2:
84
- return "master", level_name
85
- else:
86
- return "unknown", f"level_{level}"
87
- else:
88
- return "unclassified", "no_hierarchy_data"
89
-
90
- def is_standalone_master(self, product_id: str) -> bool:
91
- """
92
- Check if this is a standalone master (master with no subkits/prepacks)
93
- """
94
- product_type, _ = self.classify_product_type(product_id)
95
-
96
- if product_type != "master":
97
- return False
98
-
99
- # Check if this master has any dependencies
100
- dependencies = self.kit_dependencies.get(product_id, [])
101
- return len(dependencies) == 0
102
-
103
- def get_line_assignment(self, product_id: str) -> Tuple[Optional[int], str]:
104
- """
105
- Get line assignment for product
106
- Returns: (line_type_id, line_name)
107
- """
108
- if product_id in self.line_assignments:
109
- line_type = self.line_assignments[product_id]
110
- line_name = LineType.get_name(line_type)
111
- return line_type, line_name
112
- else:
113
- return None, "no_assignment"
114
-
115
- def get_staffing_requirements(self, product_id: str) -> Dict[str, int]:
116
- """
117
- Get staffing requirements for product
118
- Returns: {employee_type: count}
119
- """
120
- staffing = {}
121
-
122
- for emp_type, products in self.team_requirements.items():
123
- staffing[emp_type] = products.get(product_id, 0)
124
-
125
- return staffing
126
-
127
- def get_production_speed(self, product_id: str) -> Optional[float]:
128
- """Get production speed for product"""
129
- return self.speed_data.get(product_id, None)
130
-
131
- def get_dependencies(self, product_id: str) -> List[str]:
132
- """Get list of dependencies for product"""
133
- return self.kit_dependencies.get(product_id, [])
134
-
135
- def validate_all_products(self) -> pd.DataFrame:
136
- """
137
- Perform comprehensive validation of all products with demand
138
- Returns: DataFrame with validation results
139
- """
140
- results = []
141
-
142
- for product_id, demand in self.demand_data.items():
143
- if demand > 0: # Only analyze products with actual demand
144
-
145
- # Basic classification
146
- product_type, level_name = self.classify_product_type(product_id)
147
- is_standalone = self.is_standalone_master(product_id)
148
-
149
- # Line assignment
150
- line_type_id, line_name = self.get_line_assignment(product_id)
151
-
152
- # Staffing requirements
153
- staffing = self.get_staffing_requirements(product_id)
154
- unicef_staff = staffing.get('UNICEF Fixed term', 0)
155
- humanizer_staff = staffing.get('Humanizer', 0)
156
- total_staff = unicef_staff + humanizer_staff
157
-
158
- # Production speed
159
- speed = self.get_production_speed(product_id)
160
-
161
- # Dependencies
162
- dependencies = self.get_dependencies(product_id)
163
- dependency_count = len(dependencies)
164
-
165
- # Calculate production time needed (if speed available)
166
- production_hours_needed = None
167
- if speed and speed > 0:
168
- production_hours_needed = demand / speed
169
-
170
- # Validation flags
171
- has_line_assignment = line_type_id is not None
172
- has_staffing_data = total_staff > 0
173
- has_speed_data = speed is not None
174
- has_hierarchy_data = product_type != "unclassified"
175
-
176
- # Overall validation status
177
- validation_issues = []
178
- if not has_line_assignment:
179
- validation_issues.append("no_line_assignment")
180
- if not has_staffing_data:
181
- validation_issues.append("no_staffing_data")
182
- if not has_speed_data:
183
- validation_issues.append("no_speed_data")
184
- if not has_hierarchy_data:
185
- validation_issues.append("no_hierarchy_data")
186
-
187
- validation_status = "βœ… Valid" if not validation_issues else f"⚠️ Issues: {', '.join(validation_issues)}"
188
-
189
- results.append({
190
- 'Product ID': product_id,
191
- 'Demand': demand,
192
- 'Product Type': product_type.title(),
193
- 'Level': level_name,
194
- 'Is Standalone Master': "Yes" if is_standalone else "No",
195
- 'Line Type ID': line_type_id if line_type_id else "N/A",
196
- 'Line Type': line_name,
197
- 'UNICEF Staff': unicef_staff,
198
- 'Humanizer Staff': humanizer_staff,
199
- 'Total Staff': total_staff,
200
- 'Production Speed (units/hour)': f"{speed:.1f}" if speed else "N/A",
201
- 'Production Hours Needed': f"{production_hours_needed:.1f}" if production_hours_needed else "N/A",
202
- 'Dependencies Count': dependency_count,
203
- 'Dependencies': ", ".join(dependencies[:3]) + ("..." if len(dependencies) > 3 else ""),
204
- 'Has Line Assignment': "βœ…" if has_line_assignment else "❌",
205
- 'Has Staffing Data': "βœ…" if has_staffing_data else "❌",
206
- 'Has Speed Data': "βœ…" if has_speed_data else "❌",
207
- 'Has Hierarchy Data': "βœ…" if has_hierarchy_data else "❌",
208
- 'Validation Status': validation_status
209
- })
210
-
211
- df = pd.DataFrame(results)
212
-
213
- # Sort by validation issues first, then by demand
214
- df['Has Issues'] = df['Validation Status'].str.contains('Issues')
215
- df = df.sort_values(['Has Issues', 'Demand'], ascending=[False, False])
216
- df = df.drop('Has Issues', axis=1)
217
-
218
- return df
219
-
220
- def get_summary_statistics(self, df: pd.DataFrame) -> Dict:
221
- """Generate summary statistics from validation results"""
222
-
223
- total_products = len(df)
224
- total_demand = df['Demand'].sum()
225
-
226
- # Product type distribution
227
- type_counts = df['Product Type'].value_counts().to_dict()
228
-
229
- # Validation status
230
- valid_products = len(df[df['Validation Status'] == "βœ… Valid"])
231
- invalid_products = total_products - valid_products
232
-
233
- # Line assignment issues
234
- no_line_assignment = len(df[df['Has Line Assignment'] == "❌"])
235
-
236
- # Staffing issues
237
- no_staffing = len(df[df['Has Staffing Data'] == "❌"])
238
-
239
- # Speed data issues
240
- no_speed = len(df[df['Has Speed Data'] == "❌"])
241
-
242
- # Hierarchy issues
243
- no_hierarchy = len(df[df['Has Hierarchy Data'] == "❌"])
244
-
245
- # Standalone masters
246
- standalone_masters = len(df[(df['Product Type'] == 'Master') & (df['Is Standalone Master'] == 'Yes')])
247
-
248
- # Staffing summary
249
- total_unicef_needed = df['UNICEF Staff'].sum()
250
- total_humanizer_needed = df['Humanizer Staff'].sum()
251
-
252
- return {
253
- 'total_products': total_products,
254
- 'total_demand': total_demand,
255
- 'valid_products': valid_products,
256
- 'invalid_products': invalid_products,
257
- 'type_counts': type_counts,
258
- 'no_line_assignment': no_line_assignment,
259
- 'no_staffing': no_staffing,
260
- 'no_speed': no_speed,
261
- 'no_hierarchy': no_hierarchy,
262
- 'standalone_masters': standalone_masters,
263
- 'total_unicef_needed': total_unicef_needed,
264
- 'total_humanizer_needed': total_humanizer_needed
265
- }
266
-
267
-
268
- def display_demand_validation():
269
- """
270
- Display demand validation analysis in Streamlit
271
- """
272
- st.header("πŸ“‹ Demand Data Validation")
273
- st.markdown("Comprehensive analysis of products with demand to identify potential optimization issues.")
274
-
275
- # Initialize validator
276
- validator = DemandValidator()
277
-
278
- # Load data
279
- with st.spinner("Loading data for validation..."):
280
- if not validator.load_data():
281
- st.error("Failed to load data for validation.")
282
- return
283
-
284
- # Perform validation
285
- with st.spinner("Analyzing demand data..."):
286
- validation_df = validator.validate_all_products()
287
- summary_stats = validator.get_summary_statistics(validation_df)
288
-
289
- # Display summary statistics
290
- st.subheader("πŸ“Š Summary Statistics")
291
-
292
- col1, col2, col3, col4 = st.columns(4)
293
-
294
- with col1:
295
- st.metric("Total Products", summary_stats['total_products'])
296
- st.metric("Valid Products", summary_stats['valid_products'])
297
-
298
- with col2:
299
- st.metric("Total Demand", f"{summary_stats['total_demand']:,}")
300
- st.metric("Invalid Products", summary_stats['invalid_products'])
301
-
302
- with col3:
303
- st.metric("UNICEF Staff Needed", summary_stats['total_unicef_needed'])
304
- st.metric("Standalone Masters", summary_stats['standalone_masters'])
305
-
306
- with col4:
307
- st.metric("Humanizer Staff Needed", summary_stats['total_humanizer_needed'])
308
- st.metric("No Line Assignment", summary_stats['no_line_assignment'])
309
-
310
- # Product type distribution
311
- st.subheader("πŸ“ˆ Product Type Distribution")
312
- if summary_stats['type_counts']:
313
- col1, col2 = st.columns(2)
314
-
315
- with col1:
316
- type_df = pd.DataFrame(list(summary_stats['type_counts'].items()),
317
- columns=['Product Type', 'Count'])
318
- st.bar_chart(type_df.set_index('Product Type'))
319
-
320
- with col2:
321
- for ptype, count in summary_stats['type_counts'].items():
322
- percentage = (count / summary_stats['total_products']) * 100
323
- st.write(f"**{ptype}:** {count} products ({percentage:.1f}%)")
324
-
325
- # Validation issues summary
326
- st.subheader("⚠️ Validation Issues Summary")
327
-
328
- col1, col2, col3, col4 = st.columns(4)
329
-
330
- with col1:
331
- st.metric("No Line Assignment", summary_stats['no_line_assignment'],
332
- delta=None if summary_stats['no_line_assignment'] == 0 else "Issue")
333
-
334
- with col2:
335
- st.metric("No Staffing Data", summary_stats['no_staffing'],
336
- delta=None if summary_stats['no_staffing'] == 0 else "Issue")
337
-
338
- with col3:
339
- st.metric("No Speed Data", summary_stats['no_speed'],
340
- delta=None if summary_stats['no_speed'] == 0 else "Issue")
341
-
342
- with col4:
343
- st.metric("No Hierarchy Data", summary_stats['no_hierarchy'],
344
- delta=None if summary_stats['no_hierarchy'] == 0 else "Issue")
345
-
346
- # Detailed validation results
347
- st.subheader("πŸ“‹ Detailed Validation Results")
348
-
349
- # Filter options
350
- col1, col2, col3 = st.columns(3)
351
-
352
- with col1:
353
- show_only_issues = st.checkbox("Show only products with issues", value=False)
354
-
355
- with col2:
356
- product_type_filter = st.selectbox("Filter by product type",
357
- options=["All"] + list(summary_stats['type_counts'].keys()))
358
-
359
- with col3:
360
- min_demand = st.number_input("Minimum demand", min_value=0, value=0)
361
-
362
- # Apply filters
363
- filtered_df = validation_df.copy()
364
-
365
- if show_only_issues:
366
- filtered_df = filtered_df[filtered_df['Validation Status'] != "βœ… Valid"]
367
-
368
- if product_type_filter != "All":
369
- filtered_df = filtered_df[filtered_df['Product Type'] == product_type_filter]
370
-
371
- if min_demand > 0:
372
- filtered_df = filtered_df[filtered_df['Demand'] >= min_demand]
373
-
374
- # Display filtered results
375
- st.write(f"Showing {len(filtered_df)} of {len(validation_df)} products")
376
-
377
- if len(filtered_df) > 0:
378
- # Configure column display
379
- column_config = {
380
- 'Product ID': st.column_config.TextColumn('Product ID', width="medium"),
381
- 'Demand': st.column_config.NumberColumn('Demand', format="%d"),
382
- 'Production Speed (units/hour)': st.column_config.TextColumn('Speed', width="small"),
383
- 'Production Hours Needed': st.column_config.TextColumn('Hours Needed', width="small"),
384
- 'Validation Status': st.column_config.TextColumn('Status', width="large"),
385
- }
386
-
387
- st.dataframe(
388
- filtered_df,
389
- column_config=column_config,
390
- use_container_width=True,
391
- height=400
392
- )
393
-
394
- # Export option
395
- if st.button("πŸ“₯ Export Validation Results to CSV"):
396
- csv = validation_df.to_csv(index=False)
397
- st.download_button(
398
- label="Download CSV",
399
- data=csv,
400
- file_name="demand_validation_results.csv",
401
- mime="text/csv"
402
- )
403
- else:
404
- st.info("No products match the selected filters.")
405
-
406
- # Recommendations
407
- st.subheader("πŸ’‘ Recommendations")
408
-
409
- recommendations = []
410
-
411
- if summary_stats['no_line_assignment'] > 0:
412
- recommendations.append(f"**Line Assignment**: {summary_stats['no_line_assignment']} products need line type assignments in the kit-line mapping.")
413
-
414
- if summary_stats['no_staffing'] > 0:
415
- recommendations.append(f"**Staffing Data**: {summary_stats['no_staffing']} products are missing staffing requirements.")
416
-
417
- if summary_stats['no_speed'] > 0:
418
- recommendations.append(f"**Speed Data**: {summary_stats['no_speed']} products are missing production speed data.")
419
-
420
- if summary_stats['no_hierarchy'] > 0:
421
- recommendations.append(f"**Hierarchy Data**: {summary_stats['no_hierarchy']} products are not in the kit hierarchy.")
422
-
423
- if summary_stats['standalone_masters'] > 0:
424
- recommendations.append(f"**Standalone Masters**: {summary_stats['standalone_masters']} master kits have no dependencies (verify if this is intended).")
425
-
426
- if recommendations:
427
- for rec in recommendations:
428
- st.warning(rec)
429
- else:
430
- st.success("βœ… All validation checks passed! Your demand data appears to be complete and ready for optimization.")
431
-
432
-
433
- if __name__ == "__main__":
434
- # For testing
435
- display_demand_validation()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
src/demand_validation_viz.py CHANGED
@@ -17,7 +17,8 @@ from src.demand_filtering import DemandFilter
17
 
18
  class DemandValidationViz:
19
  """
20
- Provides visualization for demand data validation using the filtering module
 
21
  """
22
 
23
  def __init__(self):
@@ -43,40 +44,7 @@ class DemandValidationViz:
43
  pass
44
  return False
45
 
46
- def classify_product_type(self, product_id: str) -> Tuple[str, str]:
47
- """Get product classification from filter instance"""
48
- if product_id in self.filter_instance.kit_levels:
49
- level = self.filter_instance.kit_levels[product_id]
50
- level_name = KitLevel.get_name(level)
51
-
52
- if level == 0:
53
- return "prepack", level_name
54
- elif level == 1:
55
- return "subkit", level_name
56
- elif level == 2:
57
- return "master", level_name
58
- else:
59
- return "unknown", f"level_{level}"
60
- else:
61
- return "unclassified", "no_hierarchy_data"
62
-
63
- def get_line_assignment(self, product_id: str) -> Tuple[Optional[int], str]:
64
- """Get line assignment from filter instance"""
65
- if product_id in self.filter_instance.line_assignments:
66
- line_type = self.filter_instance.line_assignments[product_id]
67
- line_name = LineType.get_name(line_type)
68
- return line_type, line_name
69
- else:
70
- return None, "no_assignment"
71
-
72
- def get_staffing_requirements(self, product_id: str) -> Dict[str, int]:
73
- """Get staffing requirements from filter instance"""
74
- staffing = {}
75
-
76
- for emp_type, products in self.filter_instance.team_requirements.items():
77
- staffing[emp_type] = products.get(product_id, 0)
78
-
79
- return staffing
80
 
81
  def get_production_speed(self, product_id: str) -> Optional[float]:
82
  """Get production speed for product"""
@@ -122,21 +90,21 @@ class DemandValidationViz:
122
  else:
123
  level_name = f"level_{details['product_type']}"
124
 
125
- # Overall status
126
  if not details['is_included_in_optimization']:
127
  validation_status = f"🚫 Excluded: {', '.join(details['exclusion_reasons'])}"
128
  else:
129
- # Check for other issues that don't exclude from optimization
130
- other_issues = []
131
  if speed is None:
132
- other_issues.append("no_speed_data")
133
  if not details['has_hierarchy']:
134
- other_issues.append("no_hierarchy_data")
135
 
136
- if other_issues:
137
- validation_status = f"⚠️ Issues: {', '.join(other_issues)}"
138
  else:
139
- validation_status = "βœ… Included in optimization"
140
 
141
  results.append({
142
  'Product ID': product_id,
@@ -153,10 +121,11 @@ class DemandValidationViz:
153
  'Production Hours Needed': f"{production_hours_needed:.1f}" if production_hours_needed else "N/A",
154
  'Has Line Assignment': "βœ…" if details['has_line_assignment'] else "❌",
155
  'Has Staffing Data': "βœ…" if details['has_staffing'] else "❌",
156
- 'Has Speed Data': "βœ…" if speed is not None else "❌",
157
  'Has Hierarchy Data': "βœ…" if details['has_hierarchy'] else "❌",
158
  'Excluded from Optimization': not details['is_included_in_optimization'],
159
  'Exclusion Reasons': ', '.join(details['exclusion_reasons']) if details['exclusion_reasons'] else '',
 
160
  'Validation Status': validation_status
161
  })
162
 
@@ -208,10 +177,12 @@ class DemandValidationViz:
208
 
209
  def display_demand_validation():
210
  """
211
- Display demand validation analysis in Streamlit
 
212
  """
213
  st.header("πŸ“‹ Demand Data Validation")
214
- st.markdown("Comprehensive analysis of products with demand to identify potential optimization issues.")
 
215
 
216
  # Initialize validator
217
  validator = DemandValidationViz()
@@ -265,7 +236,7 @@ def display_demand_validation():
265
 
266
  # Validation issues summary for included products
267
  st.subheader("⚠️ Data Quality Issues (Products Included in Optimization)")
268
- st.write("Issues affecting products that **will be** included in optimization:")
269
 
270
  col1, col2, col3, col4 = st.columns(4)
271
 
@@ -277,9 +248,9 @@ def display_demand_validation():
277
  st.metric("No Staffing Data", summary_stats['no_staffing'],
278
  delta=None if summary_stats['no_staffing'] == 0 else "Issue")
279
 
280
- with col3:
281
- st.metric("No Speed Data", summary_stats['no_speed'],
282
- delta=None if summary_stats['no_speed'] == 0 else "Issue")
283
 
284
  with col4:
285
  st.metric("No Hierarchy Data", summary_stats['no_hierarchy'],
@@ -313,7 +284,7 @@ def display_demand_validation():
313
  filtered_included = filtered_included[filtered_included['Demand'] >= included_min_demand]
314
 
315
  # Configure column display for included
316
- included_columns = ['Product ID', 'Demand', 'Product Type', 'Line Type', 'UNICEF Staff', 'Humanizer Staff', 'Production Speed (units/hour)', 'Validation Status']
317
 
318
  st.dataframe(
319
  filtered_included[included_columns],
@@ -326,7 +297,10 @@ def display_demand_validation():
326
  # Products Excluded from Optimization
327
  st.subheader("🚫 Products Excluded from Optimization")
328
  st.write(f"**{len(excluded_df)} products** are excluded from optimization with total demand of **{excluded_df['Demand'].sum():,} units**")
329
- st.info("These products are omitted from optimization due to missing line assignments or zero staffing requirements.")
 
 
 
330
 
331
  if len(excluded_df) > 0:
332
  # Show exclusion breakdown
@@ -373,7 +347,7 @@ def display_demand_validation():
373
  recommendations.append(f"**Staffing Data**: {summary_stats['no_staffing']} products included in optimization are missing staffing requirements.")
374
 
375
  if summary_stats['no_speed'] > 0:
376
- recommendations.append(f"**Speed Data**: {summary_stats['no_speed']} products included in optimization are missing production speed data (will use defaults).")
377
 
378
  if summary_stats['no_hierarchy'] > 0:
379
  recommendations.append(f"**Hierarchy Data**: {summary_stats['no_hierarchy']} products included in optimization are not in the kit hierarchy.")
@@ -384,9 +358,11 @@ def display_demand_validation():
384
 
385
  # Overall status
386
  if summary_stats['included_products'] > 0:
387
- st.success(f"βœ… **Ready for Optimization**: {summary_stats['included_products']} products with {summary_stats['included_demand']:,} units demand are ready for optimization!")
 
 
388
  else:
389
- st.error("❌ No products are ready for optimization. Please review the exclusion criteria above.")
390
 
391
 
392
  if __name__ == "__main__":
 
17
 
18
  class DemandValidationViz:
19
  """
20
+ Provides visualization for demand data validation using the filtering module.
21
+ This class focuses purely on displaying the results from demand filtering.
22
  """
23
 
24
  def __init__(self):
 
44
  pass
45
  return False
46
 
47
+ # Remove duplicate methods - use filter_instance data directly
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
48
 
49
  def get_production_speed(self, product_id: str) -> Optional[float]:
50
  """Get production speed for product"""
 
90
  else:
91
  level_name = f"level_{details['product_type']}"
92
 
93
+ # Overall status with enhanced detail
94
  if not details['is_included_in_optimization']:
95
  validation_status = f"🚫 Excluded: {', '.join(details['exclusion_reasons'])}"
96
  else:
97
+ # Check for data quality issues that don't exclude from optimization
98
+ data_quality_issues = []
99
  if speed is None:
100
+ data_quality_issues.append("missing_speed_data (will use default)")
101
  if not details['has_hierarchy']:
102
+ data_quality_issues.append("no_hierarchy_data")
103
 
104
+ if data_quality_issues:
105
+ validation_status = f"⚠️ Data Issues: {', '.join(data_quality_issues)}"
106
  else:
107
+ validation_status = "βœ… Ready for optimization"
108
 
109
  results.append({
110
  'Product ID': product_id,
 
121
  'Production Hours Needed': f"{production_hours_needed:.1f}" if production_hours_needed else "N/A",
122
  'Has Line Assignment': "βœ…" if details['has_line_assignment'] else "❌",
123
  'Has Staffing Data': "βœ…" if details['has_staffing'] else "❌",
124
+ 'Has Speed Data': "βœ…" if speed is not None else "❌ (will use default)",
125
  'Has Hierarchy Data': "βœ…" if details['has_hierarchy'] else "❌",
126
  'Excluded from Optimization': not details['is_included_in_optimization'],
127
  'Exclusion Reasons': ', '.join(details['exclusion_reasons']) if details['exclusion_reasons'] else '',
128
+ 'Data Quality Issues': ', '.join(data_quality_issues) if details['is_included_in_optimization'] and data_quality_issues else '',
129
  'Validation Status': validation_status
130
  })
131
 
 
177
 
178
  def display_demand_validation():
179
  """
180
+ Display demand validation analysis in Streamlit.
181
+ Shows the results of demand filtering with detailed data quality analysis.
182
  """
183
  st.header("πŸ“‹ Demand Data Validation")
184
+ st.markdown("""**Analysis of Filtered Demand Data**: This page shows which products are included/excluded from optimization
185
+ based on the demand filtering criteria, plus data quality assessment for included products.""")
186
 
187
  # Initialize validator
188
  validator = DemandValidationViz()
 
236
 
237
  # Validation issues summary for included products
238
  st.subheader("⚠️ Data Quality Issues (Products Included in Optimization)")
239
+ st.write("Data quality issues affecting products that **will be** included in optimization (these don't exclude products but may affect optimization quality):")
240
 
241
  col1, col2, col3, col4 = st.columns(4)
242
 
 
248
  st.metric("No Staffing Data", summary_stats['no_staffing'],
249
  delta=None if summary_stats['no_staffing'] == 0 else "Issue")
250
 
251
+ with col3:
252
+ st.metric("No Speed Data", summary_stats['no_speed'],
253
+ delta=None if summary_stats['no_speed'] == 0 else "Will use default")
254
 
255
  with col4:
256
  st.metric("No Hierarchy Data", summary_stats['no_hierarchy'],
 
284
  filtered_included = filtered_included[filtered_included['Demand'] >= included_min_demand]
285
 
286
  # Configure column display for included
287
+ included_columns = ['Product ID', 'Demand', 'Product Type', 'Line Type', 'UNICEF Staff', 'Humanizer Staff', 'Production Speed (units/hour)', 'Data Quality Issues', 'Validation Status']
288
 
289
  st.dataframe(
290
  filtered_included[included_columns],
 
297
  # Products Excluded from Optimization
298
  st.subheader("🚫 Products Excluded from Optimization")
299
  st.write(f"**{len(excluded_df)} products** are excluded from optimization with total demand of **{excluded_df['Demand'].sum():,} units**")
300
+ st.info("""These products are **filtered out** from optimization due to:
301
+ β€’ Missing line assignments (for non-standalone masters)
302
+ β€’ Zero staffing requirements (both UNICEF and Humanizer staff = 0)
303
+ β€’ Non-standalone masters (excluded from production planning)""")
304
 
305
  if len(excluded_df) > 0:
306
  # Show exclusion breakdown
 
347
  recommendations.append(f"**Staffing Data**: {summary_stats['no_staffing']} products included in optimization are missing staffing requirements.")
348
 
349
  if summary_stats['no_speed'] > 0:
350
+ recommendations.append(f"**Speed Data**: {summary_stats['no_speed']} products included in optimization are missing production speed data. The optimization will use a default speed of 106.7 units/hour for these products.")
351
 
352
  if summary_stats['no_hierarchy'] > 0:
353
  recommendations.append(f"**Hierarchy Data**: {summary_stats['no_hierarchy']} products included in optimization are not in the kit hierarchy.")
 
358
 
359
  # Overall status
360
  if summary_stats['included_products'] > 0:
361
+ st.success(f"βœ… **Optimization Scope**: {summary_stats['included_products']} products with {summary_stats['included_demand']:,} units demand will be included in optimization!")
362
+ if summary_stats['no_speed'] == 0 and summary_stats['no_hierarchy'] == 0:
363
+ st.info("πŸŽ‰ All included products have complete data - optimization should run smoothly!")
364
  else:
365
+ st.error("❌ No products passed the filtering criteria. Please review the exclusion reasons above and check your data configuration.")
366
 
367
 
368
  if __name__ == "__main__":
src/models/optimizer_real.py CHANGED
@@ -14,12 +14,12 @@ from src.config.constants import ShiftType, LineType, KitLevel
14
  # ---- config import (ν”„λ‘œμ νŠΈ κ²½λ‘œμ— 맞좰 μ‘°μ •) ----
15
  from src.config.optimization_config import (
16
  DATE_SPAN, # [1..N]
17
- PRODUCT_LIST, # list of products (e.g., ['A','B',...])
18
  EMPLOYEE_TYPE_LIST, # e.g., ['UNICEF Fixed term','Humanizer']
19
  SHIFT_LIST, # e.g., [1,2,3]
20
  LINE_LIST, # e.g., [6,7] (line type ids)
21
  LINE_CNT_PER_TYPE, # {6: count_of_long_lines, 7: count_of_short_lines}
22
- DEMAND_DICTIONARY, # {product: total_units_over_period}
23
  COST_LIST_PER_EMP_SHIFT, # {emp_type: {shift: cost_per_hour}}
24
  MAX_EMPLOYEE_PER_TYPE_ON_DAY, # {emp_type: {t: headcount}}
25
  MAX_HOUR_PER_PERSON_PER_DAY, # e.g., 14
@@ -28,7 +28,7 @@ from src.config.optimization_config import (
28
  MAX_PARALLEL_WORKERS, # {6: max_workers, 7: max_workers}
29
  DAILY_WEEKLY_SCHEDULE, # 'daily' or 'weekly'
30
  FIXED_STAFF_CONSTRAINT_MODE, # not used in fixed-team model (λ™μ‹œ νˆ¬μž…μ΄λΌ 무의미)
31
- TEAM_REQ_PER_PRODUCT, # {emp_type: {product: team_size}} from Kits_Calculation.csv
32
  PAYMENT_MODE_CONFIG, # {shift: 'bulk'/'partial'} payment mode configuration
33
  KIT_LINE_MATCH_DICT,
34
  EVENING_SHIFT_MODE,
@@ -51,7 +51,7 @@ KIT_LINE_MATCH_DICT
51
  print("KIT_LINE_MATCH_DICT",KIT_LINE_MATCH_DICT)
52
 
53
  # 3) If specific product is not produced on specific date, set it to 0
54
- ACTIVE = {t: {p: 1 for p in PRODUCT_LIST} for t in DATE_SPAN}
55
  # Example: ACTIVE[2]['C'] = 0 # Disable product C on day 2
56
 
57
 
@@ -159,11 +159,29 @@ def sort_products_by_hierarchy(product_list):
159
  # Dependency ordering is now handled by topological sorting in sort_products_by_hierarchy()
160
 
161
  def solve_fixed_team_weekly():
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
162
  # --- Sets ---
163
  D = list(DATE_SPAN)
 
164
  S = sorted(list(SHIFT_LIST))
165
  E = list(EMPLOYEE_TYPE_LIST) # e.g., ['UNICEF Fixed term','Humanizer']
166
-
167
  # *** HIERARCHY SORTING: Sort products by production priority ***
168
  print("\n" + "="*60)
169
  print("πŸ”— APPLYING HIERARCHY-BASED PRODUCTION ORDERING")
@@ -180,7 +198,7 @@ def solve_fixed_team_weekly():
180
  N_day = MAX_EMPLOYEE_PER_TYPE_ON_DAY # {emp_type:{t:headcount}}
181
  cost = COST_LIST_PER_EMP_SHIFT # {emp_type:{shift:cost}}
182
  d_week = DEMAND_DICTIONARY # {product: demand over period}
183
-
184
  # --- Feasibility quick checks ---
185
 
186
  # 1) If team size is greater than max_workers_line, block the product-line type combination
 
14
  # ---- config import (ν”„λ‘œμ νŠΈ κ²½λ‘œμ— 맞좰 μ‘°μ •) ----
15
  from src.config.optimization_config import (
16
  DATE_SPAN, # [1..N]
17
+ get_product_list, # DYNAMIC: list of products (e.g., ['A','B',...])
18
  EMPLOYEE_TYPE_LIST, # e.g., ['UNICEF Fixed term','Humanizer']
19
  SHIFT_LIST, # e.g., [1,2,3]
20
  LINE_LIST, # e.g., [6,7] (line type ids)
21
  LINE_CNT_PER_TYPE, # {6: count_of_long_lines, 7: count_of_short_lines}
22
+ get_demand_dictionary, # DYNAMIC: {product: total_units_over_period}
23
  COST_LIST_PER_EMP_SHIFT, # {emp_type: {shift: cost_per_hour}}
24
  MAX_EMPLOYEE_PER_TYPE_ON_DAY, # {emp_type: {t: headcount}}
25
  MAX_HOUR_PER_PERSON_PER_DAY, # e.g., 14
 
28
  MAX_PARALLEL_WORKERS, # {6: max_workers, 7: max_workers}
29
  DAILY_WEEKLY_SCHEDULE, # 'daily' or 'weekly'
30
  FIXED_STAFF_CONSTRAINT_MODE, # not used in fixed-team model (λ™μ‹œ νˆ¬μž…μ΄λΌ 무의미)
31
+ get_team_requirements, # DYNAMIC: {emp_type: {product: team_size}} from Kits_Calculation.csv
32
  PAYMENT_MODE_CONFIG, # {shift: 'bulk'/'partial'} payment mode configuration
33
  KIT_LINE_MATCH_DICT,
34
  EVENING_SHIFT_MODE,
 
51
  print("KIT_LINE_MATCH_DICT",KIT_LINE_MATCH_DICT)
52
 
53
  # 3) If specific product is not produced on specific date, set it to 0
54
+ # ACTIVE will be built dynamically in solve function based on fresh PRODUCT_LIST
55
  # Example: ACTIVE[2]['C'] = 0 # Disable product C on day 2
56
 
57
 
 
159
  # Dependency ordering is now handled by topological sorting in sort_products_by_hierarchy()
160
 
161
  def solve_fixed_team_weekly():
162
+ # *** CRITICAL: Load fresh data to reflect current Streamlit configs ***
163
+ print("\n" + "="*60)
164
+ print("πŸ”„ LOADING FRESH DATA FOR OPTIMIZATION")
165
+ print("="*60)
166
+
167
+ # Get fresh product list and demand data
168
+ PRODUCT_LIST = get_product_list()
169
+ DEMAND_DICTIONARY = get_demand_dictionary()
170
+ TEAM_REQ_PER_PRODUCT = get_team_requirements(PRODUCT_LIST)
171
+
172
+ print(f"πŸ“¦ LOADED PRODUCTS: {len(PRODUCT_LIST)} products")
173
+ print(f"πŸ“ˆ LOADED DEMAND: {sum(DEMAND_DICTIONARY.values())} total units")
174
+ print(f"πŸ‘₯ LOADED TEAM REQUIREMENTS: {len(TEAM_REQ_PER_PRODUCT)} employee types")
175
+
176
+ # Build ACTIVE schedule for fresh product list
177
+ ACTIVE = {t: {p: 1 for p in PRODUCT_LIST} for t in DATE_SPAN}
178
+
179
  # --- Sets ---
180
  D = list(DATE_SPAN)
181
+ # print("D",D)
182
  S = sorted(list(SHIFT_LIST))
183
  E = list(EMPLOYEE_TYPE_LIST) # e.g., ['UNICEF Fixed term','Humanizer']
184
+ print("E",E)
185
  # *** HIERARCHY SORTING: Sort products by production priority ***
186
  print("\n" + "="*60)
187
  print("πŸ”— APPLYING HIERARCHY-BASED PRODUCTION ORDERING")
 
198
  N_day = MAX_EMPLOYEE_PER_TYPE_ON_DAY # {emp_type:{t:headcount}}
199
  cost = COST_LIST_PER_EMP_SHIFT # {emp_type:{shift:cost}}
200
  d_week = DEMAND_DICTIONARY # {product: demand over period}
201
+ print("d_week",d_week)
202
  # --- Feasibility quick checks ---
203
 
204
  # 1) If team size is greater than max_workers_line, block the product-line type combination