|
|
import pandas as pd |
|
|
import numpy as np |
|
|
import matplotlib.pyplot as plt |
|
|
import io |
|
|
from PIL import Image |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def run_slotting_analysis(message, slotting_df):
    """Rank SKUs by a weighted velocity/frequency score and assign slot positions.

    Parameters
    ----------
    message : str
        User request text (unused here; kept for a uniform handler signature).
    slotting_df : pandas.DataFrame
        Must contain "Velocity" (fast/medium/slow, case-insensitive) and
        "Frequency" (numeric pick counts).

    Returns
    -------
    tuple[str, pandas.DataFrame | None]
        Markdown explanation and the scored/slotted frame, or an error
        message and None when required columns are missing.
    """
    reasoning_steps = []

    df = slotting_df.copy()

    # Guard required columns instead of raising KeyError — consistent with
    # the other analysis handlers in this module.
    missing = [col for col in ("Velocity", "Frequency") if col not in df.columns]
    if missing:
        return f"Missing column(s) {', '.join(missing)} - cannot run slotting analysis.", None

    velocity_map = {
        "fast": 3,
        "medium": 2,
        "slow": 1,
    }

    # Unrecognized velocity labels map to NaN and sort to the bottom.
    df["VelocityNorm"] = df["Velocity"].str.lower().map(velocity_map)
    reasoning_steps.append("Mapped velocity categories to numerical weights.")

    # Min-max normalization; the epsilon avoids division by zero when all
    # frequencies are equal.
    df["FreqNorm"] = (df["Frequency"] - df["Frequency"].min()) / (
        df["Frequency"].max() - df["Frequency"].min() + 1e-8
    )
    reasoning_steps.append("Normalized frequency to 0β1 scale.")

    # 60% velocity class, 40% normalized frequency.
    df["Score"] = (0.6 * df["VelocityNorm"]) + (0.4 * df["FreqNorm"])
    reasoning_steps.append("Computed weighted slotting score.")

    df = df.sort_values("Score", ascending=False).reset_index(drop=True)

    # Best-scoring SKUs get the lowest aisle numbers (the "prime" slots).
    df["Aisle"] = np.arange(1, len(df) + 1)
    df["Rack"] = np.linspace(1, 20, len(df)).astype(int)
    reasoning_steps.append("Assigned optimal aisle & rack positions.")

    explanation = (
        "### π¦ Slotting Optimization\n"
        "High-velocity & high-frequency SKUs placed in prime aisles.\n\n"
        "#### π Reasoning\n" + "\n".join([f"- {r}" for r in reasoning_steps])
    )

    return explanation, df
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def run_picking_optimization(message, picking_df):
    """Sequence picks by Manhattan distance and render the walk path.

    Parameters
    ----------
    message : str
        User request text (unused; kept for a uniform handler signature).
    picking_df : pandas.DataFrame
        Needs numeric "Aisle" and "Rack" columns.

    Returns
    -------
    tuple[str, PIL.Image.Image]
        Markdown explanation and the plotted route image.
    """
    steps = []

    route = picking_df.copy()

    # Treat aisle/rack positions as points on a 2-D grid.
    route["x"] = route["Aisle"]
    route["y"] = route["Rack"]
    steps.append("Converted AisleβRack to coordinate grid.")

    # Walk order: nearest-to-origin first, by L1 (Manhattan) distance.
    route["Distance"] = route["x"].abs() + route["y"].abs()
    route = route.sort_values("Distance").reset_index(drop=True)
    steps.append("Calculated Manhattan distance & sorted sequence.")

    plt.figure(figsize=(6, 6))
    plt.plot(route["x"], route["y"], marker="o", linestyle="-")
    plt.title("Optimized Picking Route")
    plt.xlabel("Aisle")
    plt.ylabel("Rack")

    # Render to an in-memory PNG, then hand it back as a PIL image.
    png = io.BytesIO()
    plt.savefig(png, format="png")
    plt.close()
    png.seek(0)

    image = Image.open(png)

    steps.append("Generated walking route visualization.")

    explanation = (
        "### π Picking Route Optimization\n"
        "Manhattan-distance-based walk path generated.\n\n"
        "#### π Reasoning\n" + "\n".join([f"- {r}" for r in steps])
    )

    return explanation, image
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def run_demand_forecast(message, slotting_df):
    """Forecast 7 days of demand from SKU picking frequency.

    Blends the overall mean (60%) with a recency-weighted average (40%),
    then projects a 5% linear uplift per day.

    Parameters
    ----------
    message : str
        User request text (unused; kept for a uniform handler signature).
    slotting_df : pandas.DataFrame
        Must contain a numeric "Frequency" column with at least one row.

    Returns
    -------
    tuple[str, PIL.Image.Image | None, pandas.DataFrame | None]
        Explanation, forecast chart, and 7-row forecast frame; the chart
        and frame are None when input is missing or empty.
    """
    reasoning_steps = []
    df = slotting_df.copy()

    if "Frequency" not in df.columns:
        return "Frequency missing β cannot forecast.", None, None

    # Guard: an empty frame would yield NaN forecasts (mean of empty series).
    if df.empty:
        return "No SKU rows available - cannot forecast.", None, None

    demand = df["Frequency"].astype(float)
    reasoning_steps.append("Used SKU picking frequency as demand signal.")

    # NOTE(review): this is the plain mean over all rows, not a windowed
    # moving average, despite the reasoning text.
    moving_avg = demand.mean()
    reasoning_steps.append(f"Computed moving average: {moving_avg:.2f}")

    # Later rows get linearly larger weights (0.1 up to 1.0).
    weights = np.linspace(0.1, 1.0, len(demand))
    trend = np.sum(demand * weights) / np.sum(weights)
    reasoning_steps.append(f"Weighted trend adjustment: {trend:.2f}")

    forecast_value = (moving_avg * 0.6) + (trend * 0.4)

    # Day i projection is the blended base scaled by (1 + 0.05 * i).
    next_7_days = [forecast_value * (1 + 0.05 * i) for i in range(7)]
    forecast_df = pd.DataFrame({
        "Day": [f"Day {i+1}" for i in range(7)],
        "Forecasted_Demand": next_7_days
    })

    reasoning_steps.append("Generated 7-day demand projection.")

    plt.figure(figsize=(6, 4))
    plt.plot(forecast_df["Day"], forecast_df["Forecasted_Demand"], marker="o")
    plt.title("7-Day Demand Forecast")
    plt.xlabel("Day")
    plt.ylabel("Forecasted Demand")

    # Render to an in-memory PNG and wrap it as a PIL image.
    buffer = io.BytesIO()
    plt.savefig(buffer, format="png")
    plt.close()
    buffer.seek(0)

    image = Image.open(buffer)

    explanation = (
        "### π Demand Forecasting\n"
        "Trend-weighted moving average model applied.\n\n"
        "#### π Reasoning\n" + "\n".join([f"- {r}" for r in reasoning_steps])
    )

    return explanation, image, forecast_df
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def run_replenishment_analysis(message, slotting_df):
    """Estimate replenishment quantities and stock-out risk per SKU.

    Assumes a fixed 200-unit bin currently at half capacity; daily demand
    is proxied by the "Frequency" column.

    Returns a markdown explanation plus the enriched frame, or an error
    message and None when "Frequency" is missing.
    """
    trail = []
    df = slotting_df.copy()

    if "Frequency" not in df.columns:
        return "Frequency missing β cannot run replenishment.", None

    BIN_CAPACITY = 200
    df["CurrentStock"] = BIN_CAPACITY * 0.5
    trail.append("Assumed current stock = 50% bin capacity.")

    df["SafetyStock"] = df["Frequency"] * 3
    trail.append("Safety stock = 3 days of demand.")

    # Zero-demand SKUs use a 0.1/day floor so the division stays finite.
    daily_demand = df["Frequency"].replace(0, 0.1)
    df["DaysUntilStockout"] = df["CurrentStock"] / daily_demand
    trail.append("Estimated days until stock-out.")

    # Top-up amount to refill the bin (never negative).
    df["ReplenishmentQty"] = (BIN_CAPACITY - df["CurrentStock"]).clip(lower=0)
    trail.append("Calculated replenishment quantity needed.")

    def classify(days):
        # Risk buckets by projected days of cover.
        if days < 3:
            return "π΄ HIGH"
        if days < 7:
            return "π‘ MEDIUM"
        return "π’ LOW"

    df["Risk"] = df["DaysUntilStockout"].apply(classify)
    trail.append("Assigned risk level based on depletion rate.")

    explanation = (
        "### π Replenishment Analysis\n"
        "Replenishment needs evaluated using bin capacity, demand & stock-out timing.\n\n"
        "#### π Reasoning\n" + "\n".join([f"- {r}" for r in trail])
    )

    return explanation, df
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def run_rebalancing_analysis(message, slotting_df):
    """Propose SKU moves from congested aisles to under-used ones.

    Congestion is judged from a per-aisle "load score" (60% velocity class,
    40% picking frequency) relative to the average aisle load.

    Parameters
    ----------
    message : str
        User request text (unused; kept for a uniform handler signature).
    slotting_df : pandas.DataFrame
        Needs "Frequency" and "Velocity"; "Aisle" is auto-assigned if absent.

    Returns
    -------
    tuple[str, pandas.DataFrame | None]
        Explanation and the move plan (empty frame when nothing is
        congested), or an error message and None on missing columns.
    """
    reasoning = []
    df = slotting_df.copy()

    if "Frequency" not in df.columns:
        return "Frequency missing β cannot rebalance.", None

    # Velocity is required for the load score; guard instead of KeyError.
    if "Velocity" not in df.columns:
        return "Velocity missing β cannot rebalance.", None

    if "Aisle" not in df.columns:
        df["Aisle"] = np.arange(1, len(df) + 1)
        reasoning.append("Aisle data missing β assigned aisles automatically.")

    velocity_map = {"fast": 3, "medium": 2, "slow": 1}
    df["VelScore"] = df["Velocity"].str.lower().map(velocity_map)
    df["LoadScore"] = df["VelScore"] * 0.6 + df["Frequency"] * 0.4
    reasoning.append("Calculated SKU load score (velocity + frequency).")

    aisle_load = df.groupby("Aisle")["LoadScore"].sum().reset_index()
    avg_load = aisle_load["LoadScore"].mean()

    # >125% of the average load = high congestion, <=75% = low.
    aisle_load["Congestion"] = aisle_load["LoadScore"].apply(
        lambda x: "π΄ High" if x > avg_load * 1.25
        else ("π‘ Medium" if x > avg_load * 0.75 else "π’ Low")
    )

    df = df.merge(aisle_load[["Aisle", "Congestion"]], on="Aisle", how="left")
    reasoning.append("Assigned congestion levels to aisles.")

    high_aisles = aisle_load[aisle_load["Congestion"] == "π΄ High"]["Aisle"].tolist()
    low_aisles = aisle_load[aisle_load["Congestion"] == "π’ Low"]["Aisle"].tolist()

    move_plan = []

    if high_aisles and low_aisles:
        # Explicit round-robin counter over low aisles. (A previous version
        # used the DataFrame index label from iterrows() here, which is an
        # arbitrary row label, not a move counter.)
        move_idx = 0
        for aisle in high_aisles:
            congested_skus = df[df["Aisle"] == aisle].sort_values("LoadScore", ascending=False)
            # Move at most the two heaviest SKUs per congested aisle.
            top_to_move = congested_skus.head(2)

            for _, row in top_to_move.iterrows():
                target_aisle = low_aisles[move_idx % len(low_aisles)]
                move_idx += 1
                move_plan.append({
                    "SKU": row["SKU"],
                    "FromAisle": row["Aisle"],
                    "ToAisle": target_aisle,
                    "LoadScore": round(row["LoadScore"], 2),
                    "Reason": "Reduce congestion"
                })

        reasoning.append("Generated SKU redistribution plan.")
    else:
        reasoning.append("No congestion found β no rebalancing needed.")

    move_df = pd.DataFrame(move_plan)

    explanation = (
        "### π Inventory Rebalancing\n"
        "SKU redistribution plan to reduce aisle congestion.\n\n"
        "#### π Reasoning\n" + "\n".join([f"- {r}" for r in reasoning])
    )

    return explanation, move_df
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def run_workforce_optimization(message, slotting_df):
    """Estimate headcount from total SKU workload.

    Workload per SKU is picking frequency scaled by a 1.2 handling factor;
    one worker is assumed to cover 150 workload units, floored at one.

    Returns a markdown explanation and a two-row metrics frame, or an
    error message and None when "Frequency" is missing.
    """
    notes = []
    df = slotting_df.copy()

    if "Frequency" not in df.columns:
        return "Cannot calculate workforce β missing Frequency column.", None

    # 1.2 = handling-effort multiplier on raw pick frequency.
    df["Workload"] = df["Frequency"] * 1.2
    total_workload = df["Workload"].sum()
    workers_needed = max(1, int(total_workload // 150))

    notes.append(f"Total workload: {total_workload:.2f}")
    notes.append(f"Workers required (estimated): {workers_needed}")

    result = pd.DataFrame({
        "Metric": ["Total Workload", "Estimated Workers Needed"],
        "Value": [total_workload, workers_needed],
    })

    explanation = (
        "### π· Workforce Optimization\n"
        "Estimated staffing requirement based on SKU workload.\n\n"
        "#### π Reasoning\n" + "\n".join([f"- {r}" for r in notes])
    )

    return explanation, result
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def run_dock_scheduling(message, slotting_df):
    """Allocate SKUs to 3 dock doors by picking-frequency priority.

    Rank 1 = highest frequency; docks are assigned round-robin over the
    rank so high-priority SKUs spread across doors.

    Parameters
    ----------
    message : str
        User request text (unused; kept for a uniform handler signature).
    slotting_df : pandas.DataFrame
        Must contain "SKU" and numeric "Frequency" columns.

    Returns
    -------
    tuple[str, pandas.DataFrame | None]
        Explanation and the dock schedule, or an error message and None
        when required columns are missing.
    """
    reasoning = []
    df = slotting_df.copy()

    # Guard required columns instead of raising KeyError — consistent with
    # the other handlers in this module.
    missing = [col for col in ("SKU", "Frequency") if col not in df.columns]
    if missing:
        return f"Missing column(s) {', '.join(missing)} - cannot schedule docks.", None

    # Ties share an average rank; int() truncation still yields a dock in 1..3.
    df["Priority"] = df["Frequency"].rank(ascending=False)
    df["AssignedDock"] = df["Priority"].apply(lambda x: int((x - 1) % 3) + 1)

    reasoning.append("Assigned SKUs to 3 docks based on priority rank.")

    explanation = (
        "### π Dock Scheduling Optimization\n"
        "SKUs allocated to dock doors based on priority.\n\n"
        "#### π Reasoning\n" + "\n".join([f"- {r}" for r in reasoning])
    )

    return explanation, df[["SKU", "Frequency", "Priority", "AssignedDock"]]
|
|
|