"""
Beer Game — Full Streamlit app for Hugging Face Spaces
- Classic parameters (transport delay 2 weeks, order delay 1 week)
- Human = Distributor (must Submit Order, then press Next Week)
- LLM agents (Retailer, Wholesaler, Factory) using OpenAI gpt-4o-mini
- Info sharing toggle + configurable demand history length
- Per-participant sessions (participant_id via URL query param or input)
- Detailed logging (orders, shipments, inventory, backlog, timestamps, raw LLM outputs)
- Automatic upload of per-participant CSV logs to Hugging Face Datasets Hub
"""
|
|
import json
import os
import random
import re
import time
import uuid
from datetime import datetime, timezone
from pathlib import Path

import openai
import pandas as pd
import streamlit as st
from huggingface_hub import HfApi, upload_file
|
|
| |
| |
| |
| |
# ---- Classic Beer Game parameters ----
DEFAULT_WEEKS = 36       # total simulated weeks per session
TRANSPORT_DELAY = 2      # weeks for a shipment to traverse one supply-chain link
ORDER_DELAY = 1          # weeks before a placed order is seen upstream
INITIAL_INVENTORY = 12   # starting on-hand stock per role
INITIAL_BACKLOG = 0      # starting unfilled orders per role

# Model used for all LLM-controlled roles.
OPENAI_MODEL = "gpt-4o-mini"

# Local directory that receives per-participant CSV logs (created at import time).
LOCAL_LOG_DIR = Path("logs")
LOCAL_LOG_DIR.mkdir(exist_ok=True)
|
|
| |
| |
| |
def now_iso():
    """Return the current UTC time as an ISO-8601 string with a 'Z' suffix."""
    # datetime.utcnow() is deprecated (returns a naive datetime); use an aware
    # UTC timestamp and normalize its "+00:00" offset back to the "Z" suffix
    # the log format expects.
    return datetime.now(timezone.utc).isoformat(timespec="milliseconds").replace("+00:00", "Z")
|
|
def fmt(o):
    """Serialize *o* to JSON; fall back to str() when it is not JSON-serializable."""
    try:
        encoded = json.dumps(o, ensure_ascii=False)
    except Exception:
        encoded = str(o)
    return encoded
|
|
| |
| |
| |
# Hugging Face Hub credentials/config, supplied via Space secrets.
HF_TOKEN = os.getenv("HF_TOKEN")
HF_REPO_ID = os.getenv("HF_REPO_ID")  # dataset repo receiving the CSV logs
hf_api = HfApi()  # NOTE(review): appears unused; upload_file() is called directly below.
|
|
def upload_log_to_hf(local_path: Path, participant_id: str):
    """
    Push a local CSV log file to the configured HF dataset repo.

    The file is stored under logs/<participant_id>/<filename>. Requires the
    HF_TOKEN and HF_REPO_ID environment variables (Space secrets); when either
    is missing the upload is skipped and None is returned.

    Returns the resolve URL of the uploaded file, or None on skip/failure.
    """
    if not HF_TOKEN or not HF_REPO_ID:
        st.info("HF_TOKEN or HF_REPO_ID not configured; skipping upload to HF Hub.")
        return None

    repo_path = f"logs/{participant_id}/{local_path.name}"
    try:
        upload_file(
            path_or_fileobj=str(local_path),
            path_in_repo=repo_path,
            repo_id=HF_REPO_ID,
            repo_type="dataset",
            token=HF_TOKEN,
        )
        st.success(f"Uploaded logs to Hugging Face: {HF_REPO_ID}/{repo_path}")
        return f"https://huggingface.co/datasets/{HF_REPO_ID}/resolve/main/{repo_path}"
    except Exception as e:
        st.error(f"Failed to upload logs to HF Hub: {e}")
        return None
|
|
| |
| |
| |
# OpenAI credentials from the environment (Space secret OPENAI_API_KEY).
openai.api_key = os.getenv("OPENAI_API_KEY")
|
|
def call_llm_for_order(role: str, local_state: dict, info_sharing_visible: bool, demand_history: list, max_tokens=40, temperature=0.7):
    """
    Ask the OpenAI model to choose this week's order quantity for `role`.

    Args:
        role: "retailer", "wholesaler" or "factory" (the automated roles).
        local_state: compact state snapshot; supports either the
            state_snapshot_for_prompt() shape (which carries
            "incoming_shipments_next_week") or a raw state dict with "pipeline".
        info_sharing_visible: whether the demand history may be revealed.
        demand_history: recent customer demand values shown when sharing is on.
        max_tokens, temperature: OpenAI sampling parameters.

    Returns:
        (order, raw): a non-negative int order and the raw model text (or an
        error / fallback marker) for logging.
    """
    visible_history = demand_history if info_sharing_visible else []

    # Shipment arriving next week. The snapshot built by
    # state_snapshot_for_prompt() has "incoming_shipments_next_week" and NO
    # "pipeline" key, so the original direct local_state['pipeline'][role]
    # access raised KeyError for every call from step_game; accept both shapes.
    if "incoming_shipments_next_week" in local_state:
        next_shipment = local_state["incoming_shipments_next_week"].get(role, 0)
    else:
        pipe = local_state.get("pipeline", {}).get(role, [])
        next_shipment = pipe[0] if pipe else 0

    prompt = (
        f"You are the {role} in a 4-player Beer Game (Retailer -> Wholesaler -> Distributor -> Factory).\n"
        f"Current week: {local_state['week']}\n"
        f"Local state for {role}:\n"
        f"- Inventory: {local_state['inventory'][role]}\n"
        f"- Backlog: {local_state['backlog'][role]}\n"
        f"- Incoming shipment next week (front of pipeline): {next_shipment}\n"
        f"- Incoming order this week: {local_state['incoming_orders'].get(role, 0)}\n"
    )
    if visible_history:
        prompt += f"- Customer demand history (visible to you): {visible_history}\n"
    prompt += (
        "\nDecide a non-negative integer order quantity to place to your upstream supplier this week.\n"
        "Reply with a single integer only. You may optionally append a short one-sentence reason after a dash."
    )

    try:
        resp = openai.ChatCompletion.create(
            model=OPENAI_MODEL,
            messages=[
                {"role": "system", "content": "You are an automated Beer Game agent who decides weekly orders."},
                {"role": "user", "content": prompt}
            ],
            max_tokens=max_tokens,
            temperature=temperature,
            n=1
        )
        raw = resp.choices[0].message.get("content", "").strip()
    except Exception as e:
        # Keep the game running on API failure; the parse fallback below applies.
        raw = f"OPENAI_ERROR: {str(e)}"

    # Parse the first integer in the reply; clamp negatives to zero.
    m = re.search(r"(-?\d+)", raw or "")
    order = None
    if m:
        try:
            order = int(m.group(1))
            if order < 0:
                order = 0
        except ValueError:  # was a bare `except:`, which also swallowed KeyboardInterrupt etc.
            order = None

    if order is None:
        # Base-stock fallback: order up to INITIAL_INVENTORY plus this week's demand.
        incoming = local_state['incoming_orders'].get(role, 0) or 0
        target = INITIAL_INVENTORY + incoming
        order = max(0, target - (local_state['inventory'].get(role, 0) or 0))
        raw = (raw + " | PARSE_FALLBACK").strip()

    return int(order), raw
|
|
| |
| |
| |
def make_classic_demand(weeks: int):
    """
    Classic Beer Game demand profile: a stable 4 units/week for the first
    4 weeks, then a permanent step up to 8 units/week for the rest of the
    horizon. Adjust here to experiment with other demand shapes.
    """
    return [4 if week < 4 else 8 for week in range(weeks)]
|
|
def init_game(weeks=DEFAULT_WEEKS):
    """
    Build a fresh game-state dict for one participant/session.

    Holds per-role inventory/backlog, shipment pipelines of length
    TRANSPORT_DELAY, order/shipment histories, structured logs, the
    info-sharing settings, and the precomputed customer demand series.
    """
    roles = ["retailer", "wholesaler", "distributor", "factory"]
    state = {
        "participant_id": None,
        "week": 1,
        "weeks_total": weeks,
        "roles": roles,
        "logs": [],
        "info_sharing": False,
        "info_history_weeks": 0,
        "customer_demand": make_classic_demand(weeks),
    }
    # Per-role quantities; pipelines get a fresh list of zeros per role.
    state["inventory"] = {role: INITIAL_INVENTORY for role in roles}
    state["backlog"] = {role: INITIAL_BACKLOG for role in roles}
    state["pipeline"] = {role: [0] * TRANSPORT_DELAY for role in roles}
    state["incoming_orders"] = {role: 0 for role in roles}
    state["orders_history"] = {role: [] for role in roles}
    state["shipments_history"] = {role: [] for role in roles}
    return state
|
|
def step_game(state: dict, distributor_order: int):
    """
    Apply one week's dynamics.
    Order of events (typical simplification):
    1. Customer demand hits retailer this week.
    2. Deliveries that are at pipeline[front] arrive to each role this week.
    3. Roles fulfill incoming orders from downstream (if backlog arises).
    4. Human (distributor) order is recorded; LLMs decide orders for their roles.
    5. Place orders into upstream's pipeline so they will arrive after TRANSPORT_DELAY.
    6. Log everything.
    """
    week = state["week"]
    roles = state["roles"]

    # 1) This week's exogenous customer demand becomes the retailer's incoming order.
    demand = state["customer_demand"][week - 1]
    state["incoming_orders"]["retailer"] = demand

    # 2) Pop the front of each role's shipment pipeline into its inventory.
    arriving = {}
    for r in roles:
        arr = 0
        if len(state["pipeline"][r]) > 0:
            arr = state["pipeline"][r].pop(0)
        state["inventory"][r] += arr
        arriving[r] = arr

    # 3) Fulfil this week's incoming order from on-hand stock; shortfall adds to backlog.
    # NOTE(review): backlog only accumulates here and is never shipped/drained in
    # later weeks when inventory recovers — confirm whether that is intended.
    shipments_out = {}
    for r in roles:
        incoming = state["incoming_orders"].get(r, 0) or 0
        inv = state["inventory"].get(r, 0) or 0
        shipped = min(inv, incoming)
        state["inventory"][r] -= shipped
        unfilled = incoming - shipped
        if unfilled > 0:
            state["backlog"][r] += unfilled
        shipments_out[r] = shipped
        state["shipments_history"][r].append(shipped)

    # 4a) Record the human distributor's order; it becomes the wholesaler's
    # incoming order, fulfilled next week (the 1-week order delay).
    state["orders_history"]["distributor"].append(int(distributor_order))
    state["incoming_orders"]["wholesaler"] = int(distributor_order)

    # Demand-history window that may be shared with LLM agents this week.
    demand_history_visible = []
    if state["info_sharing"] and state["info_history_weeks"] > 0:
        start_idx = max(0, (week - 1) - state["info_history_weeks"])
        demand_history_visible = state["customer_demand"][start_idx: (week - 1)]

    # 4b) Each automated role decides its order via the LLM; each decision is
    # propagated as next week's incoming order one link upstream.
    llm_outputs = {}
    for role in ["retailer", "wholesaler", "factory"]:
        order_val, raw = call_llm_for_order(role, state_snapshot_for_prompt(state), state["info_sharing"], demand_history_visible)
        order_val = max(0, int(order_val))
        state["orders_history"][role].append(order_val)
        llm_outputs[role] = {"order": order_val, "raw": raw}
        if role == "retailer":
            state["incoming_orders"]["distributor"] = order_val
        elif role == "wholesaler":
            state["incoming_orders"]["factory"] = order_val

    # 5) Enqueue placed orders so goods arrive after the transport delay.
    # NOTE(review): the docstring says orders go into the *upstream* pipeline,
    # but this appends each role's order to its DOWNSTREAM neighbour's pipeline
    # (e.g. the factory's order materialises at the wholesaler), and the
    # retailer's order is dropped entirely (downstream is None). It also
    # bypasses the upstream role's inventory. Verify the intended material flow.
    for role in roles:
        if role == "distributor":
            placed_order = int(distributor_order)
        else:
            placed_order = state["orders_history"][role][-1] if state["orders_history"][role] else 0

        downstream_map = {
            "factory": "wholesaler",
            "wholesaler": "distributor",
            "distributor": "retailer",
            "retailer": None
        }
        downstream = downstream_map.get(role)
        if downstream:
            state["pipeline"][downstream].append(placed_order)

    # 6) Append a structured log entry capturing this week's full transition.
    log_entry = {
        "timestamp": now_iso(),
        "week": week,
        "demand": demand,
        "arriving": arriving,
        "shipments_out": shipments_out,
        "orders_submitted": {
            "distributor": int(distributor_order),
            "retailer": state["orders_history"]["retailer"][-1] if state["orders_history"]["retailer"] else None,
            "wholesaler": state["orders_history"]["wholesaler"][-1] if state["orders_history"]["wholesaler"] else None,
            "factory": state["orders_history"]["factory"][-1] if state["orders_history"]["factory"] else None,
        },
        "inventory": dict(state["inventory"]),
        "backlog": dict(state["backlog"]),
        "info_sharing": state["info_sharing"],
        "info_history_weeks": state["info_history_weeks"],
        "llm_raw": {k: v["raw"] for k, v in llm_outputs.items()}
    }
    state["logs"].append(log_entry)

    # Advance the clock; callers check week > weeks_total for game completion.
    state["week"] += 1

    return state
|
|
def state_snapshot_for_prompt(state):
    """
    Build a compact view of `state` for the LLM prompt: week number, per-role
    inventory/backlog copies, this week's incoming orders, and the shipment at
    the front of each role's pipeline (i.e. arriving next week). Avoids
    shipping the whole state object to the model.
    """
    front_of_pipeline = {}
    for role in state["roles"]:
        queue = state["pipeline"][role]
        front_of_pipeline[role] = queue[0] if queue else 0
    return {
        "week": state["week"],
        "inventory": dict(state["inventory"]),
        "backlog": dict(state["backlog"]),
        "incoming_orders": dict(state["incoming_orders"]),
        "incoming_shipments_next_week": front_of_pipeline,
    }
|
|
| |
| |
| |
def save_logs_local(state, participant_id):
    """
    Flatten state["logs"] into a timestamped CSV under LOCAL_LOG_DIR and
    return its Path.

    NOTE(review): an essentially identical function is re-defined later in
    this file (with type annotations); consider removing one of the copies.
    """
    df = pd.json_normalize(state["logs"])
    fname = LOCAL_LOG_DIR / f"logs_{participant_id}_{int(time.time())}.csv"
    df.to_csv(fname, index=False)
    return fname
|
|
def save_and_upload(state, participant_id):
    """Persist logs locally, then attempt a Hub upload; return (local_path, url)."""
    path = save_logs_local(state, participant_id)
    return path, upload_log_to_hf(path, participant_id)
|
|
| |
| |
| |
# ---- Page setup and participant identification ----
st.set_page_config(page_title="Beer Game — Distributor (Human) + LLM Agents", layout="wide")
st.title("🍺 Beer Game — Human Distributor vs LLM agents")

# Resolve the participant id from the URL query string if present.
# The modern st.query_params API returns a plain string per key, so the
# original `qp.get("participant_id", [None])[0]` sliced off the FIRST
# CHARACTER of the id; handle both the string and legacy list shapes.
qp = st.query_params
_raw_pid = qp.get("participant_id") if qp else None
if isinstance(_raw_pid, list):
    pid_from_q = _raw_pid[0] if _raw_pid else None
else:
    pid_from_q = _raw_pid

pid_input = st.text_input("Participant ID (leave blank to auto-generate or use ?participant_id=ID in URL)", value=pid_from_q or "")
if pid_input:
    participant_id = pid_input.strip()
else:
    # Generate a short id once per browser session and reuse it across reruns.
    if "auto_pid" not in st.session_state:
        st.session_state["auto_pid"] = str(uuid.uuid4())[:8]
    participant_id = st.session_state["auto_pid"]

st.sidebar.markdown(f"**Participant ID:** `{participant_id}`")
|
|
| |
# One game session per participant id, stored in Streamlit session state so it
# survives reruns within the same browser session.
if "sessions" not in st.session_state:
    st.session_state["sessions"] = {}

if participant_id not in st.session_state["sessions"]:
    st.session_state["sessions"][participant_id] = init_game(DEFAULT_WEEKS)
    st.session_state["sessions"][participant_id]["participant_id"] = participant_id

# Working reference to this participant's mutable game state.
state = st.session_state["sessions"][participant_id]
|
|
| |
# ---- Sidebar: experiment configuration and environment status ----
st.sidebar.header("Experiment controls")
# Toggles are written straight back into the game state so step_game sees them.
state["info_sharing"] = st.sidebar.checkbox("Enable Information Sharing (show customer demand to all roles)", value=state.get("info_sharing", False))
state["info_history_weeks"] = st.sidebar.slider("How many past weeks of demand to share (0 = none)", 0, 8, value=state.get("info_history_weeks", 0))
st.sidebar.markdown("---")
st.sidebar.write("Model for LLM agents:")
st.sidebar.write(OPENAI_MODEL)
st.sidebar.markdown("---")
# Surface whether HF upload is configured (secrets are never echoed).
st.sidebar.write("HF upload settings:")
st.sidebar.write(f"- HF_REPO_ID: {HF_REPO_ID or 'NOT SET'}")
st.sidebar.write(f"- HF_TOKEN: {'SET' if HF_TOKEN else 'NOT SET'}")
|
|
| |
# ---- Main layout: game board (left) and info/debug panel (right) ----
col_main, col_sidebar = st.columns([3, 1])

with col_main:
    st.header(f"Week {state['week']} / {state['weeks_total']}")
    # Demand the retailer faces this week (None once past the demand horizon).
    demand_display = state["customer_demand"][state["week"] - 1] if state["week"] - 1 < len(state["customer_demand"]) else None
    st.subheader(f"Customer demand (retailer receives this week): {demand_display}")

    # One status panel per role: inventory, backlog, this week's incoming
    # order, and the shipment due to arrive next week.
    roles = state["roles"]
    panels = st.columns(len(roles))
    for i, role in enumerate(roles):
        with panels[i]:
            st.markdown(f"### {role.title()}")
            st.metric("Inventory", state["inventory"][role])
            st.metric("Backlog", state["backlog"][role])
            incoming = state["incoming_orders"].get(role, 0)
            st.write(f"Incoming order (this week): **{incoming}**")
            next_shipment = state["pipeline"][role][0] if state["pipeline"][role] else 0
            st.write(f"Incoming shipment next week: **{next_shipment}**")

    st.markdown("---")
    # Two-step interaction: the human first locks in an order via the form,
    # then presses "Next Week" to run the simulation step.
    with st.form(key=f"order_form_{participant_id}", clear_on_submit=False):
        st.write("### Your (Distributor) decision this week")
        default_val = state["incoming_orders"].get("distributor", 4) or 4
        distributor_order = st.number_input("Order to place to upstream (Wholesaler):", min_value=0, step=1, value=default_val)
        submitted = st.form_submit_button("Submit Order (locks your decision)")

    if submitted:
        # Stash the order per participant so it survives Streamlit reruns.
        st.session_state.setdefault("pending_orders", {})
        st.session_state["pending_orders"][participant_id] = int(distributor_order)
        st.success(f"Order submitted: {distributor_order}. Now click 'Next Week' to process the week.")

    st.markdown("---")
    pending = st.session_state.get("pending_orders", {}).get(participant_id, None)
    if pending is None:
        st.info("Please submit your order first to enable Next Week processing.")
    else:
        if st.button("Next Week — process week and invoke LLM agents"):
            try:
                state = step_game(state, pending)
                # Persist the advanced state back into the session store.
                st.session_state["sessions"][participant_id] = state
                # NOTE(review): this guarded call is redundant — its result is
                # unused and save_logs_local() runs unconditionally just below.
                local_path = save_logs_local_and_return(state, participant_id) if 'save_logs_local_and_return' in globals() else None
                local_file = save_logs_local(state, participant_id)
                uploaded_url = None
                if HF_TOKEN and HF_REPO_ID:
                    uploaded_url = upload_log_to_hf(local_file, participant_id)
                # Clear the pending order so the next week requires a fresh submit.
                del st.session_state["pending_orders"][participant_id]
                st.success(f"Week processed. Advanced to week {state['week']}.")
                if uploaded_url:
                    st.info(f"Logs uploaded to HF: {uploaded_url}")
            except Exception as e:
                st.error(f"Error during Next Week processing: {e}")

    st.markdown("### Recent logs")
    if state["logs"]:
        # Only the last few weeks, to keep the on-screen table compact.
        df = pd.json_normalize(state["logs"][-6:])
        st.dataframe(df, use_container_width=True)
    else:
        st.write("No logs yet. Submit your first order and press Next Week.")
|
|
with col_sidebar:
    st.subheader("Information Sharing (preview)")
    st.write("Toggle on to share real customer demand (current + recent weeks) with all LLM agents.")
    st.write(f"Sharing {state['info_history_weeks']} weeks of history (0 = only current week).")
    if state["info_sharing"]:
        # Preview of the demand slice the agents will see this week.
        h = state["info_history_weeks"]
        start = max(0, (state["week"] - 1) - h)
        hist = state["customer_demand"][start: state["week"]]
        st.write("Demand visible to agents:", hist)

    st.markdown("---")
    st.subheader("Admin / Debug")
    if st.button("Test LLM connection"):
        if not openai.api_key:
            st.error("OpenAI API key is missing. Set OPENAI_API_KEY in Space Secrets.")
        else:
            # Minimal round-trip to verify credentials and model availability.
            try:
                test_prompt = "You are a helpful agent. Reply with '42'."
                resp = openai.ChatCompletion.create(
                    model=OPENAI_MODEL,
                    messages=[{"role":"user","content":test_prompt}],
                    max_tokens=10
                )
                st.write("LLM raw:", resp.choices[0].message.get("content"))
            except Exception as e:
                st.error(f"LLM test failed: {e}")

    st.markdown("---")
    # Manual save/upload, independent of the weekly auto-save.
    if st.button("Save logs now (manual)"):
        if not state["logs"]:
            st.info("No logs to save yet.")
        else:
            local_file = save_logs_local(state, participant_id)
            if HF_TOKEN and HF_REPO_ID:
                url = upload_log_to_hf(local_file, participant_id)
                if url:
                    st.success("Logs uploaded.")
            else:
                st.success(f"Saved local file: {local_file}")
|
|
| |
| |
| |
def save_logs_local(state: dict, participant_id: str):
    """
    Flatten state["logs"] into a timestamped CSV under LOCAL_LOG_DIR and
    return the file's Path.
    """
    out_path = LOCAL_LOG_DIR / f"logs_{participant_id}_{int(time.time())}.csv"
    pd.json_normalize(state["logs"]).to_csv(out_path, index=False)
    return out_path
|
|
| |
def save_logs_local_and_return(state: dict, participant_id: str):
    # Thin alias for save_logs_local(); kept because the UI probes for this
    # name via globals() before calling it.
    return save_logs_local(state, participant_id)
|
|
| |
| |
| |
| |
# ---- End-of-game handling: final save, download button, optional Hub upload ----
if state["week"] > state["weeks_total"]:
    st.success("Game completed for this participant.")
    # Persist the final log set and offer it as a direct download.
    final_csv = save_logs_local(state, participant_id)
    with open(final_csv, "rb") as f:
        st.download_button("Download final logs CSV", data=f, file_name=final_csv.name, mime="text/csv")
    if HF_TOKEN and HF_REPO_ID:
        url = upload_log_to_hf(final_csv, participant_id)
        if url:
            st.write(f"Final logs uploaded to HF Hub: {url}")
|
|