Lilli98 commited on
Commit
52f95bf
·
verified ·
1 Parent(s): 7250881

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +227 -278
app.py CHANGED
@@ -1,13 +1,10 @@
1
  # app.py
2
  """
3
- Beer Game — Full Streamlit app for Hugging Face Spaces
4
- - Classic parameters (transport delay 2 weeks, order delay 1 week)
5
- - Human = Distributor (must Submit Order, then press Next Week)
6
- - LLM agents (Retailer, Wholesaler, Factory) using OpenAI gpt-4o-mini
7
- - Info sharing toggle + configurable demand history length
8
- - Per-participant sessions (participant_id via URL query param or input)
9
- - Detailed logging (orders, shipments, inventory, backlog, timestamps, raw LLM outputs)
10
- - Automatic upload of per-participant CSV logs to Hugging Face Datasets Hub
11
  """
12
 
13
  import os
@@ -16,109 +13,143 @@ import time
16
  import uuid
17
  import random
18
  import json
 
19
  from datetime import datetime
20
  from pathlib import Path
21
 
22
  import streamlit as st
23
  import pandas as pd
24
- import openai
25
  from huggingface_hub import upload_file, HfApi
26
 
27
  # ---------------------------
28
- # CONFIGURABLE PARAMETERS
29
  # ---------------------------
30
- # Classic Beer Game choices: choose 24 or 36 depending on experiment design
31
- DEFAULT_WEEKS = 36
32
- TRANSPORT_DELAY = 2 # shipments take 2 weeks to arrive
33
- ORDER_DELAY = 1 # orders incur 1-week processing delay (modeled via pipeline)
34
  INITIAL_INVENTORY = 12
35
  INITIAL_BACKLOG = 0
36
 
37
- # OpenAI model to use for agents
38
- OPENAI_MODEL = "gpt-4o-mini"
39
 
40
- # Local folder to hold temporary log files before upload
41
  LOCAL_LOG_DIR = Path("logs")
42
  LOCAL_LOG_DIR.mkdir(exist_ok=True)
43
 
 
 
 
 
 
 
 
 
44
  # ---------------------------
45
- # Helper functions
46
  # ---------------------------
47
  def now_iso():
48
  return datetime.utcnow().isoformat(timespec="milliseconds") + "Z"
49
 
 
 
 
 
 
 
 
 
 
 
50
  def fmt(o):
51
  try:
52
  return json.dumps(o, ensure_ascii=False)
53
- except Exception:
54
  return str(o)
55
 
56
  # ---------------------------
57
- # Hugging Face upload helper
58
  # ---------------------------
59
- HF_TOKEN = os.getenv("HF_TOKEN")
60
- HF_REPO_ID = os.getenv("HF_REPO_ID") # e.g., "XinyuLi/beer-game-logs"
61
- hf_api = HfApi()
62
-
63
- def upload_log_to_hf(local_path: Path, participant_id: str):
64
  """
65
- Upload a local CSV file to HF dataset repo under path logs/<participant_id>/...
66
- Requires HF_TOKEN and HF_REPO_ID set as environment variables (Space secrets).
67
  """
68
- if not HF_TOKEN or not HF_REPO_ID:
69
- st.info("HF_TOKEN or HF_REPO_ID not configured; skipping upload to HF Hub.")
70
- return None
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
71
 
72
- dest_path_in_repo = f"logs/{participant_id}/{local_path.name}"
73
- try:
74
- upload_file(
75
- path_or_fileobj=str(local_path),
76
- path_in_repo=dest_path_in_repo,
77
- repo_id=HF_REPO_ID,
78
- repo_type="dataset",
79
- token=HF_TOKEN
80
- )
81
- st.success(f"Uploaded logs to Hugging Face: {HF_REPO_ID}/{dest_path_in_repo}")
82
- return f"https://huggingface.co/datasets/{HF_REPO_ID}/resolve/main/{dest_path_in_repo}"
83
- except Exception as e:
84
- st.error(f"Failed to upload logs to HF Hub: {e}")
85
- return None
86
 
87
  # ---------------------------
88
- # OpenAI helper
89
  # ---------------------------
90
- openai.api_key = os.getenv("OPENAI_API_KEY")
91
-
92
  def call_llm_for_order(role: str, local_state: dict, info_sharing_visible: bool, demand_history: list, max_tokens=40, temperature=0.7):
93
  """
94
- Call OpenAI to decide an integer order for `role`.
95
  Returns (order_int, raw_text)
96
  """
97
- # Compose a careful prompt giving only local info unless info_sharing_visible is True
 
 
 
 
 
98
  visible_history = demand_history if info_sharing_visible else []
99
 
 
100
  prompt = (
101
- f"You are the {role} in a 4-player Beer Game (Retailer -> Wholesaler -> Distributor -> Factory).\n"
102
- f"Current week: {local_state['week']}\n"
103
- f"Local state for {role}:\n"
104
- f"- Inventory: {local_state['inventory'][role]}\n"
105
- f"- Backlog: {local_state['backlog'][role]}\n"
106
- f"- Incoming shipment next week (front of pipeline): {local_state['pipeline'][role][0] if local_state['pipeline'][role] else 0}\n"
107
- f"- Incoming order this week: {local_state['incoming_orders'].get(role, 0)}\n"
108
  )
109
  if visible_history:
110
- prompt += f"- Customer demand history (visible to you): {visible_history}\n"
111
- prompt += (
112
- "\nDecide a non-negative integer order quantity to place to your upstream supplier this week.\n"
113
- "Reply with a single integer only. You may optionally append a short one-sentence reason after a dash."
114
- )
115
 
116
  try:
117
  resp = openai.ChatCompletion.create(
118
  model=OPENAI_MODEL,
119
  messages=[
120
- {"role": "system", "content": "You are an automated Beer Game agent who decides weekly orders."},
121
- {"role": "user", "content": prompt}
122
  ],
123
  max_tokens=max_tokens,
124
  temperature=temperature,
@@ -126,10 +157,10 @@ def call_llm_for_order(role: str, local_state: dict, info_sharing_visible: bool,
126
  )
127
  raw = resp.choices[0].message.get("content", "").strip()
128
  except Exception as e:
129
- raw = f"OPENAI_ERROR: {str(e)}"
130
  # fallback later
131
 
132
- # Extract first integer from model output
133
  m = re.search(r"(-?\d+)", raw or "")
134
  order = None
135
  if m:
@@ -140,37 +171,19 @@ def call_llm_for_order(role: str, local_state: dict, info_sharing_visible: bool,
140
  except:
141
  order = None
142
 
143
- # fallback heuristic if parsing failed or error
144
  if order is None:
145
- # simple policy: target inventory = INITIAL_INVENTORY + incoming_order
146
- incoming = local_state['incoming_orders'].get(role, 0) or 0
147
  target = INITIAL_INVENTORY + incoming
148
- order = max(0, target - (local_state['inventory'].get(role, 0) or 0))
149
  raw = (raw + " | PARSE_FALLBACK").strip()
150
 
151
  return int(order), raw
152
 
153
  # ---------------------------
154
- # Game mechanics
155
  # ---------------------------
156
- def make_classic_demand(weeks: int):
157
- """
158
- Typical demand: first 4 weeks stable (4), then shock (8) for many weeks, then maybe fluctuations.
159
- We'll implement: weeks 0-3 => 4; weeks 4..(weeks-1) => 8
160
- You can adjust as needed.
161
- """
162
- demand = []
163
- for t in range(weeks):
164
- if t < 4:
165
- demand.append(4)
166
- else:
167
- demand.append(8)
168
- return demand
169
-
170
  def init_game(weeks=DEFAULT_WEEKS):
171
- """
172
- Return a dict representing full game state for a single participant/session.
173
- """
174
  roles = ["retailer", "wholesaler", "distributor", "factory"]
175
  state = {
176
  "participant_id": None,
@@ -179,10 +192,8 @@ def init_game(weeks=DEFAULT_WEEKS):
179
  "roles": roles,
180
  "inventory": {r: INITIAL_INVENTORY for r in roles},
181
  "backlog": {r: INITIAL_BACKLOG for r in roles},
182
- # pipeline: each role has a queue representing shipments that will arrive next weeks;
183
- # we keep length = TRANSPORT_DELAY, front is arriving next week.
184
  "pipeline": {r: [0] * TRANSPORT_DELAY for r in roles},
185
- "incoming_orders": {r: 0 for r in roles}, # orders received this week from downstream
186
  "orders_history": {r: [] for r in roles},
187
  "shipments_history": {r: [] for r in roles},
188
  "logs": [],
@@ -192,193 +203,171 @@ def init_game(weeks=DEFAULT_WEEKS):
192
  }
193
  return state
194
 
 
 
 
 
 
 
 
 
 
 
195
  def step_game(state: dict, distributor_order: int):
196
- """
197
- Apply one week's dynamics.
198
- Order of events (typical simplification):
199
- 1. Customer demand hits retailer this week.
200
- 2. Deliveries that are at pipeline[front] arrive to each role this week.
201
- 3. Roles fulfill incoming orders from downstream (if backlog arises).
202
- 4. Human (distributor) order is recorded; LLMs decide orders for their roles.
203
- 5. Place orders into upstream's pipeline so they will arrive after TRANSPORT_DELAY.
204
- 6. Log everything.
205
- """
206
  week = state["week"]
207
  roles = state["roles"]
208
 
209
- # 1) Customer demand for this week to retailer
210
- demand = state["customer_demand"][week - 1] # week is 1-indexed
 
 
 
 
211
  state["incoming_orders"]["retailer"] = demand
212
 
213
- # 2) Shipments arrive (front of pipeline)
214
  arriving = {}
215
  for r in roles:
216
- # Pop front arrival if exists
217
  arr = 0
218
- if len(state["pipeline"][r]) > 0:
219
- arr = state["pipeline"][r].pop(0)
220
- state["inventory"][r] += arr
 
 
 
 
221
  arriving[r] = arr
222
 
223
- # 3) Fulfill incoming orders from downstream (downstream -> this role)
224
- # For each role, the incoming_order is whatever downstream ordered last turn.
225
- # For first week, incoming_orders maybe zero for non-retailer; that's fine.
226
  shipments_out = {}
227
  for r in roles:
228
- incoming = state["incoming_orders"].get(r, 0) or 0
229
- inv = state["inventory"].get(r, 0) or 0
230
  shipped = min(inv, incoming)
231
- state["inventory"][r] -= shipped
232
- # any unfilled becomes backlog
233
  unfilled = incoming - shipped
234
  if unfilled > 0:
235
- state["backlog"][r] += unfilled
236
  shipments_out[r] = shipped
237
- state["shipments_history"][r].append(shipped)
238
 
239
- # 4) Record human distributor order (this week's order placed by distributor)
240
- # distributor_order is the order placed to wholesaler by the distributor this week
241
- # Save to orders_history for distributor
242
  state["orders_history"]["distributor"].append(int(distributor_order))
243
- # Also set downstream->upstream linking: the upstream (wholesaler) will see distributor_order as incoming next period
244
  state["incoming_orders"]["wholesaler"] = int(distributor_order)
245
 
246
- # 5) LLM decisions for AI roles (retailer, wholesaler, factory)
247
  demand_history_visible = []
248
- if state["info_sharing"] and state["info_history_weeks"] > 0:
249
  start_idx = max(0, (week - 1) - state["info_history_weeks"])
250
- demand_history_visible = state["customer_demand"][start_idx: (week - 1)]
251
 
252
  llm_outputs = {}
253
  for role in ["retailer", "wholesaler", "factory"]:
254
- order_val, raw = call_llm_for_order(role, state_snapshot_for_prompt(state), state["info_sharing"], demand_history_visible)
255
  order_val = max(0, int(order_val))
256
  state["orders_history"][role].append(order_val)
257
  llm_outputs[role] = {"order": order_val, "raw": raw}
258
- # set incoming_orders for upstream relation: upstream will see this order next period
259
- # e.g., if retailer orders X, upstream (distributor) incoming_orders will be X
260
  if role == "retailer":
261
  state["incoming_orders"]["distributor"] = order_val
262
  elif role == "wholesaler":
263
  state["incoming_orders"]["factory"] = order_val
264
- # factory's upstream is the supplier/external: we don't model beyond factory
265
 
266
- # 6) Place orders into pipelines: these are shipments that will be sent upstream now and arrive after TRANSPORT_DELAY
267
- # In the simple Beer Game, the shipped amounts are based on inventories; but orders placed lead to upstream shipments in future after they process.
268
- # We'll model that orders placed this week translate into future shipments arriving after TRANSPORT_DELAY at the ordering party.
269
  for role in roles:
270
- # Determine the order placed by this role this week:
271
  if role == "distributor":
272
  placed_order = int(distributor_order)
273
- else:
274
- # role in orders_history last appended
275
- placed_order = state["orders_history"][role][-1] if state["orders_history"][role] else 0
276
-
277
- # For the downstream partner (the entity that will receive the shipment), we append to that partner's pipeline tail
278
- # Example: distributor placed order to wholesaler -> wholesaler will receive shipment after TRANSPORT_DELAY
279
- # Map role -> downstream partner (who receives shipments from role)
280
- # shipments flow downstream: factory -> wholesaler -> distributor -> retailer
281
- downstream_map = {
282
- "factory": "wholesaler",
283
- "wholesaler": "distributor",
284
- "distributor": "retailer",
285
- "retailer": None
286
- }
287
  downstream = downstream_map.get(role)
288
  if downstream:
289
- # append zeros if pipeline too short to ensure correct index, then append placed_order at tail
290
- # We want the placed_order to be delivered to downstream after TRANSPORT_DELAY weeks (so push at tail)
291
  state["pipeline"][downstream].append(placed_order)
292
 
293
- # 7) Log the week's summary
294
  log_entry = {
295
  "timestamp": now_iso(),
296
  "week": week,
297
  "demand": demand,
298
  "arriving": arriving,
299
  "shipments_out": shipments_out,
300
- "orders_submitted": {
301
- "distributor": int(distributor_order),
302
- "retailer": state["orders_history"]["retailer"][-1] if state["orders_history"]["retailer"] else None,
303
- "wholesaler": state["orders_history"]["wholesaler"][-1] if state["orders_history"]["wholesaler"] else None,
304
- "factory": state["orders_history"]["factory"][-1] if state["orders_history"]["factory"] else None,
305
- },
306
- "inventory": dict(state["inventory"]),
307
- "backlog": dict(state["backlog"]),
308
- "info_sharing": state["info_sharing"],
309
- "info_history_weeks": state["info_history_weeks"],
310
  "llm_raw": {k: v["raw"] for k, v in llm_outputs.items()}
311
  }
312
  state["logs"].append(log_entry)
313
 
314
- # 8) Advance week
315
- state["week"] += 1
316
 
317
  return state
318
 
319
- def state_snapshot_for_prompt(state):
320
- """
321
- Prepare a compact snapshot of state for LLM prompt (avoid sending huge objects).
322
- We'll include week, inventory and backlog for each role and incoming_orders for this week.
323
- """
324
- snap = {
325
- "week": state["week"],
326
- "inventory": state["inventory"].copy(),
327
- "backlog": state["backlog"].copy(),
328
- "incoming_orders": state["incoming_orders"].copy(),
329
- # pipeline front (arriving next week)
330
- "incoming_shipments_next_week": {r: (state["pipeline"][r][0] if state["pipeline"][r] else 0) for r in state["roles"]}
331
- }
332
- return snap
333
-
334
  # ---------------------------
335
- # Persistence: local + HF upload
336
  # ---------------------------
337
- def save_logs_local(state, participant_id):
338
- df = pd.json_normalize(state["logs"])
339
  fname = LOCAL_LOG_DIR / f"logs_{participant_id}_{int(time.time())}.csv"
340
  df.to_csv(fname, index=False)
341
  return fname
342
 
343
- def save_and_upload(state, participant_id):
344
- local_path = save_logs_local(state, participant_id)
345
- url = upload_log_to_hf(local_path, participant_id)
346
- return local_path, url
 
 
 
 
 
 
 
 
 
347
 
348
  # ---------------------------
349
- # Streamlit UI & session management
350
  # ---------------------------
351
- st.set_page_config(page_title="Beer Game Distributor (Human) + LLM Agents", layout="wide")
352
  st.title("🍺 Beer Game — Human Distributor vs LLM agents")
353
 
354
- # Participant id: prefer query param or user input
355
  qp = st.query_params
356
  pid_from_q = qp.get("participant_id", [None])[0] if qp else None
357
-
358
- pid_input = st.text_input("Participant ID (leave blank to auto-generate or use ?participant_id=ID in URL)", value=pid_from_q or "")
359
- if pid_input:
360
- participant_id = pid_input.strip()
361
- else:
362
- if "auto_pid" not in st.session_state:
363
- st.session_state["auto_pid"] = str(uuid.uuid4())[:8]
364
- participant_id = st.session_state["auto_pid"]
365
-
366
  st.sidebar.markdown(f"**Participant ID:** `{participant_id}`")
367
 
368
- # Multi-session container in st.session_state
369
  if "sessions" not in st.session_state:
370
  st.session_state["sessions"] = {}
371
 
 
 
 
 
 
 
 
372
  if participant_id not in st.session_state["sessions"]:
373
  st.session_state["sessions"][participant_id] = init_game(DEFAULT_WEEKS)
374
  st.session_state["sessions"][participant_id]["participant_id"] = participant_id
375
 
 
376
  state = st.session_state["sessions"][participant_id]
 
 
377
 
378
- # Sidebar controls: info sharing, demand history slider, quick config
379
  st.sidebar.header("Experiment controls")
380
- state["info_sharing"] = st.sidebar.checkbox("Enable Information Sharing (show customer demand to all roles)", value=state.get("info_sharing", False))
381
- state["info_history_weeks"] = st.sidebar.slider("How many past weeks of demand to share (0 = none)", 0, 8, value=state.get("info_history_weeks", 0))
382
  st.sidebar.markdown("---")
383
  st.sidebar.write("Model for LLM agents:")
384
  st.sidebar.write(OPENAI_MODEL)
@@ -387,84 +376,73 @@ st.sidebar.write("HF upload settings:")
387
  st.sidebar.write(f"- HF_REPO_ID: {HF_REPO_ID or 'NOT SET'}")
388
  st.sidebar.write(f"- HF_TOKEN: {'SET' if HF_TOKEN else 'NOT SET'}")
389
 
390
- # Main UI: show week, metrics, panels
391
- col_main, col_sidebar = st.columns([3, 1])
392
-
393
  with col_main:
394
  st.header(f"Week {state['week']} / {state['weeks_total']}")
395
- # show demand for this week (if info sharing or for distributor only?)
396
- demand_display = state["customer_demand"][state["week"] - 1] if state["week"] - 1 < len(state["customer_demand"]) else None
397
  st.subheader(f"Customer demand (retailer receives this week): {demand_display}")
398
 
399
- # show role panels in a grid
400
  roles = state["roles"]
401
  panels = st.columns(len(roles))
402
  for i, role in enumerate(roles):
403
  with panels[i]:
404
  st.markdown(f"### {role.title()}")
405
- st.metric("Inventory", state["inventory"][role])
406
- st.metric("Backlog", state["backlog"][role])
407
  incoming = state["incoming_orders"].get(role, 0)
408
  st.write(f"Incoming order (this week): **{incoming}**")
409
- next_shipment = state["pipeline"][role][0] if state["pipeline"][role] else 0
410
- st.write(f"Incoming shipment next week: **{next_shipment}**")
411
 
412
  st.markdown("---")
413
- # Distributor input box + submit button
414
  with st.form(key=f"order_form_{participant_id}", clear_on_submit=False):
415
  st.write("### Your (Distributor) decision this week")
416
  default_val = state["incoming_orders"].get("distributor", 4) or 4
417
  distributor_order = st.number_input("Order to place to upstream (Wholesaler):", min_value=0, step=1, value=default_val)
418
  submitted = st.form_submit_button("Submit Order (locks your decision)")
419
-
420
  if submitted:
421
- # store pending order in session until Next Week pressed
422
  st.session_state.setdefault("pending_orders", {})
423
  st.session_state["pending_orders"][participant_id] = int(distributor_order)
424
  st.success(f"Order submitted: {distributor_order}. Now click 'Next Week' to process the week.")
425
 
426
  st.markdown("---")
427
- # Next Week button: only enabled if pending order exists
428
  pending = st.session_state.get("pending_orders", {}).get(participant_id, None)
429
  if pending is None:
430
  st.info("Please submit your order first to enable Next Week processing.")
431
  else:
432
  if st.button("Next Week — process week and invoke LLM agents"):
433
- # step game with pending order
434
- try:
435
- state = step_game(state, pending)
436
- # save state back
437
- st.session_state["sessions"][participant_id] = state
438
- # auto-save logs to HF after each week (can change to only end of game)
439
- local_path = save_logs_local_and_return(state, participant_id) if 'save_logs_local_and_return' in globals() else None
440
- # default: immediate upload
441
- local_file = save_logs_local(state, participant_id)
442
- uploaded_url = None
443
- if HF_TOKEN and HF_REPO_ID:
444
- uploaded_url = upload_log_to_hf(local_file, participant_id)
445
- # remove pending order
446
- del st.session_state["pending_orders"][participant_id]
447
- st.success(f"Week processed. Advanced to week {state['week']}.")
448
- if uploaded_url:
449
- st.info(f"Logs uploaded to HF: {uploaded_url}")
450
- except Exception as e:
451
- st.error(f"Error during Next Week processing: {e}")
452
 
453
  st.markdown("### Recent logs")
454
- if state["logs"]:
455
- # show last 6 logs in a readable table
456
  df = pd.json_normalize(state["logs"][-6:])
457
  st.dataframe(df, use_container_width=True)
458
  else:
459
  st.write("No logs yet. Submit your first order and press Next Week.")
460
 
461
- with col_sidebar:
462
  st.subheader("Information Sharing (preview)")
463
- st.write("Toggle on to share real customer demand (current + recent weeks) with all LLM agents.")
464
- st.write(f"Sharing {state['info_history_weeks']} weeks of history (0 = only current week).")
465
- if state["info_sharing"]:
466
- # display recent demand history according to slider
467
- h = state["info_history_weeks"]
468
  start = max(0, (state["week"] - 1) - h)
469
  hist = state["customer_demand"][start: state["week"]]
470
  st.write("Demand visible to agents:", hist)
@@ -473,61 +451,32 @@ with col_sidebar:
473
  st.subheader("Admin / Debug")
474
  if st.button("Test LLM connection"):
475
  if not openai.api_key:
476
- st.error("OpenAI API key is missing. Set OPENAI_API_KEY in Space Secrets.")
477
  else:
478
- # quick test prompt
479
  try:
480
- test_prompt = "You are a helpful agent. Reply with '42'."
481
- resp = openai.ChatCompletion.create(
482
- model=OPENAI_MODEL,
483
- messages=[{"role":"user","content":test_prompt}],
484
- max_tokens=10
485
- )
486
  st.write("LLM raw:", resp.choices[0].message.get("content"))
487
  except Exception as e:
488
  st.error(f"LLM test failed: {e}")
489
 
490
- st.markdown("---")
491
  if st.button("Save logs now (manual)"):
492
- if not state["logs"]:
493
- st.info("No logs to save yet.")
494
  else:
495
  local_file = save_logs_local(state, participant_id)
496
- if HF_TOKEN and HF_REPO_ID:
497
- url = upload_log_to_hf(local_file, participant_id)
498
- if url:
499
- st.success("Logs uploaded.")
500
- else:
501
- st.success(f"Saved local file: {local_file}")
502
 
503
  # ---------------------------
504
- # Utility save functions (placed after UI to avoid NameError in some deployments)
505
  # ---------------------------
506
- def save_logs_local(state: dict, participant_id: str):
507
- """
508
- Save logs to local logs directory and return Path.
509
- """
510
- df = pd.json_normalize(state["logs"])
511
- fname = LOCAL_LOG_DIR / f"logs_{participant_id}_{int(time.time())}.csv"
512
- df.to_csv(fname, index=False)
513
- return fname
514
-
515
- # alias used earlier if present
516
- def save_logs_local_and_return(state: dict, participant_id: str):
517
- return save_logs_local(state, participant_id)
518
-
519
- # ---------------------------
520
- # End-of-game auto actions
521
- # ---------------------------
522
- # If game has finished for this participant, offer final download / upload
523
- if state["week"] > state["weeks_total"]:
524
  st.success("Game completed for this participant.")
525
- # prepare final CSV
526
  final_csv = save_logs_local(state, participant_id)
527
  with open(final_csv, "rb") as f:
528
  st.download_button("Download final logs CSV", data=f, file_name=final_csv.name, mime="text/csv")
529
  if HF_TOKEN and HF_REPO_ID:
530
- url = upload_log_to_hf(final_csv, participant_id)
531
  if url:
532
  st.write(f"Final logs uploaded to HF Hub: {url}")
533
-
 
1
  # app.py
2
  """
3
+ Beer Game — Robust full Streamlit app (fixed pipeline/Retailer KeyError)
4
+ - Uses old openai SDK style (openai==0.28.0) to avoid proxies/new-client issues on Spaces
5
+ - Only uploads logs to HF at end of game
6
+ - Ensures missing keys are initialized for backward compatibility
7
+ - Unified lowercase role keys: 'retailer','wholesaler','distributor','factory'
 
 
 
8
  """
9
 
10
  import os
 
13
  import uuid
14
  import random
15
  import json
16
+ import traceback
17
  from datetime import datetime
18
  from pathlib import Path
19
 
20
  import streamlit as st
21
  import pandas as pd
22
+ import openai # expects openai==0.28.0 in requirements.txt
23
  from huggingface_hub import upload_file, HfApi
24
 
25
  # ---------------------------
26
+ # CONFIG
27
  # ---------------------------
28
+ DEFAULT_WEEKS = 36 # 24 36 可选,默认 36(你可以改回 24)
29
+ TRANSPORT_DELAY = 2
 
 
30
  INITIAL_INVENTORY = 12
31
  INITIAL_BACKLOG = 0
32
 
33
+ OPENAI_MODEL = "gpt-4o-mini" # or "gpt-3.5-turbo" for cheaper/testing
 
34
 
 
35
  LOCAL_LOG_DIR = Path("logs")
36
  LOCAL_LOG_DIR.mkdir(exist_ok=True)
37
 
38
+ # HF settings (via Secrets)
39
+ HF_TOKEN = os.getenv("HF_TOKEN")
40
+ HF_REPO_ID = os.getenv("HF_REPO_ID") # e.g. "XinyuLi/beer-game-logs"
41
+ hf_api = HfApi()
42
+
43
+ # OpenAI key (old SDK usage)
44
+ openai.api_key = os.getenv("OPENAI_API_KEY")
45
+
46
  # ---------------------------
47
+ # HELPERS
48
  # ---------------------------
49
  def now_iso():
50
  return datetime.utcnow().isoformat(timespec="milliseconds") + "Z"
51
 
52
+ def make_classic_demand(weeks: int):
53
+ # first 4 weeks: 4, from week 5 onwards: 8 (classic shock)
54
+ demand = []
55
+ for t in range(weeks):
56
+ if t < 4:
57
+ demand.append(4)
58
+ else:
59
+ demand.append(8)
60
+ return demand
61
+
62
  def fmt(o):
63
  try:
64
  return json.dumps(o, ensure_ascii=False)
65
+ except:
66
  return str(o)
67
 
68
  # ---------------------------
69
+ # STATE COMPATIBILITY (关键:保证 pipeline / orders 等键存在)
70
  # ---------------------------
71
+ def ensure_state_compat(state: dict):
 
 
 
 
72
  """
73
+ Ensure a state dict has all required keys and sensible defaults.
74
+ This protects against old/incomplete session_state entries.
75
  """
76
+ roles = state.get("roles", ["retailer", "wholesaler", "distributor", "factory"])
77
+ state.setdefault("roles", roles)
78
+ state.setdefault("weeks_total", state.get("weeks_total", DEFAULT_WEEKS))
79
+ state.setdefault("week", state.get("week", 1))
80
+
81
+ # inventories/backlogs
82
+ state.setdefault("inventory", {r: INITIAL_INVENTORY for r in roles})
83
+ state.setdefault("backlog", {r: INITIAL_BACKLOG for r in roles})
84
+
85
+ # pipeline: ensure lists and proper length >= TRANSPORT_DELAY
86
+ if "pipeline" not in state:
87
+ state["pipeline"] = {r: [0] * TRANSPORT_DELAY for r in roles}
88
+ else:
89
+ for r in roles:
90
+ state["pipeline"].setdefault(r, [0] * TRANSPORT_DELAY)
91
+ # pad if shorter than TRANSPORT_DELAY
92
+ if len(state["pipeline"][r]) < TRANSPORT_DELAY:
93
+ state["pipeline"][r] = state["pipeline"][r] + [0] * (TRANSPORT_DELAY - len(state["pipeline"][r]))
94
+
95
+ # incoming_orders, orders_history, shipments_history
96
+ state.setdefault("incoming_orders", {r: 0 for r in roles})
97
+ state.setdefault("orders_history", {r: [] for r in roles})
98
+ state.setdefault("shipments_history", {r: [] for r in roles})
99
+ state.setdefault("logs", [])
100
+ state.setdefault("info_sharing", False)
101
+ state.setdefault("info_history_weeks", 0)
102
+ # demand
103
+ if "customer_demand" not in state:
104
+ state["customer_demand"] = make_classic_demand(state["weeks_total"])
105
+ else:
106
+ # if demand exists but wrong length, regenerate
107
+ if len(state["customer_demand"]) < state["weeks_total"]:
108
+ state["customer_demand"] = make_classic_demand(state["weeks_total"])
109
 
110
+ # ensure week in bounds
111
+ if state["week"] < 1:
112
+ state["week"] = 1
113
+ if state["week"] > state["weeks_total"] + 1:
114
+ state["week"] = state["weeks_total"] + 1
115
+
116
+ return state
 
 
 
 
 
 
 
117
 
118
  # ---------------------------
119
+ # LLM call (old openai SDK)
120
  # ---------------------------
 
 
121
  def call_llm_for_order(role: str, local_state: dict, info_sharing_visible: bool, demand_history: list, max_tokens=40, temperature=0.7):
122
  """
123
+ role must be lowercase key matching state dicts (e.g., 'retailer').
124
  Returns (order_int, raw_text)
125
  """
126
+ # safety: ensure pipeline/inventory keys exist
127
+ pipeline_next = local_state.get("pipeline", {}).get(role, [0])[0] if local_state.get("pipeline", {}).get(role) else 0
128
+ inventory = local_state.get("inventory", {}).get(role, 0)
129
+ backlog = local_state.get("backlog", {}).get(role, 0)
130
+ incoming_order = local_state.get("incoming_orders", {}).get(role, 0)
131
+
132
  visible_history = demand_history if info_sharing_visible else []
133
 
134
+ # build prompt (concise)
135
  prompt = (
136
+ f"You are the {role.title()} in a 4-player Beer Game (Retailer -> Wholesaler -> Distributor -> Factory).\n"
137
+ f"Week: {local_state.get('week')}\n"
138
+ f"- Inventory: {inventory}\n"
139
+ f"- Backlog: {backlog}\n"
140
+ f"- Incoming shipment next week: {pipeline_next}\n"
141
+ f"- Incoming order this week: {incoming_order}\n"
 
142
  )
143
  if visible_history:
144
+ prompt += f"- Customer demand history (visible): {visible_history}\n"
145
+ prompt += "\nDecide a **non-negative integer** order quantity to place to your upstream supplier this week. Reply with an integer only."
 
 
 
146
 
147
  try:
148
  resp = openai.ChatCompletion.create(
149
  model=OPENAI_MODEL,
150
  messages=[
151
+ {"role": "system", "content": "You are an automated Beer Game agent."},
152
+ {"role": "user", "content": prompt},
153
  ],
154
  max_tokens=max_tokens,
155
  temperature=temperature,
 
157
  )
158
  raw = resp.choices[0].message.get("content", "").strip()
159
  except Exception as e:
160
+ raw = f"OPENAI_ERROR: {e}"
161
  # fallback later
162
 
163
+ # parse first integer
164
  m = re.search(r"(-?\d+)", raw or "")
165
  order = None
166
  if m:
 
171
  except:
172
  order = None
173
 
 
174
  if order is None:
175
+ # fallback heuristic
176
+ incoming = incoming_order or 0
177
  target = INITIAL_INVENTORY + incoming
178
+ order = max(0, target - inventory)
179
  raw = (raw + " | PARSE_FALLBACK").strip()
180
 
181
  return int(order), raw
182
 
183
  # ---------------------------
184
+ # GAME LOGIC (uses lowercase role keys)
185
  # ---------------------------
 
 
 
 
 
 
 
 
 
 
 
 
 
 
186
  def init_game(weeks=DEFAULT_WEEKS):
 
 
 
187
  roles = ["retailer", "wholesaler", "distributor", "factory"]
188
  state = {
189
  "participant_id": None,
 
192
  "roles": roles,
193
  "inventory": {r: INITIAL_INVENTORY for r in roles},
194
  "backlog": {r: INITIAL_BACKLOG for r in roles},
 
 
195
  "pipeline": {r: [0] * TRANSPORT_DELAY for r in roles},
196
+ "incoming_orders": {r: 0 for r in roles},
197
  "orders_history": {r: [] for r in roles},
198
  "shipments_history": {r: [] for r in roles},
199
  "logs": [],
 
203
  }
204
  return state
205
 
206
+ def state_snapshot_for_prompt(state: dict):
207
+ # safe snapshot (keys lowercase)
208
+ return {
209
+ "week": state.get("week"),
210
+ "inventory": state.get("inventory", {}).copy(),
211
+ "backlog": state.get("backlog", {}).copy(),
212
+ "incoming_orders": state.get("incoming_orders", {}).copy(),
213
+ "incoming_shipments_next_week": {r: (state.get("pipeline", {}).get(r, [0])[0] if state.get("pipeline", {}).get(r) else 0) for r in state.get("roles", [])}
214
+ }
215
+
216
def step_game(state: dict, distributor_order: int):
    """Advance the Beer Game simulation by one week.

    Order of operations each week:
      1. Customer demand arrives at the retailer.
      2. In-transit shipments (front of each pipeline) are delivered.
      3. Each role ships as much of its incoming order as inventory allows;
         any shortfall accumulates as backlog.
      4. The human distributor's submitted order is recorded.
      5. The three LLM agents (retailer, wholesaler, factory) choose orders.
      6. Orders are pushed into the transport pipelines.
      7. A detailed log entry is appended.
      8. The week counter is incremented.

    Parameters:
        state: mutable per-participant game state; mutated in place.
        distributor_order: the order the human distributor submitted.

    Returns:
        The same state object, advanced by one week (returned unchanged if
        the game has already finished).
    """
    # defensive: ensure compatible keys (older saved sessions may be missing some)
    ensure_state_compat(state)

    week = state["week"]
    roles = state["roles"]

    if week > state["weeks_total"]:
        # already finished; do not advance further
        return state

    # 1) customer demand hits retailer
    demand = state["customer_demand"][week - 1]
    state["incoming_orders"]["retailer"] = demand

    # 2) shipments arrive (front of each pipeline)
    arriving = {}
    for r in roles:
        arr = 0
        if state.get("pipeline", {}).get(r):
            # pop front safely
            try:
                arr = state["pipeline"][r].pop(0)
            except Exception:
                arr = 0
        state["inventory"][r] = state["inventory"].get(r, 0) + (arr or 0)
        arriving[r] = arr

    # 3) fulfill incoming orders (downstream -> this role)
    shipments_out = {}
    for r in roles:
        incoming = state.get("incoming_orders", {}).get(r, 0) or 0
        inv = state.get("inventory", {}).get(r, 0) or 0
        shipped = min(inv, incoming)
        state["inventory"][r] = inv - shipped
        unfilled = incoming - shipped
        if unfilled > 0:
            # NOTE(review): backlog only ever grows here; it is never drawn
            # down in later weeks when stock recovers — confirm intended.
            state["backlog"][r] = state.get("backlog", {}).get(r, 0) + unfilled
        shipments_out[r] = shipped
        state["shipments_history"].setdefault(r, []).append(shipped)

    # 4) record human distributor order
    state["orders_history"]["distributor"].append(int(distributor_order))
    state["incoming_orders"]["wholesaler"] = int(distributor_order)

    # 5) LLM decisions (demand history shared only if info_sharing enabled)
    demand_history_visible = []
    if state.get("info_sharing") and state.get("info_history_weeks", 0) > 0:
        # history strictly before the current week
        start_idx = max(0, (week - 1) - state["info_history_weeks"])
        demand_history_visible = state["customer_demand"][start_idx:(week - 1)]

    llm_outputs = {}
    for role in ["retailer", "wholesaler", "factory"]:
        order_val, raw = call_llm_for_order(role, state_snapshot_for_prompt(state), state.get("info_sharing", False), demand_history_visible)
        order_val = max(0, int(order_val))  # clamp: orders can never be negative
        state["orders_history"][role].append(order_val)
        llm_outputs[role] = {"order": order_val, "raw": raw}
        # set incoming_orders for upstream parties (visible next week)
        if role == "retailer":
            state["incoming_orders"]["distributor"] = order_val
        elif role == "wholesaler":
            state["incoming_orders"]["factory"] = order_val

    # 6) place orders into pipelines (will arrive after TRANSPORT_DELAY)
    # NOTE(review): an order placed by `role` is enqueued into the DOWNSTREAM
    # role's pipeline (i.e. goods ship downstream immediately upon ordering,
    # and the retailer's own order goes nowhere) — confirm this matches the
    # intended ORDER_DELAY/TRANSPORT_DELAY model.
    downstream_map = {"factory": "wholesaler", "wholesaler": "distributor", "distributor": "retailer", "retailer": None}
    for role in roles:
        placed_order = state["orders_history"][role][-1] if state["orders_history"].get(role) else 0
        if role == "distributor":
            placed_order = int(distributor_order)
        downstream = downstream_map.get(role)
        if downstream:
            state["pipeline"].setdefault(downstream, [])
            state["pipeline"][downstream].append(placed_order)

    # 7) logging
    log_entry = {
        "timestamp": now_iso(),
        "week": week,
        "demand": demand,
        "arriving": arriving,
        "shipments_out": shipments_out,
        "orders_submitted": {r: (state["orders_history"].get(r, [None])[-1] if state["orders_history"].get(r) else None) for r in roles},
        "inventory": {r: state["inventory"].get(r, 0) for r in roles},
        "backlog": {r: state["backlog"].get(r, 0) for r in roles},
        "info_sharing": state.get("info_sharing", False),
        "info_history_weeks": state.get("info_history_weeks", 0),
        "llm_raw": {k: v["raw"] for k, v in llm_outputs.items()}
    }
    state["logs"].append(log_entry)

    # 8) advance week
    state["week"] = state.get("week", 1) + 1

    return state
310
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
311
  # ---------------------------
312
+ # Persistence helpers
313
  # ---------------------------
314
def save_logs_local(state: dict, participant_id: str):
    """Flatten the session's log entries to a timestamped CSV and return its path."""
    records = state.get("logs", [])
    out_path = LOCAL_LOG_DIR / f"logs_{participant_id}_{int(time.time())}.csv"
    pd.json_normalize(records).to_csv(out_path, index=False)
    return out_path
319
 
320
def upload_log_to_hf_at_end(local_file: Path, participant_id: str):
    """
    Only call this at the end of the game to upload final CSV to HF dataset.
    """
    # Without credentials/repo config, uploading is a silent no-op.
    if not HF_TOKEN or not HF_REPO_ID:
        return None
    dest = f"logs/{participant_id}/{local_file.name}"
    try:
        upload_file(
            path_or_fileobj=str(local_file),
            path_in_repo=dest,
            repo_id=HF_REPO_ID,
            repo_type="dataset",
            token=HF_TOKEN,
        )
    except Exception as e:
        # Surface the failure in the UI but keep the app running.
        st.error(f"HF upload failed: {e}")
        return None
    return f"https://huggingface.co/datasets/{HF_REPO_ID}/resolve/main/{dest}"
333
 
334
# ---------------------------
# STREAMLIT UI & session mgmt
# ---------------------------
st.set_page_config(page_title="Beer Game (Distributor + LLMs)", layout="wide")
st.title("🍺 Beer Game — Human Distributor vs LLM agents")

# participant id via query param or input
qp = st.query_params
# st.query_params values are plain strings; indexing [0] would return only
# the first character of the id (old experimental API returned lists).
pid_from_q = qp.get("participant_id") if qp else None
pid_input = st.text_input("Participant ID (leave blank to auto-generate or use ?participant_id=ID)", value=pid_from_q or "")
# Fall back to a stable auto-generated 8-char id kept in session state.
participant_id = pid_input.strip() if pid_input else st.session_state.setdefault("auto_pid", str(uuid.uuid4())[:8])

st.sidebar.markdown(f"**Participant ID:** `{participant_id}`")
346
 
347
+ # sessions container
348
  if "sessions" not in st.session_state:
349
  st.session_state["sessions"] = {}
350
 
351
+ # reset button for debugging / clearing old sessions
352
+ if st.sidebar.button("Reset session (clear saved state)"):
353
+ if participant_id in st.session_state["sessions"]:
354
+ del st.session_state["sessions"][participant_id]
355
+ st.experimental_rerun()
356
+
357
+ # create or ensure session state
358
  if participant_id not in st.session_state["sessions"]:
359
  st.session_state["sessions"][participant_id] = init_game(DEFAULT_WEEKS)
360
  st.session_state["sessions"][participant_id]["participant_id"] = participant_id
361
 
362
+ # retrieve and ensure compatibility immediately
363
  state = st.session_state["sessions"][participant_id]
364
+ state = ensure_state_compat(state)
365
+ st.session_state["sessions"][participant_id] = state # write back
366
 
367
# sidebar controls — experimenter-facing toggles, persisted into the state dict
st.sidebar.header("Experiment controls")
state["info_sharing"] = st.sidebar.checkbox(
    "Enable Information Sharing (share demand)",
    value=state.get("info_sharing", False),
)
state["info_history_weeks"] = st.sidebar.slider(
    "Weeks of demand history to share (0 = none)",
    0,
    8,
    value=state.get("info_history_weeks", 0),
)
st.sidebar.markdown("---")
st.sidebar.write("Model for LLM agents:")
st.sidebar.write(OPENAI_MODEL)
376
  st.sidebar.write(f"- HF_REPO_ID: {HF_REPO_ID or 'NOT SET'}")
377
  st.sidebar.write(f"- HF_TOKEN: {'SET' if HF_TOKEN else 'NOT SET'}")
378
 
379
+ # main UI
380
+ col_main, col_side = st.columns([3,1])
 
381
with col_main:
    st.header(f"Week {state['week']} / {state['weeks_total']}")
    # Guard the index: after the final week state['week'] runs past the
    # demand series, in which case we show None.
    demand_display = state["customer_demand"][state["week"] - 1] if 0 <= (state["week"] - 1) < len(state["customer_demand"]) else None
    st.subheader(f"Customer demand (retailer receives this week): {demand_display}")

    # role panels: one column per supply-chain role
    roles = state["roles"]
    panels = st.columns(len(roles))
    for i, role in enumerate(roles):
        with panels[i]:
            st.markdown(f"### {role.title()}")
            st.metric("Inventory", state["inventory"].get(role, 0))
            st.metric("Backlog", state["backlog"].get(role, 0))
            incoming = state["incoming_orders"].get(role, 0)
            st.write(f"Incoming order (this week): **{incoming}**")
            next_ship = state["pipeline"].get(role, [0])[0] if state["pipeline"].get(role) else 0
            st.write(f"Incoming shipment next week: **{next_ship}**")

    st.markdown("---")
    # Distributor form: the human locks in an order, then presses Next Week
    with st.form(key=f"order_form_{participant_id}", clear_on_submit=False):
        st.write("### Your (Distributor) decision this week")
        default_val = state["incoming_orders"].get("distributor", 4) or 4
        distributor_order = st.number_input("Order to place to upstream (Wholesaler):", min_value=0, step=1, value=default_val)
        submitted = st.form_submit_button("Submit Order (locks your decision)")
        if submitted:
            # Stash the order per participant; processed on "Next Week"
            st.session_state.setdefault("pending_orders", {})
            st.session_state["pending_orders"][participant_id] = int(distributor_order)
            st.success(f"Order submitted: {distributor_order}. Now click 'Next Week' to process the week.")

    st.markdown("---")
    pending = st.session_state.get("pending_orders", {}).get(participant_id, None)
    if pending is None:
        st.info("Please submit your order first to enable Next Week processing.")
    else:
        if st.button("Next Week — process week and invoke LLM agents"):
            # Guard: don't step if game finished
            if state["week"] > state["weeks_total"]:
                st.info("Game already finished for this participant.")
            else:
                try:
                    state = step_game(state, pending)
                    # write back
                    st.session_state["sessions"][participant_id] = state
                    # remove pending so the next week requires a fresh submit
                    del st.session_state["pending_orders"][participant_id]
                    st.success(f"Week processed. Advanced to week {state['week']}.")
                except Exception as e:
                    # show traceback for debugging; local import guards
                    # against `traceback` missing from the file header
                    import traceback
                    tb = traceback.format_exc()
                    st.error(f"Error during Next Week processing: {e}")
                    st.text_area("Traceback", tb, height=300)

    st.markdown("### Recent logs")
    if state.get("logs"):
        # show only the most recent weeks to keep the table readable
        df = pd.json_normalize(state["logs"][-6:])
        st.dataframe(df, use_container_width=True)
    else:
        st.write("No logs yet. Submit your first order and press Next Week.")
440
 
441
with col_side:
    st.subheader("Information Sharing (preview)")
    st.write(f"Sharing {state.get('info_history_weeks', 0)} weeks of history (0 = only current).")
    if state.get("info_sharing"):
        h = state.get("info_history_weeks", 0)
        start = max(0, (state["week"] - 1) - h)
        # NOTE(review): this preview slices up to state["week"] (inclusive of
        # the current week's demand), while step_game shares only history
        # strictly before the current week — confirm which is intended.
        hist = state["customer_demand"][start: state["week"]]
        st.write("Demand visible to agents:", hist)
451
  st.subheader("Admin / Debug")
452
  if st.button("Test LLM connection"):
453
  if not openai.api_key:
454
+ st.error("OpenAI API key missing (set OPENAI_API_KEY in secrets).")
455
  else:
 
456
  try:
457
+ test_prompt = "Reply with 42."
458
+ resp = openai.ChatCompletion.create(model=OPENAI_MODEL, messages=[{"role":"user","content":test_prompt}], max_tokens=10)
 
 
 
 
459
  st.write("LLM raw:", resp.choices[0].message.get("content"))
460
  except Exception as e:
461
  st.error(f"LLM test failed: {e}")
462
 
 
463
  if st.button("Save logs now (manual)"):
464
+ if not state.get("logs"):
465
+ st.info("No logs to save.")
466
  else:
467
  local_file = save_logs_local(state, participant_id)
468
+ st.success(f"Saved local file: {local_file}")
 
 
 
 
 
469
 
470
# ---------------------------
# End-of-game upload (only when finished)
# ---------------------------
# Note: check strictly greater than weeks_total (we advanced after final week)
if state.get("week", 1) > state.get("weeks_total", DEFAULT_WEEKS):
    st.success("Game completed for this participant.")
    final_csv = save_logs_local(state, participant_id)
    with open(final_csv, "rb") as f:
        st.download_button("Download final logs CSV", data=f, file_name=final_csv.name, mime="text/csv")
    if HF_TOKEN and HF_REPO_ID:
        # Upload only once per session: every Streamlit rerun re-enters this
        # branch, and without the guard each rerun would push a fresh
        # timestamped CSV to the Hub.
        if not state.get("final_upload_url"):
            state["final_upload_url"] = upload_log_to_hf_at_end(final_csv, participant_id)
        url = state.get("final_upload_url")
        if url:
            st.write(f"Final logs uploaded to HF Hub: {url}")