Lilli98 committed on
Commit
e3cdb59
·
verified ·
1 Parent(s): 69b4277

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +352 -515
app.py CHANGED
@@ -1,534 +1,371 @@
1
  # app.py
2
- """
3
- Beer Game — Robust full Streamlit app (fixed pipeline/Retailer KeyError)
4
- - Uses old openai SDK style (openai==0.28.0) to avoid proxies/new-client issues on Spaces
5
- - Only uploads logs to HF at end of game
6
- - Ensures missing keys are initialized for backward compatibility
7
- - Unified lowercase role keys: 'retailer','wholesaler','distributor','factory'
8
- """
9
 
10
- import os
11
- import re
 
 
 
 
 
 
12
  import time
13
- import uuid
 
14
  import random
15
- import json
16
- import traceback
17
- from datetime import datetime
18
  from pathlib import Path
 
 
19
 
20
- import streamlit as st
21
- import pandas as pd
22
- import openai # expects openai==0.28.0 in requirements.txt
23
- from huggingface_hub import upload_file, HfApi
24
-
25
- # ---------------------------
26
- # CONFIG
27
- # ---------------------------
28
- DEFAULT_WEEKS = 36 # 24 或 36 可选,默认 36
29
-
30
- # Lead times
31
- ORDER_LEAD_TIME = 1 # Time for orders to reach supplier
32
- SHIPPING_LEAD_TIME = 2 # Time for shipments to arrive
33
- PRODUCTION_LEAD_TIME = 2 # Time for factory to produce goods
34
-
35
  INITIAL_INVENTORY = 12
36
  INITIAL_BACKLOG = 0
37
-
38
-
39
- OPENAI_MODEL = "gpt-4o-mini"
40
-
 
 
 
 
 
41
  LOCAL_LOG_DIR = Path("logs")
42
- LOCAL_LOG_DIR.mkdir(exist_ok=True)
43
-
44
- # HF settings (via Secrets)
45
- HF_TOKEN = os.getenv("HF_TOKEN")
46
- HF_REPO_ID = os.getenv("HF_REPO_ID") # e.g. "Lilli98/beer-game-logs"
47
- hf_api = HfApi()
48
-
49
- # OpenAI key (old SDK usage)
50
- openai.api_key = os.getenv("OPENAI_API_KEY")
51
-
52
- # ---------------------------
53
- # HELPERS
54
- # ---------------------------
55
- def now_iso():
56
- return datetime.utcnow().isoformat(timespec="milliseconds") + "Z"
57
-
58
- def make_classic_demand(weeks: int):
59
- # first 4 weeks: 4, from week 5 onwards: 8 (classic shock)
60
- demand = []
61
- for t in range(weeks):
62
- if t < 4:
63
- demand.append(4)
64
- else:
65
- demand.append(8)
66
- return demand
67
-
68
- def fmt(o):
69
- try:
70
- return json.dumps(o, ensure_ascii=False)
71
- except:
72
- return str(o)
73
-
74
- # ---------------------------
75
- # STATE COMPATIBILITY (关键:保证 pipeline / orders 等键存在)
76
- # ---------------------------
77
- def ensure_state_compat(state: dict):
78
- """
79
- Ensure a state dict has all required keys and sensible defaults.
80
- This protects against old/incomplete session_state entries.
81
- """
82
- roles = state.get("roles", ["retailer", "wholesaler", "distributor", "factory"])
83
- state.setdefault("roles", roles)
84
- state.setdefault("weeks_total", state.get("weeks_total", DEFAULT_WEEKS))
85
- state.setdefault("week", state.get("week", 1))
86
-
87
- # inventories/backlogs
88
- state.setdefault("inventory", {r: INITIAL_INVENTORY for r in roles})
89
- state.setdefault("backlog", {r: INITIAL_BACKLOG for r in roles})
90
-
91
- # pipeline: ensure lists and proper length >= SHIPPING_LEAD_TIME
92
- if "pipeline" not in state:
93
- state["pipeline"] = {r: [4] * SHIPPING_LEAD_TIME for r in roles}
94
- else:
95
- for r in roles:
96
- state["pipeline"].setdefault(r, [4] * SHIPPING_LEAD_TIME)
97
- # pad if shorter than SHIPPING_LEAD_TIME
98
- if len(state["pipeline"][r]) < SHIPPING_LEAD_TIME:
99
- state["pipeline"][r] = state["pipeline"][r] + [4] * (SHIPPING_LEAD_TIME - len(state["pipeline"][r]))
100
-
101
- # incoming_orders, orders_history, shipments_history
102
- state.setdefault("incoming_orders", {r: 0 for r in roles})
103
- state.setdefault("orders_history", {r: [] for r in roles})
104
- state.setdefault("shipments_history", {r: [] for r in roles})
105
- state.setdefault("logs", [])
106
- state.setdefault("info_sharing", False)
107
- state.setdefault("info_history_weeks", 0)
108
- # demand
109
- if "customer_demand" not in state:
110
- state["customer_demand"] = make_classic_demand(state["weeks_total"])
111
  else:
112
- # if demand exists but wrong length, regenerate
113
- if len(state["customer_demand"]) < state["weeks_total"]:
114
- state["customer_demand"] = make_classic_demand(state["weeks_total"])
115
-
116
- # ensure week in bounds
117
- if state["week"] < 1:
118
- state["week"] = 1
119
- if state["week"] > state["weeks_total"] + 1:
120
- state["week"] = state["weeks_total"] + 1
121
-
122
- return state
123
-
124
- # ---------------------------
125
- # LLM call (old openai SDK)
126
- # ---------------------------
127
- def call_llm_for_order(role: str, local_state: dict, info_sharing_visible: bool, demand_history: list, max_tokens=40, temperature=0.7):
128
- """
129
- role must be lowercase key matching state dicts (e.g., 'retailer').
130
- Returns (order_int, raw_text)
131
- """
132
- # safety: ensure pipeline/inventory keys exist
133
- pipeline_next = local_state.get("pipeline", {}).get(role, [0])[0] if local_state.get("pipeline", {}).get(role) else 0
134
- inventory = local_state.get("inventory", {}).get(role, 0)
135
- backlog = local_state.get("backlog", {}).get(role, 0)
136
- incoming_order = local_state.get("incoming_orders", {}).get(role, 0)
137
-
138
- visible_history = demand_history if info_sharing_visible else []
139
-
140
- # build prompt (concise)
141
- prompt = (
142
- f"You are the {role.title()} in a 4-player Beer Game (Retailer -> Wholesaler -> Distributor -> Factory).\n"
143
- f"Your objective is to minimize the **total weekly cost**, defined as:\n"
144
- f"- Holding cost: $0.50 per unit of positive inventory per week.\n"
145
- f"- Shortage cost: $1.00 per unit of backlog (unfilled orders) per week.\n\n"
146
- f"Current state:\n"
147
- f"- Week: {local_state.get('week')} / {local_state.get('weeks_total')}\n"
148
- f"- Inventory on hand: {inventory}\n"
149
- f"- Backlog (unfilled demand): {backlog}\n"
150
- f"- Incoming shipment next week: {pipeline_next}\n"
151
- f"- Incoming order this week: {incoming_order}\n"
152
- )
153
- if visible_history:
154
- prompt += (
155
- f"- Recent customer demand history (visible): {visible_history}\n"
156
- )
157
-
158
- prompt += (
159
- "\nWhen deciding your order, consider that orders take time to arrive "
160
- "(1 week to reach upstream supplier + 2 weeks shipping + 2 weeks production for factory). "
161
- "Avoid blindly following last week's demand. Try to balance the trade-off between "
162
- "avoiding stockouts (backlog cost) and avoiding overstock (inventory cost).\n\n"
163
- "Decide one **non-negative integer** order quantity to place to your upstream supplier "
164
- "this week to minimize expected total cost. Reply with an integer only."
165
- )
166
-
167
- try:
168
- resp = openai.ChatCompletion.create(
169
- model=OPENAI_MODEL,
170
- messages=[
171
- {"role": "system", "content": "You are an automated Beer Game agent."},
172
- {"role": "user", "content": prompt},
173
- ],
174
- max_tokens=max_tokens,
175
- temperature=temperature,
176
- n=1
177
- )
178
- raw = resp.choices[0].message.get("content", "").strip()
179
- except Exception as e:
180
- raw = f"OPENAI_ERROR: {e}"
181
- # fallback later
182
-
183
- # parse first integer
184
- m = re.search(r"(-?\d+)", raw or "")
185
- order = None
186
- if m:
187
- try:
188
- order = int(m.group(1))
189
- if order < 0:
190
- order = 0
191
- except:
192
- order = None
193
-
194
- if order is None:
195
- # fallback heuristic
196
- incoming = incoming_order or 0
197
- target = INITIAL_INVENTORY + incoming
198
- order = max(0, target - inventory)
199
- raw = (raw + " | PARSE_FALLBACK").strip()
200
-
201
- return int(order), raw
202
-
203
- # ---------------------------
204
- # GAME LOGIC (uses lowercase role keys)
205
- # ---------------------------
206
- def init_game(weeks=DEFAULT_WEEKS):
207
- roles = ["retailer", "wholesaler", "distributor", "factory"]
208
- state = {
209
- "participant_id": None,
210
- "week": 1,
211
- "weeks_total": weeks,
212
- "roles": roles,
213
- "inventory": {r: INITIAL_INVENTORY for r in roles},
214
- "backlog": {r: INITIAL_BACKLOG for r in roles},
215
- "pipeline": {r: [4] * SHIPPING_LEAD_TIME for r in roles},
216
- "incoming_orders": {r: 0 for r in roles},
217
- "orders_history": {r: [] for r in roles},
218
- "shipments_history": {r: [] for r in roles},
219
- "logs": [],
220
- "info_sharing": False,
221
- "info_history_weeks": 0,
222
- "customer_demand": make_classic_demand(weeks),
223
- }
224
- return state
225
-
226
- def state_snapshot_for_prompt(state: dict):
227
- # safe snapshot (keys lowercase)
228
- return {
229
- "week": state.get("week"),
230
- "weeks_total": state.get("weeks_total"),
231
- "inventory": state.get("inventory", {}).copy(),
232
- "backlog": state.get("backlog", {}).copy(),
233
- "incoming_orders": state.get("incoming_orders", {}).copy(),
234
- "incoming_shipments_next_week": {r: (state.get("pipeline", {}).get(r, [0])[0] if state.get("pipeline", {}).get(r) else 0) for r in state.get("roles", [])}
235
  }
236
 
237
- def step_game(state: dict, distributor_order: int):
238
- # defensive: ensure compatible keys
239
- ensure_state_compat(state)
240
-
241
- week = state["week"]
242
- roles = state["roles"]
243
-
244
- if week > state["weeks_total"]:
245
- # already finished; do not advance further
246
- return state
247
-
248
- # 1) customer demand hits retailer
249
- demand = state["customer_demand"][week - 1]
250
- state["incoming_orders"]["retailer"] = demand
251
-
252
- # 2) shipments arrive (front of each pipeline)
253
- arriving = {}
254
- for r in roles:
255
- arr = 0
256
- if state.get("pipeline", {}).get(r):
257
- # pop front safely
258
- try:
259
- arr = state["pipeline"][r].pop(0)
260
- except Exception:
261
- arr = 0
262
- state["inventory"][r] = state["inventory"].get(r, 0) + (arr or 0)
263
- arriving[r] = arr
264
-
265
- # 3) fulfill incoming orders (downstream -> this role)
266
- shipments_out = {}
267
- for r in roles:
268
- incoming = state.get("incoming_orders", {}).get(r, 0) or 0
269
- inv = state.get("inventory", {}).get(r, 0) or 0
270
- shipped = min(inv, incoming)
271
- state["inventory"][r] = inv - shipped
272
- unfilled = incoming - shipped
273
- if unfilled > 0:
274
- state["backlog"][r] = state.get("backlog", {}).get(r, 0) + unfilled
275
- shipments_out[r] = shipped
276
- state["shipments_history"].setdefault(r, []).append(shipped)
277
-
278
- # 4) record human distributor order
279
- state["orders_history"]["distributor"].append(int(distributor_order))
280
- state["incoming_orders"]["wholesaler"] = int(distributor_order)
281
-
282
- # 5) LLM decisions
283
- demand_history_visible = []
284
- if state.get("info_sharing") and state.get("info_history_weeks", 0) > 0:
285
- start_idx = max(0, (week - 1) - state["info_history_weeks"])
286
- demand_history_visible = state["customer_demand"][start_idx:(week - 1)]
287
-
288
- llm_outputs = {}
289
- for role in ["retailer", "wholesaler", "factory", "distributor"]:
290
- order_val, raw = call_llm_for_order(
291
- role,
292
- state_snapshot_for_prompt(state),
293
- state.get("info_sharing", False),
294
- demand_history_visible
295
- )
296
- order_val = max(0, int(order_val))
297
- llm_outputs[role] = {"order": order_val, "raw": raw}
298
-
299
- if role != "distributor": # AI 决策直接生效
300
- state["orders_history"][role].append(order_val)
301
- if role == "retailer":
302
- state["incoming_orders"]["distributor"] = order_val
303
- elif role == "wholesaler":
304
- state["incoming_orders"]["factory"] = order_val
305
-
306
- # 人类 distributor 的真实 order 后面会覆盖
307
- state["orders_history"]["distributor"].append(int(distributor_order))
308
- state["incoming_orders"]["wholesaler"] = int(distributor_order)
309
-
310
- # 6) place orders into pipelines (will arrive after SHIPPING_LEAD_TIME)
311
- downstream_map = {"factory": "wholesaler", "wholesaler": "distributor", "distributor": "retailer", "retailer": None}
312
- for role in roles:
313
- placed_order = state["orders_history"][role][-1] if state["orders_history"].get(role) else 0
314
- if role == "distributor":
315
- placed_order = int(distributor_order)
316
- downstream = downstream_map.get(role)
317
- if downstream:
318
- state["pipeline"].setdefault(downstream, [0]*SHIPPING_LEAD_TIME)
319
- state["pipeline"][downstream].append(placed_order)
320
-
321
- # 6.5) cost calculation
322
- if "cost" not in state:
323
- state["cost"] = {r: 0.0 for r in roles}
324
-
325
- for r in roles:
326
- inv = state["inventory"].get(r, 0)
327
- backlog = state["backlog"].get(r, 0)
328
- inv_cost = inv * 0.5 # 每单位库存成本
329
- back_cost = backlog * 1.0 # 每单位缺货成本
330
- state["cost"][r] = state["cost"].get(r, 0) + inv_cost + back_cost
331
-
332
- # 7) logging
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
333
  log_entry = {
334
- "timestamp": now_iso(),
335
- "week": week,
336
- "demand": demand,
337
- "arriving": arriving,
338
- "shipments_out": shipments_out,
339
- "orders_submitted": {r: (state["orders_history"].get(r, [None])[-1] if state["orders_history"].get(r) else None) for r in roles},
340
- "inventory": {r: state["inventory"].get(r, 0) for r in roles},
341
- "backlog": {r: state["backlog"].get(r, 0) for r in roles},
342
- "cost": {r: state["cost"].get(r, 0) for r in roles},
343
- "info_sharing": state.get("info_sharing", False),
344
- "info_history_weeks": state.get("info_history_weeks", 0),
345
- "llm_raw": {k: v["raw"] for k, v in llm_outputs.items()}
346
  }
347
- state["logs"].append(log_entry)
348
-
349
- # 8) advance week
350
- state["week"] = state.get("week", 1) + 1
351
-
352
- return state
353
-
354
- # ---------------------------
355
- # Persistence helpers
356
- # ---------------------------
357
- def save_logs_local(state: dict, participant_id: str):
358
- df = pd.json_normalize(state.get("logs", []))
359
- fname = LOCAL_LOG_DIR / f"logs_{participant_id}_{int(time.time())}.csv"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
360
  df.to_csv(fname, index=False)
361
- return fname
362
-
363
- def upload_log_to_hf_at_end(local_file: Path, participant_id: str):
364
- """
365
- Only call this at the end of the game to upload final CSV to HF dataset.
366
- """
367
- if not HF_TOKEN or not HF_REPO_ID:
368
- return None
369
- dest = f"logs/{participant_id}/{local_file.name}"
370
- try:
371
- upload_file(path_or_fileobj=str(local_file), path_in_repo=dest, repo_id=HF_REPO_ID, repo_type="dataset", token=HF_TOKEN)
372
- return f"https://huggingface.co/datasets/{HF_REPO_ID}/resolve/main/{dest}"
373
- except Exception as e:
374
- st.error(f"HF upload failed: {e}")
375
- return None
376
-
377
- # ---------------------------
378
- # STREAMLIT UI & session mgmt
379
- # ---------------------------
380
- st.set_page_config(page_title="Beer Game (Distributor + LLMs)", layout="wide")
381
- st.title("🍺 Beer Game — Human Distributor vs LLM agents")
382
-
383
- # participant id via query param or input
384
- qp = st.query_params
385
- pid_from_q = qp.get("participant_id", [None])[0] if qp else None
386
- pid_input = st.text_input("Participant ID (leave blank to auto-generate or use ?participant_id=ID)", value=pid_from_q or "")
387
- participant_id = pid_input.strip() if pid_input else st.session_state.setdefault("auto_pid", str(uuid.uuid4())[:8])
388
- st.sidebar.markdown(f"**Participant ID:** `{participant_id}`")
389
-
390
- # sessions container
391
- if "sessions" not in st.session_state:
392
- st.session_state["sessions"] = {}
393
-
394
- # reset button for debugging / clearing old sessions
395
- if st.sidebar.button("Reset session (clear saved state)"):
396
- if participant_id in st.session_state["sessions"]:
397
- del st.session_state["sessions"][participant_id]
398
- st.experimental_rerun()
399
 
400
- # create or ensure session state
401
- if participant_id not in st.session_state["sessions"]:
402
- st.session_state["sessions"][participant_id] = init_game(DEFAULT_WEEKS)
403
- st.session_state["sessions"][participant_id]["participant_id"] = participant_id
404
 
405
- # retrieve and ensure compatibility immediately
406
- state = st.session_state["sessions"][participant_id]
407
- state = ensure_state_compat(state)
408
- st.session_state["sessions"][participant_id] = state # write back
409
-
410
- # sidebar controls
411
- st.sidebar.header("Experiment controls")
412
- state["info_sharing"] = st.sidebar.checkbox("Enable Information Sharing (share demand)", value=state.get("info_sharing", False))
413
- state["info_history_weeks"] = st.sidebar.slider("Weeks of demand history to share (0 = none)", 0, 8, value=state.get("info_history_weeks", 0))
414
- st.sidebar.markdown("---")
415
- st.sidebar.write("Model for LLM agents:")
416
- st.sidebar.write(OPENAI_MODEL)
417
- st.sidebar.markdown("---")
418
- st.sidebar.write("HF upload settings:")
419
- st.sidebar.write(f"- HF_REPO_ID: {HF_REPO_ID or 'NOT SET'}")
420
- st.sidebar.write(f"- HF_TOKEN: {'SET' if HF_TOKEN else 'NOT SET'}")
421
-
422
- # main UI
423
- col_main, col_side = st.columns([3,1])
424
- with col_main:
425
- st.header(f"Week {state['week']} / {state['weeks_total']}")
426
- demand_display = state["customer_demand"][state["week"] - 1] if 0 <= (state["week"] - 1) < len(state["customer_demand"]) else None
427
- st.subheader(f"Customer demand (retailer receives this week): {demand_display}")
428
-
429
- # role panels
430
- roles = state["roles"]
431
- panels = st.columns(len(roles))
432
- for i, role in enumerate(roles):
433
- with panels[i]:
434
- st.markdown(f"### {role.title()}")
435
- st.metric("Inventory", state["inventory"].get(role, 0))
436
- st.metric("Backlog", state["backlog"].get(role, 0))
437
- incoming = state["incoming_orders"].get(role, 0)
438
- st.write(f"Incoming order (this week): **{incoming}**")
439
- next_ship = state["pipeline"].get(role, [0])[0] if state["pipeline"].get(role) else 0
440
- st.write(f"Incoming shipment next week: **{next_ship}**")
441
-
442
- st.markdown("---")
443
- # Distributor form
444
- with st.form(key=f"order_form_{participant_id}", clear_on_submit=False):
445
- st.write("### Your (Distributor) decision this week")
446
-
447
- # 如果有 LLM 给的建议,就显示出来
448
- last_log = state["logs"][-1] if state.get("logs") else None
449
- if last_log and "llm_raw" in last_log and "distributor" in last_log["llm_raw"]:
450
- suggestion = last_log["llm_raw"]["distributor"]
451
- st.info(f"💡 AI suggests you order: **{suggestion}** units (you can follow or override)")
452
- else:
453
- st.info("💡 AI suggestion will appear after the first processed week.")
454
-
455
- default_val = state["incoming_orders"].get("distributor", 4) or 4
456
- distributor_order = st.number_input("Order to place to upstream (Wholesaler):", min_value=0, step=1, value=default_val)
457
- submitted = st.form_submit_button("Submit Order (locks your decision)")
458
- if submitted:
459
- st.session_state.setdefault("pending_orders", {})
460
- st.session_state["pending_orders"][participant_id] = int(distributor_order)
461
- st.success(f"Order submitted: {distributor_order}. Now click 'Next Week' to process the week.")
462
-
463
- st.markdown("---")
464
- pending = st.session_state.get("pending_orders", {}).get(participant_id, None)
465
- if pending is None:
466
- st.info("Please submit your order first to enable Next Week processing.")
467
- else:
468
- if st.button("Next Week — process week and invoke LLM agents"):
469
- # Guard: don't step if game finished
470
- if state["week"] > state["weeks_total"]:
471
- st.info("Game already finished for this participant.")
472
- else:
473
- try:
474
- state = step_game(state, pending)
475
- # write back
476
- st.session_state["sessions"][participant_id] = state
477
- # remove pending
478
- del st.session_state["pending_orders"][participant_id]
479
- st.success(f"Week processed. Advanced to week {state['week']}.")
480
- except Exception as e:
481
- # show traceback for debugging
482
- tb = traceback.format_exc()
483
- st.error(f"Error during Next Week processing: {e}")
484
- st.text_area("Traceback", tb, height=300)
485
-
486
- st.markdown("### Recent logs")
487
- if state.get("logs"):
488
- df = pd.json_normalize(state["logs"][-6:])
489
- st.dataframe(df, use_container_width=True)
490
- else:
491
- st.write("No logs yet. Submit your first order and press Next Week.")
492
-
493
- with col_side:
494
- st.subheader("Information Sharing (preview)")
495
- st.write(f"Sharing {state.get('info_history_weeks', 0)} weeks of history (0 = only current).")
496
- if state.get("info_sharing"):
497
- h = state.get("info_history_weeks", 0)
498
- start = max(0, (state["week"] - 1) - h)
499
- hist = state["customer_demand"][start: state["week"]]
500
- st.write("Demand visible to agents:", hist)
501
-
502
- st.markdown("---")
503
- st.subheader("Admin / Debug")
504
- if st.button("Test LLM connection"):
505
- if not openai.api_key:
506
- st.error("OpenAI API key missing (set OPENAI_API_KEY in secrets).")
507
- else:
508
  try:
509
- test_prompt = "Reply with 42."
510
- resp = openai.ChatCompletion.create(model=OPENAI_MODEL, messages=[{"role":"user","content":test_prompt}], max_tokens=10)
511
- st.write("LLM raw:", resp.choices[0].message.get("content"))
 
 
 
 
 
 
512
  except Exception as e:
513
- st.error(f"LLM test failed: {e}")
514
-
515
- if st.button("Save logs now (manual)"):
516
- if not state.get("logs"):
517
- st.info("No logs to save.")
518
- else:
519
- local_file = save_logs_local(state, participant_id)
520
- st.success(f"Saved local file: {local_file}")
521
-
522
- # ---------------------------
523
- # End-of-game upload (only when finished)
524
- # ---------------------------
525
- # Note: check strictly greater than weeks_total (we advanced after final week)
526
- if state.get("week", 1) > state.get("weeks_total", DEFAULT_WEEKS):
527
- st.success("Game completed for this participant.")
528
- final_csv = save_logs_local(state, participant_id)
529
- with open(final_csv, "rb") as f:
530
- st.download_button("Download final logs CSV", data=f, file_name=final_csv.name, mime="text/csv")
531
- if HF_TOKEN and HF_REPO_ID:
532
- url = upload_log_to_hf_at_end(final_csv, participant_id)
533
- if url:
534
- st.write(f"Final logs uploaded to HF Hub: {url}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  # app.py
2
+ # @title 啤酒游戏最终整合版 (Streamlit 交互应用 + Hugging Face 日志上传)
 
 
 
 
 
 
3
 
4
+ # -----------------------------------------------------------------------------
5
+ # 1. 导入必要的库
6
+ # -----------------------------------------------------------------------------
7
+ import streamlit as st
8
+ import pandas as pd
9
+ import matplotlib.pyplot as plt
10
+ import numpy as np
11
+ from collections import deque
12
  import time
13
+ import openai
14
+ import re
15
  import random
16
+ import uuid
17
+ import os
 
18
  from pathlib import Path
19
+ from datetime import datetime
20
+ from huggingface_hub import HfApi, upload_file
21
 
22
+ # -----------------------------------------------------------------------------
23
+ # 2. 配置游戏核心参数和API密钥
24
+ # -----------------------------------------------------------------------------
25
+ # --- 游戏参数 ---
26
+ WEEKS = 24
 
 
 
 
 
 
 
 
 
 
27
  INITIAL_INVENTORY = 12
28
  INITIAL_BACKLOG = 0
29
+ ORDER_PASSING_DELAY = 1
30
+ SHIPPING_DELAY = 2
31
+ FACTORY_LEAD_TIME = 1
32
+ FACTORY_SHIPPING_DELAY = 1
33
+ HOLDING_COST = 0.5
34
+ BACKLOG_COST = 1.0
35
+
36
+ # --- 模型和日志配置 ---
37
+ OPENAI_MODEL = "gpt-4o-mini"
38
  LOCAL_LOG_DIR = Path("logs")
39
+ LOCAL_LOG_DIR.mkdir(exist_ok=True) # Ensure the log directory exists
40
+
41
+ # --- API & Secrets 配置 ( Streamlit Secrets 读取) ---
42
+ try:
43
+ # OpenAI
44
+ client = openai.OpenAI(api_key=st.secrets["OPENAI_API_KEY"])
45
+ # Hugging Face
46
+ HF_TOKEN = st.secrets.get("HF_TOKEN")
47
+ HF_REPO_ID = st.secrets.get("HF_REPO_ID") # e.g., "YourUser/beer-game-logs"
48
+ if HF_TOKEN:
49
+ hf_api = HfApi()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
50
  else:
51
+ hf_api = None
52
+ except Exception as e:
53
+ st.error(f"启动时读取Secrets出错: {e}")
54
+ st.info("请确保在Streamlit的Secrets中设置了 OPENAI_API_KEY。可选设置 HF_TOKEN 和 HF_REPO_ID 用于上传日志。")
55
+ client = None
56
+ HF_TOKEN = None
57
+ HF_REPO_ID = None
58
+ hf_api = None
59
+
60
+
61
+ # -----------------------------------------------------------------------------
62
+ # 3. 游戏核心逻辑函数 (大部分源自代码1, 并为Streamlit适配)
63
+ # -----------------------------------------------------------------------------
64
+
65
+ def get_customer_demand(week: int) -> int:
66
+ """定义终端客户需求函数"""
67
+ return 4 if week <= 4 else 8
68
+
69
+ def init_game_state(llm_personality: str, info_sharing: str):
70
+ """初始化或重置游戏状态,并储存在 st.session_state 中"""
71
+ roles = ["Retailer", "Wholesaler", "Distributor", "Factory"]
72
+ human_role = random.choice(roles)
73
+ participant_id = str(uuid.uuid4())[:8] # Generate a unique ID for this game session
74
+
75
+ st.session_state.game_state = {
76
+ 'game_running': True,
77
+ 'participant_id': participant_id,
78
+ 'week': 1,
79
+ 'human_role': human_role,
80
+ 'llm_personality': llm_personality,
81
+ 'info_sharing': info_sharing,
82
+ 'logs': [], # Changed from 'history' to 'logs' for more detailed logging
83
+ 'echelons': {},
84
+ 'factory_production_pipeline': deque([0] * FACTORY_LEAD_TIME, maxlen=FACTORY_LEAD_TIME),
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
85
  }
86
 
87
+ # 为每个角色初始化状态
88
+ for i, name in enumerate(roles):
89
+ upstream = roles[i + 1] if i + 1 < len(roles) else None
90
+ downstream = roles[i - 1] if i - 1 >= 0 else None
91
+ if name == "Distributor": shipping_weeks = FACTORY_SHIPPING_DELAY
92
+ elif name == "Factory": shipping_weeks = 0
93
+ else: shipping_weeks = SHIPPING_DELAY
94
+
95
+ st.session_state.game_state['echelons'][name] = {
96
+ 'name': name, 'upstream_name': upstream, 'downstream_name': downstream,
97
+ 'inventory': INITIAL_INVENTORY, 'backlog': INITIAL_BACKLOG,
98
+ 'order_pipeline': deque([0] * ORDER_PASSING_DELAY, maxlen=ORDER_PASSING_DELAY),
99
+ 'incoming_shipments': deque([0] * shipping_weeks, maxlen=shipping_weeks),
100
+ 'incoming_order': 0, 'order_placed': 0, 'shipment_sent': 0,
101
+ 'weekly_cost': 0, 'total_cost': 0,
102
+ }
103
+ st.info(f"新游戏开始!AI模式: **{llm_personality} / {info_sharing}**。您的角色: **{human_role}**。本次游戏ID: `{participant_id}`")
104
+
105
+
106
+ def get_llm_order_decision(prompt: str, echelon_name: str, current_week: int, personality: str) -> (int, str):
107
+ """调用 OpenAI API 获取决策,并返回决策和原始文本"""
108
+ if not client:
109
+ st.warning("API Key未设置,LLM将使用默认值8。")
110
+ return 8, "NO_API_KEY_DEFAULT"
111
+
112
+ with st.spinner(f"正在为 {echelon_name} 获取AI决策..."):
113
+ temp = 0.1 if personality == 'perfect_rational' else 0.7
114
+ try:
115
+ response = client.chat.completions.create(
116
+ model=OPENAI_MODEL,
117
+ messages=[
118
+ {"role": "system", "content": "You are a supply chain manager playing the Beer Game. Your response must be only an integer number representing your order quantity and nothing else. For example: 8"},
119
+ {"role": "user", "content": prompt}
120
+ ],
121
+ temperature=temp,
122
+ max_tokens=10
123
+ )
124
+ raw_text = response.choices[0].message.content.strip()
125
+ match = re.search(r'\d+', raw_text)
126
+ if match:
127
+ return int(match.group(0)), raw_text
128
+ else:
129
+ st.warning(f"LLM for {echelon_name} 未返回有效数字,将使用默认值 8。原始返回: '{raw_text}'")
130
+ return 8, raw_text
131
+ except Exception as e:
132
+ st.error(f"API调用失败 for {echelon_name}。错误: {e}。将使用默认值 8。")
133
+ return 8, f"API_ERROR: {e}"
134
+
135
+ def get_llm_prompt(echelon_state: dict, week: int, llm_personality: str, info_sharing: str, all_echelons_state: dict) -> str:
136
+ """生成LLM的提示词 (核心逻辑完全来自代码1)"""
137
+ # (此函数内容与上一版完全相同,为简洁省略,实际代码中应完整保留)
138
+ base_info = f"""
139
+ Your Current Status at the **{echelon_state['name']}** for **Week {week}**:
140
+ - On-hand inventory: {echelon_state['inventory']} units.
141
+ - Backlog (unfilled orders): {echelon_state['backlog']} units.
142
+ - Incoming order this week (from your customer): {echelon_state['incoming_order']} units.
143
+ - Shipments on the way to you: {list(echelon_state['incoming_shipments'])}
144
+ - Orders you have placed being processed by your supplier: {list(echelon_state['order_pipeline'])}
145
+ """
146
+ # 场景 1: 完美理性 x 完全信息
147
+ if llm_personality == 'perfect_rational' and info_sharing == 'full':
148
+ stable_demand = 8; total_lead_time = ORDER_PASSING_DELAY + SHIPPING_DELAY; safety_stock = 4
149
+ target_inventory_level = (stable_demand * total_lead_time) + safety_stock
150
+ inventory_position = (echelon_state['inventory'] - echelon_state['backlog'] + sum(echelon_state['incoming_shipments']) + sum(echelon_state['order_pipeline']))
151
+ optimal_order = max(0, int(target_inventory_level - inventory_position))
152
+ return f"**You are a perfectly rational supply chain AI with full system visibility.**\nYour only goal is to maintain stability and minimize costs based on mathematical optimization.\n**System Analysis:**\n* **Known Stable End-Customer Demand:** {stable_demand} units/week.\n* **Your Current Total Inventory Position:** {inventory_position} units.\n* **Optimal Target Inventory Level:** {target_inventory_level} units.\n* **Mathematically Optimal Order:** The optimal order is **{optimal_order} units**.\n**Your Task:** Confirm this optimal quantity. Respond with a single integer."
153
+ # 场景 2: 完美理性 x 本地信息
154
+ elif llm_personality == 'perfect_rational' and info_sharing == 'local':
155
+ safety_stock = 4; anchor_demand = echelon_state['incoming_order']
156
+ inventory_correction = safety_stock - (echelon_state['inventory'] - echelon_state['backlog'])
157
+ supply_line = sum(echelon_state['incoming_shipments']) + sum(echelon_state['order_pipeline'])
158
+ calculated_order = anchor_demand + inventory_correction - supply_line
159
+ rational_local_order = max(0, int(calculated_order))
160
+ return f"**You are a perfectly rational supply chain AI with ONLY LOCAL information.**\nYou must use a logical heuristic to make a stable decision. A proven method is \"Anchoring and Adjustment\".\n\n{base_info}\n\n**Rational Calculation (Anchoring & Adjustment):**\n1. **Anchor on Demand:** Your best guess for future demand is your last incoming order: **{anchor_demand} units**.\n2. **Adjust for Inventory:** You want to hold a safety stock of {safety_stock} units. Your current stock is {echelon_state['inventory'] - echelon_state['backlog']}. You need to order an extra **{inventory_correction} units** to correct this.\n3. **Account for Supply Line:** You already have **{supply_line} units** in transit or being processed. These should be subtracted from your new order.\n\n**Final Calculation:**\n* Order = (Anchor Demand) + (Inventory Adjustment) - (Supply Line)\n* Order = {anchor_demand} + {inventory_correction} - {supply_line} = **{rational_local_order} units**.\n\n**Your Task:** Confirm this locally rational quantity. Respond with a single integer."
161
+ # 场景 3: 类人 x 完全信息
162
+ elif llm_personality == 'human_like' and info_sharing == 'full':
163
+ full_info_str = f"\n**Full Supply Chain Information:**\n- End-Customer Demand this week: {get_customer_demand(week)} units.\n"
164
+ for name, e_state in all_echelons_state.items():
165
+ if name != echelon_state['name']: full_info_str += f"- {name}: Inventory={e_state['inventory']}, Backlog={e_state['backlog']}\n"
166
+ return f"**You are a supply chain manager with full visibility across the entire system.**\nYou can see everyone's inventory and the real customer demand. Your goal is to use this information to make a smart, coordinated decision. However, you are still human and might get anxious about your own stock levels.\n{base_info}\n{full_info_str}\n**Your Task:** Look at the full picture, especially the stable end-customer demand. Try to avoid causing the bullwhip effect. However, also consider your own inventory pressure. What quantity should you order this week? Respond with a single integer."
167
+ # 场景 4: 类人 x 本地信息
168
+ elif llm_personality == 'human_like' and info_sharing == 'local':
169
+ return f"**You are a reactive supply chain manager for the {echelon_state['name']}.** You have a limited view and tend to over-correct based on fear.\n\n**Your Mindset: **Your top priority is try to not have a backlog.\n\n{base_info}\n\n**Your Task:** You just saw your own inventory and a new order coming. Your gut instinct is to panic and order enough to ensure you are never caught with a backlog again.\n\n**React emotionally.** What is your knee-jerk order quantity? Respond with a single integer."
170
+
171
def step_game(human_final_order: int):
    """Advance the simulation by one week and append a detailed log entry.

    Executes the classic Beer Game weekly cycle in strict order:
    receive goods -> receive orders -> fulfill/ship -> place new orders ->
    schedule factory production -> accrue costs -> log -> advance week.
    The human player's order is taken from `human_final_order`; every other
    echelon's order is decided by an LLM call.

    Side effects: mutates `st.session_state.game_state` in place and writes
    per-echelon order messages to the Streamlit sidebar.
    """
    state = st.session_state.game_state
    week = state['week']
    echelons = state['echelons']
    human_role = state['human_role']
    llm_personality = state['llm_personality']
    info_sharing = state['info_sharing']
    # Processing order is downstream -> upstream (Retailer first).
    echelon_order = ["Retailer", "Wholesaler", "Distributor", "Factory"]
    llm_raw_responses = {}

    # --- Weekly game flow ---
    # 1. Factory finishes production & 2. each echelon receives in-transit goods.
    # Pipelines are deques; popleft() delivers the oldest scheduled arrival.
    factory_state = echelons["Factory"]
    if state['factory_production_pipeline']: factory_state['inventory'] += state['factory_production_pipeline'].popleft()
    for name in ["Retailer", "Wholesaler", "Distributor"]:
        if echelons[name]['incoming_shipments']: echelons[name]['inventory'] += echelons[name]['incoming_shipments'].popleft()
    # 3. Each echelon receives its incoming order.
    # The Retailer sees true end-customer demand; everyone else sees the
    # oldest order in their downstream neighbour's order pipeline.
    for name in echelon_order:
        if name == "Retailer": echelons[name]['incoming_order'] = get_customer_demand(week)
        else:
            downstream = echelons[name]['downstream_name']
            if downstream and echelons[downstream]['order_pipeline']:
                echelons[name]['incoming_order'] = echelons[downstream]['order_pipeline'].popleft()
    # 4. Fulfill orders and ship what inventory allows.
    # Demand = this week's order + accumulated backlog; any shortfall
    # becomes the new backlog.
    for name in echelon_order:
        e = echelons[name]
        demand = e['incoming_order'] + e['backlog']
        e['shipment_sent'] = min(e['inventory'], demand)
        e['inventory'] -= e['shipment_sent']
        e['backlog'] = demand - e['shipment_sent']
    # 5. Put this week's shipments in transit to the downstream neighbour.
    for sender in ["Factory", "Distributor", "Wholesaler"]:
        receiver = echelons[sender]['downstream_name']
        if receiver: echelons[receiver]['incoming_shipments'].append(echelons[sender]['shipment_sent'])

    # 6. Each echelon places its new order (human input or LLM decision).
    for name in echelon_order:
        e = echelons[name]
        if name == human_role:
            order_amount, raw_resp = human_final_order, "HUMAN_INPUT"
            st.sidebar.write(f"✔️ 你 ({name}) 的最终订单: {order_amount}")
        else:
            prompt = get_llm_prompt(e, week, llm_personality, info_sharing, echelons)
            order_amount, raw_resp = get_llm_order_decision(prompt, name, week, llm_personality)
            st.sidebar.write(f"🤖 AI ({name}) 的订单: {order_amount}")
        llm_raw_responses[name] = raw_resp
        # Orders are clamped to non-negative before entering the pipeline.
        e['order_placed'] = max(0, order_amount)
        if name != "Factory": e['order_pipeline'].append(e['order_placed'])

    # 7. Factory schedules production (its "order" enters the production pipeline).
    state['factory_production_pipeline'].append(echelons["Factory"]['order_placed'])

    # 8. Accrue this week's holding + backlog cost for every echelon.
    for name in echelon_order:
        e = echelons[name]
        e['weekly_cost'] = (e['inventory'] * HOLDING_COST) + (e['backlog'] * BACKLOG_COST)
        e['total_cost'] += e['weekly_cost']

    # 9. Record a detailed flat log entry (one row per week, wide columns
    #    per echelon, e.g. 'Retailer.inventory').
    log_entry = {
        'timestamp': datetime.utcnow().isoformat() + "Z", 'week': week,
        'participant_id': state['participant_id'], 'human_role': human_role,
        'llm_personality': llm_personality, 'info_sharing': info_sharing,
        'customer_demand': get_customer_demand(week),
    }
    for name in echelon_order:
        e = echelons[name]
        log_entry[f'{name}.inventory'] = e['inventory']
        log_entry[f'{name}.backlog'] = e['backlog']
        log_entry[f'{name}.incoming_order'] = e['incoming_order']
        log_entry[f'{name}.order_placed'] = e['order_placed']
        log_entry[f'{name}.shipment_sent'] = e['shipment_sent']
        log_entry[f'{name}.weekly_cost'] = e['weekly_cost']
        log_entry[f'{name}.total_cost'] = e['total_cost']
        log_entry[f'{name}.llm_raw_response'] = llm_raw_responses.get(name, "")
    state['logs'].append(log_entry)

    # 10. Advance the week; stop the game once WEEKS is exceeded.
    state['week'] += 1
    if state['week'] > WEEKS:
        state['game_running'] = False
253
+
254
def plot_results(df: pd.DataFrame, title: str):
    """Render the end-of-game summary figure from the per-week log DataFrame.

    Builds three stacked panels: inventory levels per echelon, order
    quantities vs. true customer demand (bullwhip effect), and total
    cumulative cost per echelon. Returns the matplotlib Figure.
    """
    roles = ['Retailer', 'Wholesaler', 'Distributor', 'Factory']
    fig, (ax_inv, ax_ord, ax_cost) = plt.subplots(3, 1, figsize=(12, 16))
    fig.suptitle(title, fontsize=16)

    # Flatten the wide per-week rows into long (week, echelon, metric) records.
    records = [
        {
            'week': row['week'],
            'echelon': role,
            'inventory': row[f'{role}.inventory'],
            'order_placed': row[f'{role}.order_placed'],
            'total_cost': row[f'{role}.total_cost'],
        }
        for _, row in df.iterrows()
        for role in roles
    ]
    long_df = pd.DataFrame(records)

    # Panel 1: inventory trajectories.
    inv_pivot = long_df.pivot(index='week', columns='echelon', values='inventory').reindex(columns=roles)
    inv_pivot.plot(ax=ax_inv, kind='line', marker='o', markersize=4)
    ax_inv.set_title('Inventory Levels')
    ax_inv.grid(True, linestyle='--')

    # Panel 2: orders placed vs. the real end-customer demand curve.
    ord_pivot = long_df.pivot(index='week', columns='echelon', values='order_placed').reindex(columns=roles)
    ord_pivot.plot(ax=ax_ord, style='--')
    ax_ord.plot(range(1, WEEKS + 1), [get_customer_demand(w) for w in range(1, WEEKS + 1)], label='Customer Demand', color='black', lw=2.5)
    ax_ord.set_title('Order Quantities (Bullwhip Effect)')
    ax_ord.grid(True, linestyle='--')
    ax_ord.legend()

    # Panel 3: final cumulative cost (max of the running total per echelon).
    final_costs = long_df.groupby('echelon')['total_cost'].max().reindex(roles)
    final_costs.plot(kind='bar', ax=ax_cost, rot=0)
    ax_cost.set_title('Total Cumulative Cost')

    plt.tight_layout(rect=[0, 0, 1, 0.96])
    return fig
276
+
277
def save_logs_and_upload(state: dict):
    """Persist game logs to a local CSV, offer a download, and upload to HF once.

    Fix: this function is called from the end-of-game screen, which Streamlit
    re-renders on every rerun (e.g. right after the user clicks the download
    button). Previously each rerun wrote a NEW timestamped CSV and re-uploaded
    a duplicate to the Hugging Face dataset repo. We now cache the written
    file path in `state['saved_log_path']` and mark `state['logs_uploaded']`
    so the save and the upload each happen exactly once per game.

    Args:
        state: the game-state dict from `st.session_state.game_state`;
            must contain 'logs' and 'participant_id'.
    """
    if not state.get('logs'):
        st.warning("没有可保存的日志。")
        return

    participant_id = state['participant_id']
    # Reuse the CSV written on a previous rerun instead of creating a new one.
    saved_path = state.get('saved_log_path')
    if saved_path and Path(saved_path).exists():
        fname = Path(saved_path)
    else:
        df = pd.json_normalize(state['logs'])
        fname = LOCAL_LOG_DIR / f"log_{participant_id}_{int(time.time())}.csv"
        df.to_csv(fname, index=False)
        state['saved_log_path'] = str(fname)
    st.success(f"日志已成功保存到本地: `{fname}`")

    # Offer the CSV for download (this button click triggers a rerun).
    with open(fname, "rb") as f:
        st.download_button("📥 下载日志CSV文件", data=f, file_name=fname.name, mime="text/csv")

    # Upload at most once per game.
    if state.get('logs_uploaded'):
        return
    if HF_TOKEN and HF_REPO_ID and hf_api:
        with st.spinner("正在上传日志到 Hugging Face Hub..."):
            try:
                dest_path = f"logs/{fname.name}"
                url = hf_api.upload_file(
                    path_or_fileobj=str(fname),
                    path_in_repo=dest_path,
                    repo_id=HF_REPO_ID,
                    repo_type="dataset",
                    token=HF_TOKEN
                )
                state['logs_uploaded'] = True
                st.success(f"✅ 日志已成功上传到 Hugging Face! [查看文件]({url})")
            except Exception as e:
                st.error(f"上传到 Hugging Face 失败: {e}")
    else:
        st.info("未配置Hugging Face的 HF_TOKEN 或 HF_REPO_ID, 将跳过上传。")
310
+
311
# -----------------------------------------------------------------------------
# 4. Streamlit UI
# -----------------------------------------------------------------------------
# Top-level script: Streamlit re-executes it on every interaction, so the
# three sections below branch on st.session_state.game_state to decide which
# screen to render (setup, in-game, or end-of-game).
st.set_page_config(page_title="啤酒游戏-人机协作版", layout="wide")
st.title("🍺 啤酒游戏:人机协作挑战")
st.markdown("你将扮演供应链中的一个角色,与另外三个由大语言模型(LLM)驱动的AI代理合作。")

# --- Game setup & initialization (no game yet, or previous game finished) ---
if 'game_state' not in st.session_state or not st.session_state.game_state.get('game_running', False):
    st.header("🎮 开始新游戏")
    col1, col2 = st.columns(2)
    with col1:
        llm_personality = st.selectbox("AI '性格'", ('human_like', 'perfect_rational'), format_func=lambda x: x.replace('_', ' ').title())
    with col2:
        info_sharing = st.selectbox("信息共享", ('local', 'full'), format_func=lambda x: x.title())
    if st.button("🚀 开始游戏", type="primary"):
        init_game_state(llm_personality, info_sharing)
        st.rerun()

# --- Main in-game screen ---
elif 'game_state' in st.session_state and st.session_state.game_state.get('game_running'):
    state = st.session_state.game_state
    week, human_role, echelons = state['week'], state['human_role'], state['echelons']
    st.header(f"第 {week} 周 / 共 {WEEKS} 周")
    st.subheader(f"你的角色: **{human_role}** | AI模式: **{state['llm_personality'].replace('_', ' ')}** | 信息: **{state['info_sharing']}**")
    # One status column per echelon; the human's column is marked with 👤.
    cols = st.columns(4)
    for i, name in enumerate(["Retailer", "Wholesaler", "Distributor", "Factory"]):
        with cols[i]:
            e, title_icon = echelons[name], "👤" if name == human_role else "🤖"
            st.markdown(f"### {title_icon} {name} {'(你)' if name == human_role else '(AI)'}")
            st.metric("库存", e['inventory']); st.metric("缺货/积压", e['backlog'])
            st.write(f"本周收到订单: **{e['incoming_order']}**")
            st.write(f"下周到货: **{list(e['incoming_shipments'])[0] if e['incoming_shipments'] else 0}**")
    st.markdown("---")
    st.header("你的决策")
    # Ask the LLM for an order suggestion for the human's own role.
    # NOTE(review): this runs on every rerun of the in-game screen, so each
    # refresh costs one extra LLM call — confirm this is intended.
    human_echelon_state = echelons[human_role]
    prompt_sugg = get_llm_prompt(human_echelon_state, week, state['llm_personality'], state['info_sharing'], echelons)
    ai_suggestion, _ = get_llm_order_decision(prompt_sugg, f"{human_role} (Suggestion)", week, state['llm_personality'])
    st.info(f"💡 AI建议你 ({human_role}) 本周向上游订购 **{ai_suggestion}** 单位。")
    # The form batches the number input with its submit button so a single
    # click advances the game exactly one week.
    with st.form(key="order_form"):
        final_order = st.number_input("请输入你的最终订单数量:", min_value=0, step=1, value=ai_suggestion)
        if st.form_submit_button(label="✅ 提交订单并进入下一周"):
            step_game(int(final_order)); st.rerun()
    st.sidebar.header("游戏信息")
    st.sidebar.markdown(f"**游戏ID**: `{state['participant_id']}`")
    st.sidebar.markdown(f"**当前周**: {week-1} (已完成)")
    if st.sidebar.button("🔄 重置游戏"):
        del st.session_state.game_state; st.rerun()

# --- End-of-game screen ---
# Reached when step_game() has set game_running=False after the final week.
if 'game_state' in st.session_state and not st.session_state.game_state.get('game_running', False) and st.session_state.game_state['week'] > WEEKS:
    st.header("🎉 游戏结束!")
    state = st.session_state.game_state
    logs_df = pd.json_normalize(state['logs'])
    title = f"Beer Game (Human: {state['human_role']})\n(AI: {state['llm_personality'].replace('_', ' ').title()} | Info: {state['info_sharing'].title()})"
    fig = plot_results(logs_df, title)
    st.pyplot(fig)
    # Save logs locally and upload to Hugging Face.
    save_logs_and_upload(state)
    if st.button("✨ 开始一局新游戏"):
        del st.session_state.game_state; st.rerun()