Lilli98 committed on
Commit
e601abf
·
verified ·
1 Parent(s): 881d83b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -14
app.py CHANGED
@@ -18,16 +18,17 @@ import random
18
  import json
19
  from datetime import datetime
20
  from pathlib import Path
21
- from openai import OpenAI
22
  import streamlit as st
23
  import pandas as pd
 
24
  from huggingface_hub import upload_file, HfApi
25
 
26
  # ---------------------------
27
  # CONFIGURABLE PARAMETERS
28
  # ---------------------------
29
  # Classic Beer Game choices: choose 24 or 36 depending on experiment design
30
- DEFAULT_WEEKS = 36
31
  TRANSPORT_DELAY = 2 # shipments take 2 weeks to arrive
32
  ORDER_DELAY = 1 # orders incur 1-week processing delay (modeled via pipeline)
33
  INITIAL_INVENTORY = 12
@@ -56,7 +57,7 @@ def fmt(o):
56
  # Hugging Face upload helper
57
  # ---------------------------
58
  HF_TOKEN = os.getenv("HF_TOKEN")
59
- HF_REPO_ID = os.getenv("HF_REPO_ID") # "Lilli98/beer-game-logs"
60
  hf_api = HfApi()
61
 
62
  def upload_log_to_hf(local_path: Path, participant_id: str):
@@ -86,13 +87,7 @@ def upload_log_to_hf(local_path: Path, participant_id: str):
86
  # ---------------------------
87
  # OpenAI helper
88
  # ---------------------------
89
-
90
- OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
91
-
92
- if not OPENAI_API_KEY:
93
- raise ValueError("Missing OPENAI_API_KEY environment variable!")
94
-
95
- client = OpenAI(api_key=OPENAI_API_KEY)
96
 
97
  def call_llm_for_order(role: str, local_state: dict, info_sharing_visible: bool, demand_history: list, max_tokens=40, temperature=0.7):
98
  """
@@ -119,7 +114,7 @@ def call_llm_for_order(role: str, local_state: dict, info_sharing_visible: bool,
119
  )
120
 
121
  try:
122
- resp = client.chat.completions.create(
123
  model=OPENAI_MODEL,
124
  messages=[
125
  {"role": "system", "content": "You are an automated Beer Game agent who decides weekly orders."},
@@ -129,7 +124,7 @@ def call_llm_for_order(role: str, local_state: dict, info_sharing_visible: bool,
129
  temperature=temperature,
130
  n=1
131
  )
132
- raw = resp.choices[0].message.content.strip()
133
  except Exception as e:
134
  raw = f"OPENAI_ERROR: {str(e)}"
135
  # fallback later
@@ -483,12 +478,12 @@ with col_sidebar:
483
  # quick test prompt
484
  try:
485
  test_prompt = "You are a helpful agent. Reply with '42'."
486
- resp = client.chat.completions.create(
487
  model=OPENAI_MODEL,
488
  messages=[{"role":"user","content":test_prompt}],
489
  max_tokens=10
490
  )
491
- st.write("LLM raw:", resp.choices[0].message.content)
492
  except Exception as e:
493
  st.error(f"LLM test failed: {e}")
494
 
 
18
  import json
19
  from datetime import datetime
20
  from pathlib import Path
21
+
22
  import streamlit as st
23
  import pandas as pd
24
+ import openai
25
  from huggingface_hub import upload_file, HfApi
26
 
27
  # ---------------------------
28
  # CONFIGURABLE PARAMETERS
29
  # ---------------------------
30
  # Classic Beer Game choices: choose 24 or 36 depending on experiment design
31
+ DEFAULT_WEEKS = 24
32
  TRANSPORT_DELAY = 2 # shipments take 2 weeks to arrive
33
  ORDER_DELAY = 1 # orders incur 1-week processing delay (modeled via pipeline)
34
  INITIAL_INVENTORY = 12
 
57
  # Hugging Face upload helper
58
  # ---------------------------
59
  HF_TOKEN = os.getenv("HF_TOKEN")
60
+ HF_REPO_ID = os.getenv("HF_REPO_ID") # e.g., "XinyuLi/beer-game-logs"
61
  hf_api = HfApi()
62
 
63
  def upload_log_to_hf(local_path: Path, participant_id: str):
 
87
  # ---------------------------
88
  # OpenAI helper
89
  # ---------------------------
90
+ openai.api_key = os.getenv("OPENAI_API_KEY")
 
 
 
 
 
 
91
 
92
  def call_llm_for_order(role: str, local_state: dict, info_sharing_visible: bool, demand_history: list, max_tokens=40, temperature=0.7):
93
  """
 
114
  )
115
 
116
  try:
117
+ resp = openai.ChatCompletion.create(
118
  model=OPENAI_MODEL,
119
  messages=[
120
  {"role": "system", "content": "You are an automated Beer Game agent who decides weekly orders."},
 
124
  temperature=temperature,
125
  n=1
126
  )
127
+ raw = resp.choices[0].message.get("content", "").strip()
128
  except Exception as e:
129
  raw = f"OPENAI_ERROR: {str(e)}"
130
  # fallback later
 
478
  # quick test prompt
479
  try:
480
  test_prompt = "You are a helpful agent. Reply with '42'."
481
+ resp = openai.ChatCompletion.create(
482
  model=OPENAI_MODEL,
483
  messages=[{"role":"user","content":test_prompt}],
484
  max_tokens=10
485
  )
486
+ st.write("LLM raw:", resp.choices[0].message.get("content"))
487
  except Exception as e:
488
  st.error(f"LLM test failed: {e}")
489