QuantumLearner committed on
Commit
0feb25a
·
verified ·
1 Parent(s): 765eafe

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +30 -15
app.py CHANGED
@@ -16,30 +16,45 @@ from gpt_researcher import GPTResearcher
16
  # -------------------------
17
  st.set_page_config(layout="wide", page_title="GPT Researcher")
18
 
19
- # Providers & models — set safe defaults to avoid `o1-preview`
20
  os.environ.setdefault("LLM_PROVIDER", "openai")
21
  os.environ.setdefault("EMBEDDING_PROVIDER", "openai")
22
  os.environ.setdefault("EMBEDDING_MODEL", "text-embedding-3-small")
23
- os.environ.setdefault("STRATEGIC_LLM", "gpt-4o")
24
- os.environ.setdefault("SMART_LLM", "gpt-4o-mini")
25
- # Compatibility aliases some versions of gpt_researcher read
26
- os.environ.setdefault("STRATEGIC_MODEL", os.environ["STRATEGIC_LLM"])
27
- os.environ.setdefault("SMART_MODEL", os.environ["SMART_LLM"])
28
- os.environ.setdefault("STRATEGY_LLM", os.environ["STRATEGIC_LLM"])
29
- os.environ.setdefault("STRATEGY_MODEL", os.environ["STRATEGIC_LLM"])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
30
 
31
  # Allow asyncio.run inside Streamlit
32
  nest_asyncio.apply()
33
 
 
34
  # -------------------------
35
  # Small helpers
36
  # -------------------------
37
  def _apply_model_env(strategic_model: str, smart_model: str):
38
- """Apply model choices to environment for gpt_researcher."""
39
- for k in ("STRATEGIC_LLM", "STRATEGIC_MODEL", "STRATEGY_LLM", "STRATEGY_MODEL"):
40
- os.environ[k] = strategic_model
41
- for k in ("SMART_LLM", "SMART_MODEL"):
42
- os.environ[k] = smart_model
43
 
44
  def _clean_logs(text: str) -> str:
45
  """Optionally hide noisy lines about unavailable models, keep everything else."""
@@ -185,7 +200,7 @@ report_type = st.sidebar.selectbox(
185
  help="Choose the format of the final report.",
186
  )
187
 
188
- # Model choices (so you never hit `o1-preview`)
189
  with st.sidebar.expander("Model Settings", expanded=False):
190
  strategic_choice = st.selectbox(
191
  "Strategic model",
@@ -238,7 +253,7 @@ if run_clicked:
238
  # Retriever back-end (Tavily)
239
  os.environ["RETRIEVER"] = "tavily"
240
 
241
- # Apply model selections so gpt_researcher never tries `o1-preview`
242
  _apply_model_env(strategic_choice, smart_choice)
243
 
244
  # Decide the report source
 
16
  # -------------------------
17
  st.set_page_config(layout="wide", page_title="GPT Researcher")
18
 
19
+ # Base providers & defaults
20
  os.environ.setdefault("LLM_PROVIDER", "openai")
21
  os.environ.setdefault("EMBEDDING_PROVIDER", "openai")
22
  os.environ.setdefault("EMBEDDING_MODEL", "text-embedding-3-small")
23
+
24
+ # IMPORTANT: gpt_researcher expects "<provider>:<model>" for SMART_LLM / FAST_LLM / STRATEGIC_LLM
25
+ _provider = os.environ.get("LLM_PROVIDER", "openai")
26
+ _default_strategic = "gpt-4o"
27
+ _default_smart = "gpt-4o-mini"
28
+
29
+ # Seed all the variants some releases look for
30
+ def _seed_llm_env(strategic_model: str, smart_model: str, provider: str = _provider):
31
+ strategic = f"{provider}:{strategic_model}"
32
+ smart = f"{provider}:{smart_model}"
33
+ # Required (newer versions check these):
34
+ os.environ["STRATEGIC_LLM"] = strategic
35
+ os.environ["SMART_LLM"] = smart
36
+ os.environ["FAST_LLM"] = smart # alias some builds use
37
+
38
+ # Back-compat aliases some releases read:
39
+ os.environ["STRATEGY_LLM"] = strategic
40
+ os.environ["STRATEGIC_MODEL"] = strategic_model
41
+ os.environ["SMART_MODEL"] = smart_model
42
+
43
+ # Embeddings (some builds accept both split and combined)
44
+ os.environ["EMBEDDING"] = f"{os.environ.get('EMBEDDING_PROVIDER','openai')}:{os.environ.get('EMBEDDING_MODEL','text-embedding-3-small')}"
45
+
46
+ _seed_llm_env(_default_strategic, _default_smart)
47
 
48
  # Allow asyncio.run inside Streamlit
49
  nest_asyncio.apply()
50
 
51
+
52
  # -------------------------
53
  # Small helpers
54
  # -------------------------
55
def _apply_model_env(strategic_model: str, smart_model: str):
    """Push the user's model choices into the environment.

    Delegates to ``_seed_llm_env`` so every variant gpt_researcher reads is
    set in the provider-qualified ``<provider>:<model>`` format.
    """
    current_provider = os.environ.get("LLM_PROVIDER", "openai")
    _seed_llm_env(strategic_model, smart_model, provider=current_provider)
 
 
 
58
 
59
  def _clean_logs(text: str) -> str:
60
  """Optionally hide noisy lines about unavailable models, keep everything else."""
 
200
  help="Choose the format of the final report.",
201
  )
202
 
203
+ # Model choices (ensure we never hit `o1-preview`)
204
  with st.sidebar.expander("Model Settings", expanded=False):
205
  strategic_choice = st.selectbox(
206
  "Strategic model",
 
253
  # Retriever back-end (Tavily)
254
  os.environ["RETRIEVER"] = "tavily"
255
 
256
+ # Apply model selections so gpt_researcher gets "<provider>:<model>"
257
  _apply_model_env(strategic_choice, smart_choice)
258
 
259
  # Decide the report source