Guiyom committed on
Commit
e082495
·
verified ·
1 Parent(s): df0ce20

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -9
app.py CHANGED
@@ -1659,8 +1659,7 @@ def clean_content(raw_content: str) -> str:
1659
  def display_image():
1660
  return "visual.png"
1661
 
1662
- def openai_call(prompt: str, messages: list = None, model: str = "o3-mini",
1663
- temperature: float = 0.0, max_tokens_param: int = 10000) -> str:
1664
  if messages is None or len(messages) == 0:
1665
  messages = [{"role": "user", "content": prompt}]
1666
  if len(messages[0]['content']) > MAX_MESSAGE_LENGTH:
@@ -1671,12 +1670,11 @@ def openai_call(prompt: str, messages: list = None, model: str = "o3-mini",
1671
  "model": model,
1672
  "messages": messages
1673
  }
1674
- # Note: Adjust token names based on the model
1675
  if model == "o3-mini":
1676
  params["max_completion_tokens"] = max_tokens_param
1677
  else:
1678
  params["max_tokens"] = max_tokens_param
1679
- params["temperature"]: temperature
1680
  response = client.chat.completions.create(**params)
1681
  result = response.choices[0].message.content
1682
  result = result.strip().strip("json").strip("```").strip()
@@ -2293,18 +2291,19 @@ def generate_query_tree(context: str, breadth: int, depth: int, selected_engines
2293
  list_engines = "all relevant search engines" if selected_engines is None else ','.join(map(str, selected_engines))
2294
 
2295
  prompt = f"""
 
2296
 
2297
- Generate a list of {breadth} search queries relevant to the following context:
2298
-
2299
- "{context}"
2300
 
2301
  // Requirements
2302
  - The queries should be suitable for a search engine.
2303
  - Each query should combine terms using logical operators (AND, OR) where appropriate.
2304
  - Do not include explanations or introductory phrases,
2305
- - just output a JSON object containing a list of strings named 'queries'.
 
2306
  // IMPORTANT:
2307
  - Return only valid JSON without any markdown code fences (```) or mention of json
 
2308
  // EXAMPLE (if breadth = 4):
2309
  {{
2310
  "queries": [
@@ -2314,11 +2313,12 @@ def generate_query_tree(context: str, breadth: int, depth: int, selected_engines
2314
  "Statistics" AND "data analysis" AND "machine learning algorithms"
2315
  ]
2316
  }}
 
2317
  Do not include any extra text, markdown formatting, or commentary. Output the JSON starting from "{{" and ending with "}}".
2318
  Now generate the result.
2319
  """
2320
  messages = []
2321
- llm_response = openai_call(prompt=prompt, messages=messages, model="o3-mini", temperature=0, max_tokens_param=150)
2322
  logging.info(f"Generated query tree: {llm_response}")
2323
 
2324
  cleaned_response = llm_response.strip().strip("`").strip()
 
1659
  def display_image():
1660
  return "visual.png"
1661
 
1662
+ def openai_call(prompt: str, messages: list = None, model: str = "o3-mini", temperature: float = 0.0, max_tokens_param: int = 10000) -> str:
 
1663
  if messages is None or len(messages) == 0:
1664
  messages = [{"role": "user", "content": prompt}]
1665
  if len(messages[0]['content']) > MAX_MESSAGE_LENGTH:
 
1670
  "model": model,
1671
  "messages": messages
1672
  }
 
1673
  if model == "o3-mini":
1674
  params["max_completion_tokens"] = max_tokens_param
1675
  else:
1676
  params["max_tokens"] = max_tokens_param
1677
+ params["temperature"]= temperature
1678
  response = client.chat.completions.create(**params)
1679
  result = response.choices[0].message.content
1680
  result = result.strip().strip("json").strip("```").strip()
 
2291
  list_engines = "all relevant search engines" if selected_engines is None else ','.join(map(str, selected_engines))
2292
 
2293
  prompt = f"""
2294
+ Generate a list of {breadth} search queries relevant to the following context:
2295
 
2296
+ "{context}"
 
 
2297
 
2298
  // Requirements
2299
  - The queries should be suitable for a search engine.
2300
  - Each query should combine terms using logical operators (AND, OR) where appropriate.
2301
  - Do not include explanations or introductory phrases,
2302
+ - Just output a JSON object containing a list of strings named 'queries'.
2303
+
2304
  // IMPORTANT:
2305
  - Return only valid JSON without any markdown code fences (```) or mention of json
2306
+
2307
  // EXAMPLE (if breadth = 4):
2308
  {{
2309
  "queries": [
 
2313
  "Statistics" AND "data analysis" AND "machine learning algorithms"
2314
  ]
2315
  }}
2316
+
2317
  Do not include any extra text, markdown formatting, or commentary. Output the JSON starting from "{{" and ending with "}}".
2318
  Now generate the result.
2319
  """
2320
  messages = []
2321
+ llm_response = openai_call(prompt=prompt, messages=messages, model="o3-mini", temperature=0, max_tokens_param=800)
2322
  logging.info(f"Generated query tree: {llm_response}")
2323
 
2324
  cleaned_response = llm_response.strip().strip("`").strip()