Guiyom committed on
Commit
fe9f22d
·
verified ·
1 Parent(s): ed0c60a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -10
app.py CHANGED
@@ -1670,21 +1670,21 @@ def openai_call(prompt: str, messages: list = None, model: str = "o3-mini",
1670
  params = {
1671
  "model": model,
1672
  "messages": messages,
 
1673
  }
1674
  # Note: Adjust token names based on the model
1675
  if model == "o3-mini":
1676
  params["max_completion_tokens"] = max_tokens_param
1677
  else:
1678
  params["max_tokens"] = max_tokens_param
1679
- params["temperature"] = temperature
1680
  response = client.chat.completions.create(**params)
1681
- result = response.choices[0].message.content.strip()
 
1682
 
1683
  if not result:
1684
  logging.error("Empty response from LLM for prompt: " + prompt)
1685
  return "Error: empty response"
1686
 
1687
- result = result.strip().strip("json").strip("```").strip()
1688
  logging.info(f"openai_call completed with model {model}. Response preview: {result}")
1689
  return result
1690
  except Exception as e:
@@ -2324,26 +2324,24 @@ Thet list of engines selected is the following:
2324
 
2325
  Now generate the result.
2326
  """
2327
- messages = [] # Use prompt directly in openai_call, messages is handled there.
2328
- llm_response = openai_call(prompt=prompt, messages=messages, model="o3-mini", temperature=0, max_tokens_param=50)
2329
  logging.info(f"Generated query tree: {llm_response}")
2330
- cleaned_response = llm_response.strip()
2331
- if cleaned_response.startswith("```"):
2332
- cleaned_response = cleaned_response.strip("`").strip()
2333
  try:
2334
  queries = json.loads(cleaned_response)['queries']
2335
- final_queries = queries[:min(len(queries), breadth)] # Ensures the output respect the breadth parameter.
2336
  except (json.JSONDecodeError, KeyError, TypeError) as e:
2337
  logging.error(f"Error parsing LLM response in generate_query_tree: {e}")
2338
  final_queries = [] # Return empty list if parsing fails
2339
  return final_queries
2340
 
2341
- logging.info(f"generate_query_tree: Generated queries: {final_queries}")
2342
  return final_queries
2343
 
2344
  def generate_serp_queries(context: str, breadth: int, depth: int, initial_query: str,
2345
  selected_engines=None, results_per_query: int = 10) -> list:
2346
  queries = generate_query_tree(context, breadth, depth, selected_engines)
 
2347
  prompt = f"""The research topic is: "{initial_query}".
2348
  Based on this query and the overall context: "{context}", suggest one or several languages (other than English) that might be relevant.
2349
 
@@ -2369,6 +2367,8 @@ Which are most relevant? Output a comma separated list (e.g., "google,baidu").
2369
  If none are found, output "google".
2370
  """
2371
  identified_engines = openai_call(prompt_engines, model="o3-mini", temperature=0, max_tokens_param=50)
 
 
2372
  # Split and strip engines; if result is empty (or all empty strings), default to ["google"]
2373
  selected_engines = [e.strip() for e in identified_engines.split(",") if e.strip()]
2374
  if not selected_engines:
 
1670
  params = {
1671
  "model": model,
1672
  "messages": messages,
1673
+ "temperature": temperature
1674
  }
1675
  # Note: Adjust token names based on the model
1676
  if model == "o3-mini":
1677
  params["max_completion_tokens"] = max_tokens_param
1678
  else:
1679
  params["max_tokens"] = max_tokens_param
 
1680
  response = client.chat.completions.create(**params)
1681
+ result = response.choices[0].message.content
1682
+ result = result.strip().strip("json").strip("```").strip()
1683
 
1684
  if not result:
1685
  logging.error("Empty response from LLM for prompt: " + prompt)
1686
  return "Error: empty response"
1687
 
 
1688
  logging.info(f"openai_call completed with model {model}. Response preview: {result}")
1689
  return result
1690
  except Exception as e:
 
2324
 
2325
  Now generate the result.
2326
  """
2327
+ messages = []
2328
+ llm_response = openai_call(prompt=prompt, model="o3-mini", temperature=0, max_tokens_param=50)
2329
  logging.info(f"Generated query tree: {llm_response}")
2330
+ cleaned_response = llm_response.strip().strip("`").strip()
 
 
2331
  try:
2332
  queries = json.loads(cleaned_response)['queries']
2333
+ final_queries = queries[:min(len(queries), breadth)]
2334
  except (json.JSONDecodeError, KeyError, TypeError) as e:
2335
  logging.error(f"Error parsing LLM response in generate_query_tree: {e}")
2336
  final_queries = [] # Return empty list if parsing fails
2337
  return final_queries
2338
 
 
2339
  return final_queries
2340
 
2341
  def generate_serp_queries(context: str, breadth: int, depth: int, initial_query: str,
2342
  selected_engines=None, results_per_query: int = 10) -> list:
2343
  queries = generate_query_tree(context, breadth, depth, selected_engines)
2344
+ logging.info(f"Queries generated from generate_query_tree:{queries}")
2345
  prompt = f"""The research topic is: "{initial_query}".
2346
  Based on this query and the overall context: "{context}", suggest one or several languages (other than English) that might be relevant.
2347
 
 
2367
  If none are found, output "google".
2368
  """
2369
  identified_engines = openai_call(prompt_engines, model="o3-mini", temperature=0, max_tokens_param=50)
2370
+ logging.info(f"Identified engines are:{identified_engines}")
2371
+
2372
  # Split and strip engines; if result is empty (or all empty strings), default to ["google"]
2373
  selected_engines = [e.strip() for e in identified_engines.split(",") if e.strip()]
2374
  if not selected_engines: