Guiyom committed on
Commit
ed0c60a
·
verified ·
1 Parent(s): d879330

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +8 -20
app.py CHANGED
@@ -2203,28 +2203,14 @@ Now, produce the report please.
2203
  # Post-processing
2204
  report = re.sub(r'\{\[\{(.*?)\}\]\}', r'\1', report)
2205
  report = re.sub(r'\[\{(.*?)\}\]', r'\1', report)
2206
-
2207
-
2208
 
2209
  # If the report is too long, compress it.
2210
  if len(report) > MAX_MESSAGE_LENGTH:
2211
- report = compress_text(report, MAX_MESSAGE_LENGTH) # Generate the reference table from the actual references_list
2212
- # Ref table
2213
- ref_table_html = generate_reference_table(references)
2214
- soup = BeautifulSoup(report, "html.parser")
2215
- ref_heading = soup.find(lambda tag: tag.name in ["h1", "h2", "h3", "h4"] and "Reference Summary Table" in tag.get_text())
2216
- if ref_heading:
2217
- next_sibling = ref_heading.find_next_sibling()
2218
- if next_sibling:
2219
- next_sibling.replace_with(BeautifulSoup(ref_table_html, "html.parser"))
2220
- else:
2221
- # If not found, append below the heading:
2222
- ref_heading.insert_after(BeautifulSoup(ref_table_html, "html.parser"))
2223
- report = str(soup)
2224
- else:
2225
- # Alternatively, append the table at the end of the report
2226
- report += "<h2>Reference Summary Table</h2>" + ref_table_html
2227
-
2228
  return report
2229
 
2230
  def filter_search_results(results: list, visited_urls: set, query: str, clarifications: str) -> list:
@@ -2340,6 +2326,7 @@ Now generate the result.
2340
  """
2341
  messages = [] # Use prompt directly in openai_call, messages is handled there.
2342
  llm_response = openai_call(prompt=prompt, messages=messages, model="o3-mini", temperature=0, max_tokens_param=50)
 
2343
  cleaned_response = llm_response.strip()
2344
  if cleaned_response.startswith("```"):
2345
  cleaned_response = cleaned_response.strip("`").strip()
@@ -2347,7 +2334,7 @@ Now generate the result.
2347
  queries = json.loads(cleaned_response)['queries']
2348
  final_queries = queries[:min(len(queries), breadth)] # Ensures the output respect the breadth parameter.
2349
  except (json.JSONDecodeError, KeyError, TypeError) as e:
2350
- logging.error(f"Error parsing LLM response: {e}")
2351
  final_queries = [] # Return empty list if parsing fails
2352
  return final_queries
2353
 
@@ -2367,6 +2354,7 @@ Output either:
2367
  Output only the result.
2368
  """
2369
  languages_detected = openai_call(prompt, model="gpt-4o-mini", temperature=0, max_tokens_param=50)
 
2370
  if languages_detected != "No local attributes detected":
2371
  queries = [make_multilingual_query(q, context, languages_detected) for q in queries]
2372
  if not selected_engines or len(selected_engines) == 0:
 
2203
  # Post-processing
2204
  report = re.sub(r'\{\[\{(.*?)\}\]\}', r'\1', report)
2205
  report = re.sub(r'\[\{(.*?)\}\]', r'\1', report)
 
 
2206
 
2207
  # If the report is too long, compress it.
2208
  if len(report) > MAX_MESSAGE_LENGTH:
2209
+ report = compress_text(report, MAX_MESSAGE_LENGTH)
2210
+ if report.startswith("Error calling OpenAI API"):
2211
+ logging.error(f"generate_final_report error: {report}")
2212
+ return f"Error generating report: {report}"
2213
+ logging.info("generate_final_report: Report generated successfully.")
 
 
 
 
 
 
 
 
 
 
 
 
2214
  return report
2215
 
2216
  def filter_search_results(results: list, visited_urls: set, query: str, clarifications: str) -> list:
 
2326
  """
2327
  messages = [] # Use prompt directly in openai_call, messages is handled there.
2328
  llm_response = openai_call(prompt=prompt, messages=messages, model="o3-mini", temperature=0, max_tokens_param=50)
2329
+ logging.info(f"Generated query tree: {llm_response}")
2330
  cleaned_response = llm_response.strip()
2331
  if cleaned_response.startswith("```"):
2332
  cleaned_response = cleaned_response.strip("`").strip()
 
2334
  queries = json.loads(cleaned_response)['queries']
2335
  final_queries = queries[:min(len(queries), breadth)] # Ensures the output respect the breadth parameter.
2336
  except (json.JSONDecodeError, KeyError, TypeError) as e:
2337
+ logging.error(f"Error parsing LLM response in generate_query_tree: {e}")
2338
  final_queries = [] # Return empty list if parsing fails
2339
  return final_queries
2340
 
 
2354
  Output only the result.
2355
  """
2356
  languages_detected = openai_call(prompt, model="gpt-4o-mini", temperature=0, max_tokens_param=50)
2357
+ logging.info(f"languages detected: {languages_detected}")
2358
  if languages_detected != "No local attributes detected":
2359
  queries = [make_multilingual_query(q, context, languages_detected) for q in queries]
2360
  if not selected_engines or len(selected_engines) == 0: