Guiyom committed on
Commit
c553142
·
verified ·
1 Parent(s): 2c5e8ba

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -7
app.py CHANGED
@@ -2945,7 +2945,7 @@ def iterative_deep_research_gen(initial_query: str, reportstyle: str, breadth: i
2945
  "title": title,
2946
  "summary": analysis_summary,
2947
  "clean_content": clean_content,
2948
- "raw_excerpt": raw_content[:200]
2949
  })
2950
 
2951
  if analysis.get("relevant", "no").lower() == "yes":
@@ -2973,11 +2973,11 @@ def iterative_deep_research_gen(initial_query: str, reportstyle: str, breadth: i
2973
  process_log += "Appended additional clarifications to the context.\n"
2974
  progress_pct = int((iteration / depth) * 100)
2975
  yield (f"Progress: {progress_pct}%", None, None, None, None)
2976
-
2977
- # chunk and filter all crumbs if breadth>3 and depth>2
2978
- filtered_crumbs_list = crumbs_list
2979
  if breadth > 3 and depth > 2:
2980
  filtered_crumbs_list = filter_crumbs_in_batches(crumbs_list, initial_query, followup_clarifications)
 
 
2981
 
2982
  # Now build aggregated crumb text from filtered_crumbs_list only
2983
  aggregated_crumbs = "\n\n".join([
@@ -3000,7 +3000,7 @@ def iterative_deep_research_gen(initial_query: str, reportstyle: str, breadth: i
3000
  # We convert processed_queries to a string suitable for storing
3001
  all_processed_queries_str = "\n".join(sorted(processed_queries))
3002
 
3003
- yield ("", final_report, process_log, crumbs_list, all_processed_queries_str)
3004
 
3005
  def filter_crumbs_in_batches(crumbs_list: list, initial_query: str, clarifications: str) -> list:
3006
  accepted = []
@@ -3020,11 +3020,11 @@ And the provided clarifications:
3020
 
3021
  Previously selected batch:
3022
 
3023
- Now provide the JSCON
3024
  """
3025
  listing = []
3026
  for idx, c in enumerate(batch):
3027
- snippet_for_prompt = c["summary"][:500] # short snippet
3028
  listing.append(f"Crumb {idx}: {snippet_for_prompt}")
3029
  prompt += "\n".join(listing)
3030
 
@@ -3035,6 +3035,8 @@ Return a JSON object with structure:
3035
  "1": "yes" or "no",
3036
  ...
3037
  }
 
 
3038
  """
3039
  decision_str = openai_call(prompt, model="o3-mini", temperature=0, max_tokens_param=1500)
3040
  # parse JSON
 
2945
  "title": title,
2946
  "summary": analysis_summary,
2947
  "clean_content": clean_content,
2948
+ "raw_excerpt": raw_content[:1000]
2949
  })
2950
 
2951
  if analysis.get("relevant", "no").lower() == "yes":
 
2973
  process_log += "Appended additional clarifications to the context.\n"
2974
  progress_pct = int((iteration / depth) * 100)
2975
  yield (f"Progress: {progress_pct}%", None, None, None, None)
2976
+
 
 
2977
  if breadth > 3 and depth > 2:
2978
  filtered_crumbs_list = filter_crumbs_in_batches(crumbs_list, initial_query, followup_clarifications)
2979
+ else:
2980
+ filtered_crumbs_list = crumbs_list
2981
 
2982
  # Now build aggregated crumb text from filtered_crumbs_list only
2983
  aggregated_crumbs = "\n\n".join([
 
3000
  # We convert processed_queries to a string suitable for storing
3001
  all_processed_queries_str = "\n".join(sorted(processed_queries))
3002
 
3003
+ yield ("", final_report, process_log, aggregated_crumbs, all_processed_queries_str)
3004
 
3005
  def filter_crumbs_in_batches(crumbs_list: list, initial_query: str, clarifications: str) -> list:
3006
  accepted = []
 
3020
 
3021
  Previously selected batch:
3022
 
3023
+ Now provide the JSON
3024
  """
3025
  listing = []
3026
  for idx, c in enumerate(batch):
3027
+ snippet_for_prompt = c["summary"][:1000] # short snippet
3028
  listing.append(f"Crumb {idx}: {snippet_for_prompt}")
3029
  prompt += "\n".join(listing)
3030
 
 
3035
  "1": "yes" or "no",
3036
  ...
3037
  }
3038
+
3039
+ No code fences, no 'json' mentioned before the result, only the json result formatted.
3040
  """
3041
  decision_str = openai_call(prompt, model="o3-mini", temperature=0, max_tokens_param=1500)
3042
  # parse JSON