Spaces:
Runtime error
Runtime error
Huy
committed on
Commit
·
c0b2adc
1
Parent(s):
aeb0b1f
Fix bug
Browse files — llamaindex_utils.py +2 -4
llamaindex_utils.py
CHANGED
|
@@ -350,8 +350,7 @@ async def asynthesize_results(queries: List[SubQuestion], contexts: Dict[str, Se
|
|
| 350 |
for idx, response in enumerate(responses):
|
| 351 |
# Parse json string to dictionary
|
| 352 |
json_dict = parse_json_markdown(response)
|
| 353 |
-
|
| 354 |
-
if len(json_dict["choices"]) > 1:
|
| 355 |
for choice in json_dict["choices"]:
|
| 356 |
new_contexts[json_dict["summarized_text"]] = new_contexts[json_dict["summarized_text"]].union(contexts[contexts_batches[idx][choice - 1]])
|
| 357 |
else:
|
|
@@ -527,14 +526,13 @@ class CustomQueryEngine:
|
|
| 527 |
|
| 528 |
for task, image in answers:
|
| 529 |
response2images_mapping[str(task.result())].add(image)
|
| 530 |
-
|
| 531 |
# Synthesize results
|
| 532 |
synthesized_text, source_images = await asynthesize_results(queries=sub_queries,
|
| 533 |
contexts=response2images_mapping,
|
| 534 |
llm=self._llm,
|
| 535 |
num_children=self._num_children)
|
| 536 |
|
| 537 |
-
|
| 538 |
final_answer = await self._llm.apredict(self._qa_prompt,
|
| 539 |
context_str=synthesized_text,
|
| 540 |
query_str=query_str)
|
|
|
|
| 350 |
for idx, response in enumerate(responses):
|
| 351 |
# Parse json string to dictionary
|
| 352 |
json_dict = parse_json_markdown(response)
|
| 353 |
+
if len(json_dict["choices"]) > 0:
|
|
|
|
| 354 |
for choice in json_dict["choices"]:
|
| 355 |
new_contexts[json_dict["summarized_text"]] = new_contexts[json_dict["summarized_text"]].union(contexts[contexts_batches[idx][choice - 1]])
|
| 356 |
else:
|
|
|
|
| 526 |
|
| 527 |
for task, image in answers:
|
| 528 |
response2images_mapping[str(task.result())].add(image)
|
| 529 |
+
|
| 530 |
# Synthesize results
|
| 531 |
synthesized_text, source_images = await asynthesize_results(queries=sub_queries,
|
| 532 |
contexts=response2images_mapping,
|
| 533 |
llm=self._llm,
|
| 534 |
num_children=self._num_children)
|
| 535 |
|
|
|
|
| 536 |
final_answer = await self._llm.apredict(self._qa_prompt,
|
| 537 |
context_str=synthesized_text,
|
| 538 |
query_str=query_str)
|