Update utils.py
utils.py CHANGED
@@ -362,6 +362,18 @@ class SQLDatabaseChainPatched(SQLDatabaseChain):
             ).strip()
             # self.llm_chain = self.revert_to_small_model(chain=self.llm_chain)
             self.intermediate_steps['result'] = final_result
+
+            # provide explanation
+            input_text += f"{final_result}\nExplanation:"
+            llm_inputs["input"] = input_text
+            self.llm_chain, n_tokens4 = self.prepare_llm(llm_inputs, chain=self.llm_chain)
+            # self.intermediate_steps['n_tokens_list'].append(n_tokens3)
+            explanation = self.llm_chain.predict(
+                callbacks=_run_manager.get_child(),
+                **llm_inputs,
+            ).strip()
+            # self.llm_chain = self.revert_to_small_model(chain=self.llm_chain)
+            self.intermediate_steps['query_explanation'] = explanation
 
             #if 'result' in self.intermediate_steps:
             #    self.intermediate_steps['translated_result'] = translate(self.llms['4k'], orig_question, self.intermediate_steps['result'])
@@ -379,6 +391,18 @@ class SQLDatabaseChainPatched(SQLDatabaseChain):
             ).strip()
             # self.llm_chain = self.revert_to_small_model(chain=self.llm_chain)
             self.intermediate_steps['result'] = final_result
+
+            # provide explanation
+            input_text += f"{final_result}\nExplanation:"
+            llm_inputs["input"] = input_text
+            self.llm_chain, n_tokens4 = self.prepare_llm(llm_inputs, chain=self.llm_chain)
+            # self.intermediate_steps['n_tokens_list'].append(n_tokens3)
+            explanation = self.llm_chain.predict(
+                callbacks=_run_manager.get_child(),
+                **llm_inputs,
+            ).strip()
+            # self.llm_chain = self.revert_to_small_model(chain=self.llm_chain)
+            self.intermediate_steps['query_explanation'] = explanation
             #if 'result' in self.intermediate_steps:
             #    self.intermediate_steps['translated_result'] = translate(self.llms['4k'], orig_question, self.intermediate_steps['result'])
 
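Both hunks make the same addition in two places in SQLDatabaseChainPatched: once the final answer is stored in intermediate_steps['result'], the prompt is extended with that answer plus an "Explanation:" cue, the same LLM chain is run once more, and the output is saved as intermediate_steps['query_explanation']. Below is a minimal, self-contained sketch of that re-prompting pattern, assuming plain LangChain primitives; the FakeListLLM stand-in, the prompt template, and the example inputs are illustrative only and are not the Space's actual code, and the patched class's prepare_llm / _run_manager plumbing is left out.

# Minimal sketch (assumed names, not the Space's code) of the pattern the commit adds:
# re-prompt the same chain with the answer followed by an "Explanation:" cue.
# Import paths are for classic LangChain; newer releases move FakeListLLM to
# langchain_community.llms.fake.
from langchain.chains import LLMChain
from langchain.llms.fake import FakeListLLM
from langchain.prompts import PromptTemplate

# Canned responses so the sketch runs without a real model or API key.
llm = FakeListLLM(responses=[
    "There are 42 albums.",
    "The query counts the rows of the Album table.",
])
llm_chain = LLMChain(
    llm=llm,
    prompt=PromptTemplate(input_variables=["input"], template="{input}"),
)

intermediate_steps = {}
llm_inputs = {"input": "Question: How many albums are there?\nSQLResult: [(42,)]\nAnswer: "}
input_text = llm_inputs["input"]

# First pass: produce the natural-language answer (the diff's `final_result`).
final_result = llm_chain.predict(**llm_inputs).strip()
intermediate_steps["result"] = final_result

# Second pass: append the answer and an "Explanation:" cue, re-run the chain,
# and store the output under a separate key, as the commit does.
input_text += f"{final_result}\nExplanation:"
llm_inputs["input"] = input_text
explanation = llm_chain.predict(**llm_inputs).strip()
intermediate_steps["query_explanation"] = explanation

print(intermediate_steps)

Running the sketch prints a dict holding both 'result' and 'query_explanation', mirroring the two intermediate_steps keys the commit populates.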