Spaces:
Paused
Paused
Update output with two spaces before a newline
Browse files
app.py
CHANGED
|
@@ -423,34 +423,31 @@ def text_to_text_generation(verbose, prompt):
|
|
| 423 |
res_semantic_search_prompt, res_semantic_search = cleanup(*gen_semantic_search)
|
| 424 |
res_google_search_prompt, res_google_search = cleanup(*gen_google_search)
|
| 425 |
|
| 426 |
-
print("GOOGLE SEARCH RES: ", str_to_md(res_google_search))
|
| 427 |
-
print("SEMANTIC SEARCH RES: ", str_to_md(res_semantic_search))
|
| 428 |
-
|
| 429 |
if verbose:
|
| 430 |
return (
|
| 431 |
f"# 📚KubeWizard📚\n"
|
| 432 |
f"#### A helpful Kubernetes Assistant powered by Component Soft\n"
|
| 433 |
f"--------------------------------------------\n"
|
| 434 |
-
f"# Classified your prompt as:\n"
|
| 435 |
f"{modes}\n\n"
|
| 436 |
f"--------------------------------------------\n"
|
| 437 |
-
f"# Answer with finetuned model\n"
|
| 438 |
-
f"## Prompt given to the model:\n"
|
| 439 |
f"{str_to_md(res_prompt)}\n\n"
|
| 440 |
-
f"## Model's answer:\n"
|
| 441 |
f"{str_to_md(res_normal)}\n\n"
|
| 442 |
f"--------------------------------------------\n"
|
| 443 |
-
f"# Answer with RAG\n"
|
| 444 |
-
f"## Section 1: Preparing for generation\n"
|
| 445 |
-
f"{res_semantic_search_md}\n"
|
| 446 |
-
f"## Section 2: Generating answer\n"
|
| 447 |
-
f"{str_to_md(res_semantic_search)}\n"
|
| 448 |
f"--------------------------------------------\n"
|
| 449 |
-
f"# Answer with Google search\n"
|
| 450 |
-
f"## Section 1: Preparing for generation\n"
|
| 451 |
-
f"{res_google_search_md}\n"
|
| 452 |
-
f"## Section 2: Generating answer\n"
|
| 453 |
-
f"{str_to_md(res_google_search)}\n"
|
| 454 |
)
|
| 455 |
else:
|
| 456 |
return (
|
|
|
|
| 423 |
res_semantic_search_prompt, res_semantic_search = cleanup(*gen_semantic_search)
|
| 424 |
res_google_search_prompt, res_google_search = cleanup(*gen_google_search)
|
| 425 |
|
|
|
|
|
|
|
|
|
|
| 426 |
if verbose:
|
| 427 |
return (
|
| 428 |
f"# 📚KubeWizard📚\n"
|
| 429 |
f"#### A helpful Kubernetes Assistant powered by Component Soft\n"
|
| 430 |
f"--------------------------------------------\n"
|
| 431 |
+
f"# Classified your prompt as: \n"
|
| 432 |
f"{modes}\n\n"
|
| 433 |
f"--------------------------------------------\n"
|
| 434 |
+
f"# Answer with finetuned model \n"
|
| 435 |
+
f"## Prompt given to the model: \n"
|
| 436 |
f"{str_to_md(res_prompt)}\n\n"
|
| 437 |
+
f"## Model's answer: \n"
|
| 438 |
f"{str_to_md(res_normal)}\n\n"
|
| 439 |
f"--------------------------------------------\n"
|
| 440 |
+
f"# Answer with RAG \n"
|
| 441 |
+
f"## Section 1: Preparing for generation \n"
|
| 442 |
+
f"{res_semantic_search_md} \n"
|
| 443 |
+
f"## Section 2: Generating answer \n"
|
| 444 |
+
f"{str_to_md(res_semantic_search)} \n"
|
| 445 |
f"--------------------------------------------\n"
|
| 446 |
+
f"# Answer with Google search \n"
|
| 447 |
+
f"## Section 1: Preparing for generation \n"
|
| 448 |
+
f"{res_google_search_md} \n"
|
| 449 |
+
f"## Section 2: Generating answer \n"
|
| 450 |
+
f"{str_to_md(res_google_search)} \n"
|
| 451 |
)
|
| 452 |
else:
|
| 453 |
return (
|