Update app.py
Browse files
app.py
CHANGED
|
@@ -104,7 +104,7 @@ hf_hub_download(
|
|
| 104 |
|
| 105 |
# Set the title and description
|
| 106 |
title = "Gemma3-1B llama.cpp on cpu rag"
|
| 107 |
-
description = """This is the prompt version of RAG.\n Faster and more stable than the [smolagent version](https://huggingface.co/spaces/Akjava/Gemma3-1B-llamacpp-cpu-rag-smolagents)."""
|
| 108 |
|
| 109 |
|
| 110 |
llm = None
|
|
@@ -277,7 +277,7 @@ Answer:
|
|
| 277 |
returns_streaming_generator=True,
|
| 278 |
print_output=False,
|
| 279 |
)
|
| 280 |
-
|
| 281 |
# Log the success
|
| 282 |
logging.info("Response stream generated successfully")
|
| 283 |
|
|
|
|
| 104 |
|
| 105 |
# Set the title and description
|
| 106 |
title = "Gemma3-1B llama.cpp on cpu rag"
|
| 107 |
+
description = """This is the prompt version of RAG.\n Faster and more stable than the [smolagent version](https://huggingface.co/spaces/Akjava/Gemma3-1B-llamacpp-cpu-rag-smolagents), but the prompt needs significant improvement."""
|
| 108 |
|
| 109 |
|
| 110 |
llm = None
|
|
|
|
| 277 |
returns_streaming_generator=True,
|
| 278 |
print_output=False,
|
| 279 |
)
|
| 280 |
+
yield f"result of query({query})"
|
| 281 |
# Log the success
|
| 282 |
logging.info("Response stream generated successfully")
|
| 283 |
|