Update app.py
Browse files
app.py
CHANGED
|
@@ -211,9 +211,8 @@ class WeeboAssistant:
|
|
| 211 |
return output_path
|
| 212 |
|
| 213 |
def get_llm_response(self, chat_history):
|
| 214 |
-
prompt_lines = [
|
| 215 |
-
|
| 216 |
-
"" # Empty string or add intended string content here
|
| 217 |
]
|
| 218 |
|
| 219 |
|
|
@@ -224,8 +223,7 @@ class WeeboAssistant:
|
|
| 224 |
if assistant_msg:
|
| 225 |
prompt_lines.append("Assistant: " + assistant_msg)
|
| 226 |
prompt_lines.append("Assistant: ")
|
| 227 |
-
prompt = "\n".join(prompt_lines)
|
| 229 |
|
| 230 |
inputs = self.llm_tokenizer(prompt, return_tensors="pt")
|
| 231 |
try:
|
|
|
|
| 211 |
return output_path
|
| 212 |
|
| 213 |
def get_llm_response(self, chat_history):
|
| 214 |
+
prompt_lines = [self.SYSTEM_PROMPT.strip(),
|
| 215 |
+
"" # Empty string or add intended string content here
|
|
|
|
| 216 |
]
|
| 217 |
|
| 218 |
|
|
|
|
| 223 |
if assistant_msg:
|
| 224 |
prompt_lines.append("Assistant: " + assistant_msg)
|
| 225 |
prompt_lines.append("Assistant: ")
|
| 226 |
+
prompt = "".join(prompt_lines)
|
|
|
|
| 227 |
|
| 228 |
inputs = self.llm_tokenizer(prompt, return_tensors="pt")
|
| 229 |
try:
|