Update app.py
Browse files
app.py
CHANGED
|
@@ -94,7 +94,7 @@ def format_prompt(prompt,retrieved_documents,k):
|
|
| 94 |
|
| 95 |
#return formatted_message
|
| 96 |
|
| 97 |
-
# Called by the talk function to add retrieved documents to the prompt. Keeps appending the text of retrieved documents to a string
|
| 98 |
|
| 99 |
def talk(prompt, history):
|
| 100 |
k = 2 # number of retrieved documents
|
|
@@ -118,22 +118,17 @@ def talk(prompt, history):
|
|
| 118 |
# print(f"{stream}")
|
| 119 |
print("check 7")
|
| 120 |
# print(stream['choices'][0]['message']['content'])
|
| 121 |
-
|
| 122 |
-
text = ""
|
| 123 |
-
for output in stream:
|
| 124 |
-
text += output['choices'][0]['message']['content']
|
| 125 |
-
print(f"{output}")
|
| 126 |
-
print("check3H")
|
| 127 |
-
print(text)
|
| 128 |
-
yield text
|
| 129 |
-
|
| 130 |
-
|
| 131 |
-
# yield "".join(text)
|
| 132 |
-
# print(text)
|
| 133 |
|
| 134 |
-
# preparing tokens for model input
|
| 135 |
-
# add_generation_prompt argument tells the template to add tokens that indicate the start of a bot response
|
| 136 |
-
|
| 137 |
# calling the model to generate response based on message/ input
|
| 138 |
# do_sample if set to True uses strategies to select the next token from the probability distribution over the entire vocabulary
|
| 139 |
# temperature controls randomness. more randomness with higher temperature
|
|
|
|
| 94 |
|
| 95 |
#return formatted_message
|
| 96 |
|
| 97 |
+
# Called by the talk function to add retrieved documents to the prompt. Keeps appending the text of each retrieved document to a string
|
| 98 |
|
| 99 |
def talk(prompt, history):
|
| 100 |
k = 2 # number of retrieved documents
|
|
|
|
| 118 |
# print(f"{stream}")
|
| 119 |
print("check 7")
|
| 120 |
# print(stream['choices'][0]['message']['content'])
|
| 121 |
+
return(stream['choices'][0]['message']['content'])
|
| 122 |
+
# text = ""
|
| 123 |
+
# for output in stream:
|
| 124 |
+
# text += output['choices'][0]['message']['content']
|
| 125 |
+
# print(f"{output}")
|
| 126 |
+
# print("check3H")
|
| 127 |
+
# print(text)
|
| 128 |
+
# yield text
|
| 129 |
+
|
| 130 |
+
|
|
|
|
|
|
|
| 131 |
|
|
|
|
|
|
|
|
|
|
| 132 |
# calling the model to generate response based on message/ input
|
| 133 |
# do_sample if set to True uses strategies to select the next token from the probability distribution over the entire vocabulary
|
| 134 |
# temperature controls randomness. more randomness with higher temperature
|